author    Avraham Stern <avraham.stern@intel.com>  2020-10-08 18:09:48 +0300
committer Kalle Valo <kvalo@codeaurora.org>        2020-10-08 20:09:39 +0300
commit    bebc14db4eb7120a2c12fb08ee2ea95cdecb8042 (patch)
tree      07751c280e891936c44ee273e27142424cbb0ceb /tools/perf/scripts/python/bin/flamegraph-report
parent    2c2c3647cde4755409aecc57c471d51c24f14c2a (diff)
iwlwifi: mvm: avoid possible NULL pointer dereference
When adding a PASN station, the station is added to the list only if a
TK is configured. Otherwise the station pointer should not be used.

Signed-off-by: Avraham Stern <avraham.stern@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Link: https://lore.kernel.org/r/iwlwifi.20201008180656.b8a493c168a7.Ie9a0f9dfd9e9c58c603dd06e45151119467a7804@changeid
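A minimal sketch of the guard this change describes, in kernel-style C
with hypothetical names (pasn_sta, pasn_sta_add, sta_list are
illustration only, not the driver's actual symbols; the real code lives
in the iwlwifi mvm driver): the station is allocated and linked into
the list only when a TK is supplied, so on the no-TK path the caller
gets NULL and must not dereference the pointer.

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the driver's internal station object. */
struct pasn_sta {
	struct list_head list;
	u8 addr[ETH_ALEN];
};

/*
 * Sketch only: add a PASN station to @sta_list. The entry is created
 * and linked solely when a TK (temporal key) is configured; otherwise
 * nothing is added and NULL is returned.
 */
static struct pasn_sta *pasn_sta_add(struct list_head *sta_list,
				     const u8 *addr,
				     const u8 *tk, size_t tk_len)
{
	struct pasn_sta *sta;

	if (!tk || !tk_len)
		return NULL;	/* no TK: station is never listed */

	sta = kzalloc(sizeof(*sta), GFP_KERNEL);
	if (!sta)
		return NULL;

	ether_addr_copy(sta->addr, addr);
	list_add_tail(&sta->list, sta_list);
	return sta;
}

Every later use then mirrors the same condition (if (sta) ...), which
is exactly the "otherwise the station pointer should not be used" rule
stated in the message above.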
Diffstat (limited to 'tools/perf/scripts/python/bin/flamegraph-report')
0 files changed, 0 insertions, 0 deletions
-rw-r--r--include/linux/of.h139
-rw-r--r--include/linux/of_address.h7
-rw-r--r--include/linux/of_fdt.h14
-rw-r--r--include/linux/of_graph.h49
-rw-r--r--include/linux/of_irq.h16
-rw-r--r--include/linux/of_platform.h2
-rw-r--r--include/linux/of_reserved_mem.h26
-rw-r--r--include/linux/oid_registry.h1
-rw-r--r--include/linux/omap-gpmc.h14
-rw-r--r--include/linux/once.h4
-rw-r--r--include/linux/once_lite.h4
-rw-r--r--include/linux/oom.h3
-rw-r--r--include/linux/overflow.h126
-rw-r--r--include/linux/packing.h463
-rw-r--r--include/linux/padata.h4
-rw-r--r--include/linux/page-flags-layout.h11
-rw-r--r--include/linux/page-flags.h524
-rw-r--r--include/linux/page-isolation.h53
-rw-r--r--include/linux/page_counter.h40
-rw-r--r--include/linux/page_ext.h93
-rw-r--r--include/linux/page_frag_cache.h61
-rw-r--r--include/linux/page_owner.h8
-rw-r--r--include/linux/page_ref.h57
-rw-r--r--include/linux/page_table_check.h30
-rw-r--r--include/linux/pageblock-flags.h62
-rw-r--r--include/linux/pagemap.h562
-rw-r--r--include/linux/pagevec.h4
-rw-r--r--include/linux/pagewalk.h88
-rw-r--r--include/linux/panic.h24
-rw-r--r--include/linux/parport.h6
-rw-r--r--include/linux/part_stat.h8
-rw-r--r--include/linux/path.h13
-rw-r--r--include/linux/pci-ats.h6
-rw-r--r--include/linux/pci-bwctrl.h28
-rw-r--r--include/linux/pci-doe.h4
-rw-r--r--include/linux/pci-ecam.h10
-rw-r--r--include/linux/pci-ep-msi.h28
-rw-r--r--include/linux/pci-epc.h75
-rw-r--r--include/linux/pci-epf.h61
-rw-r--r--include/linux/pci-ide.h119
-rw-r--r--include/linux/pci-p2pdma.h116
-rw-r--r--include/linux/pci-pwrctrl.h54
-rw-r--r--include/linux/pci-tph.h46
-rw-r--r--include/linux/pci-tsm.h243
-rw-r--r--include/linux/pci.h299
-rw-r--r--include/linux/pci_hotplug.h5
-rw-r--r--include/linux/pci_ids.h34
-rw-r--r--include/linux/pcie-dwc.h38
-rw-r--r--include/linux/pcs/pcs-xpcs.h60
-rw-r--r--include/linux/pds/pds_adminq.h280
-rw-r--r--include/linux/pds/pds_common.h2
-rw-r--r--include/linux/pe.h279
-rw-r--r--include/linux/peci-cpu.h24
-rw-r--r--include/linux/peci.h6
-rw-r--r--include/linux/percpu-defs.h56
-rw-r--r--include/linux/percpu-rwsem.h30
-rw-r--r--include/linux/percpu.h12
-rw-r--r--include/linux/perf/arm_pmu.h38
-rw-r--r--include/linux/perf/arm_pmuv3.h12
-rw-r--r--include/linux/perf/riscv_pmu.h1
-rw-r--r--include/linux/perf_event.h532
-rw-r--r--include/linux/pfn.h9
-rw-r--r--include/linux/pfn_t.h131
-rw-r--r--include/linux/pgalloc.h29
-rw-r--r--include/linux/pgalloc_tag.h222
-rw-r--r--include/linux/pgtable.h494
-rw-r--r--include/linux/phy.h899
-rw-r--r--include/linux/phy/phy-hdmi.h21
-rw-r--r--include/linux/phy/phy-sun4i-usb.h2
-rw-r--r--include/linux/phy/phy.h40
-rw-r--r--include/linux/phy_fixed.h46
-rw-r--r--include/linux/phy_link_topology.h82
-rw-r--r--include/linux/phylib_stubs.h42
-rw-r--r--include/linux/phylink.h199
-rw-r--r--include/linux/pid.h26
-rw-r--r--include/linux/pid_namespace.h31
-rw-r--r--include/linux/pidfs.h11
-rw-r--r--include/linux/pinctrl/consumer.h10
-rw-r--r--include/linux/pinctrl/machine.h19
-rw-r--r--include/linux/pinctrl/pinconf-generic.h38
-rw-r--r--include/linux/pinctrl/pinctrl.h22
-rw-r--r--include/linux/pinctrl/pinmux.h12
-rw-r--r--include/linux/pipe_fs_i.h85
-rw-r--r--include/linux/pktcdvd.h198
-rw-r--r--include/linux/platform_data/ad5449.h39
-rw-r--r--include/linux/platform_data/amd_qdma.h38
-rw-r--r--include/linux/platform_data/asoc-s3c.h2
-rw-r--r--include/linux/platform_data/bcmgenet.h19
-rw-r--r--include/linux/platform_data/clk-davinci-pll.h21
-rw-r--r--include/linux/platform_data/cros_ec_commands.h199
-rw-r--r--include/linux/platform_data/cros_ec_proto.h31
-rw-r--r--include/linux/platform_data/cyttsp4.h62
-rw-r--r--include/linux/platform_data/dma-ep93xx.h94
-rw-r--r--include/linux/platform_data/dmtimer-omap.h4
-rw-r--r--include/linux/platform_data/emc2305.h6
-rw-r--r--include/linux/platform_data/eth-ep93xx.h10
-rw-r--r--include/linux/platform_data/gpio-ath79.h16
-rw-r--r--include/linux/platform_data/gpio-davinci.h21
-rw-r--r--include/linux/platform_data/huawei-gaokun-ec.h79
-rw-r--r--include/linux/platform_data/hwmon-s3c.h10
-rw-r--r--include/linux/platform_data/i2c-davinci.h26
-rw-r--r--include/linux/platform_data/i2c-mux-gpio.h2
-rw-r--r--include/linux/platform_data/keyboard-spear.h164
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h32
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/keypad-pxa27x.h73
-rw-r--r--include/linux/platform_data/keyscan-davinci.h29
-rw-r--r--include/linux/platform_data/lenovo-yoga-c630.h44
-rw-r--r--include/linux/platform_data/lp855x.h4
-rw-r--r--include/linux/platform_data/max6639.h15
-rw-r--r--include/linux/platform_data/max6697.h33
-rw-r--r--include/linux/platform_data/mcs.h30
-rw-r--r--include/linux/platform_data/media/omap4iss.h66
-rw-r--r--include/linux/platform_data/microchip-ksz.h6
-rw-r--r--include/linux/platform_data/mlxreg.h4
-rw-r--r--include/linux/platform_data/mmc-pxamci.h4
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h36
-rw-r--r--include/linux/platform_data/mtd-davinci.h88
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h70
-rw-r--r--include/linux/platform_data/sa11x0-serial.h1
-rw-r--r--include/linux/platform_data/spi-davinci.h73
-rw-r--r--include/linux/platform_data/spi-ep93xx.h15
-rw-r--r--include/linux/platform_data/syscon.h9
-rw-r--r--include/linux/platform_data/ti-aemif.h45
-rw-r--r--include/linux/platform_data/tmio.h65
-rw-r--r--include/linux/platform_data/touchscreen-s3c2410.h22
-rw-r--r--include/linux/platform_data/usb-davinci.h22
-rw-r--r--include/linux/platform_data/video-pxafb.h1
-rw-r--r--include/linux/platform_data/x86/amd-fch.h13
-rw-r--r--include/linux/platform_data/x86/asus-wmi-leds-ids.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h42
-rw-r--r--include/linux/platform_data/x86/int3472.h166
-rw-r--r--include/linux/platform_data/x86/intel-mid_wdt.h (renamed from include/linux/platform_data/intel-mid_wdt.h)6
-rw-r--r--include/linux/platform_data/x86/intel_pmc_ipc.h98
-rw-r--r--include/linux/platform_data/x86/intel_scu_ipc.h72
-rw-r--r--include/linux/platform_data/x86/pwm-lpss.h33
-rw-r--r--include/linux/platform_data/x86/soc.h12
-rw-r--r--include/linux/platform_data/zforce_ts.h15
-rw-r--r--include/linux/platform_device.h24
-rw-r--r--include/linux/platform_profile.h37
-rw-r--r--include/linux/pldmfw.h8
-rw-r--r--include/linux/pm.h37
-rw-r--r--include/linux/pm_clock.h5
-rw-r--r--include/linux/pm_domain.h109
-rw-r--r--include/linux/pm_opp.h115
-rw-r--r--include/linux/pm_qos.h9
-rw-r--r--include/linux/pm_runtime.h319
-rw-r--r--include/linux/pm_wakeirq.h6
-rw-r--r--include/linux/pm_wakeup.h57
-rw-r--r--include/linux/pmbus.h14
-rw-r--r--include/linux/pnp.h10
-rw-r--r--include/linux/poison.h20
-rw-r--r--include/linux/poll.h26
-rw-r--r--include/linux/posix-clock.h6
-rw-r--r--include/linux/posix-timers.h103
-rw-r--r--include/linux/posix_acl.h15
-rw-r--r--include/linux/power/bq27xxx_battery.h2
-rw-r--r--include/linux/power/max77705_charger.h193
-rw-r--r--include/linux/power_supply.h153
-rw-r--r--include/linux/ppp_channel.h3
-rw-r--r--include/linux/pps_gen_kernel.h78
-rw-r--r--include/linux/pps_kernel.h4
-rw-r--r--include/linux/prandom.h7
-rw-r--r--include/linux/preempt.h72
-rw-r--r--include/linux/printk.h68
-rw-r--r--include/linux/prmt.h9
-rw-r--r--include/linux/proc_fs.h12
-rw-r--r--include/linux/proc_ns.h38
-rw-r--r--include/linux/profile.h2
-rw-r--r--include/linux/property.h64
-rw-r--r--include/linux/pruss_driver.h12
-rw-r--r--include/linux/pse-pd/pse.h263
-rw-r--r--include/linux/pseudo_fs.h2
-rw-r--r--include/linux/psi_types.h6
-rw-r--r--include/linux/psp-platform-access.h7
-rw-r--r--include/linux/psp-sev.h119
-rw-r--r--include/linux/pstore.h2
-rw-r--r--include/linux/ptdump.h15
-rw-r--r--include/linux/ptp_classify.h2
-rw-r--r--include/linux/ptp_clock_kernel.h60
-rw-r--r--include/linux/ptr_ring.h63
-rw-r--r--include/linux/pwm.h178
-rw-r--r--include/linux/pwrseq/consumer.h56
-rw-r--r--include/linux/pwrseq/provider.h78
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h2
-rw-r--r--include/linux/raid/pq.h17
-rw-r--r--include/linux/random.h22
-rw-r--r--include/linux/randomize_kstack.h18
-rw-r--r--include/linux/range.h17
-rw-r--r--include/linux/ras.h16
-rw-r--r--include/linux/raspberrypi/vchiq.h112
-rw-r--r--include/linux/raspberrypi/vchiq_arm.h164
-rw-r--r--include/linux/raspberrypi/vchiq_bus.h60
-rw-r--r--include/linux/raspberrypi/vchiq_cfg.h41
-rw-r--r--include/linux/raspberrypi/vchiq_core.h646
-rw-r--r--include/linux/raspberrypi/vchiq_debugfs.h22
-rw-r--r--include/linux/ratelimit.h37
-rw-r--r--include/linux/ratelimit_types.h7
-rw-r--r--include/linux/rbtree.h136
-rw-r--r--include/linux/rbtree_latch.h20
-rw-r--r--include/linux/rcu_segcblist.h94
-rw-r--r--include/linux/rculist.h63
-rw-r--r--include/linux/rculist_nulls.h65
-rw-r--r--include/linux/rcupdate.h184
-rw-r--r--include/linux/rcupdate_trace.h5
-rw-r--r--include/linux/rcupdate_wait.h14
-rw-r--r--include/linux/rcuref.h31
-rw-r--r--include/linux/rcutiny.h34
-rw-r--r--include/linux/rcutree.h7
-rw-r--r--include/linux/rcuwait.h13
-rw-r--r--include/linux/reboot.h36
-rw-r--r--include/linux/ref_tracker.h50
-rw-r--r--include/linux/refcount.h129
-rw-r--r--include/linux/regmap.h163
-rw-r--r--include/linux/regset.h12
-rw-r--r--include/linux/regulator/consumer.h80
-rw-r--r--include/linux/regulator/coupler.h3
-rw-r--r--include/linux/regulator/driver.h12
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/regulator/max8952.h2
-rw-r--r--include/linux/regulator/mt6363-regulator.h330
-rw-r--r--include/linux/regulator/pca9450.h38
-rw-r--r--include/linux/regulator/s2dos05.h73
-rw-r--r--include/linux/relay.h27
-rw-r--r--include/linux/resctrl.h495
-rw-r--r--include/linux/resctrl_types.h60
-rw-r--r--include/linux/reset-controller.h33
-rw-r--r--include/linux/reset.h281
-rw-r--r--include/linux/restart_block.h4
-rw-r--r--include/linux/resume_user_mode.h2
-rw-r--r--include/linux/rfkill.h7
-rw-r--r--include/linux/rhashtable.h130
-rw-r--r--include/linux/ring_buffer.h29
-rw-r--r--include/linux/rio.h4
-rw-r--r--include/linux/rio_drv.h5
-rw-r--r--include/linux/rmap.h431
-rw-r--r--include/linux/rolling_buffer.h61
-rw-r--r--include/linux/rpmb.h167
-rw-r--r--include/linux/rpmsg.h22
-rw-r--r--include/linux/rseq.h211
-rw-r--r--include/linux/rseq_entry.h616
-rw-r--r--include/linux/rseq_types.h164
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/rtc/ds1685.h2
-rw-r--r--include/linux/rtc/m48t59.h3
-rw-r--r--include/linux/rtmutex.h12
-rw-r--r--include/linux/rtnetlink.h81
-rw-r--r--include/linux/rtsx_common.h1
-rw-r--r--include/linux/rtsx_pci.h4
-rw-r--r--include/linux/rtsx_usb.h15
-rw-r--r--include/linux/rv.h89
-rw-r--r--include/linux/rw_hint.h1
-rw-r--r--include/linux/rwlock.h2
-rw-r--r--include/linux/rwlock_api_smp.h2
-rw-r--r--include/linux/rwlock_rt.h10
-rw-r--r--include/linux/rwsem.h15
-rw-r--r--include/linux/sbitmap.h30
-rw-r--r--include/linux/scatterlist.h35
-rw-r--r--include/linux/sched.h562
-rw-r--r--include/linux/sched/coredump.h94
-rw-r--r--include/linux/sched/deadline.h20
-rw-r--r--include/linux/sched/debug.h2
-rw-r--r--include/linux/sched/ext.h257
-rw-r--r--include/linux/sched/hotplug.h4
-rw-r--r--include/linux/sched/idle.h27
-rw-r--r--include/linux/sched/isolation.h21
-rw-r--r--include/linux/sched/mm.h67
-rw-r--r--include/linux/sched/nohz.h4
-rw-r--r--include/linux/sched/prio.h1
-rw-r--r--include/linux/sched/rt.h33
-rw-r--r--include/linux/sched/sd_flags.h8
-rw-r--r--include/linux/sched/signal.h18
-rw-r--r--include/linux/sched/smt.h2
-rw-r--r--include/linux/sched/task.h47
-rw-r--r--include/linux/sched/task_stack.h26
-rw-r--r--include/linux/sched/topology.h92
-rw-r--r--include/linux/sched/wake_q.h34
-rw-r--r--include/linux/scmi_imx_protocol.h102
-rw-r--r--include/linux/scmi_protocol.h4
-rw-r--r--include/linux/screen_info.h21
-rw-r--r--include/linux/sctp.h2
-rw-r--r--include/linux/seccomp.h7
-rw-r--r--include/linux/security.h257
-rw-r--r--include/linux/sed-opal.h1
-rw-r--r--include/linux/sem.h4
-rw-r--r--include/linux/semaphore.h15
-rw-r--r--include/linux/seq_buf.h21
-rw-r--r--include/linux/seq_file.h2
-rw-r--r--include/linux/seqlock.h262
-rw-r--r--include/linux/serdev.h6
-rw-r--r--include/linux/serial_8250.h10
-rw-r--r--include/linux/serial_core.h191
-rw-r--r--include/linux/serial_s3c.h24
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/serio.h5
-rw-r--r--include/linux/set_memory.h14
-rw-r--r--include/linux/sfp.h62
-rw-r--r--include/linux/shdma-base.h2
-rw-r--r--include/linux/shmem_fs.h72
-rw-r--r--include/linux/sizes.h9
-rw-r--r--include/linux/skb_array.h17
-rw-r--r--include/linux/skbuff.h571
-rw-r--r--include/linux/skbuff_ref.h13
-rw-r--r--include/linux/skmsg.h20
-rw-r--r--include/linux/slab.h515
-rw-r--r--include/linux/slimbus.h2
-rw-r--r--include/linux/sm501.h3
-rw-r--r--include/linux/smp.h17
-rw-r--r--include/linux/soc/airoha/airoha_offload.h317
-rw-r--r--include/linux/soc/amd/isp4_misc.h12
-rw-r--r--include/linux/soc/apple/rtkit.h6
-rw-r--r--include/linux/soc/cirrus/ep93xx.h47
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/soc/mediatek/dvfsrc.h36
-rw-r--r--include/linux/soc/mediatek/infracfg.h5
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h55
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h3
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h5
-rw-r--r--include/linux/soc/qcom/apr.h2
-rw-r--r--include/linux/soc/qcom/geni-se.h15
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h33
-rw-r--r--include/linux/soc/qcom/mdt_loader.h7
-rw-r--r--include/linux/soc/qcom/pmic_glink.h11
-rw-r--r--include/linux/soc/qcom/qmi.h6
-rw-r--r--include/linux/soc/qcom/smem.h3
-rw-r--r--include/linux/soc/qcom/socinfo.h38
-rw-r--r--include/linux/soc/qcom/ubwc.h76
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h358
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h30
-rw-r--r--include/linux/socket.h39
-rw-r--r--include/linux/sockptr.h6
-rw-r--r--include/linux/sony-laptop.h39
-rw-r--r--include/linux/sort.h21
-rw-r--r--include/linux/soundwire/sdw.h380
-rw-r--r--include/linux/soundwire/sdw_amd.h16
-rw-r--r--include/linux/soundwire/sdw_intel.h34
-rw-r--r--include/linux/soundwire/sdw_registers.h32
-rw-r--r--include/linux/soundwire/sdw_type.h2
-rw-r--r--include/linux/spi/offload/consumer.h39
-rw-r--r--include/linux/spi/offload/provider.h47
-rw-r--r--include/linux/spi/offload/types.h109
-rw-r--r--include/linux/spi/sh_msiof.h125
-rw-r--r--include/linux/spi/spi-mem.h64
-rw-r--r--include/linux/spi/spi.h235
-rw-r--r--include/linux/spi/spi_bitbang.h8
-rw-r--r--include/linux/spinlock.h27
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/spinlock_rt.h28
-rw-r--r--include/linux/spinlock_types_up.h2
-rw-r--r--include/linux/spinlock_up.h2
-rw-r--r--include/linux/sprintf.h6
-rw-r--r--include/linux/srcu.h321
-rw-r--r--include/linux/srcutiny.h56
-rw-r--r--include/linux/srcutree.h206
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/stackdepot.h6
-rw-r--r--include/linux/stat.h9
-rw-r--r--include/linux/static_call.h6
-rw-r--r--include/linux/static_call_types.h4
-rw-r--r--include/linux/stddef.h36
-rw-r--r--include/linux/stmmac.h122
-rw-r--r--include/linux/stop_machine.h66
-rw-r--r--include/linux/string.h108
-rw-r--r--include/linux/string_choices.h45
-rw-r--r--include/linux/string_helpers.h1
-rw-r--r--include/linux/sungem_phy.h2
-rw-r--r--include/linux/sunrpc/cache.h2
-rw-r--r--include/linux/sunrpc/clnt.h6
-rw-r--r--include/linux/sunrpc/debug.h30
-rw-r--r--include/linux/sunrpc/gss_asn1.h81
-rw-r--r--include/linux/sunrpc/gss_krb5.h1
-rw-r--r--include/linux/sunrpc/msg_prot.h18
-rw-r--r--include/linux/sunrpc/rdma_rn.h27
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h6
-rw-r--r--include/linux/sunrpc/sched.h18
-rw-r--r--include/linux/sunrpc/svc.h109
-rw-r--r--include/linux/sunrpc/svc_rdma.h10
-rw-r--r--include/linux/sunrpc/svc_xprt.h29
-rw-r--r--include/linux/sunrpc/svcauth.h7
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xdr.h49
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h35
-rw-r--r--include/linux/sunrpc/xdrgen/nfs4_1.h153
-rw-r--r--include/linux/sunrpc/xprt.h17
-rw-r--r--include/linux/sunrpc/xprtmultipath.h2
-rw-r--r--include/linux/suspend.h22
-rw-r--r--include/linux/swap.h195
-rw-r--r--include/linux/swap_cgroup.h14
-rw-r--r--include/linux/swap_slots.h31
-rw-r--r--include/linux/swapops.h274
-rw-r--r--include/linux/swiotlb.h105
-rw-r--r--include/linux/switchtec.h2
-rw-r--r--include/linux/sys_info.h28
-rw-r--r--include/linux/syscalls.h64
-rw-r--r--include/linux/syscore_ops.h15
-rw-r--r--include/linux/sysctl.h202
-rw-r--r--include/linux/sysfb.h17
-rw-r--r--include/linux/sysfs.h138
-rw-r--r--include/linux/sysv_fs.h214
-rw-r--r--include/linux/t10-pi.h22
-rw-r--r--include/linux/task_work.h6
-rw-r--r--include/linux/tc.h2
-rw-r--r--include/linux/tca6416_keypad.h30
-rw-r--r--include/linux/tcp.h63
-rw-r--r--include/linux/tee_core.h125
-rw-r--r--include/linux/tee_drv.h24
-rw-r--r--include/linux/text-patching.h15
-rw-r--r--include/linux/tfrc.h51
-rw-r--r--include/linux/thermal.h60
-rw-r--r--include/linux/thread_info.h74
-rw-r--r--include/linux/thunderbolt.h43
-rw-r--r--include/linux/tick.h18
-rw-r--r--include/linux/time64.h5
-rw-r--r--include/linux/time_namespace.h20
-rw-r--r--include/linux/timecounter.h6
-rw-r--r--include/linux/timekeeper_internal.h164
-rw-r--r--include/linux/timekeeping.h36
-rw-r--r--include/linux/timer.h89
-rw-r--r--include/linux/timex.h8
-rw-r--r--include/linux/tnum.h8
-rw-r--r--include/linux/topology.h69
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/tpm.h149
-rw-r--r--include/linux/tpm_eventlog.h2
-rw-r--r--include/linux/tpm_svsm.h149
-rw-r--r--include/linux/trace.h4
-rw-r--r--include/linux/trace_events.h65
-rw-r--r--include/linux/trace_recursion.h39
-rw-r--r--include/linux/trace_seq.h23
-rw-r--r--include/linux/tracepoint-defs.h14
-rw-r--r--include/linux/tracepoint.h257
-rw-r--r--include/linux/tsm-mr.h89
-rw-r--r--include/linux/tsm.h92
-rw-r--r--include/linux/tty.h53
-rw-r--r--include/linux/tty_driver.h182
-rw-r--r--include/linux/tty_ldisc.h1
-rw-r--r--include/linux/tty_port.h38
-rw-r--r--include/linux/turris-omnia-mcu-interface.h397
-rw-r--r--include/linux/turris-signing-key.h35
-rw-r--r--include/linux/types.h23
-rw-r--r--include/linux/uaccess.h487
-rw-r--r--include/linux/ubsan.h9
-rw-r--r--include/linux/ucopysize.h63
-rw-r--r--include/linux/udp.h39
-rw-r--r--include/linux/uidgid.h6
-rw-r--r--include/linux/uio.h43
-rw-r--r--include/linux/unaligned.h (renamed from include/asm-generic/unaligned.h)17
-rw-r--r--include/linux/unicode.h4
-rw-r--r--include/linux/union_find.h41
-rw-r--r--include/linux/unroll.h78
-rw-r--r--include/linux/unwind_deferred.h79
-rw-r--r--include/linux/unwind_deferred_types.h55
-rw-r--r--include/linux/unwind_user.h14
-rw-r--r--include/linux/unwind_user_types.h46
-rw-r--r--include/linux/uprobes.h171
-rw-r--r--include/linux/usb.h76
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/chipidea.h3
-rw-r--r--include/linux/usb/composite.h7
-rw-r--r--include/linux/usb/func_utils.h86
-rw-r--r--include/linux/usb/gadget.h44
-rw-r--r--include/linux/usb/gadget_configfs.h7
-rw-r--r--include/linux/usb/mctp-usb.h30
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/pd.h91
-rw-r--r--include/linux/usb/phy.h5
-rw-r--r--include/linux/usb/r8152.h1
-rw-r--r--include/linux/usb/serial.h7
-rw-r--r--include/linux/usb/storage.h10
-rw-r--r--include/linux/usb/tcpci.h39
-rw-r--r--include/linux/usb/tcpm.h3
-rw-r--r--include/linux/usb/tegra_usb_phy.h9
-rw-r--r--include/linux/usb/typec.h28
-rw-r--r--include/linux/usb/typec_altmode.h13
-rw-r--r--include/linux/usb/typec_dp.h1
-rw-r--r--include/linux/usb/typec_mux.h46
-rw-r--r--include/linux/usb/typec_tbt.h2
-rw-r--r--include/linux/usb/ulpi.h9
-rw-r--r--include/linux/usb/usbio.h177
-rw-r--r--include/linux/usb/usbnet.h20
-rw-r--r--include/linux/usb/uvc.h31
-rw-r--r--include/linux/usb/xhci-sideband.h111
-rw-r--r--include/linux/user_events.h4
-rw-r--r--include/linux/user_namespace.h33
-rw-r--r--include/linux/userfaultfd_k.h140
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/util_macros.h138
-rw-r--r--include/linux/uts_namespace.h65
-rw-r--r--include/linux/utsname.h53
-rw-r--r--include/linux/vdpa.h34
-rw-r--r--include/linux/vdso_datastore.h10
-rw-r--r--include/linux/verification.h3
-rw-r--r--include/linux/vermagic.h7
-rw-r--r--include/linux/vfio.h25
-rw-r--r--include/linux/vfio_pci_core.h98
-rw-r--r--include/linux/vfsdebug.h45
-rw-r--r--include/linux/videodev2.h2
-rw-r--r--include/linux/virtio.h107
-rw-r--r--include/linux/virtio_config.h194
-rw-r--r--include/linux/virtio_features.h89
-rw-r--r--include/linux/virtio_net.h213
-rw-r--r--include/linux/virtio_pci_admin.h11
-rw-r--r--include/linux/virtio_pci_modern.h43
-rw-r--r--include/linux/virtio_ring.h7
-rw-r--r--include/linux/virtio_vsock.h53
-rw-r--r--include/linux/vm_event_item.h34
-rw-r--r--include/linux/vmalloc.h113
-rw-r--r--include/linux/vmcore_info.h11
-rw-r--r--include/linux/vmstat.h84
-rw-r--r--include/linux/vmw_vmci_api.h7
-rw-r--r--include/linux/vmw_vmci_defs.h14
-rw-r--r--include/linux/vringh.h12
-rw-r--r--include/linux/vt_buffer.h24
-rw-r--r--include/linux/w1.h7
-rw-r--r--include/linux/wait.h28
-rw-r--r--include/linux/wait_bit.h444
-rw-r--r--include/linux/watchdog.h12
-rw-r--r--include/linux/wireless.h5
-rw-r--r--include/linux/wmi.h31
-rw-r--r--include/linux/wordpart.h8
-rw-r--r--include/linux/workqueue.h115
-rw-r--r--include/linux/writeback.h94
-rw-r--r--include/linux/ww_mutex.h14
-rw-r--r--include/linux/wwan.h6
-rw-r--r--include/linux/xarray.h37
-rw-r--r--include/linux/xattr.h8
-rw-r--r--include/linux/xxhash.h72
-rw-r--r--include/linux/xz.h81
-rw-r--r--include/linux/zorro.h2
-rw-r--r--include/linux/zpool.h103
-rw-r--r--include/linux/zsmalloc.h34
-rw-r--r--include/linux/zstd.h250
-rw-r--r--include/linux/zstd_errors.h30
-rw-r--r--include/linux/zstd_lib.h1123
-rw-r--r--include/linux/zswap.h34
-rw-r--r--include/media/cadence/cdns-csi2rx.h19
-rw-r--r--include/media/cec.h34
-rw-r--r--include/media/drv-intf/cx25840.h2
-rw-r--r--include/media/drv-intf/msp3400.h2
-rw-r--r--include/media/drv-intf/saa7146_vv.h3
-rw-r--r--include/media/i2c/bt819.h2
-rw-r--r--include/media/i2c/cs5345.h2
-rw-r--r--include/media/i2c/cs53l32a.h2
-rw-r--r--include/media/i2c/m52790.h2
-rw-r--r--include/media/i2c/mt9p031.h18
-rw-r--r--include/media/i2c/mt9v011.h2
-rw-r--r--include/media/i2c/mt9v022.h13
-rw-r--r--include/media/i2c/mt9v032.h12
-rw-r--r--include/media/i2c/saa7115.h2
-rw-r--r--include/media/i2c/saa7127.h2
-rw-r--r--include/media/i2c/ths7303.h2
-rw-r--r--include/media/i2c/tvaudio.h2
-rw-r--r--include/media/i2c/upd64031a.h2
-rw-r--r--include/media/i2c/upd64083.h2
-rw-r--r--include/media/i2c/wm8775.h2
-rw-r--r--include/media/ipu-bridge.h1
-rw-r--r--include/media/media-entity.h20
-rw-r--r--include/media/media-request.h2
-rw-r--r--include/media/rc-core.h48
-rw-r--r--include/media/rc-map.h2
-rw-r--r--include/media/rcar-fcp.h5
-rw-r--r--include/media/tuner-types.h6
-rw-r--r--include/media/tuner.h1
-rw-r--r--include/media/v4l2-common.h170
-rw-r--r--include/media/v4l2-ctrls.h60
-rw-r--r--include/media/v4l2-dev.h31
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/media/v4l2-dv-timings.h68
-rw-r--r--include/media/v4l2-fh.h30
-rw-r--r--include/media/v4l2-ioctl.h251
-rw-r--r--include/media/v4l2-isp.h91
-rw-r--r--include/media/v4l2-jpeg.h39
-rw-r--r--include/media/v4l2-mc.h3
-rw-r--r--include/media/v4l2-mediabus.h23
-rw-r--r--include/media/v4l2-mem2mem.h60
-rw-r--r--include/media/v4l2-subdev.h173
-rw-r--r--include/media/videobuf2-core.h3
-rw-r--r--include/media/vsp1.h93
-rw-r--r--include/memory/renesas-rpc-if.h4
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h265
-rw-r--r--include/misc/cxllib.h129
-rw-r--r--include/net/9p/client.h98
-rw-r--r--include/net/9p/transport.h15
-rw-r--r--include/net/act_api.h40
-rw-r--r--include/net/addrconf.h50
-rw-r--r--include/net/af_rxrpc.h56
-rw-r--r--include/net/af_unix.h97
-rw-r--r--include/net/af_vsock.h10
-rw-r--r--include/net/aligned_data.h22
-rw-r--r--include/net/ax25.h11
-rw-r--r--include/net/bluetooth/bluetooth.h38
-rw-r--r--include/net/bluetooth/hci.h193
-rw-r--r--include/net/bluetooth/hci_core.h394
-rw-r--r--include/net/bluetooth/hci_drv.h76
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bluetooth/hci_sock.h2
-rw-r--r--include/net/bluetooth/hci_sync.h45
-rw-r--r--include/net/bluetooth/l2cap.h12
-rw-r--r--include/net/bluetooth/mgmt.h26
-rw-r--r--include/net/bluetooth/rfcomm.h2
-rw-r--r--include/net/bond_3ad.h8
-rw-r--r--include/net/bond_alb.h2
-rw-r--r--include/net/bond_options.h4
-rw-r--r--include/net/bonding.h8
-rw-r--r--include/net/busy_poll.h36
-rw-r--r--include/net/caif/caif_layer.h6
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/net/caif/cfsrvl.h1
-rw-r--r--include/net/calipso.h2
-rw-r--r--include/net/cfg80211.h1161
-rw-r--r--include/net/checksum.h20
-rw-r--r--include/net/cipso_ipv4.h2
-rw-r--r--include/net/cls_cgroup.h2
-rw-r--r--include/net/devlink.h149
-rw-r--r--include/net/dropreason-core.h218
-rw-r--r--include/net/dropreason.h6
-rw-r--r--include/net/dsa.h75
-rw-r--r--include/net/dst.h71
-rw-r--r--include/net/dst_cache.h2
-rw-r--r--include/net/dst_metadata.h11
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/eee.h5
-rw-r--r--include/net/erspan.h4
-rw-r--r--include/net/fib_notifier.h2
-rw-r--r--include/net/fib_rules.h31
-rw-r--r--include/net/flow.h13
-rw-r--r--include/net/flow_dissector.h23
-rw-r--r--include/net/flow_offload.h36
-rw-r--r--include/net/genetlink.h19
-rw-r--r--include/net/gro.h106
-rw-r--r--include/net/hotdata.h8
-rw-r--r--include/net/hwbm.h4
-rw-r--r--include/net/icmp.h10
-rw-r--r--include/net/ieee80211_radiotap.h66
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet6_hashtables.h34
-rw-r--r--include/net/inet_common.h13
-rw-r--r--include/net/inet_connection_sock.h80
-rw-r--r--include/net/inet_dscp.h6
-rw-r--r--include/net/inet_frag.h10
-rw-r--r--include/net/inet_hashtables.h74
-rw-r--r--include/net/inet_sock.h32
-rw-r--r--include/net/inet_timewait_sock.h30
-rw-r--r--include/net/inetpeer.h12
-rw-r--r--include/net/ip.h87
-rw-r--r--include/net/ip6_fib.h9
-rw-r--r--include/net/ip6_route.h32
-rw-r--r--include/net/ip6_tunnel.h5
-rw-r--r--include/net/ip_fib.h60
-rw-r--r--include/net/ip_tunnels.h68
-rw-r--r--include/net/ip_vs.h13
-rw-r--r--include/net/ipcomp.h13
-rw-r--r--include/net/ipv6.h50
-rw-r--r--include/net/ipv6_frag.h5
-rw-r--r--include/net/ipv6_stubs.h7
-rw-r--r--include/net/iucv/iucv.h32
-rw-r--r--include/net/iw_handler.h53
-rw-r--r--include/net/kcm.h2
-rw-r--r--include/net/l3mdev.h31
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/lib80211.h122
-rw-r--r--include/net/libeth/cache.h66
-rw-r--r--include/net/libeth/rx.h94
-rw-r--r--include/net/libeth/tx.h159
-rw-r--r--include/net/libeth/types.h127
-rw-r--r--include/net/libeth/xdp.h1870
-rw-r--r--include/net/libeth/xsk.h685
-rw-r--r--include/net/llc_c_st.h4
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/llc_s_st.h4
-rw-r--r--include/net/lwtunnel.h9
-rw-r--r--include/net/mac80211.h414
-rw-r--r--include/net/mac802154.h6
-rw-r--r--include/net/macsec.h4
-rw-r--r--include/net/mana/gdma.h131
-rw-r--r--include/net/mana/hw_channel.h11
-rw-r--r--include/net/mana/mana.h258
-rw-r--r--include/net/mctp.h79
-rw-r--r--include/net/mctpdevice.h4
-rw-r--r--include/net/mptcp.h30
-rw-r--r--include/net/ndisc.h26
-rw-r--r--include/net/neighbour.h72
-rw-r--r--include/net/neighbour_tables.h12
-rw-r--r--include/net/net_debug.h6
-rw-r--r--include/net/net_namespace.h53
-rw-r--r--include/net/net_shaper.h120
-rw-r--r--include/net/netdev_lock.h138
-rw-r--r--include/net/netdev_netlink.h12
-rw-r--r--include/net/netdev_queues.h63
-rw-r--r--include/net/netdev_rx_queue.h14
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h8
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h10
-rw-r--r--include/net/netfilter/nf_conntrack.h35
-rw-r--r--include/net/netfilter/nf_conntrack_count.h25
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h12
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h15
-rw-r--r--include/net/netfilter/nf_flow_table.h44
-rw-r--r--include/net/netfilter/nf_hooks_lwtunnel.h2
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_reject.h1
-rw-r--r--include/net/netfilter/nf_tables.h333
-rw-r--r--include/net/netfilter/nf_tables_core.h55
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h10
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h5
-rw-r--r--include/net/netfilter/nf_tproxy.h5
-rw-r--r--include/net/netfilter/nft_fib.h34
-rw-r--r--include/net/netfilter/nft_meta.h3
-rw-r--r--include/net/netfilter/nft_reject.h3
-rw-r--r--include/net/netlabel.h5
-rw-r--r--include/net/netlink.h374
-rw-r--r--include/net/netmem.h395
-rw-r--r--include/net/netns/conntrack.h13
-rw-r--r--include/net/netns/core.h3
-rw-r--r--include/net/netns/ipv4.h47
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/mctp.h20
-rw-r--r--include/net/netns/mpls.h1
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/sctp.h8
-rw-r--r--include/net/netns/smc.h5
-rw-r--r--include/net/netns/xfrm.h3
-rw-r--r--include/net/nexthop.h6
-rw-r--r--include/net/nfc/nci.h4
-rw-r--r--include/net/nfc/nci_core.h6
-rw-r--r--include/net/nfc/nfc.h12
-rw-r--r--include/net/nl802154.h5
-rw-r--r--include/net/p8022.h16
-rw-r--r--include/net/page_pool/helpers.h202
-rw-r--r--include/net/page_pool/memory_provider.h51
-rw-r--r--include/net/page_pool/types.h87
-rw-r--r--include/net/pfcp.h2
-rw-r--r--include/net/phonet/pn_dev.h8
-rw-r--r--include/net/ping.h3
-rw-r--r--include/net/pkt_cls.h22
-rw-r--r--include/net/pkt_sched.h36
-rw-r--r--include/net/proto_memory.h7
-rw-r--r--include/net/psample.h13
-rw-r--r--include/net/psp.h12
-rw-r--r--include/net/psp/functions.h209
-rw-r--r--include/net/psp/types.h216
-rw-r--r--include/net/raw.h1
-rw-r--r--include/net/red.h8
-rw-r--r--include/net/regulatory.h4
-rw-r--r--include/net/request_sock.h56
-rw-r--r--include/net/rose.h18
-rw-r--r--include/net/route.h102
-rw-r--r--include/net/rps.h109
-rw-r--r--include/net/rstreason.h41
-rw-r--r--include/net/rtnetlink.h92
-rw-r--r--include/net/sch_generic.h174
-rw-r--r--include/net/scm.h125
-rw-r--r--include/net/sctp/auth.h18
-rw-r--r--include/net/sctp/checksum.h32
-rw-r--r--include/net/sctp/constants.h9
-rw-r--r--include/net/sctp/sctp.h11
-rw-r--r--include/net/sctp/sm.h1
-rw-r--r--include/net/sctp/stream_sched.h12
-rw-r--r--include/net/sctp/structs.h71
-rw-r--r--include/net/secure_seq.h4
-rw-r--r--include/net/seg6.h7
-rw-r--r--include/net/seg6_hmac.h23
-rw-r--r--include/net/seg6_local.h1
-rw-r--r--include/net/selftests.h45
-rw-r--r--include/net/smc.h104
-rw-r--r--include/net/snmp.h10
-rw-r--r--include/net/sock.h515
-rw-r--r--include/net/sock_reuseport.h2
-rw-r--r--include/net/strparser.h4
-rw-r--r--include/net/tc_act/tc_connmark.h1
-rw-r--r--include/net/tc_act/tc_csum.h10
-rw-r--r--include/net/tc_act/tc_ct.h11
-rw-r--r--include/net/tc_act/tc_ctinfo.h7
-rw-r--r--include/net/tc_act/tc_gate.h9
-rw-r--r--include/net/tc_act/tc_mpls.h10
-rw-r--r--include/net/tc_act/tc_nat.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_police.h12
-rw-r--r--include/net/tc_act/tc_sample.h9
-rw-r--r--include/net/tc_act/tc_skbedit.h1
-rw-r--r--include/net/tc_act/tc_skbmod.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tc_act/tc_vlan.h10
-rw-r--r--include/net/tcp.h445
-rw-r--r--include/net/tcp_ao.h53
-rw-r--r--include/net/tcp_ecn.h642
-rw-r--r--include/net/tcx.h14
-rw-r--r--include/net/timewait_sock.h7
-rw-r--r--include/net/tls.h43
-rw-r--r--include/net/udp.h199
-rw-r--r--include/net/udp_tunnel.h118
-rw-r--r--include/net/vsock_addr.h2
-rw-r--r--include/net/vxlan.h11
-rw-r--r--include/net/x25.h3
-rw-r--r--include/net/xdp.h260
-rw-r--r--include/net/xdp_sock.h35
-rw-r--r--include/net/xdp_sock_drv.h126
-rw-r--r--include/net/xfrm.h251
-rw-r--r--include/net/xsk_buff_pool.h50
-rw-r--r--include/pcmcia/ss.h8
-rw-r--r--include/ras/ras_event.h149
-rw-r--r--include/rdma/ib_cache.h16
-rw-r--r--include/rdma/ib_cm.h19
-rw-r--r--include/rdma/ib_hdrs.h2
-rw-r--r--include/rdma/ib_mad.h1
-rw-r--r--include/rdma/ib_marshall.h3
-rw-r--r--include/rdma/ib_pack.h3
-rw-r--r--include/rdma/ib_sa.h37
-rw-r--r--include/rdma/ib_ucaps.h30
-rw-r--r--include/rdma/ib_umem.h43
-rw-r--r--include/rdma/ib_umem_odp.h25
-rw-r--r--include/rdma/ib_verbs.h308
-rw-r--r--include/rdma/iba.h2
-rw-r--r--include/rdma/rdma_cm.h22
-rw-r--r--include/rdma/rdma_counter.h7
-rw-r--r--include/rdma/rdma_netlink.h12
-rw-r--r--include/rdma/rdmavt_qp.h70
-rw-r--r--include/rdma/restrack.h4
-rw-r--r--include/rdma/uverbs_std_types.h2
-rw-r--r--include/rdma/uverbs_types.h33
-rw-r--r--include/rv/da_monitor.h187
-rw-r--r--include/rv/ltl_monitor.h173
-rw-r--r--include/scsi/fcoe_sysfs.h2
-rw-r--r--include/scsi/libfc.h2
-rw-r--r--include/scsi/libfcoe.h2
-rw-r--r--include/scsi/libiscsi_tcp.h16
-rw-r--r--include/scsi/libsas.h19
-rw-r--r--include/scsi/sas_ata.h91
-rw-r--r--include/scsi/scsi_bsg_iscsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_dbg.h11
-rw-r--r--include/scsi/scsi_device.h46
-rw-r--r--include/scsi/scsi_devinfo.h4
-rw-r--r--include/scsi/scsi_host.h70
-rw-r--r--include/scsi/scsi_proto.h8
-rw-r--r--include/scsi/scsi_transport_fc.h13
-rw-r--r--include/scsi/scsi_transport_iscsi.h10
-rw-r--r--include/scsi/scsi_transport_sas.h2
-rw-r--r--include/scsi/scsicam.h7
-rw-r--r--include/soc/arc/arc_aux.h (renamed from include/soc/arc/aux.h)0
-rw-r--r--include/soc/arc/mcip.h2
-rw-r--r--include/soc/arc/timers.h2
-rw-r--r--include/soc/at91/sama7-sfrbu.h7
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h1
-rw-r--r--include/soc/fsl/caam-blob.h26
-rw-r--r--include/soc/fsl/qe/qe.h23
-rw-r--r--include/soc/fsl/qe/qmc.h27
-rw-r--r--include/soc/fsl/qman.h2
-rw-r--r--include/soc/microchip/mpfs.h3
-rw-r--r--include/soc/mscc/ocelot.h34
-rw-r--r--include/soc/mscc/ocelot_vcap.h2
-rw-r--r--include/soc/nuvoton/clock-npcm8xx.h18
-rw-r--r--include/soc/qcom/ice.h37
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h4
-rw-r--r--include/soc/qcom/tcs.h26
-rw-r--r--include/soc/rockchip/rk3588_grf.h8
-rw-r--r--include/soc/rockchip/rockchip_grf.h1
-rw-r--r--include/soc/rockchip/rockchip_sip.h3
-rw-r--r--include/soc/spacemit/k1-syscon.h161
-rw-r--r--include/soc/tegra/bpmp-abi.h2
-rw-r--r--include/sound/ac97/codec.h5
-rw-r--r--include/sound/aci.h1
-rw-r--r--include/sound/adau1373.h33
-rw-r--r--include/sound/asoundef.h15
-rw-r--r--include/sound/compress_driver.h52
-rw-r--r--include/sound/control.h27
-rw-r--r--include/sound/core.h70
-rw-r--r--include/sound/cs-amp-lib.h28
-rw-r--r--include/sound/cs35l41.h16
-rw-r--r--include/sound/cs35l56.h128
-rw-r--r--include/sound/cs42l52.h29
-rw-r--r--include/sound/cs42l56.h45
-rw-r--r--include/sound/cs42l73.h19
-rw-r--r--include/sound/cs48l32.h47
-rw-r--r--include/sound/cs48l32_registers.h530
-rw-r--r--include/sound/dmaengine_pcm.h8
-rw-r--r--include/sound/emu10k1.h3
-rw-r--r--include/sound/es1688.h1
-rw-r--r--include/sound/gus.h23
-rw-r--r--include/sound/hda-mlink.h29
-rw-r--r--include/sound/hda-sdw-bpt.h69
-rw-r--r--include/sound/hda_codec.h75
-rw-r--r--include/sound/hda_register.h2
-rw-r--r--include/sound/hdaudio.h35
-rw-r--r--include/sound/hdaudio_ext.h51
-rw-r--r--include/sound/hdmi-codec.h8
-rw-r--r--include/sound/jack.h10
-rw-r--r--include/sound/memalloc.h7
-rw-r--r--include/sound/pcm.h112
-rw-r--r--include/sound/pcm_drm_eld.h91
-rw-r--r--include/sound/q6usboffload.h20
-rw-r--r--include/sound/rawmidi.h11
-rw-r--r--include/sound/rt1318.h16
-rw-r--r--include/sound/sdca.h87
-rw-r--r--include/sound/sdca_asoc.h61
-rw-r--r--include/sound/sdca_fdl.h105
-rw-r--r--include/sound/sdca_function.h1475
-rw-r--r--include/sound/sdca_hid.h38
-rw-r--r--include/sound/sdca_interrupts.h87
-rw-r--r--include/sound/sdca_regmap.h33
-rw-r--r--include/sound/sdca_ump.h50
-rw-r--r--include/sound/seq_kernel.h4
-rw-r--r--include/sound/simple_card_utils.h31
-rw-r--r--include/sound/snd_wavefront.h8
-rw-r--r--include/sound/soc-acpi-intel-match.h4
-rw-r--r--include/sound/soc-acpi.h35
-rw-r--r--include/sound/soc-card.h2
-rw-r--r--include/sound/soc-component.h86
-rw-r--r--include/sound/soc-dai.h74
-rw-r--r--include/sound/soc-dapm.h328
-rw-r--r--include/sound/soc-dpcm.h27
-rw-r--r--include/sound/soc-topology.h2
-rw-r--r--include/sound/soc-usb.h138
-rw-r--r--include/sound/soc.h259
-rw-r--r--include/sound/soc_sdw_utils.h273
-rw-r--r--include/sound/sof.h2
-rw-r--r--include/sound/sof/ext_manifest.h1
-rw-r--r--include/sound/sof/ipc4/header.h19
-rw-r--r--include/sound/sof/topology.h2
-rw-r--r--include/sound/soundfont.h24
-rw-r--r--include/sound/tas2563-tlv.h279
-rw-r--r--include/sound/tas2770-tlv.h23
-rw-r--r--include/sound/tas2781-comlib-i2c.h37
-rw-r--r--include/sound/tas2781-dsp.h52
-rw-r--r--include/sound/tas2781-tlv.h6
-rw-r--r--include/sound/tas2781.h197
-rw-r--r--include/sound/tas2x20-tlv.h259
-rw-r--r--include/sound/tas5825-tlv.h24
-rw-r--r--include/sound/tlv320aic32x4.h9
-rw-r--r--include/sound/tlv320dac33-plat.h21
-rw-r--r--include/sound/tpa6130a2-plat.h17
-rw-r--r--include/sound/ump.h13
-rw-r--r--include/sound/ump_convert.h3
-rw-r--r--include/sound/ump_msg.h4
-rw-r--r--include/sound/vx_core.h1
-rw-r--r--include/sound/wm8904.h3
-rw-r--r--include/target/iscsi/iscsi_target_core.h3
-rw-r--r--include/target/target_core_backend.h8
-rw-r--r--include/target/target_core_base.h40
-rw-r--r--include/trace/bpf_probe.h22
-rw-r--r--include/trace/define_trace.h39
-rw-r--r--include/trace/events/afs.h321
-rw-r--r--include/trace/events/alarmtimer.h2
-rw-r--r--include/trace/events/amdxdna.h101
-rw-r--r--include/trace/events/asoc.h7
-rw-r--r--include/trace/events/block.h147
-rw-r--r--include/trace/events/btrfs.h184
-rw-r--r--include/trace/events/cachefiles.h193
-rw-r--r--include/trace/events/capability.h57
-rw-r--r--include/trace/events/cgroup.h49
-rw-r--r--include/trace/events/cma.h19
-rw-r--r--include/trace/events/damon.h43
-rw-r--r--include/trace/events/dma.h475
-rw-r--r--include/trace/events/dma_fence.h38
-rw-r--r--include/trace/events/erofs.h54
-rw-r--r--include/trace/events/exceptions.h43
-rw-r--r--include/trace/events/ext4.h175
-rw-r--r--include/trace/events/f2fs.h106
-rw-r--r--include/trace/events/fib.h4
-rw-r--r--include/trace/events/fib6.h8
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/filemap.h84
-rw-r--r--include/trace/events/firewire.h654
-rw-r--r--include/trace/events/firewire_ohci.h101
-rw-r--r--include/trace/events/fs_dax.h84
-rw-r--r--include/trace/events/fscache.h4
-rw-r--r--include/trace/events/habanalabs.h2
-rw-r--r--include/trace/events/huge_memory.h32
-rw-r--r--include/trace/events/hugetlbfs.h156
-rw-r--r--include/trace/events/hwmon.h10
-rw-r--r--include/trace/events/intel_ifs.h27
-rw-r--r--include/trace/events/io_uring.h38
-rw-r--r--include/trace/events/ipi.h58
-rw-r--r--include/trace/events/irq_matrix.h8
-rw-r--r--include/trace/events/kmem.h103
-rw-r--r--include/trace/events/kvm.h111
-rw-r--r--include/trace/events/mce.h49
-rw-r--r--include/trace/events/memcg.h106
-rw-r--r--include/trace/events/memory-failure.h98
-rw-r--r--include/trace/events/migrate.h3
-rw-r--r--include/trace/events/mmap.h52
-rw-r--r--include/trace/events/mmap_lock.h44
-rw-r--r--include/trace/events/mmflags.h155
-rw-r--r--include/trace/events/mptcp.h2
-rw-r--r--include/trace/events/net.h37
-rw-r--r--include/trace/events/netfs.h331
-rw-r--r--include/trace/events/oom.h4
-rw-r--r--include/trace/events/osnoise.h96
-rw-r--r--include/trace/events/page_pool.h30
-rw-r--r--include/trace/events/page_ref.h4
-rw-r--r--include/trace/events/power.h93
-rw-r--r--include/trace/events/preemptirq.h8
-rw-r--r--include/trace/events/pwm.h132
-rw-r--r--include/trace/events/qdisc.h2
-rw-r--r--include/trace/events/rcu.h56
-rw-r--r--include/trace/events/readahead.h132
-rw-r--r--include/trace/events/rpcgss.h6
-rw-r--r--include/trace/events/rpcrdma.h116
-rw-r--r--include/trace/events/rseq.h4
-rw-r--r--include/trace/events/rust_sample.h31
-rw-r--r--include/trace/events/rv.h142
-rw-r--r--include/trace/events/rxrpc.h1074
-rw-r--r--include/trace/events/sched.h212
-rw-r--r--include/trace/events/sched_ext.h90
-rw-r--r--include/trace/events/scmi.h24
-rw-r--r--include/trace/events/scsi.h18
-rw-r--r--include/trace/events/skb.h11
-rw-r--r--include/trace/events/sock.h1
-rw-r--r--include/trace/events/spi-mem.h106
-rw-r--r--include/trace/events/sunrpc.h50
-rw-r--r--include/trace/events/syscalls.h4
-rw-r--r--include/trace/events/target.h4
-rw-r--r--include/trace/events/task.h50
-rw-r--r--include/trace/events/tcp.h472
-rw-r--r--include/trace/events/thp.h2
-rw-r--r--include/trace/events/timer.h8
-rw-r--r--include/trace/events/timer_migration.h20
-rw-r--r--include/trace/events/timestamp.h124
-rw-r--r--include/trace/events/tsm_mr.h80
-rw-r--r--include/trace/events/ufs.h399
-rw-r--r--include/trace/events/vmscan.h45
-rw-r--r--include/trace/events/writeback.h88
-rw-r--r--include/trace/events/xdp.h47
-rw-r--r--include/trace/misc/fs.h43
-rw-r--r--include/trace/misc/nfs.h1
-rw-r--r--include/trace/perf.h44
-rw-r--r--include/trace/stages/stage3_trace_output.h8
-rw-r--r--include/trace/stages/stage7_class_define.h1
-rw-r--r--include/trace/syscall.h8
-rw-r--r--include/trace/trace_events.h62
-rw-r--r--include/uapi/asm-generic/fcntl.h1
-rw-r--r--include/uapi/asm-generic/ioctl.h14
-rw-r--r--include/uapi/asm-generic/mman-common.h4
-rw-r--r--include/uapi/asm-generic/mman.h4
-rw-r--r--include/uapi/asm-generic/param.h6
-rw-r--r--include/uapi/asm-generic/posix_types.h1
-rw-r--r--include/uapi/asm-generic/siginfo.h2
-rw-r--r--include/uapi/asm-generic/socket.h15
-rw-r--r--include/uapi/asm-generic/unistd.h27
-rw-r--r--include/uapi/cxl/features.h179
-rw-r--r--include/uapi/drm/amdgpu_drm.h403
-rw-r--r--include/uapi/drm/amdxdna_accel.h698
-rw-r--r--include/uapi/drm/asahi_drm.h1194
-rw-r--r--include/uapi/drm/drm.h99
-rw-r--r--include/uapi/drm/drm_fourcc.h211
-rw-r--r--include/uapi/drm/drm_mode.h186
-rw-r--r--include/uapi/drm/ethosu_accel.h261
-rw-r--r--include/uapi/drm/i915_drm.h27
-rw-r--r--include/uapi/drm/ivpu_accel.h241
-rw-r--r--include/uapi/drm/msm_drm.h158
-rw-r--r--include/uapi/drm/nova_drm.h101
-rw-r--r--include/uapi/drm/panfrost_drm.h166
-rw-r--r--include/uapi/drm/panthor_drm.h222
-rw-r--r--include/uapi/drm/qaic_accel.h2
-rw-r--r--include/uapi/drm/rocket_accel.h142
-rw-r--r--include/uapi/drm/v3d_drm.h69
-rw-r--r--include/uapi/drm/virtgpu_drm.h6
-rw-r--r--include/uapi/drm/xe_drm.h931
-rw-r--r--include/uapi/fwctl/cxl.h56
-rw-r--r--include/uapi/fwctl/fwctl.h141
-rw-r--r--include/uapi/fwctl/mlx5.h36
-rw-r--r--include/uapi/fwctl/pds.h62
-rw-r--r--include/uapi/linux/acrn.h36
-rw-r--r--include/uapi/linux/android/binder.h38
-rw-r--r--include/uapi/linux/android/binder_netlink.h38
-rw-r--r--include/uapi/linux/aspeed-video.h7
-rw-r--r--include/uapi/linux/audit.h10
-rw-r--r--include/uapi/linux/auto_fs.h2
-rw-r--r--include/uapi/linux/batadv_packet.h29
-rw-r--r--include/uapi/linux/batman_adv.h18
-rw-r--r--include/uapi/linux/bits.h11
-rw-r--r--include/uapi/linux/blk-crypto.h44
-rw-r--r--include/uapi/linux/blkdev.h14
-rw-r--r--include/uapi/linux/blktrace_api.h57
-rw-r--r--include/uapi/linux/blkzoned.h46
-rw-r--r--include/uapi/linux/bpf.h241
-rw-r--r--include/uapi/linux/btf.h3
-rw-r--r--include/uapi/linux/btrfs.h53
-rw-r--r--include/uapi/linux/btrfs_tree.h22
-rw-r--r--include/uapi/linux/can.h3
-rw-r--r--include/uapi/linux/can/isotp.h2
-rw-r--r--include/uapi/linux/can/netlink.h48
-rw-r--r--include/uapi/linux/capability.h6
-rw-r--r--include/uapi/linux/cec-funcs.h40
-rw-r--r--include/uapi/linux/cec.h9
-rw-r--r--include/uapi/linux/cn_proc.h3
-rw-r--r--include/uapi/linux/const.h17
-rw-r--r--include/uapi/linux/coredump.h104
-rw-r--r--include/uapi/linux/counter.h2
-rw-r--r--include/uapi/linux/counter/microchip-tcb-capture.h40
-rw-r--r--include/uapi/linux/cryptouser.h5
-rw-r--r--include/uapi/linux/devlink.h37
-rw-r--r--include/uapi/linux/dlm.h2
-rw-r--r--include/uapi/linux/dm-ioctl.h9
-rw-r--r--include/uapi/linux/dma-heap.h2
-rw-r--r--include/uapi/linux/dpll.h43
-rw-r--r--include/uapi/linux/elf.h141
-rw-r--r--include/uapi/linux/energy_model.h62
-rw-r--r--include/uapi/linux/errqueue.h1
-rw-r--r--include/uapi/linux/ethtool.h401
-rw-r--r--include/uapi/linux/ethtool_netlink.h836
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h961
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/ext4.h53
-rw-r--r--include/uapi/linux/f2fs.h8
-rw-r--r--include/uapi/linux/falloc.h18
-rw-r--r--include/uapi/linux/fanotify.h29
-rw-r--r--include/uapi/linux/fb.h2
-rw-r--r--include/uapi/linux/fcntl.h124
-rw-r--r--include/uapi/linux/fib_rules.h10
-rw-r--r--include/uapi/linux/fiemap.h47
-rw-r--r--include/uapi/linux/firewire-cdev.h3
-rw-r--r--include/uapi/linux/fou.h1
-rw-r--r--include/uapi/linux/fs.h275
-rw-r--r--include/uapi/linux/fscrypt.h6
-rw-r--r--include/uapi/linux/fuse.h130
-rw-r--r--include/uapi/linux/futex.h9
-rw-r--r--include/uapi/linux/gpib.h104
-rw-r--r--include/uapi/linux/gpib_ioctl.h167
-rw-r--r--include/uapi/linux/handshake.h2
-rw-r--r--include/uapi/linux/hidraw.h3
-rw-r--r--include/uapi/linux/i2c.h5
-rw-r--r--include/uapi/linux/i8k.h2
-rw-r--r--include/uapi/linux/if_addr.h4
-rw-r--r--include/uapi/linux/if_addrlabel.h4
-rw-r--r--include/uapi/linux/if_alg.h6
-rw-r--r--include/uapi/linux/if_arcnet.h6
-rw-r--r--include/uapi/linux/if_bonding.h6
-rw-r--r--include/uapi/linux/if_bridge.h13
-rw-r--r--include/uapi/linux/if_cablemodem.h23
-rw-r--r--include/uapi/linux/if_ether.h4
-rw-r--r--include/uapi/linux/if_fc.h6
-rw-r--r--include/uapi/linux/if_hippi.h6
-rw-r--r--include/uapi/linux/if_link.h47
-rw-r--r--include/uapi/linux/if_packet.h11
-rw-r--r--include/uapi/linux/if_plip.h4
-rw-r--r--include/uapi/linux/if_slip.h4
-rw-r--r--include/uapi/linux/if_team.h1
-rw-r--r--include/uapi/linux/if_tun.h9
-rw-r--r--include/uapi/linux/if_x25.h6
-rw-r--r--include/uapi/linux/if_xdp.h21
-rw-r--r--include/uapi/linux/iio/buffer.h22
-rw-r--r--include/uapi/linux/iio/types.h8
-rw-r--r--include/uapi/linux/in.h6
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/inet_diag.h2
-rw-r--r--include/uapi/linux/input-event-codes.h27
-rw-r--r--include/uapi/linux/input.h23
-rw-r--r--include/uapi/linux/io_uring.h327
-rw-r--r--include/uapi/linux/io_uring/mock_file.h47
-rw-r--r--include/uapi/linux/io_uring/query.h68
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h6
-rw-r--r--include/uapi/linux/iommufd.h671
-rw-r--r--include/uapi/linux/ip.h16
-rw-r--r--include/uapi/linux/ip6_tunnel.h4
-rw-r--r--include/uapi/linux/ipsec.h3
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/isst_if.h76
-rw-r--r--include/uapi/linux/ivtv.h2
-rw-r--r--include/uapi/linux/kd.h96
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kexec.h5
-rw-r--r--include/uapi/linux/kfd_ioctl.h127
-rw-r--r--include/uapi/linux/kfd_sysfs.h23
-rw-r--r--include/uapi/linux/kvm.h90
-rw-r--r--include/uapi/linux/landlock.h172
-rw-r--r--include/uapi/linux/libc-compat.h36
-rw-r--r--include/uapi/linux/liveupdate.h216
-rw-r--r--include/uapi/linux/lockd_netlink.h30
-rw-r--r--include/uapi/linux/lsm.h1
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/map_benchmark.h (renamed from include/linux/map_benchmark.h)14
-rw-r--r--include/uapi/linux/mctp.h8
-rw-r--r--include/uapi/linux/mdio.h25
-rw-r--r--include/uapi/linux/media-bus-format.h11
-rw-r--r--include/uapi/linux/media/amlogic/c3-isp-config.h520
-rw-r--r--include/uapi/linux/media/arm/mali-c55-config.h794
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_be_config.h969
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_common.h202
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_fe_config.h273
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h64
-rw-r--r--include/uapi/linux/media/v4l2-isp.h102
-rw-r--r--include/uapi/linux/mempolicy.h12
-rw-r--r--include/uapi/linux/mman.h1
-rw-r--r--include/uapi/linux/mount.h30
-rw-r--r--include/uapi/linux/mptcp.h25
-rw-r--r--include/uapi/linux/mptcp_pm.h51
-rw-r--r--include/uapi/linux/mshv.h405
-rw-r--r--include/uapi/linux/nbd.h8
-rw-r--r--include/uapi/linux/neighbour.h9
-rw-r--r--include/uapi/linux/net_dropmon.h11
-rw-r--r--include/uapi/linux/net_shaper.h96
-rw-r--r--include/uapi/linux/net_tstamp.h24
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netdev.h43
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h54
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h2
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h40
-rw-r--r--include/uapi/linux/netlink_diag.h4
-rw-r--r--include/uapi/linux/nexthop.h10
-rw-r--r--include/uapi/linux/nfc.h3
-rw-r--r--include/uapi/linux/nfs4.h11
-rw-r--r--include/uapi/linux/nfsd_netlink.h11
-rw-r--r--include/uapi/linux/nilfs2_ondisk.h3
-rw-r--r--include/uapi/linux/nl80211-vnd-intel.h1
-rw-r--r--include/uapi/linux/nl80211.h493
-rw-r--r--include/uapi/linux/nsfs.h109
-rw-r--r--include/uapi/linux/ntsync.h42
-rw-r--r--include/uapi/linux/openvswitch.h37
-rw-r--r--include/uapi/linux/ovpn.h110
-rw-r--r--include/uapi/linux/pci_regs.h228
-rw-r--r--include/uapi/linux/pcitest.h8
-rw-r--r--include/uapi/linux/perf_event.h687
-rw-r--r--include/uapi/linux/pfrut.h1
-rw-r--r--include/uapi/linux/pidfd.h101
-rw-r--r--include/uapi/linux/pkt_cls.h38
-rw-r--r--include/uapi/linux/pkt_sched.h75
-rw-r--r--include/uapi/linux/pps_gen.h37
-rw-r--r--include/uapi/linux/pr.h14
-rw-r--r--include/uapi/linux/prctl.h62
-rw-r--r--include/uapi/linux/psample.h11
-rw-r--r--include/uapi/linux/psci.h5
-rw-r--r--include/uapi/linux/psp-sev.h125
-rw-r--r--include/uapi/linux/psp-sfs.h87
-rw-r--r--include/uapi/linux/psp.h85
-rw-r--r--include/uapi/linux/ptp_clock.h31
-rw-r--r--include/uapi/linux/ptrace.h7
-rw-r--r--include/uapi/linux/pwm.h53
-rw-r--r--include/uapi/linux/raid/md_p.h7
-rw-r--r--include/uapi/linux/raid/md_u.h2
-rw-r--r--include/uapi/linux/random.h17
-rw-r--r--include/uapi/linux/reiserfs_fs.h27
-rw-r--r--include/uapi/linux/reiserfs_xattr.h25
-rw-r--r--include/uapi/linux/rkisp1-config.h623
-rw-r--r--include/uapi/linux/rseq.h21
-rw-r--r--include/uapi/linux/rtnetlink.h22
-rw-r--r--include/uapi/linux/rxrpc.h77
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sched/types.h6
-rw-r--r--include/uapi/linux/securebits.h24
-rw-r--r--include/uapi/linux/sed-opal.h1
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/sev-guest.h3
-rw-r--r--include/uapi/linux/smc.h6
-rw-r--r--include/uapi/linux/snmp.h23
-rw-r--r--include/uapi/linux/spi/spi.h5
-rw-r--r--include/uapi/linux/stat.h105
-rw-r--r--include/uapi/linux/stddef.h21
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/taskstats.h30
-rw-r--r--include/uapi/linux/tcp.h22
-rw-r--r--include/uapi/linux/tcp_metrics.h22
-rw-r--r--include/uapi/linux/tee.h108
-rw-r--r--include/uapi/linux/thermal.h29
-rw-r--r--include/uapi/linux/time.h11
-rw-r--r--include/uapi/linux/tiocl.h1
-rw-r--r--include/uapi/linux/tls.h2
-rw-r--r--include/uapi/linux/trace_mmap.h2
-rw-r--r--include/uapi/linux/types.h1
-rw-r--r--include/uapi/linux/ublk_cmd.h233
-rw-r--r--include/uapi/linux/udp.h3
-rw-r--r--include/uapi/linux/uio.h18
-rw-r--r--include/uapi/linux/um_timetravel.h190
-rw-r--r--include/uapi/linux/usb/cdc.h12
-rw-r--r--include/uapi/linux/usb/ch9.h24
-rw-r--r--include/uapi/linux/usb/functionfs.h105
-rw-r--r--include/uapi/linux/usb/g_hid.h40
-rw-r--r--include/uapi/linux/usb/gadgetfs.h2
-rw-r--r--include/uapi/linux/usb/video.h59
-rw-r--r--include/uapi/linux/uvcvideo.h13
-rw-r--r--include/uapi/linux/v4l2-controls.h142
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h2
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h5
-rw-r--r--include/uapi/linux/vdpa.h1
-rw-r--r--include/uapi/linux/vduse.h4
-rw-r--r--include/uapi/linux/vfio.h72
-rw-r--r--include/uapi/linux/vhost.h39
-rw-r--r--include/uapi/linux/vhost_types.h5
-rw-r--r--include/uapi/linux/videodev2.h81
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/linux/virtio_crypto.h1
-rw-r--r--include/uapi/linux/virtio_gpu.h4
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_net.h49
-rw-r--r--include/uapi/linux/virtio_pci.h146
-rw-r--r--include/uapi/linux/virtio_rtc.h237
-rw-r--r--include/uapi/linux/virtio_snd.h2
-rw-r--r--include/uapi/linux/virtio_spi.h181
-rw-r--r--include/uapi/linux/vm_sockets.h4
-rw-r--r--include/uapi/linux/vmclock-abi.h182
-rw-r--r--include/uapi/linux/vmcore.h9
-rw-r--r--include/uapi/linux/vt.h55
-rw-r--r--include/uapi/linux/wireguard.h184
-rw-r--r--include/uapi/linux/xattr.h11
-rw-r--r--include/uapi/linux/xfrm.h12
-rw-r--r--include/uapi/linux/zorro_ids.h3
-rw-r--r--include/uapi/misc/amd-apml.h152
-rw-r--r--include/uapi/misc/cxl.h156
-rw-r--r--include/uapi/misc/fastrpc.h2
-rw-r--r--include/uapi/misc/mrvl_cn10k_dpi.h39
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h1
-rw-r--r--include/uapi/mtd/ubi-user.h33
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h15
-rw-r--r--include/uapi/rdma/efa-abi.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h44
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h1
-rw-r--r--include/uapi/rdma/ib_user_sa.h14
-rw-r--r--include/uapi/rdma/ib_user_verbs.h16
-rw-r--r--include/uapi/rdma/ionic-abi.h115
-rw-r--r--include/uapi/rdma/irdma-abi.h16
-rw-r--r--include/uapi/rdma/mana-abi.h9
-rw-r--r--include/uapi/rdma/mlx5-abi.h5
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h14
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h6
-rw-r--r--include/uapi/rdma/rdma_netlink.h42
-rw-r--r--include/uapi/rdma/rdma_user_cm.h42
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h276
-rw-r--r--include/uapi/scsi/fc/fc_els.h58
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h3
-rw-r--r--include/uapi/sound/asequencer.h19
-rw-r--r--include/uapi/sound/asoc.h2
-rw-r--r--include/uapi/sound/asound.h34
-rw-r--r--include/uapi/sound/compress_offload.h99
-rw-r--r--include/uapi/sound/compress_params.h64
-rw-r--r--include/uapi/sound/fcp.h120
-rw-r--r--include/uapi/sound/intel/avs/tokens.h32
-rw-r--r--include/uapi/sound/snd_ar_tokens.h20
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/tokens.h4
-rw-r--r--include/uapi/sound/tlv.h2
-rw-r--r--include/uapi/xen/privcmd.h7
-rw-r--r--include/ufs/ufs.h93
-rw-r--r--include/ufs/ufs_quirks.h10
-rw-r--r--include/ufs/ufshcd.h231
-rw-r--r--include/ufs/ufshci.h44
-rw-r--r--include/ufs/unipro.h8
-rw-r--r--include/vdso/align.h15
-rw-r--r--include/vdso/auxclock.h13
-rw-r--r--include/vdso/cache.h15
-rw-r--r--include/vdso/datapage.h135
-rw-r--r--include/vdso/getrandom.h74
-rw-r--r--include/vdso/gettime.h1
-rw-r--r--include/vdso/helpers.h59
-rw-r--r--include/vdso/jiffies.h2
-rw-r--r--include/vdso/page.h31
-rw-r--r--include/vdso/unaligned.h15
-rw-r--r--include/video/da8xx-fb.h94
-rw-r--r--include/video/edid.h3
-rw-r--r--include/video/imx-ipu-image-convert.h32
-rw-r--r--include/video/imx-ipu-v3.h14
-rw-r--r--include/video/mach64.h3
-rw-r--r--include/video/mmp_disp.h4
-rw-r--r--include/video/omapfb_dss.h8
-rw-r--r--include/video/pixel_format.h102
-rw-r--r--include/video/platform_lcd.h3
-rw-r--r--include/video/sisfb.h6
-rw-r--r--include/video/vga.h58
-rw-r--r--include/xen/acpi.h31
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/grant_table.h4
-rw-r--r--include/xen/interface/elfnote.h93
-rw-r--r--include/xen/interface/physdev.h17
-rw-r--r--include/xen/interface/xen-mca.h2
-rw-r--r--include/xen/mem-reservation.h4
-rw-r--r--include/xen/pci.h6
-rw-r--r--include/xen/xen-ops.h9
-rw-r--r--include/xen/xen.h9
-rw-r--r--include/xen/xenbus.h11
2717 files changed, 170314 insertions, 45887 deletions
diff --git a/include/Kbuild b/include/Kbuild
new file mode 100644
index 000000000000..5e76a599e2dd
--- /dev/null
+++ b/include/Kbuild
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_HEADER_TEST) += drm/
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 252b235dce5a..cbc9aeabcd99 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index d768d9c568cf..521d4bfa6ef0 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -67,7 +67,6 @@
* General Purpose Events (GPEs)
* Global Lock
* ACPI PM timer
- * FACS table (Waking vectors and Global Lock)
*/
#ifndef ACPI_REDUCED_HARDWARE
#define ACPI_REDUCED_HARDWARE FALSE
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index c5ecd0a0170c..a2db36d18419 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -173,8 +173,10 @@ struct acpi_exception_info {
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
+#define AE_AML_TOO_FEW_ARGUMENTS EXCEP_AML (0x0026)
+#define AE_AML_TOO_MANY_ARGUMENTS EXCEP_AML (0x0027)
-#define AE_CODE_AML_MAX 0x0025
+#define AE_CODE_AML_MAX 0x0027
/*
* Internal exceptions used for control
@@ -353,7 +355,11 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"A target operand of an incorrect type was encountered"),
EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
EXCEP_TXT("AE_AML_BUFFER_LENGTH",
- "The length of the buffer is invalid/incorrect")
+ "The length of the buffer is invalid/incorrect"),
+ EXCEP_TXT("AE_AML_TOO_FEW_ARGUMENTS",
+ "There are fewer than expected method arguments"),
+ EXCEP_TXT("AE_AML_TOO_MANY_ARGUMENTS",
+ "There are too many arguments for this method")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 76aa6aa346ba..cb6a4dcc4e8e 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index b1571dd96310..3584f33e352c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -193,6 +193,7 @@
*/
#ifndef ACPI_NO_ERROR_MESSAGES
#define AE_INFO _acpi_module_name, __LINE__
+#define ACPI_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }
/*
* Error reporting. Callers module and line number are inserted by AE_INFO,
@@ -201,8 +202,10 @@
*/
#define ACPI_INFO(plist) acpi_info plist
#define ACPI_WARNING(plist) acpi_warning plist
+#define ACPI_WARNING_ONCE(plist) ACPI_ONCE(acpi_warning, plist)
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_ERROR_ONCE(plist) ACPI_ONCE(acpi_error, plist)
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
@@ -214,8 +217,10 @@
#define ACPI_INFO(plist)
#define ACPI_WARNING(plist)
+#define ACPI_WARNING_ONCE(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_ERROR_ONCE(plist)
#define ACPI_BIOS_WARNING(plist)
#define ACPI_BIOS_EXCEPTION(plist)
#define ACPI_BIOS_ERROR(plist)
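
The new _ONCE wrappers rely on a per-call-site static gate, so each warning or error fires at most once. A minimal, self-contained sketch of the same pattern (the PRINT_ONCE name is illustrative, not part of ACPICA; as with ACPI_ONCE, the plain char store is racy but benign):

    #include <stdio.h>

    /* Same idea as ACPI_ONCE: a function-local static flag gates the
     * wrapped call so it runs at most once per call site. */
    #define PRINT_ONCE(...) do { \
            static char done; \
            if (!done) { \
                    done = 1; \
                    printf(__VA_ARGS__); \
            } \
    } while (0)

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    PRINT_ONCE("fires once, i=%d\n", i); /* prints i=0 only */
            return 0;
    }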
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 8b4a497c1300..92bf80937e5f 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1a4dfd7a1c4a..aad1a95e6863 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -43,9 +43,6 @@ acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
-acpi_status
-acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
-
bool acpi_has_method(acpi_handle handle, char *name);
acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
u64 arg);
@@ -60,6 +57,9 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4);
#ifdef CONFIG_ACPI
+bool
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
+
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
u64 func, union acpi_object *argv4,
@@ -134,6 +134,7 @@ struct acpi_scan_handler {
bool (*match)(const char *idstr, const struct acpi_device_id **matchid);
int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
void (*detach)(struct acpi_device *dev);
+ void (*post_eject)(struct acpi_device *dev);
void (*bind)(struct device *phys_dev);
void (*unbind)(struct device *phys_dev);
struct acpi_hotplug_profile hotplug;
@@ -227,10 +228,12 @@ struct acpi_device_dir {
/* Plug and Play */
+#define MAX_ACPI_DEVICE_NAME_LEN 40
+#define MAX_ACPI_CLASS_NAME_LEN 20
typedef char acpi_bus_id[8];
typedef u64 acpi_bus_address;
-typedef char acpi_device_name[40];
-typedef char acpi_device_class[20];
+typedef char acpi_device_name[MAX_ACPI_DEVICE_NAME_LEN];
+typedef char acpi_device_class[MAX_ACPI_CLASS_NAME_LEN];
struct acpi_hardware_id {
struct list_head list;
@@ -254,7 +257,6 @@ struct acpi_device_pnp {
struct list_head ids; /* _HID and _CIDs */
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
- union acpi_object *str_obj; /* unicode string for _STR method */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -562,7 +564,7 @@ static inline void *acpi_driver_data(struct acpi_device *d)
}
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
-#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+#define to_acpi_driver(d) container_of_const(d, struct acpi_driver, drv)
static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
{
@@ -736,8 +738,7 @@ struct iommu_ops;
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
- struct fwnode_handle *fwnode,
- const struct iommu_ops *ops);
+ struct fwnode_handle *fwnode);
int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id);
@@ -993,6 +994,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev)
int acpi_wait_for_acpi_ipmi(void);
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices);
+u32 arch_acpi_add_auto_dep(acpi_handle handle);
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
@@ -1000,6 +1003,23 @@ static inline int unregister_acpi_bus_type(void *bus) { return 0; }
static inline int acpi_wait_for_acpi_ipmi(void) { return 0; }
+static inline const char *acpi_device_hid(struct acpi_device *device)
+{
+ return "";
+}
+
+static inline bool
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
+{
+ return false;
+}
+
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = NULL; false && (supplier);)
+
+#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
+ for (adev = NULL; false && (hid) && (uid) && (hrv); )
+
#endif /* CONFIG_ACPI */
#endif /*__ACPI_BUS_H__*/
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index b5f594754a9e..99b960bd473c 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -17,11 +17,16 @@ extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
extern void disable_srat(void);
+extern int fix_pxm_node_maps(int max_nid);
extern void bad_srat(void);
extern int srat_disabled(void);
#else /* CONFIG_ACPI_NUMA */
+static inline int fix_pxm_node_maps(int max_nid)
+{
+ return 0;
+}
static inline void disable_srat(void)
{
}
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 914c029f64c9..65c5737b6286 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 94d0fc3bd412..e65a2afe9250 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20240322
+#define ACPI_CA_VERSION 0x20250807
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -214,6 +214,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
+ * The ACPI Global Lock is mainly used on systems with SMM, so systems
+ * without SMM (such as loong_arch) may have no Global Lock and need not
+ * use it.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
@@ -660,6 +666,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void *context))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_execute_reg_methods(acpi_handle device,
+						  u32 max_depth,
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
@@ -762,6 +769,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
@@ -877,10 +885,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_leave_sleep_state_prep(u8 sleep_state))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_set_firmware_waking_vector
- (acpi_physical_address physical_address,
- acpi_physical_address physical_address64))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector
+ (acpi_physical_address physical_address,
+ acpi_physical_address physical_address64))
/*
* ACPI Timer interfaces
*/
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index efef208b0324..842f932e2c2b 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 451f6276da49..8a67d4ea6e3f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -66,14 +66,14 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
+ char oem_id[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 841ef9f22795..7f35eb0e8458 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -155,6 +155,13 @@ struct acpi_aspt_acpi_mbox_regs {
u64 reserved2[2];
};
+/* Larger subtable header (when Length can exceed 255) */
+
+struct acpi_subtbl_hdr_16 {
+ u16 type;
+ u16 length;
+};
+
/*******************************************************************************
*
* ASF - Alert Standard Format table (Signature "ASF!")
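
The struct acpi_subtbl_hdr_16 added above exists for tables whose subtables can exceed the 255-byte limit of the classic u8 length field. A hedged sketch of how a parser might walk such subtables (the walker itself is hypothetical, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    struct acpi_subtbl_hdr_16 {
            uint16_t type;
            uint16_t length;
    };

    /* Hypothetical walker: visit each subtable in buf[0..size). */
    static void walk_subtables_16(const uint8_t *buf, size_t size,
                                  void (*visit)(const struct acpi_subtbl_hdr_16 *))
    {
            size_t off = 0;

            while (off + sizeof(struct acpi_subtbl_hdr_16) <= size) {
                    const struct acpi_subtbl_hdr_16 *hdr =
                            (const struct acpi_subtbl_hdr_16 *)(buf + off);

                    /* A 16-bit length may exceed 255, which is the point
                     * of this header variant. */
                    if (hdr->length < sizeof(*hdr) || size - off < hdr->length)
                            break;          /* malformed, stop */
                    visit(hdr);
                    off += hdr->length;
            }
    }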
@@ -403,6 +410,8 @@ struct acpi_cdat_dsmas {
/* Flags for subtable above */
#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2)
+#define ACPI_CDAT_DSMAS_SHAREABLE (1 << 3)
+#define ACPI_CDAT_DSMAS_READ_ONLY (1 << 6)
/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
@@ -551,11 +560,12 @@ struct acpi_cedt_cfmws_target_element {
/* Values for Restrictions field above */
-#define ACPI_CEDT_CFMWS_RESTRICT_TYPE2 (1)
-#define ACPI_CEDT_CFMWS_RESTRICT_TYPE3 (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_DEVMEM (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM (1<<1)
#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+#define ACPI_CEDT_CFMWS_RESTRICT_BI (1<<5)
/* 2: CXL XOR Interleave Math Structure */
@@ -567,6 +577,10 @@ struct acpi_cedt_cxims {
u64 xormap_list[];
};
+struct acpi_cedt_cxims_target_element {
+ u64 xormap;
+};
+
/* 3: CXL RCEC Downstream Port Association Structure */
struct acpi_cedt_rdpas {
@@ -751,6 +765,7 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_WITH_GAS 0x0012
#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
#define ACPI_DBG2_INTEL_LPSS 0x0014
+#define ACPI_DBG2_RISCV_SBI_CON 0x0015
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -812,7 +827,8 @@ enum acpi_dmar_type {
ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
ACPI_DMAR_TYPE_NAMESPACE = 4,
ACPI_DMAR_TYPE_SATC = 5,
- ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+ ACPI_DMAR_TYPE_SIDP = 6,
+ ACPI_DMAR_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/* DMAR Device Scope structure */
@@ -820,7 +836,8 @@ enum acpi_dmar_type {
struct acpi_dmar_device_scope {
u8 entry_type;
u8 length;
- u16 reserved;
+ u8 flags;
+ u8 reserved;
u8 enumeration_id;
u8 bus;
};
@@ -916,6 +933,15 @@ struct acpi_dmar_satc {
u8 reserved;
u16 segment;
};
+
+/* 6: SoC Integrated Device Property Reporting Structure */
+
+struct acpi_dmar_sidp {
+ struct acpi_dmar_header header;
+ u16 reserved;
+ u16 segment;
+};
+
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
@@ -1017,17 +1043,18 @@ struct acpi_einj_entry {
/* Values for Action field above */
enum acpi_einj_actions {
- ACPI_EINJ_BEGIN_OPERATION = 0,
- ACPI_EINJ_GET_TRIGGER_TABLE = 1,
- ACPI_EINJ_SET_ERROR_TYPE = 2,
- ACPI_EINJ_GET_ERROR_TYPE = 3,
- ACPI_EINJ_END_OPERATION = 4,
- ACPI_EINJ_EXECUTE_OPERATION = 5,
- ACPI_EINJ_CHECK_BUSY_STATUS = 6,
- ACPI_EINJ_GET_COMMAND_STATUS = 7,
- ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
- ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
- ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
+ ACPI_EINJ_BEGIN_OPERATION = 0x0,
+ ACPI_EINJ_GET_TRIGGER_TABLE = 0x1,
+ ACPI_EINJ_SET_ERROR_TYPE = 0x2,
+ ACPI_EINJ_GET_ERROR_TYPE = 0x3,
+ ACPI_EINJ_END_OPERATION = 0x4,
+ ACPI_EINJ_EXECUTE_OPERATION = 0x5,
+ ACPI_EINJ_CHECK_BUSY_STATUS = 0x6,
+ ACPI_EINJ_GET_COMMAND_STATUS = 0x7,
+ ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 0x8,
+ ACPI_EINJ_GET_EXECUTE_TIMINGS = 0x9,
+ ACPI_EINJV2_GET_ERROR_TYPE = 0x11,
+ ACPI_EINJ_ACTION_RESERVED = 0x12, /* 0x12 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
@@ -1791,7 +1818,7 @@ struct acpi_hmat_cache {
u32 reserved1;
u64 cache_size;
u32 cache_attributes;
- u16 reserved2;
+ u16 address_mode;
u16 number_of_SMBIOShandles;
};
@@ -1803,6 +1830,9 @@ struct acpi_hmat_cache {
#define ACPI_HMAT_WRITE_POLICY (0x0000F000)
#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000)
+#define ACPI_HMAT_CACHE_MODE_UNKNOWN (0)
+#define ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR (1)
+
/* Values for cache associativity flag */
#define ACPI_HMAT_CA_NONE (0)
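
The cache_attributes masks above pack several fields into one u32 (the new address_mode field sits alongside them and takes the CACHE_MODE values just added). One hedged way to extract a masked field, assuming each mask covers a contiguous bit run:

    #include <stdint.h>

    #define ACPI_HMAT_WRITE_POLICY          (0x0000F000)
    #define ACPI_HMAT_CACHE_LINE_SIZE       (0xFFFF0000)

    /* Shift a value down by its mask's lowest set bit. */
    static uint32_t hmat_field(uint32_t attrs, uint32_t mask)
    {
            return (attrs & mask) / (mask & -mask);
    }

    /* Example: the cache line size occupies the top 16 bits. */
    static uint32_t hmat_line_size(uint32_t cache_attributes)
    {
            return hmat_field(cache_attributes, ACPI_HMAT_CACHE_LINE_SIZE);
    }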
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index ae747c89d92c..f726bce3eb84 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
- * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
+ * Name: actbl2.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -29,6 +29,7 @@
#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
#define ACPI_SIG_CCEL "CCEL" /* CC Event Log Table */
#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
+#define ACPI_SIG_ERDT "ERDT" /* Enhanced Resource Director Technology */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
@@ -37,6 +38,7 @@
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
#define ACPI_SIG_MPAM "MPAM" /* Memory System Resource Partitioning and Monitoring Table */
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
+#define ACPI_SIG_MRRM "MRRM" /* Memory Range and Region Mapping table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
@@ -50,10 +52,12 @@
#define ACPI_SIG_RAS2 "RAS2" /* RAS2 Feature table */
#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
#define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */
+#define ACPI_SIG_RIMT "RIMT" /* RISC-V IO Mapping Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_SWFT "SWFT" /* SoundWire File Table */
#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
/*
@@ -450,10 +454,199 @@ struct acpi_table_ccel {
/*******************************************************************************
*
+ * ERDT - Enhanced Resource Director Technology (ERDT) table
+ *
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_erdt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 max_clos; /* Maximum classes of service */
+ u8 reserved[24];
+ u8 erdt_substructures[];
+};
+
+/* Values for subtable type in struct acpi_subtbl_hdr_16 */
+
+enum acpi_erdt_type {
+ ACPI_ERDT_TYPE_RMDD = 0,
+ ACPI_ERDT_TYPE_CACD = 1,
+ ACPI_ERDT_TYPE_DACD = 2,
+ ACPI_ERDT_TYPE_CMRC = 3,
+ ACPI_ERDT_TYPE_MMRC = 4,
+ ACPI_ERDT_TYPE_MARC = 5,
+ ACPI_ERDT_TYPE_CARC = 6,
+ ACPI_ERDT_TYPE_CMRD = 7,
+ ACPI_ERDT_TYPE_IBRD = 8,
+ ACPI_ERDT_TYPE_IBAD = 9,
+ ACPI_ERDT_TYPE_CARD = 10,
+ ACPI_ERDT_TYPE_RESERVED = 11 /* 11 and above are reserved */
+};
+
+/*
+ * ERDT Subtables, correspond to Type in struct acpi_subtbl_hdr_16
+ */
+
+/* 0: RMDD - Resource Management Domain Description */
+
+struct acpi_erdt_rmdd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 flags;
+ u16 IO_l3_slices; /* Number of slices in IO cache */
+ u8 IO_l3_sets; /* Number of sets in IO cache */
+ u8 IO_l3_ways; /* Number of ways in IO cache */
+ u64 reserved;
+ u16 domain_id; /* Unique domain ID */
+	u32 max_rmid;		/* Maximum RMID supported */
+ u64 creg_base; /* Control Register Base Address */
+ u16 creg_size; /* Control Register Size (4K pages) */
+ u8 rmdd_structs[];
+};
+
+/* 1: CACD - CPU Agent Collection Description */
+
+struct acpi_erdt_cacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u32 X2APICIDS[];
+};
+
+/* 2: DACD - Device Agent Collection Description */
+
+struct acpi_erdt_dacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u8 dev_paths[];
+};
+
+struct acpi_erdt_dacd_dev_paths {
+ struct acpi_subtable_header header;
+ u16 segment;
+ u8 reserved;
+ u8 start_bus;
+ u8 path[];
+};
+
+/* 3: CMRC - Cache Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_cmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 cmt_reg_base;
+ u32 cmt_reg_size;
+ u16 clump_size;
+ u16 clump_stride;
+ u64 up_scale;
+};
+
+/* 4: MMRC - Memory-bandwidth Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_mmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u8 counter_width;
+ u64 up_scale;
+ u8 reserved3[7];
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 5: MARC - Memory-bandwidth Allocation Registers for CPU Agents */
+
+struct acpi_erdt_marc {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved1;
+ u16 flags;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base_opt;
+ u64 reg_base_min;
+ u64 reg_base_max;
+ u32 mba_reg_size;
+ u32 mba_ctrl_range;
+};
+
+/* 6: CARC - Cache Allocation Registers for CPU Agents */
+
+struct acpi_erdt_carc {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 7: CMRD - Cache Monitoring Registers for Device Agents */
+
+struct acpi_erdt_cmrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cmt_reg_off;
+ u16 cmt_clump_size;
+ u64 up_scale;
+};
+
+/* 8: IBRD - IO bandwidth Monitoring Registers for Device Agents */
+
+struct acpi_erdt_ibrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 total_bw_offset;
+ u16 Iomiss_bw_offset;
+ u16 total_bw_clump;
+ u16 Iomiss_bw_clump;
+ u8 reserved3[7];
+ u8 counter_width;
+ u64 up_scale;
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 9: IBAD - IO bandwidth Allocation Registers for device agents */
+
+struct acpi_erdt_ibad {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 10: CARD - Cache Allocation Registers for Device Agents */
+
+struct acpi_erdt_card {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u32 contention_mask;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cat_reg_offset;
+ u16 cat_reg_block_size;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049E.e, Sep 2022
+ * Document number: ARM DEN 0049E.f, Apr 2024
*
******************************************************************************/
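
In the RMDD subtable of the new ERDT definitions above, creg_size counts 4 KiB pages rather than bytes. A hedged, Linux-flavoured sketch of mapping the control-register window (kernel context assumed; map_rmdd_cregs is a hypothetical helper):

    #include <linux/io.h>
    #include <linux/sizes.h>

    /* Map an RMDD's control registers; creg_size is in 4K pages. */
    static void __iomem *map_rmdd_cregs(const struct acpi_erdt_rmdd *rmdd)
    {
            return ioremap(rmdd->creg_base, (size_t)rmdd->creg_size * SZ_4K);
    }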
@@ -524,6 +717,7 @@ struct acpi_iort_memory_access {
#define ACPI_IORT_MF_COHERENCY (1)
#define ACPI_IORT_MF_ATTRIBUTES (1<<1)
+#define ACPI_IORT_MF_CANWBS (1<<2)
/*
* IORT node specific subtables
@@ -1194,11 +1388,23 @@ struct acpi_madt_generic_translator {
struct acpi_madt_multiproc_wakeup {
struct acpi_subtable_header header;
- u16 mailbox_version;
+ u16 version;
u32 reserved; /* reserved - must be zero */
- u64 base_address;
+ u64 mailbox_address;
+ u64 reset_vector;
};
+/* Values for Version field above */
+
+enum acpi_madt_multiproc_wakeup_version {
+ ACPI_MADT_MP_WAKEUP_VERSION_NONE = 0,
+ ACPI_MADT_MP_WAKEUP_VERSION_V1 = 1,
+ ACPI_MADT_MP_WAKEUP_VERSION_RESERVED = 2, /* 2 and greater are reserved */
+};
+
+#define ACPI_MADT_MP_WAKEUP_SIZE_V0 16
+#define ACPI_MADT_MP_WAKEUP_SIZE_V1 24
+
#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
@@ -1211,7 +1417,8 @@ struct acpi_madt_multiproc_wakeup_mailbox {
u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
};
-#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+#define ACPI_MP_WAKE_COMMAND_TEST 2
/* 17: CPU Core Interrupt Controller (ACPI 6.5) */
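
The v1 wakeup structure adds reset_vector next to the mailbox address, and the TEST command joins WAKEUP. The handshake itself is: the OS fills in the target's APIC ID and entry point, stores the command, and waits for firmware to clear it. A hedged sketch (the apic_id and wakeup_vector mailbox members are part of the full mailbox structure, not shown in this hunk):

    /* Hedged sketch of the MP wakeup handshake; mb points at the
     * ioremapped mailbox. */
    static int mp_wakeup_cpu(struct acpi_madt_multiproc_wakeup_mailbox *mb,
                             u32 apic_id, u64 entry)
    {
            WRITE_ONCE(mb->apic_id, apic_id);
            WRITE_ONCE(mb->wakeup_vector, entry);
            smp_store_release(&mb->command, ACPI_MP_WAKE_COMMAND_WAKEUP);

            /* Firmware acknowledges by clearing the command field. */
            while (READ_ONCE(mb->command))
                    cpu_relax();
            return 0;
    }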
@@ -1594,7 +1801,7 @@ struct acpi_mpam_msc_node {
u32 max_nrdy_usec;
u64 hardware_id_linked_device;
u32 instance_id_linked_device;
- u32 num_resouce_nodes;
+ u32 num_resource_nodes;
};
struct acpi_table_mpam {
@@ -1724,6 +1931,47 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * MRRM - Memory Range and Region Mapping (MRRM) table
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_mrrm {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 max_mem_region; /* Max Memory Regions supported */
+ u8 flags; /* Region assignment type */
+ u8 reserved[26];
+ u8 memory_range_entry[];
+};
+
+/* Flags */
+#define ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS (1<<0)
+
+/*******************************************************************************
+ *
+ * Memory Range entry - Memory Range entry in MRRM table
+ *
+ ******************************************************************************/
+
+struct acpi_mrrm_mem_range_entry {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved0; /* Reserved */
+ u64 addr_base; /* Base addr of the mem range */
+ u64 addr_len; /* Length of the mem range */
+ u16 region_id_flags; /* Valid local or remote Region-ID */
+ u8 local_region_id; /* Platform-assigned static local Region-ID */
+ u8 remote_region_id; /* Platform-assigned static remote Region-ID */
+ u32 reserved1; /* Reserved */
+ /* Region-ID Programming Registers[] */
+};
+
+/* Values for region_id_flags above */
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL (1<<0)
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE (1<<1)
+
+/*******************************************************************************
+ *
* MSDM - Microsoft Data Management table
*
* Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
@@ -2788,15 +3036,15 @@ struct acpi_ras2_pcc_desc {
/* RAS2 Platform Communication Channel Shared Memory Region */
-struct acpi_ras2_shared_memory {
+struct acpi_ras2_shmem {
u32 signature;
u16 command;
u16 status;
u16 version;
u8 features[16];
- u8 set_capabilities[16];
- u16 num_parameter_blocks;
- u32 set_capabilities_status;
+ u8 set_caps[16];
+ u16 num_param_blks;
+ u32 set_caps_status;
};
/* RAS2 Parameter Block Structure for PATROL_SCRUB */
@@ -2809,11 +3057,11 @@ struct acpi_ras2_parameter_block {
/* RAS2 Parameter Block Structure for PATROL_SCRUB */
-struct acpi_ras2_patrol_scrub_parameter {
+struct acpi_ras2_patrol_scrub_param {
struct acpi_ras2_parameter_block header;
- u16 patrol_scrub_command;
- u64 requested_address_range[2];
- u64 actual_address_range[2];
+ u16 command;
+ u64 req_addr_range[2];
+ u64 actl_addr_range[2];
u32 flags;
u32 scrub_params_out;
u32 scrub_params_in;
@@ -2990,6 +3238,88 @@ struct acpi_rhct_hart_info {
/*******************************************************************************
*
+ * RIMT - RISC-V IO Remapping Table
+ *
+ * https://github.com/riscv-non-isa/riscv-acpi-rimt
+ *
+ ******************************************************************************/
+
+struct acpi_table_rimt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 num_nodes; /* Number of RIMT Nodes */
+ u32 node_offset; /* Offset to RIMT Node Array */
+ u32 reserved;
+};
+
+struct acpi_rimt_node {
+ u8 type;
+ u8 revision;
+ u16 length;
+ u16 reserved;
+ u16 id;
+ char node_data[];
+};
+
+enum acpi_rimt_node_type {
+ ACPI_RIMT_NODE_TYPE_IOMMU = 0x0,
+ ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX = 0x1,
+ ACPI_RIMT_NODE_TYPE_PLAT_DEVICE = 0x2,
+};
+
+struct acpi_rimt_iommu {
+ u8 hardware_id[8]; /* Hardware ID */
+ u64 base_address; /* Base Address */
+ u32 flags; /* Flags */
+ u32 proximity_domain; /* Proximity Domain */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 pcie_bdf; /* PCIe B/D/F */
+ u16 num_interrupt_wires; /* Number of interrupt wires */
+ u16 interrupt_wire_offset; /* Interrupt wire array offset */
+ u64 interrupt_wire[]; /* Interrupt wire array */
+};
+
+/* IOMMU Node Flags */
+#define ACPI_RIMT_IOMMU_FLAGS_PCIE (1)
+#define ACPI_RIMT_IOMMU_FLAGS_PXM_VALID (1 << 1)
+
+/* Interrupt Wire Structure */
+struct acpi_rimt_iommu_wire_gsi {
+ u32 irq_num; /* Interrupt Number */
+ u32 flags; /* Flags */
+};
+
+/* Interrupt Wire Flags */
+#define ACPI_RIMT_GSI_LEVEL_TRIGGERRED (1)
+#define ACPI_RIMT_GSI_ACTIVE_HIGH (1 << 1)
+
+struct acpi_rimt_id_mapping {
+ u32 source_id_base; /* Source ID Base */
+ u32 num_ids; /* Number of IDs */
+ u32 dest_id_base; /* Destination Device ID Base */
+ u32 dest_offset; /* Destination IOMMU Offset */
+ u32 flags; /* Flags */
+};
+
+struct acpi_rimt_pcie_rc {
+ u32 flags; /* Flags */
+ u16 reserved; /* Reserved */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 id_mapping_offset; /* ID mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+};
+
+/* PCIe Root Complex Node Flags */
+#define ACPI_RIMT_PCIE_ATS_SUPPORTED (1)
+#define ACPI_RIMT_PCIE_PRI_SUPPORTED (1 << 1)
+
+struct acpi_rimt_platform_device {
+ u16 id_mapping_offset; /* ID Mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+ char device_name[]; /* Device Object Name */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
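
The RIMT header above carries num_nodes and node_offset, and each node is self-sized via its length field, so iteration is a simple offset walk. A hedged sketch (kernel-style u8/u32 types assumed; the callback is illustrative):

    /* Hedged sketch: iterate the RIMT node array via num_nodes and
     * node_offset from the table header. */
    static void rimt_for_each_node(struct acpi_table_rimt *rimt,
                                   void (*fn)(struct acpi_rimt_node *))
    {
            struct acpi_rimt_node *node =
                    (struct acpi_rimt_node *)((u8 *)rimt + rimt->node_offset);
            u32 i;

            for (i = 0; i < rimt->num_nodes; i++) {
                    fn(node);
                    node = (struct acpi_rimt_node *)((u8 *)node + node->length);
            }
    }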
@@ -3150,6 +3480,26 @@ enum acpi_svkl_format {
};
/*******************************************************************************
+ * SWFT - SoundWire File Table
+ *
+ * Conforms to "Discovery and Configuration (DisCo) Specification for SoundWire"
+ * Version 2.1, 2 October 2023
+ *
+ ******************************************************************************/
+struct acpi_sw_file {
+ u16 vendor_id;
+ u32 file_id;
+ u16 file_version;
+ u32 file_length;
+ u8 data[];
+};
+
+struct acpi_table_swft {
+ struct acpi_table_header header;
+ struct acpi_sw_file files[];
+};
+
+/*******************************************************************************
*
* TDEL - TD-Event Log
* From: "Guest-Host-Communication Interface (GHCI) for Intel
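
SWFT file records are variable-length: each struct acpi_sw_file is followed by file_length bytes of data, then the next record begins. A hedged walker, assuming the records are packed back to back with no padding:

    /* Hedged sketch: step through SWFT's variable-length file records,
     * assuming they are packed back to back with no padding. */
    static void swft_for_each_file(struct acpi_table_swft *swft,
                                   void (*fn)(struct acpi_sw_file *))
    {
            u8 *p = (u8 *)swft->files;
            u8 *end = (u8 *)swft + swft->header.length;

            while (p + sizeof(struct acpi_sw_file) <= end) {
                    struct acpi_sw_file *f = (struct acpi_sw_file *)p;

                    fn(f);
                    p += sizeof(*f) + f->file_length;
            }
    }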
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 8f775e3a08fd..79d3aa5a4bad 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -92,10 +92,10 @@ struct acpi_table_slit {
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 2
+ * Version 4
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.03, August 10, 2015
+ * Version 1.10, Jan 5, 2023
*
******************************************************************************/
@@ -112,7 +112,7 @@ struct acpi_table_spcr {
u8 stop_bits;
u8 flow_control;
u8 terminal_type;
- u8 reserved1;
+ u8 language;
u16 pci_device_id;
u16 pci_vendor_id;
u8 pci_bus;
@@ -120,7 +120,11 @@ struct acpi_table_spcr {
u8 pci_function;
u32 pci_flags;
u8 pci_segment;
- u32 reserved2;
+ u32 uart_clk_freq;
+ u32 precise_baudrate;
+ u16 name_space_string_length;
+ u16 name_space_string_offset;
+ char name_space_string[];
};
/* Masks for pci_flags field above */
@@ -265,7 +269,7 @@ struct acpi_srat_gicc_affinity {
#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
-/* 4: GCC ITS Affinity (ACPI 6.2) */
+/* 4: GIC ITS Affinity (ACPI 6.2) */
struct acpi_srat_gic_its_affinity {
struct acpi_subtable_header header;
@@ -462,6 +466,7 @@ struct acpi_tpm2_phy {
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
#define ACPI_TPM2_RESERVED 12
#define ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON 13
+#define ACPI_TPM2_CRB_WITH_ARM_FFA 15
/* Optional trailer appears after any start_method subtables */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 85c2dcf2b704..8fe893d776dd 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -522,12 +522,12 @@ typedef u64 acpi_integer;
#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE))
-#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
+#define ACPI_COPY_NAMESEG(dest,src) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
#endif
/* Support for the special RSDP signature (8 characters) */
-#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
/* Support for OEMx signature (x can be any character) */
@@ -1311,6 +1311,7 @@ typedef enum {
#define ACPI_OSI_WIN_10_19H1 0x14
#define ACPI_OSI_WIN_10_20H1 0x15
#define ACPI_OSI_WIN_11 0x16
+#define ACPI_OSI_WIN_11_22H2 0x17
/* Definitions of getopt */
@@ -1326,4 +1327,8 @@ typedef enum {
#define ACPI_FLEX_ARRAY(TYPE, NAME) TYPE NAME[0]
#endif
+#ifndef ACPI_NONSTRING
+#define ACPI_NONSTRING /* No terminating NUL character */
+#endif
+
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 52a84523bfac..25dd3e998727 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index 611a2561a014..c93f16dfb944 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -2,6 +2,7 @@
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
+#include <linux/device.h>
#include <linux/power_supply.h>
#define ACPI_BATTERY_CLASS "battery"
@@ -19,5 +20,6 @@ struct acpi_battery_hook {
void battery_hook_register(struct acpi_battery_hook *hook);
void battery_hook_unregister(struct acpi_battery_hook *hook);
+int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook);
#endif
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 930b6afba6f4..13fa81504844 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -32,6 +32,15 @@
#define CMD_READ 0
#define CMD_WRITE 1
+#define CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE (7)
+#define CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE (3)
+#define CPPC_AUTO_ACT_WINDOW_MAX_SIG ((1 << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) - 1)
+#define CPPC_AUTO_ACT_WINDOW_MAX_EXP ((1 << CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE) - 1)
+/* CPPC_AUTO_ACT_WINDOW_MAX_SIG is 127, so 128 and 129 will decay to 127 when writing */
+#define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH 129
+
+#define CPPC_ENERGY_PERF_MAX (0xFF)
+
 /* Each register has the following format. */
struct cpc_reg {
u8 descriptor;
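
Per the ACPI CPPC definition, the autonomous activity window register encodes a duration as significand * 10^exponent microseconds, with a 7-bit significand and 3-bit exponent; the CARRY_THRESH comment above says significands of 128..129 decay to 127 rather than carrying. A hedged encoder sketch (cppc_encode_window is hypothetical):

    #include <stdint.h>

    #define CPPC_AUTO_ACT_WINDOW_MAX_SIG           127
    #define CPPC_AUTO_ACT_WINDOW_MAX_EXP           7
    #define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH  129

    /* Hedged sketch: encode a window in microseconds as
     * significand * 10^exponent; significands of 128..129 are clamped
     * to 127 instead of carrying into the exponent. */
    static void cppc_encode_window(uint64_t usec, uint8_t *sig, uint8_t *exp)
    {
            uint64_t s = usec;
            uint8_t e = 0;

            while (s > CPPC_AUTO_ACT_WINDOW_MAX_SIG &&
                   e < CPPC_AUTO_ACT_WINDOW_MAX_EXP) {
                    if (s <= CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH)
                            break;          /* clamped just below */
                    s = (s + 5) / 10;       /* drop a decimal digit, rounded */
                    e++;
            }
            if (s > CPPC_AUTO_ACT_WINDOW_MAX_SIG)
                    s = CPPC_AUTO_ACT_WINDOW_MAX_SIG;
            *sig = (uint8_t)s;
            *exp = e;
    }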
@@ -64,6 +73,8 @@ struct cpc_desc {
int cpu_id;
int write_cmd_status;
int write_cmd_id;
+ /* Lock used for RMW operations in cpc_write() */
+ raw_spinlock_t rmw_lock;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
struct kobject kobj;
@@ -128,7 +139,6 @@ struct cppc_perf_fb_ctrs {
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
- struct list_head node;
struct cppc_perf_caps perf_caps;
struct cppc_perf_ctrls perf_ctrls;
struct cppc_perf_fb_ctrs perf_fb_ctrs;
@@ -150,43 +160,49 @@ extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int f
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
-extern unsigned int cppc_get_transition_latency(int cpu);
+extern int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
-extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
+extern int cppc_set_epp(int cpu, u64 epp_val);
+extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
+extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
+extern int cppc_get_auto_sel(int cpu, bool *enable);
extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
+extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+extern int amd_detect_prefcore(bool *detected);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_enable(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
@@ -200,9 +216,9 @@ static inline bool cppc_allow_fast_switch(void)
{
return false;
}
-static inline unsigned int cppc_get_transition_latency(int cpu)
+static inline int cppc_get_transition_latency(int cpu)
{
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
}
static inline bool cpc_ffh_supported(void)
{
@@ -210,27 +226,51 @@ static inline bool cpc_ffh_supported(void)
}
static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_epp(int cpu, u64 epp_val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ return -EOPNOTSUPP;
}
static inline int cppc_set_auto_sel(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ return -ENODEV;
+}
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ return -EOPNOTSUPP;
}
-static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
+static inline int amd_detect_prefcore(bool *detected)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index be1dd4c1a917..ebd21b05fe6e 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -35,9 +35,6 @@ struct ghes_estatus_node {
struct llist_node llnode;
struct acpi_hest_generic *generic;
struct ghes *ghes;
-
- int task_work_cpu;
- struct callback_head task_work;
};
struct ghes_estatus_cache {
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 9b373d172a77..9af3b502f839 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -12,10 +12,40 @@
struct pcc_mbox_chan {
struct mbox_chan *mchan;
u64 shmem_base_addr;
+ void __iomem *shmem;
u64 shmem_size;
u32 latency;
u32 max_access_rate;
u16 min_turnaround_time;
+
+ /* Set to true to indicate that the mailbox should manage
+	 * writing the data to the shared buffer. This differs from
+	 * the case where the drivers are writing to the buffer and
+ * using send_data only to ring the doorbell. If this flag
+ * is set, then the void * data parameter of send_data must
+ * point to a kernel-memory buffer formatted in accordance with
+ * the PCC specification.
+ *
+ * The active buffer management will include reading the
+ * notify_on_completion flag, and will then
+ * call mbox_chan_txdone when the acknowledgment interrupt is
+ * received.
+ */
+ bool manage_writes;
+
+ /* Optional callback that allows the driver
+ * to allocate the memory used for receiving
+ * messages. The return value is the location
+ * inside the buffer where the mailbox should write the data.
+ */
+ void *(*rx_alloc)(struct mbox_client *cl, int size);
+};
+
+struct pcc_header {
+ u32 signature;
+ u32 flags;
+ u32 length;
+ u32 command;
};
/* Generic Communications Channel Shared Memory Region */
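
When manage_writes is set, the client hands send_data a buffer laid out per the PCC specification, and the new struct pcc_header names that buffer's leading fields. A hedged sketch of composing such a command (the exact length accounting and flag bits are spec-defined; treat this as illustrative):

    #include <linux/string.h>

    /* Illustrative: build a PCC command buffer headed by pcc_header. */
    static void pcc_prepare_cmd(struct pcc_header *hdr, u32 signature,
                                u32 command, const void *payload, u32 len)
    {
            hdr->signature = signature;
            hdr->flags = 0;             /* e.g. notify-on-completion bit */
            hdr->length = sizeof(*hdr) + len;
            hdr->command = command;
            memcpy(hdr + 1, payload, len);
    }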
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 337ffa931ee8..a11fa83955f8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -252,6 +252,12 @@
#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
#endif
+/* NULL/invalid value to use for destroyed or not-yet-created semaphores. */
+
+#ifndef ACPI_SEMAPHORE_NULL
+#define ACPI_SEMAPHORE_NULL NULL
+#endif
+
/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
#ifndef ACPI_FLUSH_CPU_CACHE
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 7e67e3503f7b..8ffc4e1c87cf 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 04b4bf620517..8e4cf2f6b383 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,7 +3,7 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -72,4 +72,12 @@
TYPE NAME[]; \
}
+/*
+ * Explicitly mark strings that lack a terminating NUL character so
+ * that ACPICA can be built with -Wunterminated-string-initialization.
+ */
+#if __has_attribute(__nonstring__)
+#define ACPI_NONSTRING __attribute__((__nonstring__))
+#endif
+
#endif /* __ACGCC_H__ */
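
Usage is simply an annotation on arrays that intentionally drop the NUL; for example (illustrative, not from the patch):

    /* "DSDT" needs 5 bytes with a NUL; the annotation says the missing
     * terminator is deliberate. */
    static const char table_sig[4] ACPI_NONSTRING = "DSDT";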
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 7c9f10e9633a..4a3c019a4d03 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 565341c826e3..edbbc9061d1e 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -15,7 +15,7 @@
/* ACPICA external files should not include ACPICA headers directly. */
#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H)
-#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
+#error "Please do not include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
#endif
#endif
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 62cac266a1c8..73265650f46b 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -46,6 +46,9 @@ acpi_status acpi_os_terminate(void);
* Interrupts are off during resume, just like they are for boot.
* However, boot has (system_state != SYSTEM_RUNNING)
* to quiet __might_sleep() in kmalloc() and resume does not.
+ *
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
*/
#define acpi_os_allocate(_size) \
kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
@@ -53,14 +56,14 @@ acpi_status acpi_os_terminate(void);
#define acpi_os_allocate_zeroed(_size) \
kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
+#define acpi_os_acquire_object(_cache) \
+ kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
+
static inline void acpi_os_free(void *memory)
{
kfree(memory);
}
-#define acpi_os_acquire_object(_cache) \
- kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
-
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
return (acpi_thread_id) (unsigned long)current;
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
index 703db4dc740d..03d9a4a39c80 100644
--- a/include/acpi/platform/aczephyr.h
+++ b/include/acpi/platform/aczephyr.h
@@ -3,7 +3,7 @@
*
* Module Name: aczephyr.h - OS specific defines, etc.
*
- * Copyright (C) 2000 - 2023, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3f34ebb27525..d0eccbd920e5 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -217,7 +217,7 @@ struct acpi_processor_flags {
u8 has_lpi:1;
u8 power_setup_done:1;
u8 bm_rld_set:1;
- u8 need_hotplug_init:1;
+ u8 previously_online:1;
};
struct acpi_processor {
@@ -280,6 +280,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx,
struct acpi_power_register *reg);
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate);
+void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx);
#else
static inline void acpi_processor_power_init_bm_check(struct
acpi_processor_flags
@@ -300,6 +301,10 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
{
return;
}
+static inline void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx)
+{
+ BUG();
+}
#endif
static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
@@ -465,4 +470,6 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif
+void acpi_processor_init_invariance_cppc(void);
+
#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 3d538d4178ab..044c463138df 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -50,6 +50,7 @@ enum acpi_backlight_type {
acpi_backlight_native,
acpi_backlight_nvidia_wmi_ec,
acpi_backlight_apple_gmux,
+ acpi_backlight_dell_uart,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index b20fa25a7e8d..295c94a3ccc1 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -9,7 +9,6 @@ mandatory-y += archrandom.h
mandatory-y += barrier.h
mandatory-y += bitops.h
mandatory-y += bug.h
-mandatory-y += bugs.h
mandatory-y += cacheflush.h
mandatory-y += cfi.h
mandatory-y += checksum.h
@@ -46,6 +45,8 @@ mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += preempt.h
+mandatory-y += rqspinlock.h
+mandatory-y += runtime-const.h
mandatory-y += rwonce.h
mandatory-y += sections.h
mandatory-y += serial.h
@@ -58,7 +59,7 @@ mandatory-y += tlbflush.h
mandatory-y += topology.h
mandatory-y += trace_clock.h
mandatory-y += uaccess.h
-mandatory-y += unaligned.h
+mandatory-y += unwind_user.h
mandatory-y += vermagic.h
mandatory-y += vga.h
mandatory-y += video.h
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index 331670807cf0..cc840537885f 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -11,9 +11,15 @@ __NR_lchown,
__NR_fchown,
#endif
__NR_setxattr,
+#ifdef __NR_setxattrat
+__NR_setxattrat,
+#endif
__NR_lsetxattr,
__NR_fsetxattr,
__NR_removexattr,
+#ifdef __NR_removexattrat
+__NR_removexattrat,
+#endif
__NR_lremovexattr,
__NR_fremovexattr,
#ifdef __NR_fchownat
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
index 2d08c750c8a7..3a899c626fdc 100644
--- a/include/asm-generic/bitops/__ffs.h
+++ b/include/asm-generic/bitops/__ffs.h
@@ -10,7 +10,7 @@
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int generic___ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___ffs(unsigned long word)
{
unsigned int num = 0;
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
index e974ec932ec1..35f33780ca6c 100644
--- a/include/asm-generic/bitops/__fls.h
+++ b/include/asm-generic/bitops/__fls.h
@@ -10,7 +10,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int generic___fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word)
{
unsigned int num = BITS_PER_LONG - 1;
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
index cf4b3d33bf96..d3c3f567045d 100644
--- a/include/asm-generic/bitops/builtin-__ffs.h
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -8,7 +8,7 @@
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int __ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
index 6d72fc8a5259..7770c4f1bfcd 100644
--- a/include/asm-generic/bitops/builtin-__fls.h
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -8,7 +8,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned int __fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
index c8455cc28841..be707da8c7cd 100644
--- a/include/asm-generic/bitops/builtin-fls.h
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -9,7 +9,7 @@
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline __attribute_const__ int fls(unsigned int x)
{
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index 4c43f242daeb..5ff2b7fbda6d 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -10,7 +10,7 @@
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from ffz (man ffs).
*/
-static inline int generic_ffs(int x)
+static inline __attribute_const__ int generic_ffs(int x)
{
int r = 1;
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index 26f3ce1dd6e4..8eed3437edb9 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -10,7 +10,7 @@
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int generic_fls(unsigned int x)
+static __always_inline __attribute_const__ int generic_fls(unsigned int x)
{
int r = 32;
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 866f2b2304ff..b5f58dd261a3 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -16,7 +16,7 @@
* at position 64.
*/
#if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
@@ -24,7 +24,7 @@ static __always_inline int fls64(__u64 x)
return fls(x);
}
#elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
if (x == 0)
return 0;
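
The __attribute_const__ annotations added across these bitops declare that each helper's result depends only on its arguments, which lets the compiler fold duplicate calls. A standalone sketch of the effect (not kernel code; __attribute__((const)) is the underlying GCC attribute):

/* Standalone sketch: a const-attributed function can be CSE'd. */
#include <stdio.h>

__attribute__((const)) static int my_fls(unsigned int x)
{
	/* Same semantics as the kernel's generic_fls(): fls(0) = 0. */
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int v = 0x80000000u;

	/*
	 * Because my_fls() is declared const, the compiler may evaluate
	 * it once and reuse the result for both calls below.
	 */
	printf("%d %d\n", my_fls(v), my_fls(v));	/* prints "32 32" */
	return 0;
}
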
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 387720933973..09e8eccee8ed 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -13,10 +13,19 @@
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
+#define BUGFLAG_ARGS (1 << 4)
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
+#ifndef WARN_CONDITION_STR
+#ifdef CONFIG_DEBUG_BUGVERBOSE_DETAILED
+# define WARN_CONDITION_STR(cond_str) "[" cond_str "] "
+#else
+# define WARN_CONDITION_STR(cond_str)
+#endif
+#endif /* WARN_CONDITION_STR */
+
#ifndef __ASSEMBLY__
#include <linux/panic.h>
#include <linux/printk.h>
@@ -29,19 +38,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#ifdef CONFIG_BUG
-#ifdef CONFIG_GENERIC_BUG
-struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- unsigned long bug_addr;
+#define BUG_REL(type, name) type name
#else
- signed int bug_addr_disp;
+#define BUG_REL(type, name) signed int name##_disp
#endif
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- const char *file;
-#else
- signed int file_disp;
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+ BUG_REL(unsigned long, bug_addr);
+#ifdef HAVE_ARCH_BUG_FORMAT
+ BUG_REL(const char *, format);
#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ BUG_REL(const char *, file);
unsigned short line;
#endif
unsigned short flags;
@@ -92,28 +102,50 @@ void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#ifndef __WARN_FLAGS
-#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#ifdef __WARN_FLAGS
+#define __WARN() __WARN_FLAGS("", BUGFLAG_TAINT(TAINT_WARN))
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+#endif /* __WARN_FLAGS */
+
+#if defined(__WARN_FLAGS) && !defined(__WARN_printf)
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ __warn_printk(arg); \
+ __WARN_FLAGS("", BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
instrumentation_end(); \
} while (0)
-#else
-#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#endif
+
+#ifndef __WARN_printf
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- __warn_printk(arg); \
- __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
instrumentation_end(); \
} while (0)
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_FLAGS(BUGFLAG_ONCE | \
- BUGFLAG_TAINT(TAINT_WARN)); \
- unlikely(__ret_warn_on); \
-})
+#endif
+
+#ifndef __WARN
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
#endif
/* used internally by panic.c */
@@ -148,8 +180,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
+#ifndef WARN_ONCE
#define WARN_ONCE(condition, format...) \
DO_ONCE_LITE_IF(condition, WARN, 1, format)
+#endif
#define WARN_TAINT_ONCE(condition, taint, format...) \
DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
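
With CONFIG_DEBUG_BUGVERBOSE_DETAILED enabled, the stringified condition now travels into __WARN_FLAGS() and is prepended to the warning text via WARN_CONDITION_STR(). A minimal standalone sketch (GNU C) of that plumbing, with a toy __WARN_FLAGS() that prints instead of trapping; real architectures record the string in struct bug_entry via inline asm:

/*
 * Standalone sketch of the condition-string plumbing. All of the
 * printing below stands in for the arch trap machinery.
 */
#include <stdio.h>

#define CONFIG_DEBUG_BUGVERBOSE_DETAILED 1

#ifdef CONFIG_DEBUG_BUGVERBOSE_DETAILED
# define WARN_CONDITION_STR(cond_str) "[" cond_str "] "
#else
# define WARN_CONDITION_STR(cond_str)
#endif

/* cond_str arrives as a string literal, so it concatenates into the format */
#define __WARN_FLAGS(cond_str, flags) \
	printf("WARNING: " WARN_CONDITION_STR(cond_str) "%s:%d\n", \
	       __FILE__, __LINE__)

#define WARN_ON(condition) ({					\
	int __ret_warn_on = !!(condition);			\
	if (__ret_warn_on)					\
		__WARN_FLAGS(#condition, 0);			\
	__ret_warn_on;						\
})

int main(void)
{
	int x = 5;

	WARN_ON(x > 3);	/* prints: WARNING: [x > 3] <file>:<line> */
	return 0;
}
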
diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h
index 64f536b80380..a14f4bdafdda 100644
--- a/include/asm-generic/codetag.lds.h
+++ b/include/asm-generic/codetag.lds.h
@@ -2,6 +2,12 @@
#ifndef __ASM_GENERIC_CODETAG_LDS_H
#define __ASM_GENERIC_CODETAG_LDS_H
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__
+#else
+#define IF_MEM_ALLOC_PROFILING(...)
+#endif
+
#define SECTION_WITH_BOUNDARIES(_name) \
. = ALIGN(8); \
__start_##_name = .; \
@@ -9,6 +15,19 @@
__stop_##_name = .;
#define CODETAG_SECTIONS() \
- SECTION_WITH_BOUNDARIES(alloc_tags)
+ IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags))
+
+#define MOD_SEPARATE_CODETAG_SECTION(_name) \
+ .codetag.##_name : { \
+ SECTION_WITH_BOUNDARIES(_name) \
+ }
+
+/*
+ * For codetags which might be used after module unload and may therefore
+ * stay longer in memory. Each such codetag type has its own section so that
+ * we can unload them individually once unused.
+ */
+#define MOD_SEPARATE_CODETAG_SECTIONS() \
+ IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags))
#endif /* __ASM_GENERIC_CODETAG_LDS_H */
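
For reference, with CONFIG_MEM_ALLOC_PROFILING=y, MOD_SEPARATE_CODETAG_SECTIONS() expands to roughly the following linker-script fragment. The middle line of SECTION_WITH_BOUNDARIES() is elided in the hunk above; *(alloc_tags) is assumed here:

.codetag.alloc_tags : {
	. = ALIGN(8);
	__start_alloc_tags = .;
	*(alloc_tags)		/* assumed: collect alloc_tags input sections */
	__stop_alloc_tags = .;
}
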
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index e448ac61430c..03b0ec7afca6 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -2,6 +2,9 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
+#include <linux/math.h>
+#include <vdso/time64.h>
+
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
@@ -12,34 +15,73 @@ extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/*
- * The weird n/20000 thing suppresses a "comparison is always false due to
- * limited range of data type" warning with non-const 8-bit arguments.
+ * The microsecond/nanosecond delay multipliers are used to convert a
+ * constant microsecond/nanosecond value to a value which the
+ * architecture-specific implementation can transform into loops.
+ */
+#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
+#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))
+
+/*
+ * The maximum constant udelay/ndelay value picked out of thin air to prevent
+ * too long constant udelays/ndelays.
*/
+#define DELAY_CONST_MAX 20000
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_udelay(); \
- else \
- __const_udelay((n) * 0x10c7ul); \
- } else { \
- __udelay(n); \
- } \
- })
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_ndelay(); \
- else \
- __const_udelay((n) * 5ul); \
- } else { \
- __ndelay(n); \
- } \
- })
+/**
+ * udelay - Inserting a delay based on microseconds with busy waiting
+ * @usec: requested delay in microseconds
+ *
+ * When delaying in atomic context, ndelay(), udelay() and mdelay() are the
+ * only valid variants to use; sleeping is not possible.
+ *
+ * When inserting a delay in non-atomic context which is shorter than the time
+ * required to queue e.g. an hrtimer and then enter the scheduler, it is also
+ * valuable to use udelay(). But it is not simple to specify a generic
+ * threshold for this which fits all systems; as an approximation, udelay() is
+ * a reasonable choice for all delays up to 10 microseconds.
+ *
+ * For delays larger than the architecture-specific %MAX_UDELAY_MS value,
+ * please make sure mdelay() is used. Otherwise there is a risk of overflow.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for several
+ * reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56):
+ *
+ * #. computed loops_per_jiffy too low (due to the time taken to execute the
+ *    timer interrupt).
+ * #. cache behaviour affecting the time it takes to execute the loop function.
+ * #. CPU clock rate changes.
+ */
+static __always_inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec)) {
+ if (usec >= DELAY_CONST_MAX)
+ __bad_udelay();
+ else
+ __const_udelay(usec * UDELAY_CONST_MULT);
+ } else {
+ __udelay(usec);
+ }
+}
+
+/**
+ * ndelay - Inserting a delay based on nanoseconds with busy waiting
+ * @nsec: requested delay in nanoseconds
+ *
+ * See udelay() for basic information about ndelay() and its variants.
+ */
+static __always_inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec)) {
+ if (nsec >= DELAY_CONST_MAX)
+ __bad_ndelay();
+ else
+ __const_udelay(nsec * NDELAY_CONST_MULT);
+ } else {
+ __ndelay(nsec);
+ }
+}
+#define ndelay(x) ndelay(x)
#endif /* __ASM_GENERIC_DELAY_H */
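
A standalone sketch of the constant-folding path, with the two macros written out numerically: UDELAY_CONST_MULT is ceil(2^32 / 10^6) = 4295, so a constant udelay(10) compiles to __const_udelay(42950), while a constant argument of DELAY_CONST_MAX or more references the intentionally undefined __bad_udelay() and fails at link time. Compile as a translation unit (-c); the extern functions are provided by the architecture in the real kernel:

/* Standalone sketch of the build-time check behind udelay(). */
extern void __bad_udelay(void);	/* deliberately never defined */
extern void __const_udelay(unsigned long xloops);
extern void __udelay(unsigned long usec);

#define DELAY_CONST_MAX	20000
#define UDELAY_CONST_MULT \
	((unsigned long)(((1ULL << 32) + 1000000 - 1) / 1000000))	/* 4295 */

static inline void udelay_sketch(unsigned long usec)
{
	if (__builtin_constant_p(usec)) {
		if (usec >= DELAY_CONST_MAX)
			__bad_udelay();	/* link error for udelay_sketch(20000) */
		else
			__const_udelay(usec * UDELAY_CONST_MULT); /* 10 -> 42950 */
	} else {
		__udelay(usec);	/* runtime path for non-constant arguments */
	}
}
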
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index 13f5aa68a455..25e7b4b58dcf 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -74,7 +74,8 @@
* do the trick here). \
*/ \
uint64_t ___res, ___x, ___t, ___m, ___n = (n); \
- uint32_t ___p, ___bias; \
+ uint32_t ___p; \
+ bool ___bias = false; \
\
/* determine MSB of b */ \
___p = 1 << ilog2(___b); \
@@ -87,22 +88,14 @@
___x = ~0ULL / ___b * ___b - 1; \
\
/* test our ___m with res = m * x / (p << 64) */ \
- ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
- ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
- ___res += (___x & 0xffffffff) * (___m >> 32); \
- ___t = (___res < ___t) ? (1ULL << 32) : 0; \
- ___res = (___res >> 32) + ___t; \
- ___res += (___m >> 32) * (___x >> 32); \
- ___res /= ___p; \
+ ___res = (___m & 0xffffffff) * (___x & 0xffffffff); \
+ ___t = (___m & 0xffffffff) * (___x >> 32) + (___res >> 32); \
+ ___res = (___m >> 32) * (___x >> 32) + (___t >> 32); \
+ ___t = (___m >> 32) * (___x & 0xffffffff) + (___t & 0xffffffff);\
+ ___res = (___res + (___t >> 32)) / ___p; \
\
- /* Now sanitize and optimize what we've got. */ \
- if (~0ULL % (___b / (___b & -___b)) == 0) { \
- /* special case, can be simplified to ... */ \
- ___n /= (___b & -___b); \
- ___m = ~0ULL / (___b / (___b & -___b)); \
- ___p = 1; \
- ___bias = 1; \
- } else if (___res != ___x / ___b) { \
+ /* Now validate what we've got. */ \
+ if (___res != ___x / ___b) { \
/* \
* We can't get away without a bias to compensate \
* for bit truncation errors. To avoid it we'd need an \
@@ -111,45 +104,18 @@
* \
* Instead we do m = p / b and n / b = (n * m + m) / p. \
*/ \
- ___bias = 1; \
+ ___bias = true; \
/* Compute m = (p << 64) / b */ \
___m = (~0ULL / ___b) * ___p; \
___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
- } else { \
- /* \
- * Reduce m / p, and try to clear bit 31 of m when \
- * possible, otherwise that'll need extra overflow \
- * handling later. \
- */ \
- uint32_t ___bits = -(___m & -___m); \
- ___bits |= ___m >> 32; \
- ___bits = (~___bits) << 1; \
- /* \
- * If ___bits == 0 then setting bit 31 is unavoidable. \
- * Simply apply the maximum possible reduction in that \
- * case. Otherwise the MSB of ___bits indicates the \
- * best reduction we should apply. \
- */ \
- if (!___bits) { \
- ___p /= (___m & -___m); \
- ___m /= (___m & -___m); \
- } else { \
- ___p >>= ilog2(___bits); \
- ___m >>= ilog2(___bits); \
- } \
- /* No bias needed. */ \
- ___bias = 0; \
} \
\
+ /* Reduce m / p to help avoid overflow handling later. */ \
+ ___p /= (___m & -___m); \
+ ___m /= (___m & -___m); \
+ \
/* \
- * Now we have a combination of 2 conditions: \
- * \
- * 1) whether or not we need to apply a bias, and \
- * \
- * 2) whether or not there might be an overflow in the cross \
- * product determined by (___m & ((1 << 63) | (1 << 31))). \
- * \
- * Select the best way to do (m_bias + m * n) / (1 << 64). \
+ * Perform (m_bias + m * n) / (1 << 64). \
* From now on there will be actual runtime code generated. \
*/ \
___res = __arch_xprod_64(___m, ___n, ___bias); \
@@ -165,47 +131,42 @@
* Semantic: retval = ((bias ? m : 0) + m * n) >> 64
*
* The product is a 128-bit value, scaled down to 64 bits.
- * Assuming constant propagation to optimize away unused conditional code.
+ * Hoping for compile-time optimization of conditional code.
* Architectures may provide their own optimized assembly implementation.
*/
-static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
uint32_t m_lo = m;
uint32_t m_hi = m >> 32;
uint32_t n_lo = n;
uint32_t n_hi = n >> 32;
- uint64_t res;
- uint32_t res_lo, res_hi, tmp;
-
- if (!bias) {
- res = ((uint64_t)m_lo * n_lo) >> 32;
- } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res = (m + (uint64_t)m_lo * n_lo) >> 32;
+ uint64_t x, y;
+
+ /* Determine if overflow handling can be dispensed with. */
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
+
+ if (no_ovf) {
+ x = (uint64_t)m_lo * n_lo + (bias ? m : 0);
+ x >>= 32;
+ x += (uint64_t)m_lo * n_hi;
+ x += (uint64_t)m_hi * n_lo;
+ x >>= 32;
+ x += (uint64_t)m_hi * n_hi;
} else {
- res = m + (uint64_t)m_lo * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < m_hi);
- res = res_lo | ((uint64_t)res_hi << 32);
+ x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
+ y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
+ x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
+ y = (uint64_t)m_hi * n_lo + (uint32_t)y;
+ x += (uint32_t)(y >> 32);
}
- if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res += (uint64_t)m_lo * n_hi;
- res += (uint64_t)m_hi * n_lo;
- res >>= 32;
- } else {
- res += (uint64_t)m_lo * n_hi;
- tmp = res >> 32;
- res += (uint64_t)m_hi * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < tmp);
- res = res_lo | ((uint64_t)res_hi << 32);
- }
-
- res += (uint64_t)m_hi * n_hi;
-
- return res;
+ return x;
}
#endif
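
The reworked __arch_xprod_64() computes ((bias ? m : 0) + m * n) >> 64 with four 32x32->64 multiplies and explicit carry propagation. A standalone check of that arithmetic against a native 128-bit reference (unsigned __int128 is a gcc/clang extension):

/*
 * Standalone check of the cross-product scheme above, general
 * (overflow-handling) path only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t xprod_64(uint64_t m, uint64_t n, int bias)
{
	uint32_t m_lo = m, m_hi = m >> 32;
	uint32_t n_lo = n, n_hi = n >> 32;
	uint64_t x, y;

	/* Same carry chain as the kernel's general path. */
	x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
	y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
	x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
	y = (uint64_t)m_hi * n_lo + (uint32_t)y;
	x += (uint32_t)(y >> 32);
	return x;
}

int main(void)
{
	uint64_t m = 0x8000000080000001ULL, n = 0xfffffffffffffffdULL;
	unsigned __int128 ref = ((unsigned __int128)m * n + m) >> 64;

	printf("%d\n", xprod_64(m, n, 1) == (uint64_t)ref);	/* prints 1 */
	return 0;
}
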
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9d0479f50f97..5db59a1efb65 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -35,7 +35,7 @@ extern void early_ioremap_reset(void);
/*
* Early copy from unmapped memory to kernel mapped memory.
*/
-extern void copy_from_early_mem(void *dest, phys_addr_t src,
+extern int copy_from_early_mem(void *dest, phys_addr_t src,
unsigned long size);
#else
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 8cc7b09c1bc7..29cab7947980 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -97,8 +97,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
-#define set_fixmap_offset_io(idx, phys) \
- __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/fprobe.h b/include/asm-generic/fprobe.h
new file mode 100644
index 000000000000..8659a4dc6eb6
--- /dev/null
+++ b/include/asm-generic/fprobe.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic arch dependent fprobe macros.
+ */
+#ifndef __ASM_GENERIC_FPROBE_H__
+#define __ASM_GENERIC_FPROBE_H__
+
+#include <linux/bits.h>
+
+#ifdef CONFIG_64BIT
+/*
+ * Encode the size and the address of an fprobe into one 64-bit entry.
+ * 32-bit architectures should use two entries to store this information.
+ */
+
+#define ARCH_DEFINE_ENCODE_FPROBE_HEADER
+
+#define FPROBE_HEADER_MSB_SIZE_SHIFT (BITS_PER_LONG - FPROBE_DATA_SIZE_BITS)
+#define FPROBE_HEADER_MSB_MASK \
+ GENMASK(FPROBE_HEADER_MSB_SIZE_SHIFT - 1, 0)
+
+/*
+ * By default, this expects the MSBs of the kprobe address to be 0xf.
+ * If an arch needs another fixed pattern (e.g. s390 is zero filled),
+ * override this.
+ */
+#define FPROBE_HEADER_MSB_PATTERN \
+ GENMASK(BITS_PER_LONG - 1, FPROBE_HEADER_MSB_SIZE_SHIFT)
+
+#define arch_fprobe_header_encodable(fp) \
+ (((unsigned long)(fp) & ~FPROBE_HEADER_MSB_MASK) == \
+ FPROBE_HEADER_MSB_PATTERN)
+
+#define arch_encode_fprobe_header(fp, size) \
+ (((unsigned long)(fp) & FPROBE_HEADER_MSB_MASK) | \
+ ((unsigned long)(size) << FPROBE_HEADER_MSB_SIZE_SHIFT))
+
+#define arch_decode_fprobe_header_size(val) \
+ ((unsigned long)(val) >> FPROBE_HEADER_MSB_SIZE_SHIFT)
+
+#define arch_decode_fprobe_header_fp(val) \
+ ((struct fprobe *)(((unsigned long)(val) & FPROBE_HEADER_MSB_MASK) | \
+ FPROBE_HEADER_MSB_PATTERN))
+#endif /* CONFIG_64BIT */
+
+#endif /* __ASM_GENERIC_FPROBE_H__ */
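
A standalone sketch of the encode/decode round trip, assuming FPROBE_DATA_SIZE_BITS is 8 (the real value is defined by the fprobe core and may differ). The trick relies on kernel pointers having a fixed all-ones top-bit pattern on typical 64-bit architectures:

/* Standalone sketch; SIZE_BITS == 8 is an assumption for illustration. */
#include <stdint.h>
#include <stdio.h>

#define SIZE_BITS	8
#define SHIFT		(64 - SIZE_BITS)
#define MSB_MASK	((1ULL << SHIFT) - 1)	/* low 56 bits */
#define MSB_PATTERN	(~MSB_MASK)		/* top 8 bits all set */

int main(void)
{
	uint64_t fp = 0xffffffff12345678ULL;	/* kernel-style pointer */
	uint64_t size = 0x28;

	/* Encodable only if the pointer's MSBs match the fixed pattern. */
	if ((fp & ~MSB_MASK) != MSB_PATTERN)
		return 1;

	uint64_t val = (fp & MSB_MASK) | (size << SHIFT);

	/* Decoding recovers both fields: size=0x28 fp=0xffffffff12345678 */
	printf("size=%#llx fp=%#llx\n",
	       (unsigned long long)(val >> SHIFT),
	       (unsigned long long)((val & MSB_MASK) | MSB_PATTERN));
	return 0;
}
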
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 6dcf4d576970..e1a2e1b7c8e7 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -5,11 +5,6 @@
#include <linux/swap.h>
#include <linux/swapops.h>
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
-{
- return mk_pte(page, pgprot);
-}
-
static inline unsigned long huge_pte_write(pte_t pte)
{
return pte_write(pte);
@@ -42,20 +37,26 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+#ifndef __HAVE_ARCH_HUGE_PTE_MKUFFD_WP
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
return huge_pte_wrprotect(pte_mkuffd_wp(pte));
}
+#endif
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP
static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
return pte_clear_uffd_wp(pte);
}
+#endif
+#ifndef __HAVE_ARCH_HUGE_PTE_UFFD_WP
static inline int huge_pte_uffd_wp(pte_t pte)
{
return pte_uffd_wp(pte);
}
+#endif
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
@@ -65,15 +66,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
}
#endif
-#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz)
@@ -84,7 +76,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep, unsigned long sz)
{
return ptep_get_and_clear(mm, addr, ptep);
}
@@ -105,27 +97,6 @@ static inline int huge_pte_none(pte_t pte)
}
#endif
-/* Please refer to comments above pte_none_mostly() for the usage */
-static inline int huge_pte_none_mostly(pte_t pte)
-{
- return huge_pte_none(pte) || is_pte_marker(pte);
-}
-
-#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
-
- return 0;
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -144,7 +115,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return ptep_get(ptep);
}
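
The new __HAVE_ARCH_HUGE_PTE_*UFFD_WP guards follow this header's usual override pattern: an architecture defines the guard macro and supplies its own helper, and the generic fallback drops out. A standalone sketch with toy types (all names and bit layouts here are illustrative, not the real pte encoding):

/* Standalone sketch of the __HAVE_ARCH_* override pattern. */
#include <stdio.h>

/* --- pretend arch header --- */
#define __HAVE_ARCH_HUGE_PTE_UFFD_WP
static inline int huge_pte_uffd_wp(unsigned long pte)
{
	return (pte >> 1) & 1;	/* this "arch" keeps the flag in bit 1 */
}

/* --- pretend generic header --- */
#ifndef __HAVE_ARCH_HUGE_PTE_UFFD_WP
static inline int huge_pte_uffd_wp(unsigned long pte)
{
	return pte & 1;		/* generic fallback: flag in bit 0 */
}
#endif

int main(void)
{
	printf("%d\n", huge_pte_uffd_wp(0x2));	/* 1: arch version won */
	return 0;
}
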
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
deleted file mode 100644
index 814207e7c37f..000000000000
--- a/include/asm-generic/hyperv-tlfs.h
+++ /dev/null
@@ -1,874 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
- * Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- */
-
-#ifndef _ASM_GENERIC_HYPERV_TLFS_H
-#define _ASM_GENERIC_HYPERV_TLFS_H
-
-#include <linux/types.h>
-#include <linux/bits.h>
-#include <linux/time64.h>
-
-/*
- * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
- * of 4096. These definitions are used when communicating with Hyper-V using
- * guest physical pages and guest physical page addresses, since the guest page
- * size may not be 4096 on all architectures.
- */
-#define HV_HYP_PAGE_SHIFT 12
-#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
-#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
-
-/*
- * Hyper-V provides two categories of flags relevant to guest VMs. The
- * "Features" category indicates specific functionality that is available
- * to guests on this particular instance of Hyper-V. The "Features"
- * are presented in four groups, each of which is 32 bits. The group A
- * and B definitions are common across architectures and are listed here.
- * However, not all flags are relevant on all architectures.
- *
- * Groups C and D vary across architectures and are listed in the
- * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
- * on multiple architectures, but the bit positions are different so they
- * cannot appear in the generic portion of hyperv-tlfs.h.
- *
- * The "Enlightenments" category provides recommendations on whether to use
- * specific enlightenments that are available. The Enlighenments are a single
- * group of 32 bits, but they vary across architectures and are listed in
- * the architecture specific portion of hyperv-tlfs.h.
- */
-
-/*
- * Group A Features.
- */
-
-/* VP Runtime register available */
-#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
-/* Partition Reference Counter available*/
-#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
-/* Basic SynIC register available */
-#define HV_MSR_SYNIC_AVAILABLE BIT(2)
-/* Synthetic Timer registers available */
-#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
-/* Virtual APIC assist and VP assist page registers available */
-#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
-/* Hypercall and Guest OS ID registers available*/
-#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
-/* Access virtual processor index register available*/
-#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
-/* Virtual system reset register available*/
-#define HV_MSR_RESET_AVAILABLE BIT(7)
-/* Access statistics page registers available */
-#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
-/* Partition reference TSC register is available */
-#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
-/* Partition Guest IDLE register is available */
-#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
-/* Partition local APIC and TSC frequency registers available */
-#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
-/* AccessReenlightenmentControls privilege */
-#define HV_ACCESS_REENLIGHTENMENT BIT(13)
-/* AccessTscInvariantControls privilege */
-#define HV_ACCESS_TSC_INVARIANT BIT(15)
-
-/*
- * Group B features.
- */
-#define HV_CREATE_PARTITIONS BIT(0)
-#define HV_ACCESS_PARTITION_ID BIT(1)
-#define HV_ACCESS_MEMORY_POOL BIT(2)
-#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
-#define HV_POST_MESSAGES BIT(4)
-#define HV_SIGNAL_EVENTS BIT(5)
-#define HV_CREATE_PORT BIT(6)
-#define HV_CONNECT_PORT BIT(7)
-#define HV_ACCESS_STATS BIT(8)
-#define HV_DEBUGGING BIT(11)
-#define HV_CPU_MANAGEMENT BIT(12)
-#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
-#define HV_ISOLATION BIT(22)
-
-/*
- * TSC page layout.
- */
-struct ms_hyperv_tsc_page {
- volatile u32 tsc_sequence;
- u32 reserved1;
- volatile u64 tsc_scale;
- volatile s64 tsc_offset;
-} __packed;
-
-union hv_reference_tsc_msr {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:11;
- u64 pfn:52;
- } __packed;
-};
-
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how Linux guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * Linux and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100
- * 55:48 - Distro specific identification
- * 47:16 - Linux kernel version number
- * 15:0 - Distro specific identification
- *
- *
- */
-
-#define HV_LINUX_VENDOR_ID 0x8100
-
-/*
- * Crash notification flags.
- */
-#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
-#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
-
-/* Declare the various hypercall operations. */
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_ENABLE_VP_VTL 0x000f
-#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
-#define HVCALL_SEND_IPI 0x000b
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
-#define HVCALL_SEND_IPI_EX 0x0015
-#define HVCALL_GET_PARTITION_ID 0x0046
-#define HVCALL_DEPOSIT_MEMORY 0x0048
-#define HVCALL_CREATE_VP 0x004e
-#define HVCALL_GET_VP_REGISTERS 0x0050
-#define HVCALL_SET_VP_REGISTERS 0x0051
-#define HVCALL_POST_MESSAGE 0x005c
-#define HVCALL_SIGNAL_EVENT 0x005d
-#define HVCALL_POST_DEBUG_DATA 0x0069
-#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
-#define HVCALL_RESET_DEBUG_SESSION 0x006b
-#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
-#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
-#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
-#define HVCALL_RETARGET_INTERRUPT 0x007e
-#define HVCALL_START_VP 0x0099
-#define HVCALL_GET_VP_ID_FROM_APIC_ID 0x009a
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
-#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
-#define HVCALL_MMIO_READ 0x0106
-#define HVCALL_MMIO_WRITE 0x0107
-
-/* Extended hypercalls */
-#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
-#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
-
-#define HV_FLUSH_ALL_PROCESSORS BIT(0)
-#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
-#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
-#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
-
-/* Extended capability bits */
-#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
-
-enum HV_GENERIC_SET_FORMAT {
- HV_GENERIC_SET_SPARSE_4K,
- HV_GENERIC_SET_ALL,
-};
-
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HV_VP_INDEX_SELF ((u32)-2)
-
-#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
-#define HV_HYPERCALL_FAST_BIT BIT(16)
-#define HV_HYPERCALL_VARHEAD_OFFSET 17
-#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
-#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
-#define HV_HYPERCALL_NESTED BIT_ULL(31)
-#define HV_HYPERCALL_REP_COMP_OFFSET 32
-#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
-#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
-#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
-#define HV_HYPERCALL_REP_START_OFFSET 48
-#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
-#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
-#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
- HV_HYPERCALL_RSVD1_MASK | \
- HV_HYPERCALL_RSVD2_MASK)
-
-/* hypercall status code */
-#define HV_STATUS_SUCCESS 0
-#define HV_STATUS_INVALID_HYPERCALL_CODE 2
-#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
-#define HV_STATUS_INVALID_ALIGNMENT 4
-#define HV_STATUS_INVALID_PARAMETER 5
-#define HV_STATUS_ACCESS_DENIED 6
-#define HV_STATUS_OPERATION_DENIED 8
-#define HV_STATUS_INSUFFICIENT_MEMORY 11
-#define HV_STATUS_INVALID_PORT_ID 17
-#define HV_STATUS_INVALID_CONNECTION_ID 18
-#define HV_STATUS_INSUFFICIENT_BUFFERS 19
-#define HV_STATUS_TIME_OUT 120
-#define HV_STATUS_VTL_ALREADY_ENABLED 134
-
-/*
- * The Hyper-V TimeRefCount register and the TSC
- * page provide a guest VM clock with 100ns tick rate
- */
-#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
-
-/* Define the number of synthetic interrupt sources. */
-#define HV_SYNIC_SINT_COUNT (16)
-/* Define the expected SynIC version. */
-#define HV_SYNIC_VERSION_1 (0x1)
-/* Valid SynIC vectors are 16-255. */
-#define HV_SYNIC_FIRST_VALID_VECTOR (16)
-
-#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SINT_MASKED (1ULL << 16)
-#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
-#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
-
-#define HV_SYNIC_STIMER_COUNT (4)
-
-/* Define synthetic interrupt controller message constants. */
-#define HV_MESSAGE_SIZE (256)
-#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
-#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
-
-/*
- * Define hypervisor message types. Some of the message types
- * are x86/x64 specific, but there's no good way to separate
- * them out into the arch-specific version of hyperv-tlfs.h
- * because C doesn't provide a way to extend enum types.
- * Keeping them all in the arch neutral hyperv-tlfs.h seems
- * the least messy compromise.
- */
-enum hv_message_type {
- HVMSG_NONE = 0x00000000,
-
- /* Memory access messages. */
- HVMSG_UNMAPPED_GPA = 0x80000000,
- HVMSG_GPA_INTERCEPT = 0x80000001,
-
- /* Timer notification messages. */
- HVMSG_TIMER_EXPIRED = 0x80000010,
-
- /* Error messages. */
- HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
- HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
- HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
-
- /* Trace buffer complete messages. */
- HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
-
- /* Platform-specific processor intercept messages. */
- HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
- HVMSG_X64_MSR_INTERCEPT = 0x80010001,
- HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
- HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
- HVMSG_X64_APIC_EOI = 0x80010004,
- HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
-};
-
-/* Define synthetic interrupt controller message flags. */
-union hv_message_flags {
- __u8 asu8;
- struct {
- __u8 msg_pending:1;
- __u8 reserved:7;
- } __packed;
-};
-
-/* Define port identifier type. */
-union hv_port_id {
- __u32 asu32;
- struct {
- __u32 id:24;
- __u32 reserved:8;
- } __packed u;
-};
-
-/* Define synthetic interrupt controller message header. */
-struct hv_message_header {
- __u32 message_type;
- __u8 payload_size;
- union hv_message_flags message_flags;
- __u8 reserved[2];
- union {
- __u64 sender;
- union hv_port_id port;
- };
-} __packed;
-
-/* Define synthetic interrupt controller message format. */
-struct hv_message {
- struct hv_message_header header;
- union {
- __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
- } u;
-} __packed;
-
-/* Define the synthetic interrupt message page layout. */
-struct hv_message_page {
- struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
-} __packed;
-
-/* Define timer message payload structure. */
-struct hv_timer_message_payload {
- __u32 timer_index;
- __u32 reserved;
- __u64 expiration_time; /* When the timer expired */
- __u64 delivery_time; /* When the message was delivered */
-} __packed;
-
-
-/* Define synthetic interrupt controller flag constants. */
-#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
-
-/*
- * Synthetic timer configuration.
- */
-union hv_stimer_config {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 periodic:1;
- u64 lazy:1;
- u64 auto_enable:1;
- u64 apic_vector:8;
- u64 direct_mode:1;
- u64 reserved_z0:3;
- u64 sintx:4;
- u64 reserved_z1:44;
- } __packed;
-};
-
-
-/* Define the synthetic interrupt controller event flags format. */
-union hv_synic_event_flags {
- unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
-};
-
-/* Define SynIC control register. */
-union hv_synic_scontrol {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:63;
- } __packed;
-};
-
-/* Define synthetic interrupt source. */
-union hv_synic_sint {
- u64 as_uint64;
- struct {
- u64 vector:8;
- u64 reserved1:8;
- u64 masked:1;
- u64 auto_eoi:1;
- u64 polling:1;
- u64 reserved2:45;
- } __packed;
-};
-
-/* Define the format of the SIMP register */
-union hv_synic_simp {
- u64 as_uint64;
- struct {
- u64 simp_enabled:1;
- u64 preserved:11;
- u64 base_simp_gpa:52;
- } __packed;
-};
-
-/* Define the format of the SIEFP register */
-union hv_synic_siefp {
- u64 as_uint64;
- struct {
- u64 siefp_enabled:1;
- u64 preserved:11;
- u64 base_siefp_gpa:52;
- } __packed;
-};
-
-struct hv_vpset {
- u64 format;
- u64 valid_bank_mask;
- u64 bank_contents[];
-} __packed;
-
-/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
-#define HV_MAX_SPARSE_VCPU_BANKS (64)
-/* The number of vCPUs in one sparse bank */
-#define HV_VCPUS_PER_SPARSE_BANK (64)
-
-/* HvCallSendSyntheticClusterIpi hypercall */
-struct hv_send_ipi {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-} __packed;
-
-/* HvCallSendSyntheticClusterIpiEx hypercall */
-struct hv_send_ipi_ex {
- u32 vector;
- u32 reserved;
- struct hv_vpset vp_set;
-} __packed;
-
-/* HvFlushGuestPhysicalAddressSpace hypercalls */
-struct hv_guest_mapping_flush {
- u64 address_space;
- u64 flags;
-} __packed;
-
-/*
- * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
- * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
- */
-#define HV_MAX_FLUSH_PAGES (2048)
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
-
-/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
-union hv_gpa_page_range {
- u64 address_space;
- struct {
- u64 additional_pages:11;
- u64 largepage:1;
- u64 basepfn:52;
- } page;
- struct {
- u64 reserved:12;
- u64 page_size:1;
- u64 reserved1:8;
- u64 base_large_pfn:43;
- };
-};
-
-/*
- * All input flush parameters should be in single page. The max flush
- * count is equal with how many entries of union hv_gpa_page_range can
- * be populated into the input parameter page.
- */
-#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
- sizeof(union hv_gpa_page_range))
-
-struct hv_guest_mapping_flush_list {
- u64 address_space;
- u64 flags;
- union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
-};
-
-/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
-struct hv_tlb_flush {
- u64 address_space;
- u64 flags;
- u64 processor_mask;
- u64 gva_list[];
-} __packed;
-
-/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
-struct hv_tlb_flush_ex {
- u64 address_space;
- u64 flags;
- struct hv_vpset hv_vp_set;
- u64 gva_list[];
-} __packed;
-
-/* HvGetPartitionId hypercall (output only) */
-struct hv_get_partition_id {
- u64 partition_id;
-} __packed;
-
-/* HvDepositMemory hypercall */
-struct hv_deposit_memory {
- u64 partition_id;
- u64 gpa_page_list[];
-} __packed;
-
-struct hv_proximity_domain_flags {
- u32 proximity_preferred : 1;
- u32 reserved : 30;
- u32 proximity_info_valid : 1;
-} __packed;
-
-struct hv_proximity_domain_info {
- u32 domain_id;
- struct hv_proximity_domain_flags flags;
-} __packed;
-
-struct hv_lp_startup_status {
- u64 hv_status;
- u64 substatus1;
- u64 substatus2;
- u64 substatus3;
- u64 substatus4;
- u64 substatus5;
- u64 substatus6;
-} __packed;
-
-/* HvAddLogicalProcessor hypercall */
-struct hv_input_add_logical_processor {
- u32 lp_index;
- u32 apic_id;
- struct hv_proximity_domain_info proximity_domain_info;
-} __packed;
-
-struct hv_output_add_logical_processor {
- struct hv_lp_startup_status startup_status;
-} __packed;
-
-enum HV_SUBNODE_TYPE
-{
- HvSubnodeAny = 0,
- HvSubnodeSocket = 1,
- HvSubnodeAmdNode = 2,
- HvSubnodeL3 = 3,
- HvSubnodeCount = 4,
- HvSubnodeInvalid = -1
-};
-
-/* HvCreateVp hypercall */
-struct hv_create_vp {
- u64 partition_id;
- u32 vp_index;
- u8 padding[3];
- u8 subnode_type;
- u64 subnode_id;
- struct hv_proximity_domain_info proximity_domain_info;
- u64 flags;
-} __packed;
-
-enum hv_interrupt_source {
- HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
- HV_INTERRUPT_SOURCE_IOAPIC,
-};
-
-union hv_ioapic_rte {
- u64 as_uint64;
-
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 destination_mode:1;
- u32 delivery_status:1;
- u32 interrupt_polarity:1;
- u32 remote_irr:1;
- u32 trigger_mode:1;
- u32 interrupt_mask:1;
- u32 reserved1:15;
-
- u32 reserved2:24;
- u32 destination_id:8;
- };
-
- struct {
- u32 low_uint32;
- u32 high_uint32;
- };
-} __packed;
-
-struct hv_interrupt_entry {
- u32 source;
- u32 reserved1;
- union {
- union hv_msi_entry msi_entry;
- union hv_ioapic_rte ioapic_rte;
- };
-} __packed;
-
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
-
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vpset vp_set;
- };
-} __packed;
-
-struct hv_retarget_device_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
-} __packed __aligned(8);
-
-/*
- * These Hyper-V registers provide information equivalent to the CPUID
- * instruction on x86/x64.
- */
-#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
-#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
-#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
-
-/*
- * Synthetic register definitions equivalent to MSRs on x86/x64
- */
-#define HV_REGISTER_GUEST_CRASH_P0 0x00000210
-#define HV_REGISTER_GUEST_CRASH_P1 0x00000211
-#define HV_REGISTER_GUEST_CRASH_P2 0x00000212
-#define HV_REGISTER_GUEST_CRASH_P3 0x00000213
-#define HV_REGISTER_GUEST_CRASH_P4 0x00000214
-#define HV_REGISTER_GUEST_CRASH_CTL 0x00000215
-
-#define HV_REGISTER_GUEST_OS_ID 0x00090002
-#define HV_REGISTER_VP_INDEX 0x00090003
-#define HV_REGISTER_TIME_REF_COUNT 0x00090004
-#define HV_REGISTER_REFERENCE_TSC 0x00090017
-
-#define HV_REGISTER_SINT0 0x000A0000
-#define HV_REGISTER_SCONTROL 0x000A0010
-#define HV_REGISTER_SIEFP 0x000A0012
-#define HV_REGISTER_SIMP 0x000A0013
-#define HV_REGISTER_EOM 0x000A0014
-
-#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
-#define HV_REGISTER_STIMER0_COUNT 0x000B0001
-
-/* HvGetVpRegisters hypercall input with variable size reg name list*/
-struct hv_get_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct input {
- u32 name0;
- u32 name1;
- } element[];
-} __packed;
-
-/* HvGetVpRegisters returns an array of these output elements */
-struct hv_get_vp_registers_output {
- union {
- struct {
- u32 a;
- u32 b;
- u32 c;
- u32 d;
- } as32 __packed;
- struct {
- u64 low;
- u64 high;
- } as64 __packed;
- };
-};
-
-/* HvSetVpRegisters hypercall with variable size reg name/value list*/
-struct hv_set_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct {
- u32 name;
- u32 padding1;
- u64 padding2;
- u64 valuelow;
- u64 valuehigh;
- } element[];
-} __packed;
-
-enum hv_device_type {
- HV_DEVICE_TYPE_LOGICAL = 0,
- HV_DEVICE_TYPE_PCI = 1,
- HV_DEVICE_TYPE_IOAPIC = 2,
- HV_DEVICE_TYPE_ACPI = 3,
-};
-
-typedef u16 hv_pci_rid;
-typedef u16 hv_pci_segment;
-typedef u64 hv_logical_device_id;
-union hv_pci_bdf {
- u16 as_uint16;
-
- struct {
- u8 function:3;
- u8 device:5;
- u8 bus;
- };
-} __packed;
-
-union hv_pci_bus_range {
- u16 as_uint16;
-
- struct {
- u8 subordinate_bus;
- u8 secondary_bus;
- };
-} __packed;
-
-union hv_device_id {
- u64 as_uint64;
-
- struct {
- u64 reserved0:62;
- u64 device_type:2;
- };
-
- /* HV_DEVICE_TYPE_LOGICAL */
- struct {
- u64 id:62;
- u64 device_type:2;
- } logical;
-
- /* HV_DEVICE_TYPE_PCI */
- struct {
- union {
- hv_pci_rid rid;
- union hv_pci_bdf bdf;
- };
-
- hv_pci_segment segment;
- union hv_pci_bus_range shadow_bus_range;
-
- u16 phantom_function_bits:2;
- u16 source_shadow:1;
-
- u16 rsvdz0:11;
- u16 device_type:2;
- } pci;
-
- /* HV_DEVICE_TYPE_IOAPIC */
- struct {
- u8 ioapic_id;
- u8 rsvdz0;
- u16 rsvdz1;
- u16 rsvdz2;
-
- u16 rsvdz3:14;
- u16 device_type:2;
- } ioapic;
-
- /* HV_DEVICE_TYPE_ACPI */
- struct {
- u32 input_mapping_base;
- u32 input_mapping_count:30;
- u32 device_type:2;
- } acpi;
-} __packed;
-
-enum hv_interrupt_trigger_mode {
- HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
- HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
-};
-
-struct hv_device_interrupt_descriptor {
- u32 interrupt_type;
- u32 trigger_mode;
- u32 vector_count;
- u32 reserved;
- struct hv_device_interrupt_target target;
-} __packed;
-
-struct hv_input_map_device_interrupt {
- u64 partition_id;
- u64 device_id;
- u64 flags;
- struct hv_interrupt_entry logical_interrupt_entry;
- struct hv_device_interrupt_descriptor interrupt_descriptor;
-} __packed;
-
-struct hv_output_map_device_interrupt {
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-struct hv_input_unmap_device_interrupt {
- u64 partition_id;
- u64 device_id;
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-#define HV_SOURCE_SHADOW_NONE 0x0
-#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
-
-/*
- * Version info reported by hypervisor
- */
-union hv_hypervisor_version_info {
- struct {
- u32 build_number;
-
- u32 minor_version : 16;
- u32 major_version : 16;
-
- u32 service_pack;
-
- u32 service_number : 24;
- u32 service_branch : 8;
- };
- struct {
- u32 eax;
- u32 ebx;
- u32 ecx;
- u32 edx;
- };
-};
-
-/*
- * The whole argument should fit in a page to be able to pass to the hypervisor
- * in one hypercall.
- */
-#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
- ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
- sizeof(union hv_gpa_page_range))
-
-/* HvExtCallMemoryHeatHint hypercall */
-#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
-struct hv_memory_hint {
- u64 type:2;
- u64 reserved:62;
- union hv_gpa_page_range ranges[];
-} __packed;
-
-/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
-#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
-
-struct hv_mmio_read_input {
- u64 gpa;
- u32 size;
- u32 reserved;
-} __packed;
-
-struct hv_mmio_read_output {
- u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
-} __packed;
-
-struct hv_mmio_write_input {
- u64 gpa;
- u32 size;
- u32 reserved;
- u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
-} __packed;
-
-#endif
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 80de699bf6af..ca5a1ce6f0f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -75,6 +75,7 @@
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
@@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
#else
+#define rwmmio_tracepoint_enabled(tracepoint) false
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
@@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
val = __raw_readb(addr);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__raw_writeb(value, addr);
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__raw_writew((u16 __force)cpu_to_le16(value), addr);
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
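
The repeated pattern above wraps every log call in rwmmio_tracepoint_enabled(), so the out-of-line logging helpers are only called while the tracepoint is live, and disappear entirely when the stub expands to false. A standalone sketch with a plain flag standing in for the kernel's static-key test:

/* Standalone sketch of the tracepoint gating; the flag mimics a static key. */
#include <stdio.h>
#include <stdbool.h>

static bool rwmmio_read_enabled;	/* stand-in for the static key */

#define rwmmio_tracepoint_enabled(tp)	(tp##_enabled)

static void log_read_mmio(int width, const volatile void *addr)
{
	printf("read%d at %p\n", width, (void *)addr);
}

static unsigned char my_readb(const volatile unsigned char *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(8, addr);
	return *addr;
}

int main(void)
{
	unsigned char reg = 0x5a;

	my_readb(&reg);			/* silent: tracing disabled */
	rwmmio_read_enabled = true;
	my_readb(&reg);			/* logs the access */
	return 0;
}
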
@@ -540,6 +574,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#if !defined(inb) && !defined(_inb)
#define _inb _inb
+#ifdef CONFIG_HAS_IOPORT
static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -549,10 +584,15 @@ static inline u8 _inb(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u8 _inb(unsigned long addr)
+	__compiletime_error("inb() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inw) && !defined(_inw)
#define _inw _inw
+#ifdef CONFIG_HAS_IOPORT
static inline u16 _inw(unsigned long addr)
{
u16 val;
@@ -562,10 +602,15 @@ static inline u16 _inw(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u16 _inw(unsigned long addr)
+ __compiletime_error("inw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inl) && !defined(_inl)
#define _inl _inl
+#ifdef CONFIG_HAS_IOPORT
static inline u32 _inl(unsigned long addr)
{
u32 val;
@@ -575,36 +620,55 @@ static inline u32 _inl(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u32 _inl(unsigned long addr)
+ __compiletime_error("inl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outb) && !defined(_outb)
#define _outb _outb
+#ifdef CONFIG_HAS_IOPORT
static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outb(u8 value, unsigned long addr)
+ __compiletime_error("outb() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outw) && !defined(_outw)
#define _outw _outw
+#ifdef CONFIG_HAS_IOPORT
static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outw(u16 value, unsigned long addr)
+ __compiletime_error("outw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outl) && !defined(_outl)
#define _outl _outl
+#ifdef CONFIG_HAS_IOPORT
static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outl(u32 value, unsigned long addr)
+ __compiletime_error("outl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#include <linux/logic_pio.h>
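
When CONFIG_HAS_IOPORT is unset, the port helpers become declarations carrying __compiletime_error(), so any call that survives optimization breaks the build instead of faulting at run time. A standalone sketch of the mechanism (gcc/clang; compile with -O2 so dead calls are eliminated):

/* Standalone sketch of the __compiletime_error() mechanism. */
#define __compiletime_error(msg) __attribute__((__error__(msg)))

unsigned char _inb(unsigned long addr)
	__compiletime_error("inb() requires CONFIG_HAS_IOPORT");

int main(void)
{
	if (0)
		_inb(0x60);	/* dead code: optimized out, no error */
	/* _inb(0x60); */	/* uncomment: build fails with the message */
	return 0;
}
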
@@ -688,53 +752,83 @@ static inline void outl_p(u32 value, unsigned long addr)
#ifndef insb
#define insb insb
+#ifdef CONFIG_HAS_IOPORT
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
readsb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insb(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insb() requires HAS_IOPORT");
+#endif
#endif
#ifndef insw
#define insw insw
+#ifdef CONFIG_HAS_IOPORT
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
readsw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insw(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insw() requires HAS_IOPORT");
+#endif
#endif
#ifndef insl
#define insl insl
+#ifdef CONFIG_HAS_IOPORT
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
readsl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insl(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insl() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsb
#define outsb outsb
+#ifdef CONFIG_HAS_IOPORT
static inline void outsb(unsigned long addr, const void *buffer,
unsigned int count)
{
writesb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsb(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsb() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsw
#define outsw outsw
+#ifdef CONFIG_HAS_IOPORT
static inline void outsw(unsigned long addr, const void *buffer,
unsigned int count)
{
writesw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsw(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsw() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsl
#define outsl outsl
+#ifdef CONFIG_HAS_IOPORT
static inline void outsl(unsigned long addr, const void *buffer,
unsigned int count)
{
writesl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsl(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsl() requires HAS_IOPORT");
+#endif
#endif
#ifndef insb_p
@@ -1051,7 +1145,7 @@ void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
pgprot_t prot);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot);
+ pgprot_t prot);
void iounmap(volatile void __iomem *addr);
void generic_iounmap(volatile void __iomem *addr);
@@ -1060,7 +1154,7 @@ void generic_iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
- return ioremap_prot(addr, size, _PAGE_IOREMAP);
+ return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));
}
#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
@@ -1151,55 +1245,39 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
#endif
#ifndef memset_io
-#define memset_io memset_io
/**
- * memset_io Set a range of I/O memory to a constant value
+ * memset_io - Set a range of I/O memory to a constant value
* @addr: The beginning of the I/O-memory range to set
* @val: The value to set the memory to
* @count: The number of bytes to set
*
* Set a range of I/O memory to a given value.
*/
-static inline void memset_io(volatile void __iomem *addr, int value,
- size_t size)
-{
- memset(__io_virt(addr), value, size);
-}
+void memset_io(volatile void __iomem *addr, int val, size_t count);
#endif
#ifndef memcpy_fromio
-#define memcpy_fromio memcpy_fromio
/**
- * memcpy_fromio Copy a block of data from I/O memory
+ * memcpy_fromio - Copy a block of data from I/O memory
* @dst: The (RAM) destination for the copy
* @src: The (I/O memory) source for the data
* @count: The number of bytes to copy
*
* Copy a block of data from I/O memory.
*/
-static inline void memcpy_fromio(void *buffer,
- const volatile void __iomem *addr,
- size_t size)
-{
- memcpy(buffer, __io_virt(addr), size);
-}
+void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count);
#endif
#ifndef memcpy_toio
-#define memcpy_toio memcpy_toio
/**
- * memcpy_toio Copy a block of data into I/O memory
+ * memcpy_toio - Copy a block of data into I/O memory
* @dst: The (I/O memory) destination for the copy
* @src: The (RAM) source for the data
* @count: The number of bytes to copy
*
* Copy a block of data to I/O memory.
*/
-static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
- size_t size)
-{
- memcpy(__io_virt(addr), buffer, size);
-}
+void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count);
#endif
extern int devmem_is_allowed(unsigned long pfn);
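The hunks above turn the port I/O helpers into build-time traps: with CONFIG_HAS_IOPORT unset, each helper is declared but never defined, so any reachable call fails to compile instead of silently dereferencing PCI_IOBASE. A minimal sketch of the same pattern, using a hypothetical my_port_read() helper that is not part of the patch:

#ifdef CONFIG_HAS_IOPORT
static inline u8 my_port_read(unsigned long addr)
{
	return inb(addr);	/* normal path: port I/O is available */
}
#else
/* Declared but never defined; the attribute fires on any surviving call. */
u8 my_port_read(unsigned long addr)
	__compiletime_error("my_port_read() requires CONFIG_HAS_IOPORT");
#endif

Note that __compiletime_error() only triggers on calls that survive dead-code elimination, which is why IS_ENABLED()-guarded callers still build.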
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 196087a8126e..9f3f25d7fc58 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -31,42 +31,22 @@ extern unsigned int ioread16(const void __iomem *);
extern unsigned int ioread16be(const void __iomem *);
extern unsigned int ioread32(const void __iomem *);
extern unsigned int ioread32be(const void __iomem *);
-#ifdef CONFIG_64BIT
-extern u64 ioread64(const void __iomem *);
-extern u64 ioread64be(const void __iomem *);
-#endif
-#ifdef readq
-#define ioread64_lo_hi ioread64_lo_hi
-#define ioread64_hi_lo ioread64_hi_lo
-#define ioread64be_lo_hi ioread64be_lo_hi
-#define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(const void __iomem *addr);
-extern u64 ioread64_hi_lo(const void __iomem *addr);
-extern u64 ioread64be_lo_hi(const void __iomem *addr);
-extern u64 ioread64be_hi_lo(const void __iomem *addr);
-#endif
+extern u64 __ioread64_lo_hi(const void __iomem *addr);
+extern u64 __ioread64_hi_lo(const void __iomem *addr);
+extern u64 __ioread64be_lo_hi(const void __iomem *addr);
+extern u64 __ioread64be_hi_lo(const void __iomem *addr);
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
extern void iowrite32(u32, void __iomem *);
extern void iowrite32be(u32, void __iomem *);
-#ifdef CONFIG_64BIT
-extern void iowrite64(u64, void __iomem *);
-extern void iowrite64be(u64, void __iomem *);
-#endif
-#ifdef writeq
-#define iowrite64_lo_hi iowrite64_lo_hi
-#define iowrite64_hi_lo iowrite64_hi_lo
-#define iowrite64be_lo_hi iowrite64be_lo_hi
-#define iowrite64be_hi_lo iowrite64be_hi_lo
-extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
-extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
-extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
-extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
-#endif
+extern void __iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void __iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void __iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void __iowrite64be_hi_lo(u64 val, void __iomem *addr);
/*
* "string" versions of the above. Note that they
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
index 10cd4ffc6ba2..39c94012b88a 100644
--- a/include/asm-generic/mcs_spinlock.h
+++ b/include/asm-generic/mcs_spinlock.h
@@ -1,6 +1,12 @@
#ifndef __ASM_MCS_SPINLOCK_H
#define __ASM_MCS_SPINLOCK_H
+struct mcs_spinlock {
+ struct mcs_spinlock *next;
+ int locked; /* 1 if lock acquired */
+ int count; /* nesting count, see qspinlock.c */
+};
+
/*
* Architectures can define their own:
*
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 6796abe1900e..efa6610acbc7 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -19,17 +19,26 @@
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
+/* avoid <linux/mm.h> include hell */
+extern unsigned long max_mapnr;
+
#ifndef pfn_valid
static inline int pfn_valid(unsigned long pfn)
{
- /* avoid <linux/mm.h> include hell */
- extern unsigned long max_mapnr;
unsigned long pfn_offset = ARCH_PFN_OFFSET;
return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}
#define pfn_valid pfn_valid
-#endif
+
+#ifndef for_each_valid_pfn
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+ for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
+ (pfn) < min_t(unsigned long, (end_pfn), \
+ ARCH_PFN_OFFSET + max_mapnr); \
+ (pfn)++)
+#endif /* for_each_valid_pfn */
+#endif /* pfn_valid */
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -44,7 +53,7 @@ static inline int pfn_valid(unsigned long pfn)
*/
#define __page_to_pfn(pg) \
({ const struct page *__pg = (pg); \
- int __sec = page_to_section(__pg); \
+ int __sec = memdesc_section(__pg->flags); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
@@ -64,6 +73,19 @@ static inline int pfn_valid(unsigned long pfn)
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
+#ifdef CONFIG_DEBUG_VIRTUAL
+#define page_to_phys(page) \
+({ \
+ unsigned long __pfn = page_to_pfn(page); \
+ \
+ WARN_ON_ONCE(!pfn_valid(__pfn)); \
+ PFN_PHYS(__pfn); \
+})
+#else
+#define page_to_phys(page) PFN_PHYS(page_to_pfn(page))
+#endif /* CONFIG_DEBUG_VIRTUAL */
+#define phys_to_page(phys) pfn_to_page(PHYS_PFN(phys))
+
#endif /* __ASSEMBLY__ */
#endif
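With max_mapnr now visible at file scope, the new for_each_valid_pfn() clamps a walk to [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr) so callers need not re-check pfn_valid() per iteration. A usage sketch under FLATMEM (helper name is made up):

static unsigned long count_valid_pfns(unsigned long start, unsigned long end)
{
	unsigned long pfn, nr = 0;

	/* Only PFNs backed by mem_map entries are visited. */
	for_each_valid_pfn(pfn, start, end)
		nr++;

	return nr;
}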
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6eea3b3c1e65 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mmzone.h b/include/asm-generic/mmzone.h
new file mode 100644
index 000000000000..2ab5193e8394
--- /dev/null
+++ b/include/asm-generic/mmzone.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_MMZONE_H
+#define _ASM_GENERIC_MMZONE_H
+
+#endif
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
index 98e1541b72b7..a8622501b975 100644
--- a/include/asm-generic/module.h
+++ b/include/asm-generic/module.h
@@ -19,12 +19,8 @@ struct mod_arch_specific
#define Elf_Dyn Elf64_Dyn
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Addr Elf64_Addr
-#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf64_Rel
-#endif
-#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf64_Rela
-#endif
#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
#define ELF_R_SYM(X) ELF64_R_SYM(X)
@@ -36,12 +32,8 @@ struct mod_arch_specific
#define Elf_Dyn Elf32_Dyn
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Addr Elf32_Addr
-#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf32_Rel
-#endif
-#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf32_Rela
-#endif
#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
#define ELF_R_SYM(X) ELF32_R_SYM(X)
#endif
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 8fe7aaab2599..ecedab554c80 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -6,9 +6,8 @@
* independent. See arch/<arch>/include/asm/mshyperv.h for definitions
* that are specific to architecture <arch>.
*
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
*
* Copyright (C) 2019, Microsoft, Inc.
*
@@ -25,13 +24,20 @@
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define VTPM_BASE_ADDRESS 0xfed40000
+enum hv_partition_type {
+ HV_PARTITION_TYPE_GUEST,
+ HV_PARTITION_TYPE_ROOT,
+ HV_PARTITION_TYPE_L1VH,
+};
+
struct ms_hyperv_info {
u32 features;
u32 priv_high;
+ u32 ext_features;
u32 misc_features;
u32 hints;
u32 nested_features;
@@ -56,18 +62,37 @@ struct ms_hyperv_info {
};
};
u64 shared_gpa_boundary;
+ bool msi_ext_dest_id;
+ bool confidential_vmbus_available;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
+extern u64 hv_current_partition_id;
+extern enum hv_partition_type hv_curr_partition_type;
extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;
-extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
-extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
+u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+u64 hv_do_fast_hypercall16(u16 control, u64 input1, u64 input2);
+
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
+/*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+static inline bool hv_recommend_using_aeoi(void)
+{
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+ return !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+ return false;
+#endif
+}
+
static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
{
struct hv_proximity_domain_info pxm_info = {};
@@ -101,10 +126,12 @@ static inline unsigned int hv_repcomp(u64 status)
/*
* Rep hypercalls. Callers of these functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ * rep_count, varhead_size, and rep_start comply with Hyper-V hypercall
+ * definition.
*/
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
- void *input, void *output)
+static inline u64 hv_do_rep_hypercall_ex(u16 code, u16 rep_count,
+ u16 varhead_size, u16 rep_start,
+ void *input, void *output)
{
u64 control = code;
u64 status;
@@ -112,6 +139,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+ control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET;
do {
status = hv_do_hypercall(control, input, output);
@@ -129,6 +157,14 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
return status;
}
+/* For the typical case where rep_start is 0 */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ return hv_do_rep_hypercall_ex(code, rep_count, varhead_size, 0,
+ input, output);
+}
+
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
@@ -140,41 +176,6 @@ static inline u64 hv_generate_guest_id(u64 kernel_version)
return guest_id;
}
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
- /*
- * On crash we're reading some other CPU's message page and we need
- * to be careful: this other CPU may already had cleared the header
- * and the host may already had delivered some other message there.
- * In case we blindly write msg->header.message_type we're going
- * to lose it. We can still lose a message of the same type but
- * we count on the fact that there can only be one
- * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
- * on crash.
- */
- if (cmpxchg(&msg->header.message_type, old_msg_type,
- HVMSG_NONE) != old_msg_type)
- return;
-
- /*
- * The cmxchg() above does an implicit memory barrier to
- * ensure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- hv_set_msr(HV_MSR_EOM, 0);
- }
-}
-
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
@@ -186,11 +187,7 @@ void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
-
-extern int vmbus_interrupt;
-extern int vmbus_irq;
-
-extern bool hv_root_partition;
+void hv_setup_mshv_handler(void (*handler)(void));
#if IS_ENABLED(CONFIG_HYPERV)
/*
@@ -208,14 +205,12 @@ extern u64 (*hv_read_reference_counter)(void);
#define VP_INVAL U32_MAX
int __init hv_common_init(void);
+void __init hv_get_partition_id(void);
void __init hv_common_free(void);
void __init ms_hyperv_late_init(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);
-
-void *hv_alloc_hyperv_page(void);
-void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(void *addr);
+void hv_identify_partition_type(void);
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
@@ -292,6 +287,20 @@ static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
return __cpumask_to_vpset(vpset, cpus, func);
}
+#define _hv_status_fmt(fmt) "%s: Hyper-V status: %#x = %s: " fmt
+#define hv_status_printk(level, status, fmt, ...) \
+do { \
+ u64 __status = (status); \
+ pr_##level(_hv_status_fmt(fmt), __func__, hv_result(__status), \
+ hv_result_to_string(__status), ##__VA_ARGS__); \
+} while (0)
+#define hv_status_err(status, fmt, ...) \
+ hv_status_printk(err, status, fmt, ##__VA_ARGS__)
+#define hv_status_debug(status, fmt, ...) \
+ hv_status_printk(debug, status, fmt, ##__VA_ARGS__)
+
+const char *hv_result_to_string(u64 hv_status);
+int hv_result_to_errno(u64 status);
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
@@ -300,10 +309,15 @@ bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
+void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set);
+void hv_para_set_sint_proxy(bool enable);
+u64 hv_para_get_synic_register(unsigned int reg);
+void hv_para_set_synic_register(unsigned int reg, u64 val);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
+static inline void hv_identify_partition_type(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
@@ -315,4 +329,45 @@ static inline enum hv_isolation_type hv_get_isolation_type(void)
}
#endif /* CONFIG_HYPERV */
+#if IS_ENABLED(CONFIG_MSHV_ROOT)
+static inline bool hv_root_partition(void)
+{
+ return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
+}
+static inline bool hv_l1vh_partition(void)
+{
+ return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH;
+}
+static inline bool hv_parent_partition(void)
+{
+ return hv_root_partition() || hv_l1vh_partition();
+}
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
+
+#else /* CONFIG_MSHV_ROOT */
+static inline bool hv_root_partition(void) { return false; }
+static inline bool hv_l1vh_partition(void) { return false; }
+static inline bool hv_parent_partition(void) { return false; }
+static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+ return -EOPNOTSUPP;
+}
+static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id)
+{
+ return -EOPNOTSUPP;
+}
+static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MSHV_ROOT */
+
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void);
+#else
+static inline u8 get_vtl(void) { return 0; }
+#endif
+
#endif
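The new hv_status_printk() family folds the raw hypercall status, its string form, and the calling function into a single report. A usage sketch, assuming the pre-existing hv_result_success() helper:

static void demo_report(u64 status)
{
	if (hv_result_success(status))
		return;

	/* Prints: "<func>: Hyper-V status: 0x... = <name>: deposit failed" */
	hv_status_err(status, "deposit failed\n");
}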
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index 124c734ca5d9..92cca4b23f13 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -33,6 +33,7 @@ typedef struct msi_alloc_info {
/* Device generating MSIs is proxying for another device */
#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1)
#define GENERIC_MSI_DOMAIN_OPS 1
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index c32e0cf23c90..e063d6487f66 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,10 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-int __init early_cpu_to_node(int cpu);
+int early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -51,4 +49,8 @@ static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
index 8d3009dd28ff..8348c116aa3b 100644
--- a/include/asm-generic/param.h
+++ b/include/asm-generic/param.h
@@ -6,6 +6,6 @@
# undef HZ
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* some user interfaces are */
+# define USER_HZ __USER_HZ /* some user interfaces are */
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 94cbd50cc870..6628670bcb90 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -2,10 +2,25 @@
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
+#ifndef __ASSEMBLER__
+
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+/*
+ * __percpu_qual is the qualifier for the percpu named address space.
+ *
+ * Most arches use generic named address space for percpu variables but
+ * some arches define percpu variables in different named address space
+ * (on the x86 arch, percpu variable may be declared as being relative
+ * to the %fs or %gs segments using __seg_fs or __seg_gs named address
+ * space qualifier).
+ */
+#ifndef __percpu_qual
+# define __percpu_qual
+#endif
+
#ifdef CONFIG_SMP
/*
@@ -74,7 +89,7 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
\
*__p += val; \
*__p; \
@@ -82,8 +97,8 @@ do { \
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __ret; \
__ret = *__p; \
*__p = nval; \
__ret; \
@@ -91,7 +106,7 @@ do { \
#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \
({ \
- typeof(pcp) __val, __old = *(ovalp); \
+ TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \
__val = _cmpxchg(pcp, __old, nval); \
if (__val != __old) \
*(ovalp) = __val; \
@@ -100,8 +115,8 @@ do { \
#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __val = *__p, ___old = *(ovalp); \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \
bool __ret; \
if (__val == ___old) { \
*__p = nval; \
@@ -115,14 +130,14 @@ do { \
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp) __old = (oval); \
+ TYPEOF_UNQUAL(pcp) __old = (oval); \
raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \
__old; \
})
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
preempt_disable_notrace(); \
___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
@@ -131,7 +146,7 @@ do { \
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
unsigned long ___flags; \
raw_local_irq_save(___flags); \
___ret = raw_cpu_generic_read(pcp); \
@@ -141,7 +156,7 @@ do { \
#define this_cpu_generic_read(pcp) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
if (__native_word(pcp)) \
__ret = __this_cpu_generic_read_nopreempt(pcp); \
else \
@@ -160,7 +175,7 @@ do { \
#define this_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_add_return(pcp, val); \
@@ -170,7 +185,7 @@ do { \
#define this_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_xchg(pcp, nval); \
@@ -190,7 +205,7 @@ do { \
#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
@@ -544,4 +559,5 @@ do { \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_GENERIC_PERCPU_H_ */
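The switch to TYPEOF_UNQUAL() matters because typeof() of a per-CPU variable can carry a named address-space qualifier (e.g. __seg_gs on x86), which is not a valid type for an ordinary stack temporary. A miniature illustration; the variable name is made up:

DEFINE_PER_CPU(int, demo_counter);

static int demo_read(void)
{
	/* typeof(demo_counter) may be '__seg_gs int'; this is plain 'int'. */
	TYPEOF_UNQUAL(demo_counter) val = this_cpu_read(demo_counter);

	return val;
}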
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 7c48f5fbf8aa..57137d3ac159 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -18,11 +18,17 @@
*/
static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
- struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL &
- ~__GFP_HIGHMEM, 0);
+ struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL, 0);
if (!ptdesc)
return NULL;
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
+ }
+
+ ptdesc_set_kernel(ptdesc);
+
return ptdesc_address(ptdesc);
}
#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
@@ -48,7 +54,7 @@ static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- pagetable_free(virt_to_ptdesc(pte));
+ pagetable_dtor_free(virt_to_ptdesc(pte));
}
/**
@@ -70,7 +76,7 @@ static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
ptdesc = pagetable_alloc_noprof(gfp, 0);
if (!ptdesc)
return NULL;
- if (!pagetable_pte_ctor(ptdesc)) {
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
@@ -109,8 +115,7 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
struct ptdesc *ptdesc = page_ptdesc(pte_page);
- pagetable_pte_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
@@ -138,10 +143,14 @@ static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long ad
ptdesc = pagetable_alloc_noprof(gfp, 0);
if (!ptdesc)
return NULL;
- if (!pagetable_pmd_ctor(ptdesc)) {
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
pagetable_free(ptdesc);
return NULL;
}
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
return ptdesc_address(ptdesc);
}
#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
@@ -153,8 +162,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pagetable_pmd_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
#endif
@@ -169,13 +177,16 @@ static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- gfp &= ~__GFP_HIGHMEM;
ptdesc = pagetable_alloc_noprof(gfp, 0);
if (!ptdesc)
return NULL;
pagetable_pud_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
return ptdesc_address(ptdesc);
}
#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))
@@ -202,8 +213,7 @@ static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
struct ptdesc *ptdesc = virt_to_ptdesc(pud);
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- pagetable_pud_dtor(ptdesc);
- pagetable_free(ptdesc);
+ pagetable_dtor_free(ptdesc);
}
#ifndef __HAVE_ARCH_PUD_FREE
@@ -215,10 +225,88 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#if CONFIG_PGTABLE_LEVELS > 4
+
+static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_p4d_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
+}
+#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))
+
+#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
+static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ return __p4d_alloc_one_noprof(mm, addr);
+}
+#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
+#endif
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
+
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
+
+#ifndef __HAVE_ARCH_P4D_FREE
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ __p4d_free(mm, p4d);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 4 */
+
+static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+
+ ptdesc = pagetable_alloc_noprof(gfp, order);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pgd_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
+}
+#define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
+
+static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
+
+ BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
+
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pagetable_free(virt_to_ptdesc(pgd));
+ __pgd_free(mm, pgd);
}
#endif
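The pgalloc.h changes collapse the separate dtor + free calls into pagetable_dtor_free() and tag init_mm tables via ptdesc_set_kernel() at allocation time. A sketch of how a hypothetical arch would pair the new helpers for an order-0 PGD:

static pgd_t *demo_pgd_alloc(struct mm_struct *mm)
{
	/* GFP selection, ctor and ptdesc_set_kernel() all happen inside */
	return __pgd_alloc(mm, 0);
}

static void demo_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	__pgd_free(mm, pgd);	/* one call: dtor + pagetable_free() */
}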
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
index 828966d4c281..0d85791efdf7 100644
--- a/include/asm-generic/pgtable_uffd.h
+++ b/include/asm-generic/pgtable_uffd.h
@@ -1,6 +1,23 @@
#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
#define _ASM_GENERIC_PGTABLE_UFFD_H
+/*
+ * Some platforms can customize the uffd-wp bit, making it unavailable
+ * even when the architecture provides the resource.
+ * This hook lets architectures add their own checks for the hardware
+ * the kernel is running on.
+ * Note: when overriding it, make sure CONFIG_HAVE_ARCH_USERFAULTFD_WP
+ * is still checked as part of this macro.
+ */
+#ifndef pgtable_supports_uffd_wp
+#define pgtable_supports_uffd_wp() IS_ENABLED(CONFIG_HAVE_ARCH_USERFAULTFD_WP)
+#endif
+
+static inline bool uffd_supports_wp_marker(void)
+{
+ return pgtable_supports_uffd_wp() && IS_ENABLED(CONFIG_PTE_MARKER_UFFD_WP);
+}
+
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static __always_inline int pte_uffd_wp(pte_t pte)
{
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 0655aa5b57b2..bf47cca2c375 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
}
#endif
+#ifndef __no_arch_spinlock_redefine
/*
* Remapping spinlock architecture specific functions to the corresponding
* queued spinlock functions.
@@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
+#endif
#endif /* __ASM_GENERIC_QSPINLOCK_H */
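__no_arch_spinlock_redefine lets an architecture include both generic implementations without the arch_spin_*() macros colliding, then dispatch between them itself. A sketch of the arch-side pattern; the static key is an assumed, arch-defined detail:

/* arch/foo/include/asm/spinlock.h (sketch) */
#define __no_arch_spinlock_redefine
#include <asm/ticket_spinlock.h>
#include <asm/qspinlock.h>

DECLARE_STATIC_KEY_TRUE(use_qspinlock_key);	/* assumed, arch-defined */

#define arch_spin_lock(l)				\
do {							\
	if (static_branch_likely(&use_qspinlock_key))	\
		queued_spin_lock(l);			\
	else						\
		ticket_spin_lock(l);			\
} while (0)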
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
new file mode 100644
index 000000000000..0f2dcbbfee2f
--- /dev/null
+++ b/include/asm-generic/rqspinlock.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Resilient Queued Spin Lock
+ *
+ * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
+ *
+ * Authors: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+ */
+#ifndef __ASM_GENERIC_RQSPINLOCK_H
+#define __ASM_GENERIC_RQSPINLOCK_H
+
+#include <linux/types.h>
+#include <vdso/time64.h>
+#include <linux/percpu.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#endif
+
+struct rqspinlock {
+ union {
+ atomic_t val;
+ u32 locked;
+ };
+};
+
+/* Even though this is the same as struct rqspinlock, we need to emit a distinct
+ * type in BTF for BPF programs.
+ */
+struct bpf_res_spin_lock {
+ u32 val;
+};
+
+struct qspinlock;
+#ifdef CONFIG_QUEUED_SPINLOCKS
+typedef struct qspinlock rqspinlock_t;
+#else
+typedef struct rqspinlock rqspinlock_t;
+#endif
+
+extern int resilient_tas_spin_lock(rqspinlock_t *lock);
+#ifdef CONFIG_QUEUED_SPINLOCKS
+extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
+#endif
+
+#ifndef resilient_virt_spin_lock_enabled
+static __always_inline bool resilient_virt_spin_lock_enabled(void)
+{
+ return false;
+}
+#endif
+
+#ifndef resilient_virt_spin_lock
+static __always_inline int resilient_virt_spin_lock(rqspinlock_t *lock)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Default timeout for waiting loops is 0.25 seconds
+ */
+#define RES_DEF_TIMEOUT (NSEC_PER_SEC / 4)
+
+/*
+ * Choose 31 as it makes rqspinlock_held cacheline-aligned.
+ */
+#define RES_NR_HELD 31
+
+struct rqspinlock_held {
+ int cnt;
+ void *locks[RES_NR_HELD];
+};
+
+DECLARE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
+
+static __always_inline void grab_held_lock_entry(void *lock)
+{
+ int cnt = this_cpu_inc_return(rqspinlock_held_locks.cnt);
+
+ if (unlikely(cnt > RES_NR_HELD)) {
+ /* Still keep the inc so we decrement later. */
+ return;
+ }
+
+ /*
+ * Implied compiler barrier in per-CPU operations; otherwise we can have
+ * the compiler reorder inc with write to table, allowing interrupts to
+ * overwrite and erase our write to the table (as on interrupt exit it
+ * will be reset to NULL).
+ *
+ * It is fine for cnt inc to be reordered wrt remote readers though,
+ * they won't observe our entry until the cnt update is visible, that's
+ * all.
+ */
+ this_cpu_write(rqspinlock_held_locks.locks[cnt - 1], lock);
+}
+
+/*
+ * We simply don't support out-of-order unlocks, and keep the logic simple here.
+ * The verifier prevents BPF programs from unlocking out-of-order, and the same
+ * holds for in-kernel users.
+ *
+ * It is possible to run into misdetection scenarios of AA deadlocks on the same
+ * CPU, and missed ABBA deadlocks on remote CPUs if this function pops entries
+ * out of order (due to a lock A, lock B, unlock A, unlock B pattern). The correct
+ * logic to preserve right entries in the table would be to walk the array of
+ * held locks and swap and clear out-of-order entries, but that's too
+ * complicated and we don't have a compelling use case for out of order unlocking.
+ */
+static __always_inline void release_held_lock_entry(void)
+{
+ struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+ if (unlikely(rqh->cnt > RES_NR_HELD))
+ goto dec;
+ WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+dec:
+ /*
+ * Reordering of clearing above with inc and its write in
+ * grab_held_lock_entry that came before us (in same acquisition
+ * attempt) is ok, we either see a valid entry or NULL when it's
+ * visible.
+ *
+ * But this helper is invoked when we unwind upon failing to acquire the
+ * lock. Unlike the unlock path which constitutes a release store after
+ * we clear the entry, we need to emit a write barrier here. Otherwise,
+ * we may have a situation as follows:
+ *
+ * <error> for lock B
+ * release_held_lock_entry
+ *
+ * grab_held_lock_entry
+ * try_cmpxchg_acquire for lock A
+ *
+ * Lack of any ordering means reordering may occur such that dec, inc
+ * are done before entry is overwritten. This permits a remote lock
+ * holder of lock B (which this CPU failed to acquire) to now observe it
+ * as being attempted on this CPU, and may lead to misdetection (if this
+ * CPU holds a lock it is attempting to acquire, leading to false ABBA
+ * diagnosis).
+ *
+ * The case of unlock is treated differently due to NMI reentrancy, see
+ * comments in res_spin_unlock.
+ *
+ * In theory we don't have a problem if the dec and WRITE_ONCE above get
+ * reordered with each other, we either notice an empty NULL entry on
+ * top (if dec succeeds WRITE_ONCE), or a potentially stale entry which
+ * cannot be observed (if dec precedes WRITE_ONCE).
+ *
+ * Emit the write barrier _before_ the dec, this permits dec-inc
+ * reordering but that is harmless as we'd have new entry set to NULL
+ * already, i.e. they cannot precede the NULL store above.
+ */
+ smp_wmb();
+ this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+/**
+ * res_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * Return:
+ * * 0 - Lock was acquired successfully.
+ * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
+ * * -ETIMEDOUT - Lock acquisition failed because of timeout.
+ */
+static __always_inline int res_spin_lock(rqspinlock_t *lock)
+{
+ int val = 0;
+
+ /*
+ * Grab the deadlock detection entry before doing the cmpxchg, so that
+ * reentrancy due to NMIs between the succeeding cmpxchg and creation of
+ * held lock entry can correctly detect an acquisition attempt in the
+ * interrupted context.
+ *
+ * cmpxchg lock A
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * grab_held_lock_entry(A)
+ */
+ grab_held_lock_entry(lock);
+
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return 0;
+ return resilient_queued_spin_lock_slowpath(lock, val);
+}
+
+#else
+
+#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+static __always_inline void res_spin_unlock(rqspinlock_t *lock)
+{
+ struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+ /*
+ * Release barrier, ensures correct ordering. Perform release store
+ * instead of queued_spin_unlock, since we use this function for the TAS
+ * fallback as well. When we have CONFIG_QUEUED_SPINLOCKS=n, we clear
+ * the full 4-byte lockword.
+ *
+ * Perform the smp_store_release before clearing the lock entry so that
+ * NMIs landing in the unlock path can correctly detect AA issues. The
+ * opposite order shown below may lead to missed AA checks:
+ *
+ * WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL)
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * smp_store_release(A->locked, 0)
+ */
+ smp_store_release(&lock->locked, 0);
+ if (likely(rqh->cnt <= RES_NR_HELD))
+ WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+ this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
+#else
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
+#endif
+
+#define raw_res_spin_lock(lock) \
+ ({ \
+ int __ret; \
+ preempt_disable(); \
+ __ret = res_spin_lock(lock); \
+ if (__ret) \
+ preempt_enable(); \
+ __ret; \
+ })
+
+#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
+
+#define raw_res_spin_lock_irqsave(lock, flags) \
+ ({ \
+ int __ret; \
+ local_irq_save(flags); \
+ __ret = raw_res_spin_lock(lock); \
+ if (__ret) \
+ local_irq_restore(flags); \
+ __ret; \
+ })
+
+#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
+
+#endif /* __ASM_GENERIC_RQSPINLOCK_H */
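Unlike a plain spinlock, a resilient lock acquisition can fail, so every call site must check the return value. A minimal usage sketch; the lock is assumed to have been set up with raw_res_spin_lock_init():

static rqspinlock_t demo_lock;

static int demo_update(void)
{
	unsigned long flags;
	int ret;

	ret = raw_res_spin_lock_irqsave(&demo_lock, flags);
	if (ret)
		return ret;		/* -EDEADLK or -ETIMEDOUT */

	/* ... critical section ... */

	raw_res_spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}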
diff --git a/include/asm-generic/runtime-const.h b/include/asm-generic/runtime-const.h
new file mode 100644
index 000000000000..670499459514
--- /dev/null
+++ b/include/asm-generic/runtime-const.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+/*
+ * This is the fallback for when the architecture doesn't
+ * support the runtime const operations.
+ *
+ * We just use the actual symbols as-is.
+ */
+#define runtime_const_ptr(sym) (sym)
+#define runtime_const_shift_right_32(val, sym) ((u32)(val)>>(sym))
+#define runtime_const_init(type,sym) do { } while (0)
+
+#endif
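With this generic fallback the runtime_const_*() macros are pure pass-throughs, so code written against the arch instruction-patching API still compiles everywhere. A caller sketch (symbol names are made up):

static unsigned long *demo_table;
static unsigned int demo_shift;

static unsigned long demo_lookup(u32 hash)
{
	/*
	 * On arches with runtime-const support these become patched
	 * immediates; with the fallback they just read the variables.
	 */
	unsigned long *table = runtime_const_ptr(demo_table);
	unsigned int idx = runtime_const_shift_right_32(hash, demo_shift);

	return table[idx];
}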
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
index 8d0a6280e982..52b969c7cef9 100644
--- a/include/asm-generic/rwonce.h
+++ b/include/asm-generic/rwonce.h
@@ -79,10 +79,18 @@ unsigned long __read_once_word_nocheck(const void *addr)
(typeof(x))__read_once_word_nocheck(&(x)); \
})
-static __no_kasan_or_inline
+static __no_sanitize_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
+ /* open-coded instrument_read(addr, 1) */
kasan_check_read(addr, 1);
+ kcsan_check_read(addr, 1);
+
+ /*
+ * This load can race with concurrent stores to out-of-bounds memory,
+ * but READ_ONCE() can't be used because it requires higher alignment
+ * than plain loads in arm64 builds with LTO.
+ */
return *(unsigned long *)addr;
}
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index c768de6f19a9..0755bc39b0d8 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -39,7 +39,7 @@ extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char __start_ro_after_init[], __end_ro_after_init[];
extern char _end[];
-extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
index d0343d58a74a..70c8716ad32a 100644
--- a/include/asm-generic/simd.h
+++ b/include/asm-generic/simd.h
@@ -1,6 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_SIMD_H
+#define _ASM_GENERIC_SIMD_H
-#include <linux/hardirq.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
@@ -13,3 +18,5 @@ static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}
+
+#endif /* _ASM_GENERIC_SIMD_H */
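Callers of may_use_simd() are expected to carry a scalar fallback; a minimal shape, with the vector body elided:

static void demo_fill(u8 *dst, size_t len)
{
	if (!may_use_simd()) {
		memset(dst, 0, len);	/* scalar fallback path */
		return;
	}

	/* ... arch-specific SIMD begin/end and vectorized body ... */
}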
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index 90803a826ba0..970590baf61b 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,94 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * 'Generic' ticket-lock implementation.
- *
- * It relies on atomic_fetch_add() having well defined forward progress
- * guarantees under contention. If your architecture cannot provide this, stick
- * to a test-and-set lock.
- *
- * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
- * sub-word of the value. This is generally true for anything LL/SC although
- * you'd be hard pressed to find anything useful in architecture specifications
- * about this. If your architecture cannot do this you might be better off with
- * a test-and-set.
- *
- * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
- * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
- * a full fence after the spin to upgrade the otherwise-RCpc
- * atomic_cond_read_acquire().
- *
- * The implementation uses smp_cond_load_acquire() to spin, so if the
- * architecture has WFE like instructions to sleep instead of poll for word
- * modifications be sure to implement that (see ARM64 for example).
- *
- */
-
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H
-#include <linux/atomic.h>
-#include <asm-generic/spinlock_types.h>
-
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- u32 val = atomic_fetch_add(1<<16, lock);
- u16 ticket = val >> 16;
-
- if (ticket == (u16)val)
- return;
-
- /*
- * atomic_cond_read_acquire() is RCpc, but rather than defining a
- * custom cond_read_rcsc() here we just emit a full fence. We only
- * need the prior reads before subsequent writes ordering from
- * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we
- * have no outstanding writes due to the atomic_fetch_add() the extra
- * orderings are free.
- */
- atomic_cond_read_acquire(lock, ticket == (u16)VAL);
- smp_mb();
-}
-
-static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
-{
- u32 old = atomic_read(lock);
-
- if ((old >> 16) != (old & 0xffff))
- return false;
-
- return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
- u32 val = atomic_read(lock);
-
- smp_store_release(ptr, (u16)val + 1);
-}
-
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- u32 val = lock.counter;
-
- return ((val >> 16) == (val & 0xffff));
-}
-
-static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- arch_spinlock_t val = READ_ONCE(*lock);
-
- return !arch_spin_value_unlocked(val);
-}
-
-static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- u32 val = atomic_read(lock);
-
- return (s16)((val >> 16) - (val & 0xffff)) > 1;
-}
-
+#include <asm-generic/ticket_spinlock.h>
#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
index 8962bb730945..f534aa5de394 100644
--- a/include/asm-generic/spinlock_types.h
+++ b/include/asm-generic/spinlock_types.h
@@ -3,15 +3,7 @@
#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
#define __ASM_GENERIC_SPINLOCK_TYPES_H
-#include <linux/types.h>
-typedef atomic_t arch_spinlock_t;
-
-/*
- * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
- * include.
- */
-#include <asm/qrwlock_types.h>
-
-#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0)
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 5a80fe728dc8..c5a3ad53beec 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This file is a stub providing documentation for what functions
- * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions
* will be simple inlines.
*
* All of these functions expect to be called with no locks,
@@ -38,6 +38,20 @@ struct pt_regs;
int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
/**
+ * syscall_set_nr - change the system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @nr: system call number
+ *
+ * Changes the system call number @task is about to execute.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr);
+
+/**
* syscall_rollback - roll back registers after an aborted system call
* @task: task of interest, must be in system call exit tracing
* @regs: task_pt_regs() of @task
@@ -118,6 +132,22 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
unsigned long *args);
/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @args: array of argument values to store
+ *
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ const unsigned long *args);
+
+/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
* @task: task of interest, must be blocked
*
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index 933ca6581aba..fabcefe8a80a 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -19,7 +19,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifndef sys_mmap
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+ unsigned long fd, unsigned long off);
#endif
#ifndef sys_rt_sigreturn
diff --git a/include/asm-generic/text-patching.h b/include/asm-generic/text-patching.h
new file mode 100644
index 000000000000..2245c641b741
--- /dev/null
+++ b/include/asm-generic/text-patching.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_TEXT_PATCHING_H
+#define _ASM_GENERIC_TEXT_PATCHING_H
+
+#endif /* _ASM_GENERIC_TEXT_PATCHING_H */
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
new file mode 100644
index 000000000000..da1610a78f92
--- /dev/null
+++ b/include/asm-generic/thread_info_tif.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_THREAD_INFO_TIF_H_
+#define _ASM_GENERIC_THREAD_INFO_TIF_H_
+
+#include <vdso/bits.h>
+
+/* Bits 16-31 are reserved for architecture specific purposes */
+
+#define TIF_NOTIFY_RESUME 0 // callback before returning to user
+#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
+
+#define TIF_SIGPENDING 1 // signal pending
+#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
+
+#define TIF_NOTIFY_SIGNAL 2 // signal notifications exist
+#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
+
+#define TIF_MEMDIE 3 // is terminating due to OOM killer
+#define _TIF_MEMDIE BIT(TIF_MEMDIE)
+
+#define TIF_NEED_RESCHED 4 // rescheduling necessary
+#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
+
+#ifdef HAVE_TIF_NEED_RESCHED_LAZY
+# define TIF_NEED_RESCHED_LAZY 5 // Lazy rescheduling needed
+# define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
+#endif
+
+#ifdef HAVE_TIF_POLLING_NRFLAG
+# define TIF_POLLING_NRFLAG 6 // idle is polling for TIF_NEED_RESCHED
+# define _TIF_POLLING_NRFLAG BIT(TIF_POLLING_NRFLAG)
+#endif
+
+#define TIF_USER_RETURN_NOTIFY 7 // notify kernel of userspace return
+#define _TIF_USER_RETURN_NOTIFY BIT(TIF_USER_RETURN_NOTIFY)
+
+#define TIF_UPROBE 8 // breakpointed or singlestepping
+#define _TIF_UPROBE BIT(TIF_UPROBE)
+
+#define TIF_PATCH_PENDING 9 // pending live patching update
+#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+
+#ifdef HAVE_TIF_RESTORE_SIGMASK
+# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal()
+# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
+#endif
+
+#define TIF_RSEQ 11 // Run RSEQ fast path
+#define _TIF_RSEQ BIT(TIF_RSEQ)
+
+#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
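An architecture adopting the shared TIF layout opts into the optional bits before the include and keeps its own flags at bit 16 and above. A sketch of the arch side (names are illustrative):

/* arch/foo/include/asm/thread_info.h (sketch) */
#define HAVE_TIF_NEED_RESCHED_LAZY
#define HAVE_TIF_RESTORE_SIGMASK
#include <asm-generic/thread_info_tif.h>

#define TIF_FOO_SINGLESTEP	16	/* first arch-private bit */
#define _TIF_FOO_SINGLESTEP	BIT(TIF_FOO_SINGLESTEP)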
diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
new file mode 100644
index 000000000000..325779970d8a
--- /dev/null
+++ b/include/asm-generic/ticket_spinlock.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE like instructions to sleep instead of poll for word
+ * modifications be sure to implement that (see ARM64 for example).
+ *
+ */
+
+#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H
+#define __ASM_GENERIC_TICKET_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, &lock->val);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior reads before subsequent writes ordering from
+ * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(&lock->val);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(&lock->val);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock)
+{
+ u32 val = lock.val.counter;
+
+ return ((val >> 16) == (val & 0xffff));
+}
+
+static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock)
+{
+ arch_spinlock_t val = READ_ONCE(*lock);
+
+ return !ticket_spin_value_unlocked(val);
+}
+
+static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(&lock->val);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+#ifndef __no_arch_spinlock_redefine
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * ticket spinlock functions.
+ */
+#define arch_spin_is_locked(l) ticket_spin_is_locked(l)
+#define arch_spin_is_contended(l) ticket_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l)
+#define arch_spin_lock(l) ticket_spin_lock(l)
+#define arch_spin_trylock(l) ticket_spin_trylock(l)
+#define arch_spin_unlock(l) ticket_spin_unlock(l)
+#endif
+
+#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
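A worked example of the ticket encoding: the high half-word is the next ticket to hand out, the low half-word the ticket now being served. For val == 0x00030001 the lock is held (3 != 1) with two waiters queued behind the owner. An illustrative check:

static bool demo_ticket_unlocked(u32 val)
{
	u16 next  = val >> 16;		/* ticket dispenser */
	u16 owner = val & 0xffff;	/* now serving */

	return next == owner;
}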
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 709830274b75..1fff717cae51 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -58,6 +58,11 @@
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there's large holes between the VMAs.
*
+ * - tlb_free_vmas()
+ *
+ * tlb_free_vmas() marks the start of unlinking of one or more vmas
+ * and freeing page-tables.
+ *
* - tlb_remove_table()
*
* tlb_remove_table() is the basic primitive to free page-table directories
@@ -67,22 +72,21 @@
*
* See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
*
- * - tlb_remove_page() / __tlb_remove_page()
- * - tlb_remove_page_size() / __tlb_remove_page_size()
- * - __tlb_remove_folio_pages()
+ * - tlb_remove_page() / tlb_remove_page_size()
+ * - __tlb_remove_folio_pages() / __tlb_remove_page_size()
+ * - __tlb_remove_folio_pages_size()
*
- * __tlb_remove_page_size() is the basic primitive that queues a page for
- * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
- * boolean indicating if the queue is (now) full and a call to
- * tlb_flush_mmu() is required.
+ * __tlb_remove_folio_pages_size() is the basic primitive that queues pages
+ * for freeing. It will return a boolean indicating if the queue is (now)
+ * full and a call to tlb_flush_mmu() is required.
*
* tlb_remove_page() and tlb_remove_page_size() imply the call to
* tlb_flush_mmu() when required and has no return value.
*
- * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however,
- * instead of removing a single page, remove the given number of consecutive
- * pages that are all part of the same (large) folio: just like calling
- * __tlb_remove_page() on each page individually.
+ * __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(),
+ * however, instead of removing a single page, assume PAGE_SIZE and remove
+ * the given number of consecutive pages that are all part of the
+ * same (large) folio.
*
* - tlb_change_page_size()
*
@@ -153,8 +157,9 @@
*
* Useful if your architecture has non-page page directories.
*
- * When used, an architecture is expected to provide __tlb_remove_table()
- * which does the actual freeing of these pages.
+ * When used, an architecture is expected to provide __tlb_remove_table() or
+ * use the generic __tlb_remove_table(), which does the actual freeing of these
+ * pages.
*
* MMU_GATHER_RCU_TABLE_FREE
*
@@ -207,16 +212,31 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+static inline void __tlb_remove_table(void *table)
+{
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+
+ pagetable_dtor_free(ptdesc);
+}
+#endif
+
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/*
* Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
* page directories and we can use the normal page batching to free them.
*/
-#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
+}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
@@ -449,7 +469,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
*/
tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
- tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+
+ /*
+ * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
+ * in the tracked range, see tlb_free_vmas().
+ */
+ tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -473,32 +498,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
tlb_flush_mmu(tlb);
}
-static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
- struct page *page, bool delay_rmap)
-{
- return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
-}
-
-/* tlb_remove_page
- * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- * required.
- */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
tlb_remove_table(tlb, pt);
}
-/* Like tlb_remove_ptdesc, but for page-like page directories. */
-static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
-{
- tlb_remove_page(tlb, ptdesc_page(pt));
-}
-
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
@@ -549,22 +558,38 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
+ if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+ return;
+
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs;
+ * moreover, the mmu_gather::vma_* flags set by tlb_start_vma() rely
+ * on this.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+}
+
+static inline void tlb_free_vmas(struct mmu_gather *tlb)
+{
if (tlb->fullmm)
return;
/*
* VM_PFNMAP is more fragile because the core mm will not track the
- * page mapcount -- there might not be page-frames for these PFNs after
- * all. Force flush TLBs for such ranges to avoid munmap() vs
- * unmap_mapping_range() races.
+ * page mapcount -- there might not be page-frames for these PFNs
+ * after all.
+ *
+ * Specifically, there is a race between munmap() and
+ * unmap_mapping_range(), where munmap() will unlink the VMA, such
+ * that unmap_mapping_range() will no longer observe the VMA and
+ * no-op, without observing the TLBI, returning prematurely.
+ *
+ * So if we're about to unlink such a VMA, and we have pending
+ * TLBI for such a vma, flush things now.
*/
- if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
- /*
- * Do a TLB flush and reset the range at VMA boundaries; this avoids
- * the ranges growing with the unused space between consecutive VMAs.
- */
+ if (tlb->vma_pfn)
tlb_flush_mmu_tlbonly(tlb);
- }
}
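As a usage sketch (not part of this patch), an architecture that does not define __HAVE_ARCH_TLB_REMOVE_TABLE frees page-table pages through the paths above roughly as follows, in the style of the arm64 conversion; page_ptdesc() and pgtable_t follow the common arch conventions:

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	/* Queues the PTE page; the dtor runs via __tlb_remove_table(). */
	tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}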
/*
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index a5be9e61a2a2..b276f783494c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -11,7 +11,7 @@
#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
diff --git a/include/asm-generic/unwind_user.h b/include/asm-generic/unwind_user.h
new file mode 100644
index 000000000000..b8882b909944
--- /dev/null
+++ b/include/asm-generic/unwind_user.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_UNWIND_USER_H
+#define _ASM_GENERIC_UNWIND_USER_H
+
+#endif /* _ASM_GENERIC_UNWIND_USER_H */
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index c835607f78ae..5c6d9799f4e7 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -4,25 +4,31 @@
#ifndef __ASSEMBLY__
-#ifndef __arch_get_k_vdso_data
-static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+#ifndef __arch_get_vdso_u_time_data
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- return NULL;
+ return &vdso_u_time_data;
}
-#endif /* __arch_get_k_vdso_data */
+#endif
-#ifndef __arch_update_vsyscall
-static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
- struct timekeeper *tk)
+#ifndef __arch_get_vdso_u_rng_data
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(void)
{
+ return &vdso_u_rng_data;
}
-#endif /* __arch_update_vsyscall */
+#endif
-#ifndef __arch_sync_vdso_data
-static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+#ifndef __arch_update_vdso_clock
+static __always_inline void __arch_update_vdso_clock(struct vdso_clock *vc)
{
}
-#endif /* __arch_sync_vdso_data */
+#endif /* __arch_update_vdso_clock */
+
+#ifndef __arch_sync_vdso_time_data
+static __always_inline void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_time_data */
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
index adf91a783b5c..5dcaf4ae904a 100644
--- a/include/asm-generic/vga.h
+++ b/include/asm-generic/vga.h
@@ -1,25 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@ucw.cz>
- */
#ifndef __ASM_GENERIC_VGA_H
#define __ASM_GENERIC_VGA_H
-
-/*
- * On most architectures that support VGA, we can just
- * recalculate addresses and then access the videoram
- * directly without any black magic.
- *
- * Everyone else needs to ioremap the address and use
- * proper I/O accesses.
- */
-#ifndef VGA_MAP_MEM
-#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
-#endif
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x, y) (*(y) = (x))
-
-#endif /* _ASM_GENERIC_VGA_H */
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 5703526d6ebf..8ca130af301f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -87,32 +87,56 @@
#define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)
/*
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
- * generates .data.identifier sections, which need to be pulled in with
- * .data. We don't want to pull in .data..other sections, which Linux
- * has defined. Same for text and bss.
+ * Support -ffunction-sections by matching .text and .text.*,
+ * but exclude '.text..*', .text.startup[.*], and .text.exit[.*].
*
- * With LTO_CLANG, the linker also splits sections by default, so we need
- * these macros to combine the sections during the final link.
+ * .text.startup and .text.startup.* are matched later by INIT_TEXT, and
+ * .text.exit and .text.exit.* are matched later by EXIT_TEXT, so they must be
+ * explicitly excluded here.
*
- * RODATA_MAIN is not used because existing code already defines .rodata.x
- * sections to be brought in with rodata.
+ * Other .text.* sections that are typically grouped separately, such as
+ * .text.unlikely or .text.hot, must be matched explicitly before using
+ * TEXT_MAIN.
+ *
+ * NOTE: builds *with* and *without* -ffunction-sections are both supported by
+ * this single macro. Even with -ffunction-sections, there may be some objects
+ * NOT compiled with the flag due to the use of a specific Makefile override
+ * like cflags-y or AUTOFDO_PROFILE_foo.o. So this single catchall rule is
+ * needed to support mixed object builds.
+ *
+ * One implication is that functions named startup(), exit(), split(),
+ * unlikely(), hot(), and unknown() are not allowed in the kernel due to the
+ * ambiguity of their section names with -ffunction-sections. For example,
+ * .text.startup could be __attribute__((constructor)) code in a *non*
+ * ffunction-sections object, which should be placed in .init.text; or it could
+ * be an actual function named startup() in an ffunction-sections object, which
+ * should be placed in .text. The build will detect and complain about any such
+ * ambiguously named functions.
+ */
+#define TEXT_MAIN \
+ .text \
+ .text.[_0-9A-Za-df-rt-z]* \
+ .text.s[_0-9A-Za-su-z]* .text.s .text.s.* \
+ .text.st[_0-9A-Zb-z]* .text.st .text.st.* \
+ .text.sta[_0-9A-Za-qs-z]* .text.sta .text.sta.* \
+ .text.star[_0-9A-Za-su-z]* .text.star .text.star.* \
+ .text.start[_0-9A-Za-tv-z]* .text.start .text.start.* \
+ .text.startu[_0-9A-Za-oq-z]* .text.startu .text.startu.* \
+ .text.startup[_0-9A-Za-z]* \
+ .text.e[_0-9A-Za-wy-z]* .text.e .text.e.* \
+ .text.ex[_0-9A-Za-hj-z]* .text.ex .text.ex.* \
+ .text.exi[_0-9A-Za-su-z]* .text.exi .text.exi.* \
+ .text.exit[_0-9A-Za-z]*
+
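A small C illustration of the naming ambiguity described in the comment above (both functions are hypothetical, not from this patch):

/* Without -ffunction-sections, an optimizing GCC may emit this into
 * .text.startup, where it belongs in .init.text. */
__attribute__((constructor)) static void register_me(void)
{
}

/* With -ffunction-sections, this also lands in .text.startup; the two
 * cases cannot be told apart by section name, hence the build rejects
 * functions named startup(). */
void startup(void)
{
}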
+/*
+ * Support -fdata-sections by matching .data, .data.*, and others,
+ * but exclude '.data..*'.
*/
-#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
-#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
-#else
-#define TEXT_MAIN .text
-#define DATA_MAIN .data
-#define SDATA_MAIN .sdata
-#define RODATA_MAIN .rodata
-#define BSS_MAIN .bss
-#define SBSS_MAIN .sbss
-#endif
/*
* GCC 4.5 and later have a 32 bytes section alignment for structures.
@@ -133,6 +157,7 @@
*(__dl_sched_class) \
*(__rt_sched_class) \
*(__fair_sched_class) \
+ *(__ext_sched_class) \
*(__idle_sched_class) \
__sched_class_lowest = .;
@@ -141,14 +166,6 @@
* often happens at runtime)
*/
-#if defined(CONFIG_MEMORY_HOTPLUG)
-#define MEM_KEEP(sec) *(.mem##sec)
-#define MEM_DISCARD(sec)
-#else
-#define MEM_KEEP(sec)
-#define MEM_DISCARD(sec) *(.mem##sec)
-#endif
-
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
#define PATCHABLE_DISCARDS
@@ -157,7 +174,7 @@
#define PATCHABLE_DISCARDS *(__patchable_function_entries)
#endif
-#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+#ifndef CONFIG_ARCH_SUPPORTS_CFI
/*
* Simply points to ftrace_stub, but with the proper protocol.
* Defined by the linker script in linux/vmlinux.lds.h
@@ -167,7 +184,7 @@
#define FTRACE_STUB_HACK
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
/*
* The ftrace call sites are logged to a section whose name depends on the
* compiler option used. A given kernel image will only use one, AKA
@@ -357,11 +374,11 @@
*(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
- MEM_KEEP(init.data*) \
- *(.data.unlikely) \
+ *(.data..unlikely) \
__start_once = .; \
- *(.data.once) \
+ *(.data..once) \
__end_once = .; \
+ *(.data..do_once) \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
@@ -386,6 +403,11 @@
. = ALIGN(PAGE_SIZE); \
__nosave_end = .;
+#define CACHE_HOT_DATA(align) \
+ . = ALIGN(align); \
+ *(SORT_BY_ALIGNMENT(.data..hot.*)) \
+ . = ALIGN(align);
+
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
*(.data..page_aligned) \
@@ -405,7 +427,6 @@
__start_init_stack = .; \
init_thread_union = .; \
init_stack = .; \
- KEEP(*(.data..init_task)) \
KEEP(*(.data..init_thread_info)) \
. = __start_init_stack + THREAD_SIZE; \
__end_init_stack = .;
@@ -452,32 +473,13 @@
#endif
/*
- * Some symbol definitions will not exist yet during the first pass of the
- * link, but are guaranteed to exist in the final link. Provide preliminary
- * definitions that will be superseded in the final link to avoid having to
- * rely on weak external linkage, which requires a GOT when used in position
- * independent code.
- */
-#define PRELIMINARY_SYMBOL_DEFINITIONS \
- PROVIDE(kallsyms_addresses = .); \
- PROVIDE(kallsyms_offsets = .); \
- PROVIDE(kallsyms_names = .); \
- PROVIDE(kallsyms_num_syms = .); \
- PROVIDE(kallsyms_relative_base = .); \
- PROVIDE(kallsyms_token_table = .); \
- PROVIDE(kallsyms_token_index = .); \
- PROVIDE(kallsyms_markers = .); \
- PROVIDE(kallsyms_seqs_of_names = .);
-
-/*
* Read only Data
*/
#define RO_DATA(align) \
. = ALIGN((align)); \
- PRELIMINARY_SYMBOL_DEFINITIONS \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
- *(.rodata) *(.rodata.*) \
+ *(.rodata) *(.rodata.*) *(.data.rel.ro*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
@@ -542,7 +544,6 @@
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
- MEM_KEEP(init.rodata) \
} \
\
/* Built-in module parameters. */ \
@@ -577,25 +578,43 @@
__cpuidle_text_end = .; \
__noinstr_text_end = .;
+#define TEXT_SPLIT \
+ __split_text_start = .; \
+ *(.text.split .text.split.[0-9a-zA-Z_]*) \
+ __split_text_end = .;
+
+#define TEXT_UNLIKELY \
+ __unlikely_text_start = .; \
+ *(.text.unlikely .text.unlikely.*) \
+ __unlikely_text_end = .;
+
+#define TEXT_HOT \
+ __hot_text_start = .; \
+ *(.text.hot .text.hot.*) \
+ __hot_text_end = .;
+
/*
* .text section. Map to function alignment to avoid address changes
* during the second ld pass when generating System.map
*
- * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
- * code elimination is enabled, so these sections should be converted
- * to use ".." first.
+ * TEXT_MAIN here will match sections whose names follow a fixed pattern
+ * (for example, .text.hot or .text.unlikely). Match those before
+ * TEXT_MAIN to ensure they get grouped together.
+ *
+ * Also, placing the .text.hot section at the beginning of a page helps
+ * TLB performance.
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot .text.hot.*) \
- *(TEXT_MAIN .text.fixup) \
- *(.text.unlikely .text.unlikely.*) \
+ *(.text.asan.* .text.tsan.*) \
*(.text.unknown .text.unknown.*) \
+ TEXT_SPLIT \
+ TEXT_UNLIKELY \
+ . = ALIGN(PAGE_SIZE); \
+ TEXT_HOT \
+ *(TEXT_MAIN .text.fixup) \
NOINSTR_TEXT \
- *(.ref.text) \
- *(.text.asan.* .text.tsan.*) \
- MEM_KEEP(init.text*) \
-
+ *(.ref.text)
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
@@ -665,10 +684,11 @@
*/
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF \
+ . = ALIGN(PAGE_SIZE); \
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
BOUNDED_SECTION_BY(.BTF, _BTF) \
} \
- . = ALIGN(4); \
+ . = ALIGN(PAGE_SIZE); \
.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
*(.BTF_ids) \
}
@@ -701,7 +721,6 @@
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
*(.init.data .init.data.*) \
- MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
*(.init.rodata .init.rodata.*) \
@@ -709,7 +728,6 @@
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
ERROR_INJECT_WHITELIST() \
- MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
TIMER_OF_TABLES() \
@@ -727,17 +745,16 @@
#define INIT_TEXT \
*(.init.text .init.text.*) \
- *(.text.startup) \
- MEM_DISCARD(init.text*)
+ *(.text.startup .text.startup.*)
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
- *(.dtors .dtors.*) \
+ *(.dtors .dtors.*)
#define EXIT_TEXT \
*(.exit.text) \
- *(.text.exit) \
+ *(.text.exit .text.exit.*)
#define EXIT_CALL \
*(.exitcall.exit)
@@ -831,6 +848,7 @@
/* Required sections not related to debugging. */
#define ELF_DETAILS \
+ .modinfo : { *(.modinfo) . = ALIGN(8); } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
@@ -944,6 +962,18 @@
#define CON_INITCALL \
BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)
+#define NAMED_SECTION(name) \
+ . = ALIGN(8); \
+ name : AT(ADDR(name) - LOAD_OFFSET) \
+ { BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) }
+
+#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
+
+#define RUNTIME_CONST_VARIABLES \
+ RUNTIME_CONST(shift, d_hash_shift) \
+ RUNTIME_CONST(ptr, dentry_hashtable) \
+ RUNTIME_CONST(ptr, __dentry_cache)
+
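For reference, these runtime-constant sections back the dcache fast path; a sketch adapted from fs/dcache.c, where the runtime_const_*() accessors come from the arch's asm/runtime-const.h and are patched at boot from the runtime_shift/runtime_ptr sections emitted above:

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return runtime_const_ptr(dentry_hashtable) +
		runtime_const_shift_right_32(hash, d_hash_shift);
}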
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
@@ -1032,9 +1062,10 @@
*(.discard) \
*(.discard.*) \
*(.export_symbol) \
- *(.modinfo) \
+ *(.no_trim_symbol) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
+ *(__tracepoint_check) \
#define DISCARDS \
/DISCARD/ : { \
@@ -1055,10 +1086,13 @@
*/
#define PERCPU_INPUT(cacheline) \
__per_cpu_start = .; \
- *(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
. = ALIGN(cacheline); \
+ __per_cpu_hot_start = .; \
+ *(SORT_BY_ALIGNMENT(.data..percpu..hot.*)) \
+ __per_cpu_hot_end = .; \
+ . = ALIGN(cacheline); \
*(.data..percpu..read_mostly) \
. = ALIGN(cacheline); \
*(.data..percpu) \
@@ -1067,52 +1101,17 @@
__per_cpu_end = .;
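A usage sketch for the new hot subsection, assuming a DEFINE_PER_CPU_CACHE_HOT() helper that emits its variable into .data..percpu..hot.* (the helper name is an assumption, not part of this hunk):

/* Grouped between __per_cpu_hot_start and __per_cpu_hot_end at link time,
 * so frequently-accessed per-CPU data shares cachelines. */
DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task);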
/**
- * PERCPU_VADDR - define output section for percpu area
+ * PERCPU_SECTION - define output section for percpu area
* @cacheline: cacheline size
- * @vaddr: explicit base address (optional)
- * @phdr: destination PHDR (optional)
*
* Macro which expands to output section for percpu area.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
- *
- * If @vaddr is not blank, it specifies explicit base address and all
- * percpu symbols will be offset from the given address. If blank,
- * @vaddr always equals @laddr + LOAD_OFFSET.
- *
- * @phdr defines the output PHDR to use if not blank. Be warned that
- * output PHDR is sticky. If @phdr is specified, the next output
- * section in the linker script will go there too. @phdr should have
- * a leading colon.
- *
- * Note that this macros defines __per_cpu_load as an absolute symbol.
- * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU_SECTION.
- */
-#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- __per_cpu_load = .; \
- .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
- PERCPU_INPUT(cacheline) \
- } phdr \
- . = __per_cpu_load + SIZEOF(.data..percpu);
-
-/**
- * PERCPU_SECTION - define output section for percpu area, simple version
- * @cacheline: cacheline size
- *
- * Align to PAGE_SIZE and outputs output section for percpu area. This
- * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
- * __per_cpu_start will be identical.
- *
- * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
- * except that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32 configuration.
*/
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- __per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
}
@@ -1141,6 +1140,7 @@
INIT_TASK_DATA(inittask) \
NOSAVE_DATA \
PAGE_ALIGNED_DATA(pagealigned) \
+ CACHE_HOT_DATA(cacheline) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index cbbc9a6dc571..2eda895f19f5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -9,9 +9,6 @@
#include <linux/timecounter.h>
#include <linux/types.h>
-#define ARCH_TIMER_TYPE_CP15 BIT(0)
-#define ARCH_TIMER_TYPE_MEM BIT(1)
-
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
@@ -22,6 +19,12 @@
#define CNTHCTL_EVNTDIR (1 << 3)
#define CNTHCTL_EVNTI (0xF << 4)
#define CNTHCTL_ECV (1 << 12)
+#define CNTHCTL_EL1TVT (1 << 13)
+#define CNTHCTL_EL1TVCT (1 << 14)
+#define CNTHCTL_EL1NVPCT (1 << 15)
+#define CNTHCTL_EL1NVVCT (1 << 16)
+#define CNTHCTL_CNTVMASK (1 << 18)
+#define CNTHCTL_CNTPMASK (1 << 19)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
@@ -45,8 +48,6 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
-#define ARCH_TIMER_MEM_PHYS_ACCESS 2
-#define ARCH_TIMER_MEM_VIRT_ACCESS 3
#define ARCH_TIMER_MEM_MAX_FRAMES 8
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 6cdc873ac907..d48dd4176fd3 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -15,7 +15,7 @@
#include <linux/clocksource.h>
#include <linux/math64.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
@@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(void);
extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+extern void hv_adj_sched_clock_offset(u64 offset);
+
static __always_inline bool
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
u64 *cur_tsc, u64 *time)
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
index c0f56fe6d22a..d116f18de899 100644
--- a/include/clocksource/timer-xilinx.h
+++ b/include/clocksource/timer-xilinx.h
@@ -41,7 +41,7 @@ struct regmap;
struct xilinx_timer_priv {
struct regmap *map;
struct clk *clk;
- u32 max;
+ u64 max;
};
/**
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 54937b615239..9eacb9fa375d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -10,30 +10,93 @@
#define _CRYPTO_ACOMP_H
#include <linux/atomic.h>
+#include <linux/args.h>
+#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Set this bit if source is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002
+
+/* Set this bit if the virtual address source cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004
+
+/* Set this bit if destination is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008
+
+/* Set this bit if the virtual address destination cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
+
+/* Private flags that should not be touched by the user. */
+#define CRYPTO_ACOMP_REQ_PRIVATE \
+ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
+ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
-#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
#define CRYPTO_ACOMP_DST_MAX 131072
+#define MAX_SYNC_COMP_REQSIZE 0
+
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_request_on_stack_init( \
+ __##name##_req, (tfm))
+
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
+
+struct acomp_req;
+struct folio;
+
+struct acomp_req_chain {
+ crypto_completion_t compl;
+ void *data;
+ struct scatterlist ssg;
+ struct scatterlist dsg;
+ union {
+ const u8 *src;
+ struct folio *sfolio;
+ };
+ union {
+ u8 *dst;
+ struct folio *dfolio;
+ };
+ u32 flags;
+};
+
/**
* struct acomp_req - asynchronous (de)compression request
*
* @base: Common attributes for asynchronous crypto requests
- * @src: Source Data
- * @dst: Destination data
+ * @src: Source scatterlist
+ * @dst: Destination scatterlist
+ * @svirt: Source virtual address
+ * @dvirt: Destination virtual address
* @slen: Size of the input buffer
* @dlen: Size of the output buffer and number of bytes produced
- * @flags: Internal flags
+ * @chain: Private API code data, do not use
* @__ctx: Start of private context data
*/
struct acomp_req {
struct crypto_async_request base;
- struct scatterlist *src;
- struct scatterlist *dst;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
+ union {
+ struct scatterlist *dst;
+ u8 *dvirt;
+ };
unsigned int slen;
unsigned int dlen;
- u32 flags;
+
+ struct acomp_req_chain chain;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -43,15 +106,13 @@ struct acomp_req {
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the
- * algorithm
* @reqsize: Context size for (de)compression requests
+ * @fb: Synchronous fallback tfm
* @base: Common crypto API algorithm data structure
*/
struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -133,7 +194,7 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
static inline void acomp_request_set_tfm(struct acomp_req *req,
struct crypto_acomp *tfm)
{
- req->base.tfm = crypto_acomp_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}
static inline bool acomp_is_async(struct crypto_acomp *tfm)
@@ -168,14 +229,72 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
+static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
+}
+
+static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+}
+
/**
* acomp_request_alloc() -- allocates asynchronous (de)compression request
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
+ *
+ * Return: allocated handle in case of success or NULL in case of an error
+ */
+static inline struct acomp_req *acomp_request_alloc_extra_noprof(
+ struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
+{
+ struct acomp_req *req;
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ if (check_add_overflow(len, extra, &len))
+ return NULL;
+
+ req = kzalloc_noprof(len, gfp);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+#define acomp_request_alloc_noprof(tfm, ...) \
+ CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
+ tfm, ##__VA_ARGS__)
+#define acomp_request_alloc_noprof_0(tfm) \
+ acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
+#define acomp_request_alloc_noprof_1(tfm, gfp) \
+ acomp_request_alloc_extra_noprof(tfm, 0, gfp)
+#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
+
+/**
+ * acomp_request_alloc_extra() -- allocate acomp request with extra memory
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @extra: amount of extra memory
+ * @gfp: gfp to pass to kzalloc
*
* Return: allocated handle in case of success or NULL in case of an error
*/
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
+
+static inline void *acomp_request_extra(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ return (void *)((char *)req + len);
+}
+
+static inline bool acomp_req_on_stack(struct acomp_req *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
/**
* acomp_request_free() -- zeroize and free asynchronous (de)compression
@@ -184,7 +303,12 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
*
* @req: request to free
*/
-void acomp_request_free(struct acomp_req *req);
+static inline void acomp_request_free(struct acomp_req *req)
+{
+ if (!req || acomp_req_on_stack(req))
+ return;
+ kfree_sensitive(req);
+}
/**
* acomp_request_set_callback() -- Sets an asynchronous callback
@@ -202,10 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- req->base.complete = cmpl;
- req->base.data = data;
- req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
- req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+ flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
/**
@@ -232,9 +355,169 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
- req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
- if (!req->dst)
- req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_VIRT |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+/**
+ * acomp_request_set_src_sg() -- Sets source scatterlist
+ *
+ * Sets source scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @src: pointer to input buffer scatterlist
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_sg(struct acomp_req *req,
+ struct scatterlist *src,
+ unsigned int slen)
+{
+ req->src = src;
+ req->slen = slen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_dma() -- Sets DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_dma(struct acomp_req *req,
+ const u8 *src, unsigned int slen)
+{
+ req->svirt = src;
+ req->slen = slen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address cannot be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_nondma(struct acomp_req *req,
+ const u8 *src,
+ unsigned int slen)
+{
+ req->svirt = src;
+ req->slen = slen;
+
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_folio() -- Sets source folio
+ *
+ * Sets source folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to input folio
+ * @off: input folio offset
+ * @len: size of the input buffer
+ */
+static inline void acomp_request_set_src_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.ssg, 1);
+ sg_set_folio(&req->chain.ssg, folio, len, off);
+ acomp_request_set_src_sg(req, &req->chain.ssg, len);
+}
+
+/**
+ * acomp_request_set_dst_sg() -- Sets destination scatterlist
+ *
+ * Sets destination scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @dst: pointer to output buffer scatterlist
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_sg(struct acomp_req *req,
+ struct scatterlist *dst,
+ unsigned int dlen)
+{
+ req->dst = dst;
+ req->dlen = dlen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_dma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address cannot be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
+
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_folio() -- Sets destination folio
+ *
+ * Sets destination folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to output folio
+ * @off: output folio offset
+ * @len: size of the output buffer
+ */
+static inline void acomp_request_set_dst_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.dsg, 1);
+ sg_set_folio(&req->chain.dsg, folio, len, off);
+ acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}
/**
@@ -246,10 +529,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_acomp_compress(struct acomp_req *req)
-{
- return crypto_acomp_reqtfm(req)->compress(req);
-}
+int crypto_acomp_compress(struct acomp_req *req);
/**
* crypto_acomp_decompress() -- Invoke asynchronous decompress operation
@@ -260,9 +540,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_acomp_decompress(struct acomp_req *req)
+int crypto_acomp_decompress(struct acomp_req *req);
+
+static inline struct acomp_req *acomp_request_on_stack_init(
+ char *buf, struct crypto_acomp *tfm)
{
- return crypto_acomp_reqtfm(req)->decompress(req);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
+ return req;
}
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
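Taken together, a minimal one-shot compression with virtual addresses might look like the sketch below; it assumes a "deflate" acomp implementation is available and uses the standard crypto_wait helpers to handle asynchronous completion:

static int compress_once(const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);		/* GFP_KERNEL by default */
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	acomp_request_set_src_dma(req, src, slen);
	acomp_request_set_dst_dma(req, dst, dlen);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	/* On success, req->dlen holds the number of bytes produced. */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}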
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0e8a41638678..8e66a1fa9c78 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -159,6 +159,21 @@ struct crypto_aead {
struct crypto_tfm base;
};
+struct crypto_sync_aead {
+ struct crypto_aead base;
+};
+
+#define MAX_SYNC_AEAD_REQSIZE 384
+
+#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_desc[sizeof(struct aead_request) + \
+ MAX_SYNC_AEAD_REQSIZE \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct aead_request *name = \
+ (((struct aead_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_aead_tfm((_tfm)), \
+ (void *)__##name##_desc)
+
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
@@ -180,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_tfm(&tfm->base);
+}
+
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
@@ -196,6 +218,11 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+
/**
* crypto_has_aead() - Search for the availability of an aead.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -238,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
+static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_ivsize(&tfm->base);
+}
+
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
@@ -255,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_authsize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
@@ -265,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
+static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_maxauthsize(&tfm->base);
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -280,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
+static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
@@ -300,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
@@ -319,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
+static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(&tfm->base, key, keylen);
+}
+
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
@@ -331,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_aead_setauthsize(&tfm->base, authsize);
+}
+
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
+static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_aead, base);
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -417,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(tfm);
}
+static inline void aead_request_set_sync_tfm(struct aead_request *req,
+ struct crypto_sync_aead *tfm)
+{
+ aead_request_set_tfm(req, &tfm->base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
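A hedged sketch of the intended pattern for the sync wrappers above, assuming "gcm(aes)" and omitting the scatterlist and IV setup, which is unchanged from the regular AEAD API:

static int gcm_setup_sketch(const u8 *key, unsigned int keylen)
{
	struct crypto_sync_aead *tfm;
	int err;

	tfm = crypto_alloc_sync_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_aead_setkey(tfm, key, keylen);
	if (!err) {
		/* Request lives on the stack; no allocation, no free. */
		SYNC_AEAD_REQUEST_ON_STACK(req, tfm);

		aead_request_set_sync_tfm(req, tfm);
		/* ... aead_request_set_callback()/set_crypt()/set_ad(),
		 * then crypto_aead_encrypt(req), as with any AEAD ... */
	}

	crypto_free_sync_aead(tfm);
	return err;
}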
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 18a10cad07aa..cdf7da74bf2f 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -12,24 +12,19 @@
#include <linux/crypto.h>
/**
- * struct akcipher_request - public key request
+ * struct akcipher_request - public key cipher request
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * For verify op this is signature + digest, in that case
- * total size of @src is @src_len + @dst_len.
- * @dst: Destination data (Should be NULL for verify op)
+ * @dst: Destination data
* @src_len: Size of the input buffer
- * For verify op it's size of signature part of @src, this part
- * is supposed to be operated by cipher.
- * @dst_len: Size of @dst buffer (for all ops except verify).
+ * @dst_len: Size of @dst buffer
* It needs to be at least as big as the expected result
* depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
- * For verify op this is size of digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
@@ -55,15 +50,8 @@ struct crypto_akcipher {
};
/**
- * struct akcipher_alg - generic public key algorithm
+ * struct akcipher_alg - generic public key cipher algorithm
*
- * @sign: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
- * @verify: Function performs a complete verify operation as defined by
- * public key algorithm, returning verification status. Requires
- * digest value as input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -94,8 +82,6 @@ struct crypto_akcipher {
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
- int (*sign)(struct akcipher_request *req);
- int (*verify)(struct akcipher_request *req);
int (*encrypt)(struct akcipher_request *req);
int (*decrypt)(struct akcipher_request *req);
int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key,
@@ -110,9 +96,9 @@ struct akcipher_alg {
};
/**
- * DOC: Generic Public Key API
+ * DOC: Generic Public Key Cipher API
*
- * The Public Key API is used with the algorithms of type
+ * The Public Key Cipher API is used with the algorithms of type
* CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
*/
@@ -243,10 +229,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list or NULL for verify op
+ * @dst: ptr to output scatter list
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list or size of signature
- * portion in @src for verify op
+ * @dst_len: size of the dst output scatter list
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -348,44 +333,6 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
void *dst, unsigned int dlen);
/**
- * crypto_akcipher_sign() - Invoke public key sign operation
- *
- * Function invokes the specific public key sign operation for a given
- * public key algorithm
- *
- * @req: asymmetric key request
- *
- * Return: zero on success; error code in case of error
- */
-static inline int crypto_akcipher_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- return crypto_akcipher_alg(tfm)->sign(req);
-}
-
-/**
- * crypto_akcipher_verify() - Invoke public key signature verification
- *
- * Function invokes the specific public key signature verification operation
- * for a given public key algorithm.
- *
- * @req: asymmetric key request
- *
- * Note: req->dst should be NULL, req->src should point to SG of size
- * (req->src_size + req->dst_size), containing signature (of req->src_size
- * length) with appended digest (of req->dst_size length).
- *
- * Return: zero on verification success; error code in case of error.
- */
-static inline int crypto_akcipher_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- return crypto_akcipher_alg(tfm)->verify(req);
-}
-
-/**
* crypto_akcipher_set_pub_key() - Invoke set public key operation
*
* Function invokes the algorithm specific set key function, which knows
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 156de41ca760..05deea9dac5e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -11,6 +11,7 @@
#include <linux/align.h>
#include <linux/cache.h>
#include <linux/crypto.h>
+#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -42,8 +43,8 @@
* alias.
*/
#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+ MODULE_INFO(alias, name); \
+ MODULE_INFO(alias, "crypto-" name)
struct crypto_aead;
struct crypto_instance;
@@ -53,20 +54,7 @@ struct rtattr;
struct scatterlist;
struct seq_file;
struct sk_buff;
-
-struct crypto_type {
- unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
- unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init_tfm)(struct crypto_tfm *tfm);
- void (*show)(struct seq_file *m, struct crypto_alg *alg);
- int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
- void (*free)(struct crypto_instance *inst);
-
- unsigned int type;
- unsigned int maskclear;
- unsigned int maskset;
- unsigned int tfmsize;
-};
+union crypto_no_such_thing;
struct crypto_instance {
struct crypto_alg alg;
@@ -80,16 +68,17 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
- struct work_struct free_work;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -119,6 +108,13 @@ struct crypto_queue {
};
struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
struct scatterlist *sg;
unsigned int offset;
};
@@ -162,8 +158,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+ CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+ inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+ __crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+ __crypto_inst_setname(inst, name, driver, alg)
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -271,4 +275,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
+}
+
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
+{
+ return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index 5f92a986083c..15a9caa2354a 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -28,5 +28,7 @@ struct crypto_authenc_keys {
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
unsigned int keylen);
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
index 0c0176285349..3bc37fd103a7 100644
--- a/include/crypto/blake2b.h
+++ b/include/crypto/blake2b.h
@@ -18,7 +18,16 @@ enum blake2b_lengths {
BLAKE2B_512_HASH_SIZE = 64,
};
-struct blake2b_state {
+/**
+ * struct blake2b_ctx - Context for hashing a message with BLAKE2b
+ * @h: compression function state
+ * @t: block counter
+ * @f: finalization indicator
+ * @buf: partial block buffer; 'buflen' bytes are valid
+ * @buflen: number of bytes buffered in @buf
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ */
+struct blake2b_ctx {
/* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u64 h[8];
u64 t[2];
@@ -39,28 +48,109 @@ enum blake2b_iv {
BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL,
};
-static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
+static inline void __blake2b_init(struct blake2b_ctx *ctx, size_t outlen,
const void *key, size_t keylen)
{
- state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
- state->h[1] = BLAKE2B_IV1;
- state->h[2] = BLAKE2B_IV2;
- state->h[3] = BLAKE2B_IV3;
- state->h[4] = BLAKE2B_IV4;
- state->h[5] = BLAKE2B_IV5;
- state->h[6] = BLAKE2B_IV6;
- state->h[7] = BLAKE2B_IV7;
- state->t[0] = 0;
- state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
+ ctx->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ ctx->h[1] = BLAKE2B_IV1;
+ ctx->h[2] = BLAKE2B_IV2;
+ ctx->h[3] = BLAKE2B_IV3;
+ ctx->h[4] = BLAKE2B_IV4;
+ ctx->h[5] = BLAKE2B_IV5;
+ ctx->h[6] = BLAKE2B_IV6;
+ ctx->h[7] = BLAKE2B_IV7;
+ ctx->t[0] = 0;
+ ctx->t[1] = 0;
+ ctx->f[0] = 0;
+ ctx->f[1] = 0;
+ ctx->buflen = 0;
+ ctx->outlen = outlen;
if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2B_BLOCK_SIZE;
+ memcpy(ctx->buf, key, keylen);
+ memset(&ctx->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
+ ctx->buflen = BLAKE2B_BLOCK_SIZE;
}
}
+/**
+ * blake2b_init() - Initialize a BLAKE2b context for a new message (unkeyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b_init(struct blake2b_ctx *ctx, size_t outlen)
+{
+ __blake2b_init(ctx, outlen, NULL, 0);
+}
+
+/**
+ * blake2b_init_key() - Initialize a BLAKE2b context for a new message (keyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ * @key: the key
+ * @keylen: the key length in bytes, at most BLAKE2B_KEY_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b_init_key(struct blake2b_ctx *ctx, size_t outlen,
+ const void *key, size_t keylen)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2B_HASH_SIZE ||
+ !key || !keylen || keylen > BLAKE2B_KEY_SIZE));
+
+ __blake2b_init(ctx, outlen, key, keylen);
+}
+
+/**
+ * blake2b_update() - Update a BLAKE2b context with message data
+ * @ctx: the context to update; must have been initialized
+ * @in: the message data
+ * @inlen: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen);
+
+/**
+ * blake2b_final() - Finish computing a BLAKE2b hash
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting BLAKE2b hash. Its length will be equal to the
+ * @outlen that was passed to blake2b_init() or blake2b_init_key().
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void blake2b_final(struct blake2b_ctx *ctx, u8 *out);
+
+/**
+ * blake2b() - Compute BLAKE2b hash in one shot
+ * @key: the key, or NULL for an unkeyed hash
+ * @keylen: the key length in bytes (at most BLAKE2B_KEY_SIZE), or 0 for an
+ * unkeyed hash
+ * @in: the message data
+ * @inlen: the data length in bytes
+ * @out: (output) the resulting BLAKE2b hash, with length @outlen
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b(const u8 *key, size_t keylen,
+ const u8 *in, size_t inlen,
+ u8 *out, size_t outlen)
+{
+ struct blake2b_ctx ctx;
+
+ WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
+ outlen > BLAKE2B_HASH_SIZE || keylen > BLAKE2B_KEY_SIZE ||
+ (!key && keylen)));
+
+ __blake2b_init(&ctx, outlen, key, keylen);
+ blake2b_update(&ctx, in, inlen);
+ blake2b_final(&ctx, out);
+}
+
#endif /* _CRYPTO_BLAKE2B_H */
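A short usage sketch of the one-shot helper above, producing a 256-bit unkeyed hash; data and data_len stand in for the caller's message buffer:

u8 digest[BLAKE2B_256_HASH_SIZE];

blake2b(NULL, 0, data, data_len, digest, sizeof(digest));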
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index f9ffd39194eb..648cb7824358 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -22,7 +22,16 @@ enum blake2s_lengths {
BLAKE2S_256_HASH_SIZE = 32,
};
-struct blake2s_state {
+/**
+ * struct blake2s_ctx - Context for hashing a message with BLAKE2s
+ * @h: compression function state
+ * @t: block counter
+ * @f: finalization indicator
+ * @buf: partial block buffer; 'buflen' bytes are valid
+ * @buflen: number of bytes buffered in @buf
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ */
+struct blake2s_ctx {
/* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u32 h[8];
u32 t[2];
@@ -43,62 +52,109 @@ enum blake2s_iv {
BLAKE2S_IV7 = 0x5BE0CD19UL,
};
-static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
+static inline void __blake2s_init(struct blake2s_ctx *ctx, size_t outlen,
const void *key, size_t keylen)
{
- state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
- state->h[1] = BLAKE2S_IV1;
- state->h[2] = BLAKE2S_IV2;
- state->h[3] = BLAKE2S_IV3;
- state->h[4] = BLAKE2S_IV4;
- state->h[5] = BLAKE2S_IV5;
- state->h[6] = BLAKE2S_IV6;
- state->h[7] = BLAKE2S_IV7;
- state->t[0] = 0;
- state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
+ ctx->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ ctx->h[1] = BLAKE2S_IV1;
+ ctx->h[2] = BLAKE2S_IV2;
+ ctx->h[3] = BLAKE2S_IV3;
+ ctx->h[4] = BLAKE2S_IV4;
+ ctx->h[5] = BLAKE2S_IV5;
+ ctx->h[6] = BLAKE2S_IV6;
+ ctx->h[7] = BLAKE2S_IV7;
+ ctx->t[0] = 0;
+ ctx->t[1] = 0;
+ ctx->f[0] = 0;
+ ctx->f[1] = 0;
+ ctx->buflen = 0;
+ ctx->outlen = outlen;
if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2S_BLOCK_SIZE;
+ memcpy(ctx->buf, key, keylen);
+ memset(&ctx->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
+ ctx->buflen = BLAKE2S_BLOCK_SIZE;
}
}
-static inline void blake2s_init(struct blake2s_state *state,
- const size_t outlen)
+/**
+ * blake2s_init() - Initialize a BLAKE2s context for a new message (unkeyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s_init(struct blake2s_ctx *ctx, size_t outlen)
{
- __blake2s_init(state, outlen, NULL, 0);
+ __blake2s_init(ctx, outlen, NULL, 0);
}
-static inline void blake2s_init_key(struct blake2s_state *state,
- const size_t outlen, const void *key,
- const size_t keylen)
+/**
+ * blake2s_init_key() - Initialize a BLAKE2s context for a new message (keyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ * @key: the key
+ * @keylen: the key length in bytes, at most BLAKE2S_KEY_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s_init_key(struct blake2s_ctx *ctx, size_t outlen,
+ const void *key, size_t keylen)
{
WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
!key || !keylen || keylen > BLAKE2S_KEY_SIZE));
- __blake2s_init(state, outlen, key, keylen);
+ __blake2s_init(ctx, outlen, key, keylen);
}
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
-void blake2s_final(struct blake2s_state *state, u8 *out);
+/**
+ * blake2s_update() - Update a BLAKE2s context with message data
+ * @ctx: the context to update; must have been initialized
+ * @in: the message data
+ * @inlen: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen);
-static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
- const size_t outlen, const size_t inlen,
- const size_t keylen)
+/**
+ * blake2s_final() - Finish computing a BLAKE2s hash
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting BLAKE2s hash. Its length will be equal to the
+ * @outlen that was passed to blake2s_init() or blake2s_init_key().
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void blake2s_final(struct blake2s_ctx *ctx, u8 *out);
+
+/**
+ * blake2s() - Compute BLAKE2s hash in one shot
+ * @key: the key, or NULL for an unkeyed hash
+ * @keylen: the key length in bytes (at most BLAKE2S_KEY_SIZE), or 0 for an
+ * unkeyed hash
+ * @in: the message data
+ * @inlen: the data length in bytes
+ * @out: (output) the resulting BLAKE2s hash, with length @outlen
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s(const u8 *key, size_t keylen,
+ const u8 *in, size_t inlen,
+ u8 *out, size_t outlen)
{
- struct blake2s_state state;
+ struct blake2s_ctx ctx;
WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
(!key && keylen)));
- __blake2s_init(&state, outlen, key, keylen);
- blake2s_update(&state, in, inlen);
- blake2s_final(&state, out);
+ __blake2s_init(&ctx, outlen, key, keylen);
+ blake2s_update(&ctx, in, inlen);
+ blake2s_final(&ctx, out);
}
#endif /* _CRYPTO_BLAKE2S_H */
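/*
 * Illustrative sketch of the renamed incremental API (example_blake2s_mac()
 * is a hypothetical caller, not part of this patch): a keyed BLAKE2s MAC over
 * two message fragments. blake2s_final() zeroizes the context, so no explicit
 * wipe is needed afterwards.
 */
static void example_blake2s_mac(const u8 key[BLAKE2S_KEY_SIZE],
				const u8 *hdr, size_t hdrlen,
				const u8 *body, size_t bodylen,
				u8 mac[BLAKE2S_HASH_SIZE])
{
	struct blake2s_ctx ctx;

	blake2s_init_key(&ctx, BLAKE2S_HASH_SIZE, key, BLAKE2S_KEY_SIZE);
	blake2s_update(&ctx, hdr, hdrlen);
	blake2s_update(&ctx, body, bodylen);
	blake2s_final(&ctx, mac);
}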
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index b3ea73b81944..1cc301a48469 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,7 +15,8 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/string.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
@@ -25,27 +26,30 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#define CHACHA_KEY_WORDS 8
+#define CHACHA_STATE_WORDS 16
+#define HCHACHA_OUT_WORDS 8
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
-static inline void chacha20_block(u32 *state, u8 *stream)
+struct chacha_state {
+ u32 x[CHACHA_STATE_WORDS];
+};
+
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[at_least CHACHA_BLOCK_SIZE], int nrounds);
+static inline void chacha20_block(struct chacha_state *state,
+ u8 out[at_least CHACHA_BLOCK_SIZE])
{
- chacha_block_generic(state, stream, 20);
+ chacha_block_generic(state, out, 20);
}
-void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
-void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[at_least HCHACHA_OUT_WORDS], int nrounds);
-static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- hchacha_block_arch(state, out, nrounds);
- else
- hchacha_block_generic(state, out, nrounds);
-}
+void hchacha_block(const struct chacha_state *state,
+ u32 out[at_least HCHACHA_OUT_WORDS], int nrounds);
enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_EXPA = 0x61707865U,
@@ -54,58 +58,45 @@ enum chacha_constants { /* expand 32-byte k */
CHACHA_CONSTANT_TE_K = 0x6b206574U
};
-static inline void chacha_init_consts(u32 *state)
+static inline void chacha_init_consts(struct chacha_state *state)
{
- state[0] = CHACHA_CONSTANT_EXPA;
- state[1] = CHACHA_CONSTANT_ND_3;
- state[2] = CHACHA_CONSTANT_2_BY;
- state[3] = CHACHA_CONSTANT_TE_K;
+ state->x[0] = CHACHA_CONSTANT_EXPA;
+ state->x[1] = CHACHA_CONSTANT_ND_3;
+ state->x[2] = CHACHA_CONSTANT_2_BY;
+ state->x[3] = CHACHA_CONSTANT_TE_K;
}
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(struct chacha_state *state,
+ const u32 key[at_least CHACHA_KEY_WORDS],
+ const u8 iv[at_least CHACHA_IV_SIZE])
{
chacha_init_consts(state);
- state[4] = key[0];
- state[5] = key[1];
- state[6] = key[2];
- state[7] = key[3];
- state[8] = key[4];
- state[9] = key[5];
- state[10] = key[6];
- state[11] = key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
+ state->x[4] = key[0];
+ state->x[5] = key[1];
+ state->x[6] = key[2];
+ state->x[7] = key[3];
+ state->x[8] = key[4];
+ state->x[9] = key[5];
+ state->x[10] = key[6];
+ state->x[11] = key[7];
+ state->x[12] = get_unaligned_le32(iv + 0);
+ state->x[13] = get_unaligned_le32(iv + 4);
+ state->x[14] = get_unaligned_le32(iv + 8);
+ state->x[15] = get_unaligned_le32(iv + 12);
}
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- chacha_init_arch(state, key, iv);
- else
- chacha_init_generic(state, key, iv);
-}
-
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds);
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds);
+void chacha_crypt(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
-static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
+static inline void chacha20_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src, unsigned int bytes)
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- chacha_crypt_arch(state, dst, src, bytes, nrounds);
- else
- chacha_crypt_generic(state, dst, src, bytes, nrounds);
+ chacha_crypt(state, dst, src, bytes, 20);
}
-static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
+static inline void chacha_zeroize_state(struct chacha_state *state)
{
- chacha_crypt(state, dst, src, bytes, 20);
+ memzero_explicit(state, sizeof(*state));
}
#endif /* _CRYPTO_CHACHA_H */
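/*
 * Illustrative sketch of the struct-based ChaCha API (hypothetical caller,
 * not part of this patch): keystream setup, encryption, and state wiping.
 */
static void example_chacha20_encrypt(const u32 key[CHACHA_KEY_WORDS],
				     const u8 iv[CHACHA_IV_SIZE],
				     u8 *dst, const u8 *src, unsigned int len)
{
	struct chacha_state state;

	chacha_init(&state, key, iv);
	chacha20_crypt(&state, dst, src, len);
	chacha_zeroize_state(&state);	/* wipe expanded key material */
}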
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index d2ac3ff7dc1e..0f71b037702d 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -18,32 +18,33 @@ enum chacha20poly1305_lengths {
void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool __must_check
chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len, const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
- const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool __must_check xchacha20poly1305_decrypt(
- u8 *dst, const u8 *src, const size_t src_len, const u8 *ad,
- const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_selftest(void);
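/*
 * Illustrative in-place round-trip (hypothetical caller, not part of this
 * patch), assuming the usual CHACHA20POLY1305_AUTHTAG_SIZE tag appended to
 * the ciphertext:
 */
static bool example_aead_roundtrip(u8 *buf, size_t len, u64 nonce,
				   const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	/* Encrypts in place; output is len + CHACHA20POLY1305_AUTHTAG_SIZE. */
	chacha20poly1305_encrypt(buf, buf, len, NULL, 0, nonce, key);

	/* Returns false if authentication fails. */
	return chacha20poly1305_decrypt(buf, buf,
					len + CHACHA20POLY1305_AUTHTAG_SIZE,
					NULL, 0, nonce, key);
}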
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index a1c66d1001af..06984a26c8cf 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,58 +8,8 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index ece6a9b5fafc..2362b48f8741 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -6,7 +6,6 @@
#ifndef CURVE25519_H
#define CURVE25519_H
-#include <crypto/algapi.h> // For crypto_memneq.
#include <linux/types.h>
#include <linux/random.h>
@@ -14,57 +13,28 @@ enum curve25519_lengths {
CURVE25519_KEY_SIZE = 32
};
-extern const u8 curve25519_null_point[];
-extern const u8 curve25519_base_point[];
+void curve25519_generic(u8 out[at_least CURVE25519_KEY_SIZE],
+ const u8 scalar[at_least CURVE25519_KEY_SIZE],
+ const u8 point[at_least CURVE25519_KEY_SIZE]);
-void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
- const u8 scalar[CURVE25519_KEY_SIZE],
- const u8 point[CURVE25519_KEY_SIZE]);
+bool __must_check
+curve25519(u8 mypublic[at_least CURVE25519_KEY_SIZE],
+ const u8 secret[at_least CURVE25519_KEY_SIZE],
+ const u8 basepoint[at_least CURVE25519_KEY_SIZE]);
-void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
- const u8 scalar[CURVE25519_KEY_SIZE],
- const u8 point[CURVE25519_KEY_SIZE]);
+bool __must_check
+curve25519_generate_public(u8 pub[at_least CURVE25519_KEY_SIZE],
+ const u8 secret[at_least CURVE25519_KEY_SIZE]);
-void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE]);
-
-bool curve25519_selftest(void);
-
-static inline
-bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE],
- const u8 basepoint[CURVE25519_KEY_SIZE])
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
- curve25519_arch(mypublic, secret, basepoint);
- else
- curve25519_generic(mypublic, secret, basepoint);
- return crypto_memneq(mypublic, curve25519_null_point,
- CURVE25519_KEY_SIZE);
-}
-
-static inline bool
-__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE])
-{
- if (unlikely(!crypto_memneq(secret, curve25519_null_point,
- CURVE25519_KEY_SIZE)))
- return false;
-
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
- curve25519_base_arch(pub, secret);
- else
- curve25519_generic(pub, secret, curve25519_base_point);
- return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
-}
-
-static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE])
+static inline void
+curve25519_clamp_secret(u8 secret[at_least CURVE25519_KEY_SIZE])
{
secret[0] &= 248;
secret[31] = (secret[31] & 127) | 64;
}
-static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE])
+static inline void
+curve25519_generate_secret(u8 secret[at_least CURVE25519_KEY_SIZE])
{
get_random_bytes_wait(secret, CURVE25519_KEY_SIZE);
	curve25519_clamp_secret(secret);
}
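/*
 * Illustrative X25519 flow (hypothetical caller, not part of this patch):
 * both helpers now return false when the result is the all-zero point.
 */
static bool example_x25519(u8 shared[CURVE25519_KEY_SIZE],
			   const u8 peer_pub[CURVE25519_KEY_SIZE])
{
	u8 secret[CURVE25519_KEY_SIZE], pub[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);
	if (!curve25519_generate_public(pub, secret))
		return false;
	/* ... send pub to the peer ... */
	return curve25519(shared, secret, peer_pub);
}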
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
new file mode 100644
index 000000000000..6b25305fe611
--- /dev/null
+++ b/include/crypto/df_sp80090a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ */
+
+#ifndef _CRYPTO_DF80090A_H
+#define _CRYPTO_DF80090A_H
+
+#include <crypto/internal/cipher.h>
+#include <crypto/aes.h>
+
+static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
+{
+ return statelen + /* df_data */
+ blocklen + /* pad */
+ blocklen + /* iv */
+ statelen + blocklen; /* temp */
+}
+
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+ unsigned char *df_data,
+ size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen);
+
+#endif /* _CRYPTO_DF80090A_H */
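/*
 * Illustrative arithmetic (not part of this patch): for a hypothetical
 * AES-128 CTR DRBG, statelen = keylen + blocklen = 16 + 16 = 32 and
 * blocklen = 16, so the derivation-function scratch buffer works out to
 * 32 + 16 + 16 + (32 + 16) = 112 bytes:
 */
int example_len = crypto_drbg_ctr_df_datalen(32, 16);	/* = 112 */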
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index af5ad51d3eef..2d42518cbdce 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
@@ -54,30 +55,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
-/*
- * Concatenation Helper and string operation helper
- *
- * SP800-90A requires the concatenation of different data. To avoid copying
- * buffers around or allocate additional memory, the following data structure
- * is used to point to the original memory with its size. In addition, it
- * is used to build a linked list. The linked list defines the concatenation
- * of individual buffers. The order of memory block referenced in that
- * linked list determines the order of concatenation.
- */
-struct drbg_string {
- const unsigned char *buf;
- size_t len;
- struct list_head list;
-};
-
-static inline void drbg_string_fill(struct drbg_string *string,
- const unsigned char *buf, size_t len)
-{
- string->buf = buf;
- string->len = len;
- INIT_LIST_HEAD(&string->list);
-}
-
struct drbg_state;
typedef uint32_t drbg_flag_t;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
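/*
 * Illustrative call (hypothetical driver, not part of this patch) after the
 * batch-request callback parameter was dropped from the signature: retry
 * support enabled, no realtime pump, queue length 16.
 */
static struct crypto_engine *example_engine_setup(struct device *dev)
{
	return crypto_engine_alloc_init_and_set(dev, true, false, 16);
}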
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 81330c6446f6..b0853f7cada0 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
-/* A slow generic version of gf_mul, implemented for lle and bbe
+/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
-void gf128mul_bbe(be128 *a, const be128 *b);
-
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index f832c9f2aca3..043d938e9a2c 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -7,18 +7,18 @@
#define __CRYPTO_GHASH_H__
#include <linux/types.h>
-#include <crypto/gf128mul.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+struct gf128mul_4k;
+
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2d5ea9f9ff43..586700332c73 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,10 +8,17 @@
#ifndef _CRYPTO_HASH_H
#define _CRYPTO_HASH_H
-#include <linux/atomic.h>
#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
+/* Set this bit for virtual address instead of SG list. */
+#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+
+#define CRYPTO_AHASH_REQ_PRIVATE \
+ CRYPTO_AHASH_REQ_VIRT
+
struct crypto_ahash;
/**
@@ -52,11 +59,15 @@ struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
- struct scatterlist *src;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
u8 *result;
- /* This field may only be used by the ahash API code. */
- void *priv;
+ struct scatterlist sg_head[2];
+ crypto_completion_t saved_complete;
+ void *saved_data;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -78,6 +89,8 @@ struct ahash_request {
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
+ * For block-only algorithms, @update must return the number
+ * of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -120,6 +133,10 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @export_core: Export partial state without partial block. Only defined
+ * for algorithms that are not block-only.
+ * @import_core: Import partial state without partial block. Only defined
+ * for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
@@ -142,6 +159,8 @@ struct ahash_alg {
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
+ int (*export_core)(struct ahash_request *req, void *out);
+ int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
@@ -159,16 +178,44 @@ struct shash_desc {
#define HASH_MAX_DIGESTSIZE 64
/*
- * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
- * containing a 'struct sha3_state'.
+ * The size of a core hash state and a partial block. The final byte
+ * is the length of the partial block.
*/
-#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
+#define HASH_STATE_AND_BLOCK(state, block) ((state) + (block) + 1)
+
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE HASH_STATE_AND_BLOCK(200, 144)
+
+/* This needs to match arch/s390/crypto/sha.h. */
+#define S390_SHA_CTX_SIZE 216
+
+/*
+ * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
+ * containing a 'struct s390_sha_ctx'.
+ */
+#define SHA3_224_S390_DESCSIZE HASH_STATE_AND_BLOCK(S390_SHA_CTX_SIZE, 144)
+#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + \
+ SHA3_224_S390_DESCSIZE)
+#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
+ HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+#define HASH_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = \
+ ahash_request_on_stack_init(__##name##_req, (_tfm))
+
+#define HASH_REQUEST_CLONE(name, gfp) \
+ hash_request_clone(name, sizeof(__##name##_req), gfp)
+
+#define CRYPTO_HASH_STATESIZE(coresize, blocksize) ((coresize) + (blocksize) + 1)
+
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
@@ -178,6 +225,8 @@ struct shash_desc {
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
+ * @export_core: see struct ahash_alg
+ * @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
@@ -208,6 +257,8 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
+ int (*export_core)(struct shash_desc *desc, void *out);
+ int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
@@ -231,7 +282,6 @@ struct crypto_ahash {
};
struct crypto_shash {
- unsigned int descsize;
struct crypto_tfm base;
};
@@ -245,6 +295,11 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
+static inline bool ahash_req_on_stack(struct ahash_request *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
@@ -452,7 +507,11 @@ int crypto_ahash_finup(struct ahash_request *req);
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return crypto_ahash_finup(req);
+}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
@@ -541,7 +600,7 @@ int crypto_ahash_update(struct ahash_request *req);
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
- req->base.tfm = crypto_ahash_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
@@ -575,10 +634,7 @@ static inline struct ahash_request *ahash_request_alloc_noprof(
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
-static inline void ahash_request_free(struct ahash_request *req)
-{
- kfree_sensitive(req);
-}
+void ahash_request_free(struct ahash_request *req);
static inline void ahash_request_zero(struct ahash_request *req)
{
@@ -622,9 +678,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
+ flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -647,6 +703,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->src = src;
req->nbytes = nbytes;
req->result = result;
+ req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
+}
+
+/**
+ * ahash_request_set_virt() - set virtual address data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source virtual address
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source virtual address
+ *
+ * By using this call, the caller references the source virtual address,
+ * which points to the data the message digest is to be calculated for.
+ */
+static inline void ahash_request_set_virt(struct ahash_request *req,
+ const u8 *src, u8 *result,
+ unsigned int nbytes)
+{
+ req->svirt = src;
+ req->nbytes = nbytes;
+ req->result = result;
+ req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
/**
@@ -784,7 +864,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return tfm->descsize;
+ return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -802,7 +882,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -819,7 +899,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -839,12 +919,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
@@ -854,7 +937,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
 * caller-allocated output buffer out which must have sufficient size (e.g. by
 * calling crypto_shash_statesize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
int crypto_shash_export(struct shash_desc *desc, void *out);
@@ -868,7 +951,7 @@ int crypto_shash_export(struct shash_desc *desc, void *out);
* the input buffer. That buffer should have been generated with the
 * crypto_shash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
int crypto_shash_import(struct shash_desc *desc, const void *in);
@@ -881,19 +964,29 @@ int crypto_shash_import(struct shash_desc *desc, const void *in);
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
-static inline int crypto_shash_init(struct shash_desc *desc)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+int crypto_shash_init(struct shash_desc *desc);
- return crypto_shash_alg(tfm)->init(desc);
-}
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
@@ -903,12 +996,15 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_shash_finup(desc, data, len, NULL);
+}
/**
* crypto_shash_final() - calculate message digest
@@ -920,29 +1016,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-/**
- * crypto_shash_finup() - calculate message digest of buffer
- * @desc: see crypto_shash_final()
- * @data: see crypto_shash_update()
- * @len: see crypto_shash_update()
- * @out: see crypto_shash_final()
- *
- * This function is a "short-hand" for the function calls of
- * crypto_shash_update and crypto_shash_final. The parameters have the same
- * meaning as discussed for those separate functions.
- *
- * Context: Any context.
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
+{
+ return crypto_shash_finup(desc, NULL, 0, out);
+}
static inline void shash_desc_zero(struct shash_desc *desc)
{
@@ -950,4 +1031,25 @@ static inline void shash_desc_zero(struct shash_desc *desc)
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
+static inline bool ahash_is_async(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_is_async(&tfm->base);
+}
+
+static inline struct ahash_request *ahash_request_on_stack_init(
+ char *buf, struct crypto_ahash *tfm)
+{
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
+ return req;
+}
+
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
+}
+
#endif /* _CRYPTO_HASH_H */
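/*
 * Illustrative sketch (hypothetical caller, not part of this patch): with
 * crypto_shash_update() and crypto_shash_final() now layered on
 * crypto_shash_finup(), a one-buffer digest reduces to:
 */
static int example_shash_digest(struct crypto_shash *tfm, const u8 *data,
				unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	err = crypto_shash_init(desc) ?:
	      crypto_shash_finup(desc, data, len, out);
	shash_desc_zero(desc);
	return err;
}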
diff --git a/include/crypto/hkdf.h b/include/crypto/hkdf.h
new file mode 100644
index 000000000000..6a9678f508f5
--- /dev/null
+++ b/include/crypto/hkdf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869
+ *
+ * Extracted from fs/crypto/hkdf.c, which has
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _CRYPTO_HKDF_H
+#define _CRYPTO_HKDF_H
+
+#include <crypto/hash.h>
+
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk);
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
+#endif
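/*
 * Illustrative extract-then-expand flow (hypothetical caller, not part of
 * this patch), assuming the HMAC tfm must be re-keyed with the PRK between
 * the two steps, as in the fs/crypto code this header was extracted from:
 */
static int example_hkdf(struct crypto_shash *hmac_tfm,
			const u8 *ikm, unsigned int ikmlen,
			const u8 *salt, unsigned int saltlen,
			const u8 *info, unsigned int infolen,
			u8 *okm, unsigned int okmlen)
{
	u8 prk[HASH_MAX_DIGESTSIZE];
	int err;

	err = hkdf_extract(hmac_tfm, ikm, ikmlen, salt, saltlen, prk);
	if (err)
		return err;
	err = crypto_shash_setkey(hmac_tfm, prk,
				  crypto_shash_digestsize(hmac_tfm));
	if (err)
		return err;
	return hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
}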
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index f7b3b93f3a49..107b797c33ec 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -135,6 +135,7 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @write: True if we are in the middle of a write.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
* @inflight: Non-zero when AIO requests are in flight.
@@ -151,10 +152,11 @@ struct af_alg_ctx {
size_t used;
atomic_t rcvused;
- bool more;
- bool merge;
- bool enc;
- bool init;
+ bool more:1,
+ merge:1,
+ enc:1,
+ write:1,
+ init:1;
unsigned int len;
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 8831edaafc05..2d97440028ff 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -11,13 +11,23 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+#define ACOMP_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
/**
* struct acomp_alg - asynchronous compression algorithm
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
* @init: Initialize the cryptographic transformation object.
* This function is used to initialize the cryptographic
* transformation object. This function is called only once at
@@ -30,25 +40,67 @@
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
 * @calg: Common algorithm data structure shared with scomp
*/
struct acomp_alg {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
int (*init)(struct crypto_acomp *tfm);
void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
-
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
};
};
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *);
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
+
/*
* Transform internal helpers.
*/
@@ -68,22 +120,6 @@ static inline void acomp_request_complete(struct acomp_req *req,
crypto_request_complete(&req->base, err);
}
-static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm)
-{
- struct acomp_req *req;
-
- req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
- if (likely(req))
- acomp_request_set_tfm(req, tfm);
- return req;
-}
-#define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__))
-
-static inline void __acomp_request_free(struct acomp_req *req)
-{
- kfree_sensitive(req);
-}
-
/**
* crypto_register_acomp() -- Register asynchronous compression algorithm
*
@@ -109,4 +145,100 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+static inline bool acomp_request_issg(struct acomp_req *req)
+{
+ return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT));
+}
+
+static inline bool acomp_request_src_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+static inline bool acomp_request_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
+}
+
+static inline bool acomp_request_src_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
+}
+
+static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
+}
+
+static inline bool acomp_request_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
+{
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
+}
+
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
+}
+
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
+}
+
#endif
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index a0fba4b2eccf..14ee62bc52b6 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -124,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
/**
* crypto_register_akcipher() -- Register public key algorithm
*
- * Function registers an implementation of a public key verify algorithm
+ * Function registers an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*
@@ -135,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg);
/**
* crypto_unregister_akcipher() -- Unregister public key algorithm
*
- * Function unregisters an implementation of a public key verify algorithm
+ * Function unregisters an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*/
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
deleted file mode 100644
index 982fe5e8471c..000000000000
--- a/include/crypto/internal/blake2b.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/*
- * Helper functions for BLAKE2b implementations.
- * Keep this in sync with the corresponding BLAKE2s header.
- */
-
-#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
-#define _CRYPTO_INTERNAL_BLAKE2B_H
-
-#include <crypto/blake2b.h>
-#include <crypto/internal/hash.h>
-#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void blake2b_set_lastblock(struct blake2b_state *state)
-{
- state->f[0] = -1;
-}
-
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
-{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-}
-
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
-
-/* Helper functions for shash implementations of BLAKE2b */
-
-struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
- unsigned int keylen;
-};
-
-static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
-}
-
-static inline int crypto_blake2b_init(struct shash_desc *desc)
-{
- const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2b_state *state = shash_desc_ctx(desc);
- unsigned int outlen = crypto_shash_digestsize(desc->tfm);
-
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
-}
-
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
-{
- struct blake2b_state *state = shash_desc_ctx(desc);
-
- __blake2b_update(state, in, inlen, compress);
- return 0;
-}
-
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
- blake2b_compress_t compress)
-{
- struct blake2b_state *state = shash_desc_ctx(desc);
-
- __blake2b_final(state, out, compress);
- return 0;
-}
-
-#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
deleted file mode 100644
index 506d56530ca9..000000000000
--- a/include/crypto/internal/blake2s.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/*
- * Helper functions for BLAKE2s implementations.
- * Keep this in sync with the corresponding BLAKE2b header.
- */
-
-#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
-#define _CRYPTO_INTERNAL_BLAKE2S_H
-
-#include <crypto/blake2s.h>
-#include <linux/string.h>
-
-void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
- size_t nblocks, const u32 inc);
-
-void blake2s_compress(struct blake2s_state *state, const u8 *block,
- size_t nblocks, const u32 inc);
-
-bool blake2s_selftest(void);
-
-#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
+			remain = len % _bs;				\
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
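/*
 * Illustrative use (hypothetical hash with a 64-byte block, not part of this
 * patch). The macro consumes full blocks through the block function, buffers
 * the remainder, and evaluates to the new number of buffered bytes:
 */
struct example_ctx {
	u32 state[8];
	u8 buf[64];
	unsigned int buflen;
};

static void example_block_fn(u32 *state, const u8 *src, size_t nblocks);

static void example_update(struct example_ctx *ctx,
			   const u8 *data, unsigned int len)
{
	ctx->buflen = BLOCK_HASH_UPDATE_BLOCKS(example_block_fn, ctx->state,
					       data, len, sizeof(ctx->buf),
					       ctx->buf, ctx->buflen);
}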
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h
new file mode 100644
index 000000000000..371e52dcee6c
--- /dev/null
+++ b/include/crypto/internal/drbg.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _INTERNAL_DRBG_H
+#define _INTERNAL_DRBG_H
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @val value to be converted
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * buffer size is at least 32 bit
+ */
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
+{
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *)buf;
+
+ conversion->conv = cpu_to_be32(val);
+}
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocating additional memory, the following data
+ * structure is used to point to the original memory with its size. In
+ * addition, it is used to build a linked list. The linked list defines the
+ * concatenation of individual buffers. The order of the memory blocks
+ * referenced in that linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+#endif /* _INTERNAL_DRBG_H */
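/*
 * Illustrative concatenation (hypothetical caller, not part of this patch):
 * two buffers are chained without copying; consumers walk the list and
 * process s1.buf then s2.buf in that order.
 */
static void example_seed(const u8 *entropy, size_t entlen,
			 const u8 *pers, size_t perslen)
{
	LIST_HEAD(seedlist);
	struct drbg_string s1, s2;

	drbg_string_fill(&s1, entropy, entlen);
	drbg_string_fill(&s2, pers, perslen);
	list_add_tail(&s1.list, &seedlist);
	list_add_tail(&s2.list, &seedlist);
	/* e.g. crypto_drbg_ctr_df(..., &seedlist, ...) */
}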
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index f7e75e1e71f3..57cd75242141 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -27,7 +27,7 @@
#define _CRYPTO_ECC_H
#include <crypto/ecc_curve.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
@@ -42,6 +42,18 @@
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+/*
+ * The integers r and s making up the signature are expected to be
+ * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES.
+ * The bytes within each u64 digit are in native endianness,
+ * but the order of the u64 digits themselves is little endian.
+ * This format allows direct use by internal vli_*() functions.
+ */
+struct ecdsa_raw_sig {
+ u64 r[ECC_MAX_DIGITS];
+ u64 s[ECC_MAX_DIGITS];
+};
+
/**
* ecc_swap_digits() - Copy ndigits from big endian array to native array
* @in: Input array
@@ -63,6 +75,9 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
* @nbytes Size of input byte array
* @out Output digits array
* @ndigits: Number of digits to create from byte array
+ *
+ * The first byte in the input byte array is expected to hold the most
+ * significant bits of the large integer.
*/
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits);
@@ -290,4 +305,6 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
const u64 *y, const struct ecc_point *q,
const struct ecc_curve *curve);
+extern struct crypto_template ecdsa_x962_tmpl;
+extern struct crypto_template ecdsa_p1363_tmpl;
#endif
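/*
 * Illustrative conversion (hypothetical caller, not part of this patch):
 * loading a big-endian P-256 r value into the little-endian digit layout
 * that the vli_*() helpers and struct ecdsa_raw_sig expect.
 */
static void example_load_r(struct ecdsa_raw_sig *sig, const u8 r_be[32])
{
	ecc_digits_from_bytes(r_be, 32, sig->r, ECC_CURVE_NIST_P256_DIGITS);
}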
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index fbf4be56cf12..f19ef376833f 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -21,24 +21,15 @@ struct device;
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
- * @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
 * of a failed backlog request; such a request is put back into the
 * crypto-engine queue in head position to keep order
+ * @rt: whether this queue is set to run as a realtime task
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -46,24 +37,17 @@ struct device;
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
- bool idling;
bool busy;
bool running;
bool retry_support;
+ bool rt;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 58967593b6b4..6ec5f2f37ccb 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,11 +11,29 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
+
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
+
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
+
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
+
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
struct ahash_request;
struct scatterlist;
struct crypto_hash_walk {
- char *data;
+ const char *data;
unsigned int offset;
unsigned int flags;
@@ -72,6 +90,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -81,12 +100,26 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
+static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -163,6 +196,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
tfm->reqsize = reqsize;
}
+static inline bool crypto_ahash_tested(struct crypto_ahash *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
unsigned int reqsize)
{
@@ -210,7 +250,7 @@ static inline void ahash_request_complete(struct ahash_request *req, int err)
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -270,5 +310,96 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_shash, base);
}
+static inline bool ahash_request_isvirt(struct ahash_request *req)
+{
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+}
+
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
+{
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
+}
+
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
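
For reference, a minimal usage sketch (not part of the patch) of the core export/import pair declared above, saving and restoring an in-progress hash state. The save/restore scenario and the buffer sizing are assumptions for illustration only.

static int example_save_restore(struct ahash_request *req, void *state_buf)
{
	int err;

	/* state_buf is assumed to hold crypto_ahash_statesize() bytes */
	err = crypto_ahash_export_core(req, state_buf);	/* no partial block */
	if (err)
		return err;

	/* ... e.g. reinitialize hardware here ... */

	return crypto_ahash_import_core(req, state_buf);
}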
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 196aa769f296..a72fff409ab8 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <asm/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,27 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+static inline void
+poly1305_block_init_generic(struct poly1305_block_state *desc,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ poly1305_core_init(&desc->h);
+ poly1305_core_setkey(&desc->core_r, raw_key);
+}
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
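
A hedged sketch of the calling sequence for the three generic helpers added above. Per the comment on the core functions, only whole blocks may be fed in; the padbit of 1 marks full blocks, as in the Poly1305 specification.

static void example_poly1305_core(struct poly1305_block_state *st,
				  const u8 raw_key[POLY1305_BLOCK_SIZE],
				  const u8 *blocks, unsigned int nblocks,
				  const u32 nonce[4],
				  u8 digest[POLY1305_DIGEST_SIZE])
{
	poly1305_block_init_generic(st, raw_key);
	poly1305_blocks_generic(st, blocks, nblocks * POLY1305_BLOCK_SIZE, 1);
	poly1305_emit_generic(&st->h, digest, nonce);
}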
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index e870133f4b77..071a1951b992 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -8,6 +8,7 @@
#ifndef _RSA_HELPER_
#define _RSA_HELPER_
#include <linux/types.h>
+#include <crypto/akcipher.h>
/**
* rsa_key - RSA key structure
@@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);
+#define RSA_PUB (true)
+#define RSA_PRIV (false)
+
+static inline int rsa_set_key(struct crypto_akcipher *child,
+ unsigned int *key_size, bool is_pubkey,
+ const void *key, unsigned int keylen)
+{
+ int err;
+
+ *key_size = 0;
+
+ if (is_pubkey)
+ err = crypto_akcipher_set_pub_key(child, key, keylen);
+ else
+ err = crypto_akcipher_set_priv_key(child, key, keylen);
+ if (err)
+ return err;
+
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(child);
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ *key_size = err;
+ return 0;
+}
+
extern struct crypto_template rsa_pkcs1pad_tmpl;
+extern struct crypto_template rsassa_pkcs1_tmpl;
#endif
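
A sketch (not from this patch) of how a wrapping template's setkey callback might use rsa_set_key() above; the context struct and callback name are hypothetical, and akcipher_tfm_ctx() comes from crypto/internal/akcipher.h.

/* Hypothetical tfm context; only 'child' and 'key_size' matter here. */
struct example_rsa_ctx {
	struct crypto_akcipher *child;
	unsigned int key_size;
};

static int example_set_pub_key(struct crypto_akcipher *tfm,
			       const void *key, unsigned int keylen)
{
	struct example_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}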
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 07a10fd2d321..6a2c5f2e90f9 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -9,12 +9,7 @@
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <crypto/acompress.h>
-#include <crypto/algapi.h>
-
-#define SCOMP_SCRATCH_SIZE 131072
-
-struct acomp_req;
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -23,16 +18,12 @@ struct crypto_scomp {
/**
* struct scomp_alg - synchronous compression algorithm
*
- * @alloc_ctx: Function allocates algorithm specific context
- * @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
 * @decompress: Function performs a decompress operation
- * @base: Common crypto API algorithm data structure
+ * @streams: Per-cpu memory for algorithm
 * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(struct crypto_scomp *tfm);
- void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
@@ -40,6 +31,8 @@ struct scomp_alg {
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
+ struct crypto_acomp_streams streams;
+
union {
struct COMP_ALG_COMMON;
struct comp_alg_common calg;
@@ -71,17 +64,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
}
-static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
-{
- return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
-}
-
-static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
- void *ctx)
-{
- return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
-}
-
static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
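
With alloc_ctx/free_ctx removed, an scomp driver now fills in only the two operation callbacks; a sketch of the resulting declaration follows (names are made up, and the handling of @streams is left to the API as described above).

static int example_scompress(struct crypto_scomp *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen,
			     void *ctx);
static int example_sdecompress(struct crypto_scomp *tfm, const u8 *src,
			       unsigned int slen, u8 *dst, unsigned int *dlen,
			       void *ctx);

static struct scomp_alg example_scomp = {
	.compress	= example_scompress,
	.decompress	= example_sdecompress,
	/* no .alloc_ctx/.free_ctx; per-cpu memory lives in .streams */
	.base		= {
		.cra_name = "example",
	},
};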
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
index 97cb26ef8115..b16648c1a986 100644
--- a/include/crypto/internal/sig.h
+++ b/include/crypto/internal/sig.h
@@ -10,8 +10,88 @@
#include <crypto/algapi.h>
#include <crypto/sig.h>
+struct sig_instance {
+ void (*free)(struct sig_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct sig_alg, base)];
+ struct crypto_instance base;
+ };
+ struct sig_alg alg;
+ };
+};
+
+struct crypto_sig_spawn {
+ struct crypto_spawn base;
+};
+
static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
+
+/**
+ * crypto_register_sig() -- Register public key signature algorithm
+ *
+ * Function registers an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_sig(struct sig_alg *alg);
+
+/**
+ * crypto_unregister_sig() -- Unregister public key signature algorithm
+ *
+ * Function unregisters an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_sig(struct sig_alg *alg);
+
+int sig_register_instance(struct crypto_template *tmpl,
+ struct sig_instance *inst);
+
+static inline struct sig_instance *sig_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct sig_instance, alg.base);
+}
+
+static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm)
+{
+ return sig_instance(crypto_tfm_alg_instance(&tfm->base));
+}
+
+static inline struct crypto_instance *sig_crypto_instance(struct sig_instance
+ *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline void *sig_instance_ctx(struct sig_instance *inst)
+{
+ return crypto_instance_ctx(sig_crypto_instance(inst));
+}
+
+int crypto_grab_sig(struct crypto_sig_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn
+ *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn
+ *spawn)
+{
+ return container_of(spawn->base.alg, struct sig_alg, base);
+}
#endif
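
A sketch (not from this patch) of the usual spawn pattern applied to the new sig helpers: grabbing an underlying signature algorithm from inside a template instance. It assumes the instance context was allocated to hold the spawn, and trims the real error handling.

static int example_grab(struct sig_instance *inst, const char *name)
{
	struct crypto_sig_spawn *spawn = sig_instance_ctx(inst);
	struct sig_alg *alg;
	int err;

	err = crypto_grab_sig(spawn, sig_crypto_instance(inst), name, 0, 0);
	if (err)
		return err;

	alg = crypto_spawn_sig_alg(spawn);
	/* ... derive inst->alg names and sizes from 'alg', then register
	 * with sig_register_instance() and drop the spawn on failure ... */
	return 0;
}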
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index d2316242a988..9e338e7aafbd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -14,11 +15,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@@ -32,13 +32,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename);
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename);
-void simd_aead_free(struct simd_aead_alg *alg);
-
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);
@@ -52,13 +45,10 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
* self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * currently limited to configurations where the "full" self-tests are enabled,
+ * because it might be a bit too invasive to be part of the "fast" self-tests.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
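
The usual pattern for gating a SIMD fast path on crypto_simd_usable() looks roughly like the sketch below; the state struct and both block functions are hypothetical, and kernel_fpu_begin()/kernel_fpu_end() is the x86 spelling of the arch-specific SIMD region.

static void example_hash_blocks(struct example_state *st,
				const u8 *data, size_t nblocks)
{
	if (crypto_simd_usable()) {
		kernel_fpu_begin();	/* x86; other arches differ */
		example_hash_blocks_simd(st, data, nblocks);
		kernel_fpu_end();
	} else {
		example_hash_blocks_generic(st, data, nblocks);
	}
}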
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 7ae42afdcf3e..0cad8e7364c8 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -11,7 +11,6 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
-#include <linux/list.h>
#include <linux/types.h>
/*
@@ -57,25 +56,32 @@ struct crypto_lskcipher_spawn {
struct skcipher_walk {
union {
+ /* Virtual address of the source. */
struct {
- struct page *page;
- unsigned long offset;
- } phys;
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
struct {
- u8 *page;
- void *addr;
- } virt;
- } src, dst;
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
- struct scatter_walk in;
- unsigned int nbytes;
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
- struct scatter_walk out;
+ unsigned int nbytes;
unsigned int total;
- struct list_head buffers;
-
u8 *page;
u8 *buffer;
u8 *oiv;
@@ -205,17 +211,16 @@ void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
int lskcipher_register_instance(struct crypto_template *tmpl,
struct lskcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int err);
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req,
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req,
bool atomic);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req);
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
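
For orientation, a sketch of the standard walk loop over an skcipher request using the reshaped src/dst fields above; EXAMPLE_BLOCK_SIZE and example_crypt_chunk() are hypothetical stand-ins for a real cipher's block size and block-processing routine.

static int example_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		unsigned int n = walk.nbytes;

		/* Only the final chunk may be partial. */
		if (n < walk.total)
			n = round_down(n, EXAMPLE_BLOCK_SIZE);
		example_crypt_chunk(walk.dst.virt.addr, walk.src.virt.addr, n);
		err = skcipher_walk_done(&walk, walk.nbytes - n);
	}
	return err;
}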
diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h
new file mode 100644
index 000000000000..71dd38f59be1
--- /dev/null
+++ b/include/crypto/krb5.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos 5 crypto
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _CRYPTO_KRB5_H
+#define _CRYPTO_KRB5_H
+
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+
+struct crypto_shash;
+struct scatterlist;
+
+/*
+ * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped
+ * to linux kernel crypto routines.
+ */
+#define KRB5_ENCTYPE_NULL 0x0000
+#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
+#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */
+#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */
+#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */
+/* XXX deprecated? */
+#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */
+#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
+#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008
+#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
+#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017
+#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a
+#define KRB5_ENCTYPE_UNKNOWN 0x01ff
+
+#define KRB5_CKSUMTYPE_CRC32 0x0001
+#define KRB5_CKSUMTYPE_RSA_MD4 0x0002
+#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003
+#define KRB5_CKSUMTYPE_DESCBC 0x0004
+#define KRB5_CKSUMTYPE_RSA_MD5 0x0007
+#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008
+#define KRB5_CKSUMTYPE_NIST_SHA 0x0009
+#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
+#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
+
+/*
+ * Constants used for key derivation
+ */
+/* from rfc3961 */
+#define KEY_USAGE_SEED_CHECKSUM (0x99)
+#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
+#define KEY_USAGE_SEED_INTEGRITY (0x55)
+
+/*
+ * Standard Kerberos error codes.
+ */
+#define KRB5_PROG_KEYTYPE_NOSUPP -1765328233
+
+/*
+ * Mode of operation.
+ */
+enum krb5_crypto_mode {
+ KRB5_CHECKSUM_MODE, /* Checksum only */
+ KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */
+};
+
+struct krb5_buffer {
+ unsigned int len;
+ void *data;
+};
+
+/*
+ * Kerberos encoding type definition.
+ */
+struct krb5_enctype {
+ int etype; /* Encryption (key) type */
+ int ctype; /* Checksum type */
+ const char *name; /* "Friendly" name */
+ const char *encrypt_name; /* Crypto encrypt+checksum name */
+ const char *cksum_name; /* Crypto checksum name */
+ const char *hash_name; /* Crypto hash name */
+ const char *derivation_enc; /* Cipher used in key derivation */
+ u16 block_len; /* Length of encryption block */
+ u16 conf_len; /* Length of confounder (normally == block_len) */
+ u16 cksum_len; /* Length of checksum */
+ u16 key_bytes; /* Length of raw key, in bytes */
+ u16 key_len; /* Length of final key, in bytes */
+ u16 hash_len; /* Length of hash in bytes */
+ u16 prf_len; /* Length of PRF() result in bytes */
+ u16 Kc_len; /* Length of Kc in bytes */
+ u16 Ke_len; /* Length of Ke in bytes */
+ u16 Ki_len; /* Length of Ki in bytes */
+ bool keyed_cksum; /* T if a keyed cksum */
+
+ const struct krb5_crypto_profile *profile;
+
+ int (*random_to_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *in,
+ struct krb5_buffer *out); /* complete key generation */
+};
+
+/*
+ * krb5_api.c
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype);
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset);
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset);
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len);
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * krb5_kdf.c
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+#endif /* _CRYPTO_KRB5_H */
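
A minimal sketch, under stated assumptions, of looking up an enctype and preparing an encryption AEAD with the API above. The transport key 'TK' and usage number come from the caller's protocol, and crypto_krb5_find_enctype() is assumed to return NULL for unsupported types.

static struct crypto_aead *example_krb5_setup(const struct krb5_buffer *TK,
					      u32 usage)
{
	const struct krb5_enctype *krb5;

	krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
	if (!krb5)
		return ERR_PTR(-ENOPKG);

	return crypto_krb5_prepare_encryption(krb5, TK, usage, GFP_KERNEL);
}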
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index cf9e9dec3d21..c47aedfe67ec 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -2,24 +2,209 @@
#ifndef _CRYPTO_MD5_H
#define _CRYPTO_MD5_H
+#include <crypto/hash.h>
#include <linux/types.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
+#define MD5_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_STATE_SIZE 24
#define MD5_H0 0x67452301UL
#define MD5_H1 0xefcdab89UL
#define MD5_H2 0x98badcfeUL
#define MD5_H3 0x10325476UL
+#define CRYPTO_MD5_STATESIZE \
+ CRYPTO_HASH_STATESIZE(MD5_STATE_SIZE, MD5_HMAC_BLOCK_SIZE)
+
extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
struct md5_state {
u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
+ u32 block[MD5_BLOCK_WORDS];
+};
+
+/* State for the MD5 compression function */
+struct md5_block_state {
+ u32 h[MD5_HASH_WORDS];
+};
+
+/**
+ * struct md5_ctx - Context for hashing a message with MD5
+ * @state: the compression function state
+ * @bytecount: number of bytes processed so far
+ * @buf: partial block buffer; bytecount % MD5_BLOCK_SIZE bytes are valid
+ */
+struct md5_ctx {
+ struct md5_block_state state;
+ u64 bytecount;
+ u8 buf[MD5_BLOCK_SIZE] __aligned(__alignof__(__le64));
+};
+
+/**
+ * md5_init() - Initialize an MD5 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider md5() instead.
+ *
+ * Context: Any context.
+ */
+void md5_init(struct md5_ctx *ctx);
+
+/**
+ * md5_update() - Update an MD5 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void md5_update(struct md5_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * md5_final() - Finish computing an MD5 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting MD5 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void md5_final(struct md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * md5() - Compute MD5 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting MD5 message digest
+ *
+ * Context: Any context.
+ */
+void md5(const u8 *data, size_t len, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * struct hmac_md5_key - Prepared key for HMAC-MD5
+ * @istate: private
+ * @ostate: private
+ */
+struct hmac_md5_key {
+ struct md5_block_state istate;
+ struct md5_block_state ostate;
+};
+
+/**
+ * struct hmac_md5_ctx - Context for computing HMAC-MD5 of a message
+ * @hash_ctx: private
+ * @ostate: private
+ */
+struct hmac_md5_ctx {
+ struct md5_ctx hash_ctx;
+ struct md5_block_state ostate;
};
-#endif
+/**
+ * hmac_md5_preparekey() - Prepare a key for HMAC-MD5
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_md5_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_preparekey(struct hmac_md5_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_md5_init() - Initialize an HMAC-MD5 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_md5() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_init(struct hmac_md5_ctx *ctx, const struct hmac_md5_key *key);
+
+/**
+ * hmac_md5_init_usingrawkey() - Initialize an HMAC-MD5 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_md5_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_init_usingrawkey(struct hmac_md5_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_md5_update() - Update an HMAC-MD5 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_md5_update(struct hmac_md5_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ md5_update(&ctx->hash_ctx, data, data_len);
+}
+
+/**
+ * hmac_md5_final() - Finish computing an HMAC-MD5 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * hmac_md5() - Compute HMAC-MD5 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * If you're using the key only once, consider using hmac_md5_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_md5(const struct hmac_md5_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * hmac_md5_usingrawkey() - Compute HMAC-MD5 in one shot, using a raw key
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * If you're using the key multiple times, prefer to use hmac_md5_preparekey()
+ * followed by multiple calls to hmac_md5() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least MD5_DIGEST_SIZE]);
+
+#endif /* _CRYPTO_MD5_H */
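
A minimal incremental-use sketch of the MD5 library API declared above, hashing a two-part message; the fragment names are illustrative only.

static void example_md5(const u8 *hdr, size_t hdr_len,
			const u8 *body, size_t body_len,
			u8 digest[MD5_DIGEST_SIZE])
{
	struct md5_ctx ctx;

	md5_init(&ctx);
	md5_update(&ctx, hdr, hdr_len);
	md5_update(&ctx, body, body_len);
	md5_final(&ctx, digest);	/* also zeroizes ctx */
}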
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 0ef577cc00e3..1c66abf9de3b 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,4 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
-void crypto_put_default_null_skcipher(void);
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 090692ec3bc7..190beb427c6d 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -7,7 +7,6 @@
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
@@ -38,17 +37,8 @@ struct poly1305_state {
};
};
-struct poly1305_desc_ctx {
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* how many keys have been set in r[] */
- unsigned short rset;
- /* whether s[] has been set */
- bool sset;
- /* finalize key */
- u32 s[4];
+/* Combined state for block function. */
+struct poly1305_block_state {
/* accumulator */
struct poly1305_state h;
/* key */
@@ -58,42 +48,20 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-
-static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_init_arch(desc, key);
- else
- poly1305_init_generic(desc, key);
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-
-static inline void poly1305_update(struct poly1305_desc_ctx *desc,
- const u8 *src, unsigned int nbytes)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_update_arch(desc, src, nbytes);
- else
- poly1305_update_generic(desc, src, nbytes);
-}
-
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+struct poly1305_desc_ctx {
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* finalize key */
+ u32 s[4];
+ struct poly1305_block_state state;
+};
-static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_final_arch(desc, digest);
- else
- poly1305_final_generic(desc, digest);
-}
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[at_least POLY1305_KEY_SIZE]);
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes);
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
#endif
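
The calling sequence for the reshaped descriptor is unchanged from the caller's point of view; a minimal sketch:

static void example_poly1305_mac(const u8 key[POLY1305_KEY_SIZE],
				 const u8 *msg, unsigned int len,
				 u8 mac[POLY1305_DIGEST_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, msg, len);
	poly1305_final(&desc, mac);
}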
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
index 1d630f371f77..b28b8ef11353 100644
--- a/include/crypto/polyval.h
+++ b/include/crypto/polyval.h
@@ -1,22 +1,190 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Common values for the Polyval hash algorithm
+ * POLYVAL library API
*
- * Copyright 2021 Google LLC
+ * Copyright 2025 Google LLC
*/
#ifndef _CRYPTO_POLYVAL_H
#define _CRYPTO_POLYVAL_H
+#include <linux/string.h>
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLYVAL_BLOCK_SIZE 16
#define POLYVAL_DIGEST_SIZE 16
-void polyval_mul_non4k(u8 *op1, const u8 *op2);
+/**
+ * struct polyval_elem - An element of the POLYVAL finite field
+ * @bytes: View of the element as a byte array (unioned with @lo and @hi)
+ * @lo: The low 64 terms of the element's polynomial
+ * @hi: The high 64 terms of the element's polynomial
+ *
+ * This represents an element of the finite field GF(2^128), using the POLYVAL
+ * convention: little-endian byte order and natural bit order.
+ */
+struct polyval_elem {
+ union {
+ u8 bytes[POLYVAL_BLOCK_SIZE];
+ struct {
+ __le64 lo;
+ __le64 hi;
+ };
+ };
+};
-void polyval_update_non4k(const u8 *key, const u8 *in,
- size_t nblocks, u8 *accumulator);
+/**
+ * struct polyval_key - Prepared key for POLYVAL
+ *
+ * This may contain just the raw key H, or it may contain precomputed key
+ * powers, depending on the platform's POLYVAL implementation. Use
+ * polyval_preparekey() to initialize this.
+ *
+ * By H^i we mean H^(i-1) * H * x^-128, with base case H^1 = H. I.e. the
+ * exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128.
+ */
+struct polyval_key {
+#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
+#ifdef CONFIG_ARM64
+ /** @h_powers: Powers of the hash key H^8 through H^1 */
+ struct polyval_elem h_powers[8];
+#elif defined(CONFIG_X86)
+ /** @h_powers: Powers of the hash key H^8 through H^1 */
+ struct polyval_elem h_powers[8];
+#else
+#error "Unhandled arch"
+#endif
+#else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
+ /** @h: The hash key H */
+ struct polyval_elem h;
+#endif /* !CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
+};
+
+/**
+ * struct polyval_ctx - Context for computing a POLYVAL value
+ * @key: Pointer to the prepared POLYVAL key. The user of the API is
+ * responsible for ensuring that the key lives as long as the context.
+ * @acc: The accumulator
+ * @partial: Number of data bytes processed so far modulo POLYVAL_BLOCK_SIZE
+ */
+struct polyval_ctx {
+ const struct polyval_key *key;
+ struct polyval_elem acc;
+ size_t partial;
+};
+/**
+ * polyval_preparekey() - Prepare a POLYVAL key
+ * @key: (output) The key structure to initialize
+ * @raw_key: The raw hash key
+ *
+ * Initialize a POLYVAL key structure from a raw key. This may be a simple
+ * copy, or it may involve precomputing powers of the key, depending on the
+ * platform's POLYVAL implementation.
+ *
+ * Context: Any context.
+ */
+#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
+void polyval_preparekey(struct polyval_key *key,
+ const u8 raw_key[POLYVAL_BLOCK_SIZE]);
+
+#else
+static inline void polyval_preparekey(struct polyval_key *key,
+ const u8 raw_key[POLYVAL_BLOCK_SIZE])
+{
+ /* Just a simple copy, so inline it. */
+ memcpy(key->h.bytes, raw_key, POLYVAL_BLOCK_SIZE);
+}
#endif
+
+/**
+ * polyval_init() - Initialize a POLYVAL context for a new message
+ * @ctx: The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ */
+static inline void polyval_init(struct polyval_ctx *ctx,
+ const struct polyval_key *key)
+{
+ *ctx = (struct polyval_ctx){ .key = key };
+}
+
+/**
+ * polyval_import_blkaligned() - Import a POLYVAL accumulator value
+ * @ctx: The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ * @acc: The accumulator value to import.
+ *
+ * This imports an accumulator that was saved by polyval_export_blkaligned().
+ * The same key must be used.
+ */
+static inline void
+polyval_import_blkaligned(struct polyval_ctx *ctx,
+ const struct polyval_key *key,
+ const struct polyval_elem *acc)
+{
+ *ctx = (struct polyval_ctx){ .key = key, .acc = *acc };
+}
+
+/**
+ * polyval_export_blkaligned() - Export a POLYVAL accumulator value
+ * @ctx: The context to export the accumulator value from
+ * @acc: (output) The exported accumulator value
+ *
+ * This exports the accumulator from a POLYVAL context. The number of data
+ * bytes processed so far must be a multiple of POLYVAL_BLOCK_SIZE.
+ */
+static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx,
+ struct polyval_elem *acc)
+{
+ *acc = ctx->acc;
+}
+
+/**
+ * polyval_update() - Update a POLYVAL context with message data
+ * @ctx: The context to update; must have been initialized
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * polyval_final() - Finish computing a POLYVAL value
+ * @ctx: The context to finalize
+ * @out: The output value
+ *
+ * If the total data length isn't a multiple of POLYVAL_BLOCK_SIZE, then the
+ * final block is automatically zero-padded.
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]);
+
+/**
+ * polyval() - Compute a POLYVAL value
+ * @key: The prepared key
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ * @out: The output value
+ *
+ * Context: Any context.
+ */
+static inline void polyval(const struct polyval_key *key,
+ const u8 *data, size_t len,
+ u8 out[POLYVAL_BLOCK_SIZE])
+{
+ struct polyval_ctx ctx;
+
+ polyval_init(&ctx, key);
+ polyval_update(&ctx, data, len);
+ polyval_final(&ctx, out);
+}
+
+#endif /* _CRYPTO_POLYVAL_H */
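
Beyond the one-shot polyval() inline above, a minimal sketch of preparing a key once and hashing two fragments incrementally:

static void example_polyval(const u8 raw_key[POLYVAL_BLOCK_SIZE],
			    const u8 *a, size_t a_len,
			    const u8 *b, size_t b_len,
			    u8 out[POLYVAL_BLOCK_SIZE])
{
	struct polyval_key key;
	struct polyval_ctx ctx;

	polyval_preparekey(&key, raw_key);
	polyval_init(&ctx, &key);	/* ctx keeps a pointer to 'key' */
	polyval_update(&ctx, a, a_len);
	polyval_update(&ctx, b, b_len);
	polyval_final(&ctx, out);	/* zeroizes ctx */
}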
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index b7f308977c84..81098e00c08f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -104,9 +104,6 @@ static inline int restrict_link_by_digsig(struct key *dest_keyring,
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
-extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 5ac4388f50e1..d451b54b322a 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -102,12 +102,10 @@ static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
}
/**
- * crypto_rng_alg - obtain name of RNG
- * @tfm: cipher handle
- *
- * Return the generic name (cra_name) of the initialized random number generator
+ * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
+ * @tfm: RNG handle
*
- * Return: generic name string
+ * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
@@ -171,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
- * is initialized with the caller provided seed or automatically, depending
- * on the random number generator type (the ANSI X9.31 RNG requires
- * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
- * The seed is provided as a parameter to this function call. The provided seed
- * should have the length of the seed size defined for the random number
- * generator as defined by crypto_rng_seedsize.
+ * is initialized with the caller provided seed or automatically, depending on
+ * the random number generator type. (The SP800-90A DRBGs perform an automatic
+ * seeding.) The seed is provided as a parameter to this function call. The
+ * provided seed should have the length of the seed size defined for the random
+ * number generator as defined by crypto_rng_seedsize.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 32fc4473175b..624fab589c2c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -26,76 +26,228 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
sg_mark_end(head);
}
-static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+static inline void scatterwalk_start(struct scatter_walk *walk,
+ struct scatterlist *sg)
{
- unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
- unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
- return len_this_page > len ? len : len_this_page;
+ walk->sg = sg;
+ walk->offset = sg->offset;
+}
+
+/*
+ * This is equivalent to scatterwalk_start(walk, sg) followed by
+ * scatterwalk_skip(walk, pos).
+ */
+static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
+ struct scatterlist *sg,
+ unsigned int pos)
+{
+ while (pos > sg->length) {
+ pos -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + pos;
}
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
unsigned int nbytes)
{
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- return nbytes > len_this_page ? len_this_page : nbytes;
+ unsigned int len_this_sg;
+ unsigned int limit;
+
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;
+
+ /*
+ * HIGHMEM case: the page may have to be mapped into memory. To avoid
+ * the complexity of having to map multiple pages at once per sg entry,
+ * clamp the returned length to not cross a page boundary.
+ *
+ * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
+ * already mapped contiguously in the kernel's direct map. For improved
+ * performance, allow the walker to return data segments that cross a
+ * page boundary. Do still cap the length to PAGE_SIZE, since some
+ * users rely on that to avoid disabling preemption for too long when
+ * using SIMD. It's also needed for when skcipher_walk uses a bounce
+ * page due to the data not being aligned to the algorithm's alignmask.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ limit = PAGE_SIZE - offset_in_page(walk->offset);
+ else
+ limit = PAGE_SIZE;
+
+ return min3(nbytes, len_this_sg, limit);
+}
+
+/*
+ * Create a scatterlist that represents the remaining data in a walk. Uses
+ * chaining to reference the original scatterlist, so this uses at most two
+ * entries in @sg_out regardless of the number of entries in the original list.
+ * Assumes that sg_init_table() was already done.
+ */
+static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
+ struct scatterlist sg_out[2])
+{
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ sg_set_page(sg_out, sg_page(walk->sg),
+ walk->sg->offset + walk->sg->length - walk->offset,
+ walk->offset);
+ scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}
-static inline void scatterwalk_advance(struct scatter_walk *walk,
- unsigned int nbytes)
+static inline void scatterwalk_map(struct scatter_walk *walk)
{
- walk->offset += nbytes;
+ struct page *base_page = sg_page(walk->sg);
+ unsigned int offset = walk->offset;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ struct page *page;
+
+ page = base_page + (offset >> PAGE_SHIFT);
+ offset = offset_in_page(offset);
+ addr = kmap_local_page(page) + offset;
+ } else {
+ /*
+ * When !HIGHMEM we allow the walker to return segments that
+ * span a page boundary; see scatterwalk_clamp(). To make it
+ * clear that in this case we're working in the linear buffer of
+ * the whole sg entry in the kernel's direct map rather than
+ * within the mapped buffer of a single page, compute the
+ * address as an offset from the page_address() of the first
+ * page of the sg entry. Either way the result is the address
+ * in the direct map, but this makes it clearer what is really
+ * going on.
+ */
+ addr = page_address(base_page) + offset;
+ }
+
+ walk->__addr = addr;
}
-static inline struct page *scatterwalk_page(struct scatter_walk *walk)
+/**
+ * scatterwalk_next() - Get the next data buffer in a scatterlist walk
+ * @walk: the scatter_walk
+ * @total: the total number of bytes remaining, > 0
+ *
+ * A virtual address for the next segment of data from the scatterlist will
+ * be placed into @walk->addr. The caller must call scatterwalk_done_src()
+ * or scatterwalk_done_dst() when it is done using this virtual address.
+ *
+ * Returns: the next number of bytes available, <= @total
+ */
+static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
+ unsigned int total)
{
- return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ unsigned int nbytes = scatterwalk_clamp(walk, total);
+
+ scatterwalk_map(walk);
+ return nbytes;
}
-static inline void scatterwalk_unmap(void *vaddr)
+static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
- kunmap_local(vaddr);
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ kunmap_local(walk->__addr);
}
-static inline void scatterwalk_start(struct scatter_walk *walk,
- struct scatterlist *sg)
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- walk->sg = sg;
- walk->offset = sg->offset;
+ walk->offset += nbytes;
}
-static inline void *scatterwalk_map(struct scatter_walk *walk)
+/**
+ * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address was not written to, i.e. it is source data.
+ */
+static inline void scatterwalk_done_src(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- return kmap_local_page(scatterwalk_page(walk)) +
- offset_in_page(walk->offset);
+ scatterwalk_unmap(walk);
+ scatterwalk_advance(walk, nbytes);
}
-static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
{
- if (out) {
- struct page *page;
+ unsigned int num_pages;
- page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- flush_dcache_page(page);
- }
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
- if (more && walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, sg_next(walk->sg));
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (unsigned int i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
}
-static inline void scatterwalk_done(struct scatter_walk *walk, int out,
- int more)
+/**
+ * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address may have been written to, i.e. it is
+ * destination data.
+ */
+static inline void scatterwalk_done_dst(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
- !(walk->offset & (PAGE_SIZE - 1)))
- scatterwalk_pagedone(walk, out, more);
+ scatterwalk_unmap(walk);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
+ walk->offset, nbytes);
+ scatterwalk_advance(walk, nbytes);
}
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out);
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);
+
+void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out);
+void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes);
+
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes);
+
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes);
+
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
+/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
+static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ unsigned int start,
+ unsigned int nbytes, int out)
+{
+ if (out)
+ memcpy_to_sglist(sg, start, buf, nbytes);
+ else
+ memcpy_from_sglist(buf, sg, start, nbytes);
+}
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
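
A sketch of the stepwise source walk with the next/done API above, consuming 'total' bytes from a scatterlist; example_consume() is a hypothetical stand-in for real per-chunk processing.

static void example_walk_src(struct scatterlist *sg, unsigned int total)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	while (total) {
		unsigned int n = scatterwalk_next(&walk, total);

		example_consume(walk.addr, n);	/* source data only */
		scatterwalk_done_src(&walk, n);
		total -= n;
	}
}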
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..27f08b972931 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -10,6 +10,7 @@
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
+#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer)
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -25,14 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
/*
* An implementation of SHA-1's compression function. Don't use in new code!
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
@@ -40,7 +33,187 @@ extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
*/
#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
#define SHA1_WORKSPACE_WORDS 16
-void sha1_init(__u32 *buf);
+void sha1_init_raw(__u32 *buf);
void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+/* State for the SHA-1 compression function */
+struct sha1_block_state {
+ u32 h[SHA1_DIGEST_SIZE / 4];
+};
+
+/**
+ * struct sha1_ctx - Context for hashing a message with SHA-1
+ * @state: the compression function state
+ * @bytecount: number of bytes processed so far
+ * @buf: partial block buffer; bytecount % SHA1_BLOCK_SIZE bytes are valid
+ */
+struct sha1_ctx {
+ struct sha1_block_state state;
+ u64 bytecount;
+ u8 buf[SHA1_BLOCK_SIZE];
+};
+
+/**
+ * sha1_init() - Initialize a SHA-1 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha1() instead.
+ *
+ * Context: Any context.
+ */
+void sha1_init(struct sha1_ctx *ctx);
+
+/**
+ * sha1_update() - Update a SHA-1 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void sha1_update(struct sha1_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * sha1_final() - Finish computing a SHA-1 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-1 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha1_final(struct sha1_ctx *ctx, u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * sha1() - Compute SHA-1 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-1 message digest
+ *
+ * Context: Any context.
+ */
+void sha1(const u8 *data, size_t len, u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha1_key - Prepared key for HMAC-SHA1
+ * @istate: private
+ * @ostate: private
+ */
+struct hmac_sha1_key {
+ struct sha1_block_state istate;
+ struct sha1_block_state ostate;
+};
+
+/**
+ * struct hmac_sha1_ctx - Context for computing HMAC-SHA1 of a message
+ * @sha_ctx: private
+ * @ostate: private
+ */
+struct hmac_sha1_ctx {
+ struct sha1_ctx sha_ctx;
+ struct sha1_block_state ostate;
+};
+
+/**
+ * hmac_sha1_preparekey() - Prepare a key for HMAC-SHA1
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha1_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_preparekey(struct hmac_sha1_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha1_init() - Initialize an HMAC-SHA1 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha1() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_init(struct hmac_sha1_ctx *ctx, const struct hmac_sha1_key *key);
+
+/**
+ * hmac_sha1_init_usingrawkey() - Initialize an HMAC-SHA1 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha1_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_init_usingrawkey(struct hmac_sha1_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha1_update() - Update an HMAC-SHA1 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha1_update(struct hmac_sha1_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ sha1_update(&ctx->sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha1_final() - Finish computing an HMAC-SHA1 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_final(struct hmac_sha1_ctx *ctx,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * hmac_sha1() - Compute HMAC-SHA1 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * If you're using the key only once, consider using hmac_sha1_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha1(const struct hmac_sha1_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * hmac_sha1_usingrawkey() - Compute HMAC-SHA1 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * If you're using the key multiple times, prefer to use hmac_sha1_preparekey()
+ * followed by multiple calls to hmac_sha1() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
#endif /* _CRYPTO_SHA1_H */
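
A minimal sketch of preparing an HMAC-SHA1 key once and reusing it for two messages, per the preparekey recommendation above; as the kernel-doc notes, zeroizing the prepared key afterwards is the caller's job.

static void example_hmac_sha1(const u8 *raw_key, size_t raw_key_len,
			      const u8 *m1, size_t l1,
			      const u8 *m2, size_t l2,
			      u8 out1[SHA1_DIGEST_SIZE],
			      u8 out2[SHA1_DIGEST_SIZE])
{
	struct hmac_sha1_key key;

	hmac_sha1_preparekey(&key, raw_key, raw_key_len);
	hmac_sha1(&key, m1, l1, out1);
	hmac_sha1(&key, m2, l2, out2);
	memzero_explicit(&key, sizeof(key));
}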
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
deleted file mode 100644
index 2e0e7c3827d1..000000000000
--- a/include/crypto/sha1_base.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha1_base.h - core logic for SHA-1 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA1_BASE_H
-#define _CRYPTO_SHA1_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
-
-static inline int sha1_base_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA1_H0;
- sctx->state[1] = SHA1_H1;
- sctx->state[2] = SHA1_H2;
- sctx->state[3] = SHA1_H3;
- sctx->state[4] = SHA1_H4;
- sctx->count = 0;
-
- return 0;
-}
-
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
-}
-
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
-{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
-
- return 0;
-}
-
-static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-#endif /* _CRYPTO_SHA1_BASE_H */
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index b9e9281d76c9..7bb8fe169daf 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -13,12 +13,14 @@
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
+#define SHA256_STATE_WORDS 8
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
+#define SHA512_STATE_SIZE 80
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
@@ -64,9 +66,45 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
+struct crypto_sha256_state {
+ u32 state[SHA256_STATE_WORDS];
u64 count;
+};
+
+static inline void sha224_block_init(struct crypto_sha256_state *sctx)
+{
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+}
+
+static inline void sha256_block_init(struct crypto_sha256_state *sctx)
+{
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+}
+
+struct sha256_state {
+ union {
+ struct crypto_sha256_state ctx;
+ struct {
+ u32 state[SHA256_STATE_WORDS];
+ u64 count;
+ };
+ };
u8 buf[SHA256_BLOCK_SIZE];
};
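
The union above keeps the legacy field names valid while letting the bare compression state be handled as one object; a sketch (illustrative, not part of the patch):

    struct sha256_state s;
    struct crypto_sha256_state *core = &s.ctx; /* aliases s.state[] and s.count */

    sha256_block_init(core); /* legacy code still sees s.state[0] == SHA256_H0 */
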
@@ -76,59 +114,800 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
-struct shash_desc;
+/* State for the SHA-256 (and SHA-224) compression function */
+struct sha256_block_state {
+ u32 h[SHA256_STATE_WORDS];
+};
+
+/*
+ * Context structure, shared by SHA-224 and SHA-256. The sha224_ctx and
+ * sha256_ctx structs wrap this one so that the API has proper typing and
+ * doesn't allow mixing the SHA-224 and SHA-256 functions arbitrarily.
+ */
+struct __sha256_ctx {
+ struct sha256_block_state state;
+ u64 bytecount;
+ u8 buf[SHA256_BLOCK_SIZE] __aligned(__alignof__(__be64));
+};
+void __sha256_update(struct __sha256_ctx *ctx, const u8 *data, size_t len);
+
+/*
+ * HMAC key and message context structs, shared by HMAC-SHA224 and HMAC-SHA256.
+ * The hmac_sha224_* and hmac_sha256_* structs wrap this one so that the API has
+ * proper typing and doesn't allow mixing the functions arbitrarily.
+ */
+struct __hmac_sha256_key {
+ struct sha256_block_state istate;
+ struct sha256_block_state ostate;
+};
+struct __hmac_sha256_ctx {
+ struct __sha256_ctx sha_ctx;
+ struct sha256_block_state ostate;
+};
+void __hmac_sha256_init(struct __hmac_sha256_ctx *ctx,
+ const struct __hmac_sha256_key *key);
+
+/**
+ * struct sha224_ctx - Context for hashing a message with SHA-224
+ * @ctx: private
+ */
+struct sha224_ctx {
+ struct __sha256_ctx ctx;
+};
+
+/**
+ * sha224_init() - Initialize a SHA-224 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha224() instead.
+ *
+ * Context: Any context.
+ */
+void sha224_init(struct sha224_ctx *ctx);
+
+/**
+ * sha224_update() - Update a SHA-224 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha224_update(struct sha224_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha256_update(&ctx->ctx, data, len);
+}
+
+/**
+ * sha224_final() - Finish computing a SHA-224 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-224 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha224_final(struct sha224_ctx *ctx, u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * sha224() - Compute SHA-224 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-224 message digest
+ *
+ * Context: Any context.
+ */
+void sha224(const u8 *data, size_t len, u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha224_key - Prepared key for HMAC-SHA224
+ * @key: private
+ */
+struct hmac_sha224_key {
+ struct __hmac_sha256_key key;
+};
+
+/**
+ * struct hmac_sha224_ctx - Context for computing HMAC-SHA224 of a message
+ * @ctx: private
+ */
+struct hmac_sha224_ctx {
+ struct __hmac_sha256_ctx ctx;
+};
+
+/**
+ * hmac_sha224_preparekey() - Prepare a key for HMAC-SHA224
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha224_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_preparekey(struct hmac_sha224_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha224_init() - Initialize an HMAC-SHA224 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha224() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha224_init(struct hmac_sha224_ctx *ctx,
+ const struct hmac_sha224_key *key)
+{
+ __hmac_sha256_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha224_init_usingrawkey() - Initialize an HMAC-SHA224 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha224_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_init_usingrawkey(struct hmac_sha224_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha224_update() - Update an HMAC-SHA224 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha224_update(struct hmac_sha224_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha256_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha224_final() - Finish computing an HMAC-SHA224 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_final(struct hmac_sha224_ctx *ctx,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * hmac_sha224() - Compute HMAC-SHA224 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * If you're using the key only once, consider using hmac_sha224_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha224(const struct hmac_sha224_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * hmac_sha224_usingrawkey() - Compute HMAC-SHA224 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha224_preparekey() followed by multiple calls to hmac_sha224() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
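+
+A sketch of the prepared-key, incremental flow documented above (not part of the patch; raw_key, hdr, and body are illustrative):
+
+    struct hmac_sha224_key key;
+    struct hmac_sha224_ctx ctx;
+    u8 mac[SHA224_DIGEST_SIZE];
+
+    hmac_sha224_preparekey(&key, raw_key, raw_key_len);
+
+    hmac_sha224_init(&ctx, &key);
+    hmac_sha224_update(&ctx, hdr, hdr_len);
+    hmac_sha224_update(&ctx, body, body_len);
+    hmac_sha224_final(&ctx, mac);           /* zeroizes ctx */
+
+    memzero_explicit(&key, sizeof(key));    /* per hmac_sha224_preparekey() */
+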
+/**
+ * struct sha256_ctx - Context for hashing a message with SHA-256
+ * @ctx: private
+ */
+struct sha256_ctx {
+ struct __sha256_ctx ctx;
+};
+
+/**
+ * sha256_init() - Initialize a SHA-256 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha256() instead.
+ *
+ * Context: Any context.
+ */
+void sha256_init(struct sha256_ctx *ctx);
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+/**
+ * sha256_update() - Update a SHA-256 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha256_update(struct sha256_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha256_update(&ctx->ctx, data, len);
+}
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
+/**
+ * sha256_final() - Finish computing a SHA-256 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-256 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha256_final(struct sha256_ctx *ctx, u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256() - Compute SHA-256 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256(const u8 *data, size_t len, u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x() - Compute two SHA-256 digests from a common initial
+ * context. On some CPUs, this is faster than sequentially
+ * computing each digest.
+ * @ctx: an optional initial context, which may have already processed data. If
+ * NULL, a default initial context is used (equivalent to sha256_init()).
+ * @data1: data for the first message
+ * @data2: data for the second message
+ * @len: the length of each of @data1 and @data2, in bytes
+ * @out1: (output) the first SHA-256 message digest
+ * @out2: (output) the second SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1,
+ const u8 *data2, size_t len,
+ u8 out1[at_least SHA256_DIGEST_SIZE],
+ u8 out2[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x_is_optimized() - Check if sha256_finup_2x() is using a real
+ * interleaved implementation, as opposed to a
+ * sequential fallback
+ * Return: true if optimized
+ *
+ * Context: Any context.
+ */
+bool sha256_finup_2x_is_optimized(void);
+
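+
+A sketch of the two-message interface (not part of the patch; the prefix and tail buffers are illustrative). Both tails must have the same length, and @ctx may be NULL for the default initial state:
+
+    struct sha256_ctx ctx;
+    u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];
+
+    sha256_init(&ctx);
+    sha256_update(&ctx, prefix, prefix_len);  /* shared by both messages */
+    sha256_finup_2x(&ctx, tail1, tail2, tail_len, d1, d2);
+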
+/**
+ * struct hmac_sha256_key - Prepared key for HMAC-SHA256
+ * @key: private
+ */
+struct hmac_sha256_key {
+ struct __hmac_sha256_key key;
+};
+
+/**
+ * struct hmac_sha256_ctx - Context for computing HMAC-SHA256 of a message
+ * @ctx: private
+ */
+struct hmac_sha256_ctx {
+ struct __hmac_sha256_ctx ctx;
+};
+
+/**
+ * hmac_sha256_preparekey() - Prepare a key for HMAC-SHA256
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha256_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_preparekey(struct hmac_sha256_key *key,
+ const u8 *raw_key, size_t raw_key_len);
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+/**
+ * hmac_sha256_init() - Initialize an HMAC-SHA256 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha256() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha256_init(struct hmac_sha256_ctx *ctx,
+ const struct hmac_sha256_key *key)
+{
+ __hmac_sha256_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha256_init_usingrawkey() - Initialize an HMAC-SHA256 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha256_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_init_usingrawkey(struct hmac_sha256_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
+/**
+ * hmac_sha256_update() - Update an HMAC-SHA256 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha256_update(struct hmac_sha256_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha256_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha256_final() - Finish computing an HMAC-SHA256 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_final(struct hmac_sha256_ctx *ctx,
+ u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * hmac_sha256() - Compute HMAC-SHA256 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * If you're using the key only once, consider using hmac_sha256_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha256(const struct hmac_sha256_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * hmac_sha256_usingrawkey() - Compute HMAC-SHA256 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha256_preparekey() followed by multiple calls to hmac_sha256() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA256_DIGEST_SIZE]);
+
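+
+A sketch of the reusable-key one-shot path recommended above (not part of the patch; the buffers are illustrative):
+
+    struct hmac_sha256_key key;
+    u8 mac[SHA256_DIGEST_SIZE];
+
+    hmac_sha256_preparekey(&key, raw_key, raw_key_len);
+    hmac_sha256(&key, msg, msg_len, mac);
+    /* key stays valid for further messages until zeroized by the caller */
+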
+/* State for the SHA-512 (and SHA-384) compression function */
+struct sha512_block_state {
+ u64 h[8];
+};
/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
+ * Context structure, shared by SHA-384 and SHA-512. The sha384_ctx and
+ * sha512_ctx structs wrap this one so that the API has proper typing and
+ * doesn't allow mixing the SHA-384 and SHA-512 functions arbitrarily.
+ */
+struct __sha512_ctx {
+ struct sha512_block_state state;
+ u64 bytecount_lo;
+ u64 bytecount_hi;
+ u8 buf[SHA512_BLOCK_SIZE] __aligned(__alignof__(__be64));
+};
+void __sha512_update(struct __sha512_ctx *ctx, const u8 *data, size_t len);
+
+/*
+ * HMAC key and message context structs, shared by HMAC-SHA384 and HMAC-SHA512.
+ * The hmac_sha384_* and hmac_sha512_* structs wrap this one so that the API has
+ * proper typing and doesn't allow mixing the functions arbitrarily.
+ */
+struct __hmac_sha512_key {
+ struct sha512_block_state istate;
+ struct sha512_block_state ostate;
+};
+struct __hmac_sha512_ctx {
+ struct __sha512_ctx sha_ctx;
+ struct sha512_block_state ostate;
+};
+void __hmac_sha512_init(struct __hmac_sha512_ctx *ctx,
+ const struct __hmac_sha512_key *key);
+
+/**
+ * struct sha384_ctx - Context for hashing a message with SHA-384
+ * @ctx: private
+ */
+struct sha384_ctx {
+ struct __sha512_ctx ctx;
+};
+
+/**
+ * sha384_init() - Initialize a SHA-384 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha384() instead.
*
- * For details see lib/crypto/sha256.c
+ * Context: Any context.
*/
+void sha384_init(struct sha384_ctx *ctx);
-static inline void sha256_init(struct sha256_state *sctx)
+/**
+ * sha384_update() - Update a SHA-384 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha384_update(struct sha384_ctx *ctx,
+ const u8 *data, size_t len)
{
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
+ __sha512_update(&ctx->ctx, data, len);
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha256_final(struct sha256_state *sctx, u8 *out);
-void sha256(const u8 *data, unsigned int len, u8 *out);
-static inline void sha224_init(struct sha256_state *sctx)
+/**
+ * sha384_final() - Finish computing a SHA-384 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-384 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha384_final(struct sha384_ctx *ctx, u8 out[at_least SHA384_DIGEST_SIZE]);
+
+/**
+ * sha384() - Compute SHA-384 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-384 message digest
+ *
+ * Context: Any context.
+ */
+void sha384(const u8 *data, size_t len, u8 out[at_least SHA384_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha384_key - Prepared key for HMAC-SHA384
+ * @key: private
+ */
+struct hmac_sha384_key {
+ struct __hmac_sha512_key key;
+};
+
+/**
+ * struct hmac_sha384_ctx - Context for computing HMAC-SHA384 of a message
+ * @ctx: private
+ */
+struct hmac_sha384_ctx {
+ struct __hmac_sha512_ctx ctx;
+};
+
+/**
+ * hmac_sha384_preparekey() - Prepare a key for HMAC-SHA384
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha384_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_preparekey(struct hmac_sha384_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha384_init() - Initialize an HMAC-SHA384 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha384() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha384_init(struct hmac_sha384_ctx *ctx,
+ const struct hmac_sha384_key *key)
{
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
+ __hmac_sha512_init(&ctx->ctx, &key->key);
}
-/* Simply use sha256_update as it is equivalent to sha224_update. */
-void sha224_final(struct sha256_state *sctx, u8 *out);
+
+/**
+ * hmac_sha384_init_usingrawkey() - Initialize an HMAC-SHA384 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha384_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_init_usingrawkey(struct hmac_sha384_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha384_update() - Update an HMAC-SHA384 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha384_update(struct hmac_sha384_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha512_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha384_final() - Finish computing an HMAC-SHA384 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_final(struct hmac_sha384_ctx *ctx,
+ u8 out[at_least SHA384_DIGEST_SIZE]);
+
+/**
+ * hmac_sha384() - Compute HMAC-SHA384 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * If you're using the key only once, consider using hmac_sha384_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha384(const struct hmac_sha384_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA384_DIGEST_SIZE]);
+
+/**
+ * hmac_sha384_usingrawkey() - Compute HMAC-SHA384 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha384_preparekey() followed by multiple calls to hmac_sha384() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA384_DIGEST_SIZE]);
+
+/**
+ * struct sha512_ctx - Context for hashing a message with SHA-512
+ * @ctx: private
+ */
+struct sha512_ctx {
+ struct __sha512_ctx ctx;
+};
+
+/**
+ * sha512_init() - Initialize a SHA-512 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha512() instead.
+ *
+ * Context: Any context.
+ */
+void sha512_init(struct sha512_ctx *ctx);
+
+/**
+ * sha512_update() - Update a SHA-512 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha512_update(struct sha512_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha512_update(&ctx->ctx, data, len);
+}
+
+/**
+ * sha512_final() - Finish computing a SHA-512 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-512 message digest
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha512_final(struct sha512_ctx *ctx, u8 out[at_least SHA512_DIGEST_SIZE]);
+
+/**
+ * sha512() - Compute SHA-512 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-512 message digest
+ *
+ * Context: Any context.
+ */
+void sha512(const u8 *data, size_t len, u8 out[at_least SHA512_DIGEST_SIZE]);
+
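+
+A sketch of the incremental SHA-512 flow (not part of the patch; part1 and part2 are illustrative):
+
+    struct sha512_ctx ctx;
+    u8 digest[SHA512_DIGEST_SIZE];
+
+    sha512_init(&ctx);
+    sha512_update(&ctx, part1, part1_len);
+    sha512_update(&ctx, part2, part2_len);
+    sha512_final(&ctx, digest);             /* zeroizes ctx */
+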
+/**
+ * struct hmac_sha512_key - Prepared key for HMAC-SHA512
+ * @key: private
+ */
+struct hmac_sha512_key {
+ struct __hmac_sha512_key key;
+};
+
+/**
+ * struct hmac_sha512_ctx - Context for computing HMAC-SHA512 of a message
+ * @ctx: private
+ */
+struct hmac_sha512_ctx {
+ struct __hmac_sha512_ctx ctx;
+};
+
+/**
+ * hmac_sha512_preparekey() - Prepare a key for HMAC-SHA512
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha512_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_preparekey(struct hmac_sha512_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha512_init() - Initialize an HMAC-SHA512 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha512() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha512_init(struct hmac_sha512_ctx *ctx,
+ const struct hmac_sha512_key *key)
+{
+ __hmac_sha512_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha512_init_usingrawkey() - Initialize an HMAC-SHA512 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha512_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_init_usingrawkey(struct hmac_sha512_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha512_update() - Update an HMAC-SHA512 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha512_update(struct hmac_sha512_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha512_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha512_final() - Finish computing an HMAC-SHA512 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_final(struct hmac_sha512_ctx *ctx,
+ u8 out[at_least SHA512_DIGEST_SIZE]);
+
+/**
+ * hmac_sha512() - Compute HMAC-SHA512 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * If you're using the key only once, consider using hmac_sha512_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha512(const struct hmac_sha512_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA512_DIGEST_SIZE]);
+
+/**
+ * hmac_sha512_usingrawkey() - Compute HMAC-SHA512 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha512_preparekey() followed by multiple calls to hmac_sha512() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA512_DIGEST_SIZE]);
#endif /* _CRYPTO_SHA2_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
deleted file mode 100644
index ab904d82236f..000000000000
--- a/include/crypto/sha256_base.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha256_base.h - core logic for SHA-256 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA256_BASE_H
-#define _CRYPTO_SHA256_BASE_H
-
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha224_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha224_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha256_init(sctx);
- return 0;
-}
-
-static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA256_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_update(sctx, data, len, block_fn);
-}
-
-static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
- sha256_block_fn *block_fn)
-{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_do_finalize(sctx, block_fn);
-}
-
-static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
- unsigned int digest_size)
-{
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- return lib_sha256_base_finish(sctx, out, digest_size);
-}
-
-#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 080f60c2e6b1..c9e4182ff74f 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -1,34 +1,346 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values for SHA-3 algorithms
+ *
+ * See also Documentation/crypto/sha3.rst
*/
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
+#include <linux/types.h>
+#include <linux/string.h>
+
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1
+
+/*
+ * SHAKE128 and SHAKE256 actually have variable output size, but this is used to
+ * calculate the block size (rate) analogously to the above.
+ */
+#define SHAKE128_DEFAULT_SIZE (128 / 8)
+#define SHAKE128_BLOCK_SIZE (200 - 2 * SHAKE128_DEFAULT_SIZE)
+#define SHAKE256_DEFAULT_SIZE (256 / 8)
+#define SHAKE256_BLOCK_SIZE (200 - 2 * SHAKE256_DEFAULT_SIZE)
+#define SHA3_STATE_SIZE 200
+
+/*
+ * State for the Keccak-f[1600] permutation: 25 64-bit words.
+ *
+ * We usually keep the state words as little-endian, to make absorbing and
+ * squeezing easier. (It means that absorbing and squeezing can just treat the
+ * state as a byte array.) The state words are converted to native-endian only
+ * temporarily by implementations of the permutation that need native-endian
+ * words. Of course, that conversion is a no-op on little-endian machines.
+ */
struct sha3_state {
- u64 st[25];
- unsigned int rsiz;
- unsigned int rsizw;
+ union {
+ __le64 words[SHA3_STATE_SIZE / 8];
+ u8 bytes[SHA3_STATE_SIZE];
+
+ u64 native_words[SHA3_STATE_SIZE / 8]; /* see comment above */
+ };
+};
+
+/* Internal context, shared by the digests (SHA3-*) and the XOFs (SHAKE*) */
+struct __sha3_ctx {
+ struct sha3_state state;
+ u8 digest_size; /* Digests only: the digest size in bytes */
+ u8 block_size; /* Block size in bytes */
+ u8 absorb_offset; /* Index of next state byte to absorb into */
+ u8 squeeze_offset; /* XOFs only: index of next state byte to extract */
+};
+
+void __sha3_update(struct __sha3_ctx *ctx, const u8 *in, size_t in_len);
+
+/**
+ * struct sha3_ctx - Context for SHA3-224, SHA3-256, SHA3-384, or SHA3-512
+ * @ctx: private
+ */
+struct sha3_ctx {
+ struct __sha3_ctx ctx;
+};
+
+/**
+ * sha3_zeroize_ctx() - Zeroize a SHA-3 context
+ * @ctx: The context to zeroize
+ *
+ * This is already called by sha3_final(). Call this explicitly when abandoning
+ * a context without calling sha3_final().
+ */
+static inline void sha3_zeroize_ctx(struct sha3_ctx *ctx)
+{
+ memzero_explicit(ctx, sizeof(*ctx));
+}
- unsigned int partial;
- u8 buf[SHA3_224_BLOCK_SIZE];
+/**
+ * struct shake_ctx - Context for SHAKE128 or SHAKE256
+ * @ctx: private
+ */
+struct shake_ctx {
+ struct __sha3_ctx ctx;
};
-int crypto_sha3_init(struct shash_desc *desc);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+/**
+ * shake_zeroize_ctx() - Zeroize a SHAKE context
+ * @ctx: The context to zeroize
+ *
+ * Call this after the last squeeze.
+ */
+static inline void shake_zeroize_ctx(struct shake_ctx *ctx)
+{
+ memzero_explicit(ctx, sizeof(*ctx));
+}
+
+/**
+ * sha3_224_init() - Initialize a context for SHA3-224
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-224 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_224_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_224_DIGEST_SIZE,
+ .ctx.block_size = SHA3_224_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_256_init() - Initialize a context for SHA3-256
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-256 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_256_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_256_DIGEST_SIZE,
+ .ctx.block_size = SHA3_256_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_384_init() - Initialize a context for SHA3-384
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-384 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_384_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_384_DIGEST_SIZE,
+ .ctx.block_size = SHA3_384_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_512_init() - Initialize a context for SHA3-512
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-512 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_512_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_512_DIGEST_SIZE,
+ .ctx.block_size = SHA3_512_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_update() - Update a SHA-3 digest context with input data
+ * @ctx: The context to update; must have been initialized
+ * @in: The input data
+ * @in_len: Length of the input data in bytes
+ *
+ * This can be called any number of times to add data to a SHA3-224, SHA3-256,
+ * SHA3-384, or SHA3-512 digest (depending on which init function was called).
+ *
+ * Context: Any context.
+ */
+static inline void sha3_update(struct sha3_ctx *ctx,
+ const u8 *in, size_t in_len)
+{
+ __sha3_update(&ctx->ctx, in, in_len);
+}
+
+/**
+ * sha3_final() - Finish computing a SHA-3 message digest
+ * @ctx: The context to finalize; must have been initialized
+ * @out: (output) The resulting SHA3-224, SHA3-256, SHA3-384, or SHA3-512
+ * message digest, matching the init function that was called. Note that
+ * the size differs for each one; see SHA3_*_DIGEST_SIZE.
+ *
+ * After finishing, this zeroizes @ctx. So the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha3_final(struct sha3_ctx *ctx, u8 *out);
+
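+
+A sketch of the shared digest flow (not part of the patch; data is illustrative). The output size follows whichever init function was called:
+
+    struct sha3_ctx ctx;
+    u8 digest[SHA3_256_DIGEST_SIZE];
+
+    sha3_256_init(&ctx);
+    sha3_update(&ctx, data, data_len);
+    sha3_final(&ctx, digest);               /* zeroizes ctx */
+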
+/**
+ * shake128_init() - Initialize a context for SHAKE128
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHAKE128 extendable-output function (XOF) computation.
+ *
+ * Context: Any context.
+ */
+static inline void shake128_init(struct shake_ctx *ctx)
+{
+ *ctx = (struct shake_ctx){
+ .ctx.block_size = SHAKE128_BLOCK_SIZE,
+ };
+}
+
+/**
+ * shake256_init() - Initialize a context for SHAKE256
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHAKE256 extendable-output function (XOF) computation.
+ *
+ * Context: Any context.
+ */
+static inline void shake256_init(struct shake_ctx *ctx)
+{
+ *ctx = (struct shake_ctx){
+ .ctx.block_size = SHAKE256_BLOCK_SIZE,
+ };
+}
+
+/**
+ * shake_update() - Update a SHAKE context with input data
+ * @ctx: The context to update; must have been initialized
+ * @in: The input data
+ * @in_len: Length of the input data in bytes
+ *
+ * This can be called any number of times to add more input data to SHAKE128 or
+ * SHAKE256. This cannot be called after squeezing has begun.
+ *
+ * Context: Any context.
+ */
+static inline void shake_update(struct shake_ctx *ctx,
+ const u8 *in, size_t in_len)
+{
+ __sha3_update(&ctx->ctx, in, in_len);
+}
+
+/**
+ * shake_squeeze() - Generate output from SHAKE128 or SHAKE256
+ * @ctx: The context to squeeze; must have been initialized
+ * @out: Where to write the resulting output data
+ * @out_len: The amount of data to extract to @out in bytes
+ *
+ * This may be called multiple times. A number of consecutive squeezes laid
+ * end-to-end will yield the same output as one big squeeze generating the same
+ * total amount of output. More input cannot be provided after squeezing has
+ * begun. After the last squeeze, call shake_zeroize_ctx().
+ *
+ * Context: Any context.
+ */
+void shake_squeeze(struct shake_ctx *ctx, u8 *out, size_t out_len);
+
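+
+A sketch showing that consecutive squeezes compose (not part of the patch; seed is illustrative):
+
+    struct shake_ctx ctx;
+    u8 okm[64];
+
+    shake128_init(&ctx);
+    shake_update(&ctx, seed, seed_len);
+    shake_squeeze(&ctx, okm, 32);           /* first 32 output bytes */
+    shake_squeeze(&ctx, okm + 32, 32);      /* same result as one 64-byte squeeze */
+    shake_zeroize_ctx(&ctx);
+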
+/**
+ * sha3_224() - Compute SHA3-224 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-224 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]);
+
+/**
+ * sha3_256() - Compute SHA3-256 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-256 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]);
+
+/**
+ * sha3_384() - Compute SHA3-384 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-384 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]);
+
+/**
+ * sha3_512() - Compute SHA3-512 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-512 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]);
+
+/**
+ * shake128() - Compute SHAKE128 in one shot
+ * @in: The input data to be used
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the output will be stored
+ * @out_len: Length of the output to produce in bytes
+ *
+ * Convenience function that computes SHAKE128 in one shot. Use this instead of
+ * the incremental API if you're able to provide all the input at once as well
+ * as receive all the output at once. All output lengths are supported.
+ *
+ * Context: Any context.
+ */
+void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len);
+
+/**
+ * shake256() - Compute SHAKE256 in one shot
+ * @in: The input data to be used
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the output will be stored
+ * @out_len: Length of the output to produce in bytes
+ *
+ * Convenience function that computes SHAKE256 in one shot. Use this instead of
+ * the incremental API if you're able to provide all the input at once as well
+ * as receive all the output at once. All output lengths are supported.
+ *
+ * Context: Any context.
+ */
+void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len);
-#endif
+#endif /* __CRYPTO_SHA3_H__ */
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
deleted file mode 100644
index b370b3340b16..000000000000
--- a/include/crypto/sha512_base.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha512_base.h - core logic for SHA-512 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA512_BASE_H
-#define _CRYPTO_SHA512_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha384_base_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA384_H0;
- sctx->state[1] = SHA384_H1;
- sctx->state[2] = SHA384_H2;
- sctx->state[3] = SHA384_H3;
- sctx->state[4] = SHA384_H4;
- sctx->state[5] = SHA384_H5;
- sctx->state[6] = SHA384_H6;
- sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static inline int sha512_base_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA512_H0;
- sctx->state[1] = SHA512_H1;
- sctx->state[2] = SHA512_H2;
- sctx->state[3] = SHA512_H3;
- sctx->state[4] = SHA512_H4;
- sctx->state[5] = SHA512_H5;
- sctx->state[6] = SHA512_H6;
- sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static inline int sha512_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha512_block_fn *block_fn)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
-
- if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA512_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA512_BLOCK_SIZE;
- len %= SHA512_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA512_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha512_base_do_finalize(struct shash_desc *desc,
- sha512_block_fn *block_fn)
-{
- const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
- struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *digest = (__be64 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
- put_unaligned_be64(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-#endif /* _CRYPTO_SHA512_BASE_H */
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
index d25186bb2be3..fa6dafafab3f 100644
--- a/include/crypto/sig.h
+++ b/include/crypto/sig.h
@@ -20,6 +20,57 @@ struct crypto_sig {
};
/**
+ * struct sig_alg - generic public key signature algorithm
+ *
+ * @sign: Function performs a sign operation as defined by the public key
+ * algorithm. On success, the signature size is returned.
+ * Optional.
+ * @verify: Function performs a complete verify operation as defined by the
+ * public key algorithm, returning verification status. Optional.
+ * @set_pub_key: Function invokes the algorithm specific set public key
+ * function, which knows how to decode and interpret
+ * the BER encoded public key and parameters. Mandatory.
+ * @set_priv_key: Function invokes the algorithm specific set private key
+ * function, which knows how to decode and interpret
+ * the BER encoded private key and parameters. Optional.
+ * @key_size: Function returns key size. Mandatory.
+ * @digest_size: Function returns maximum digest size. Optional.
+ * @max_size: Function returns maximum signature size. Optional.
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct sig_alg {
+ int (*sign)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+ int (*verify)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+ int (*set_pub_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ int (*set_priv_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ unsigned int (*key_size)(struct crypto_sig *tfm);
+ unsigned int (*digest_size)(struct crypto_sig *tfm);
+ unsigned int (*max_size)(struct crypto_sig *tfm);
+ int (*init)(struct crypto_sig *tfm);
+ void (*exit)(struct crypto_sig *tfm);
+
+ struct crypto_alg base;
+};
+
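+
+A driver-side skeleton showing how the ops table is typically filled in (illustrative only, not part of the patch; the foo_* callbacks are assumed, and registration is presumed to go through crypto_register_sig() in crypto/sig.c):
+
+    static struct sig_alg foo_rsa = {
+            .sign           = foo_sign,             /* optional */
+            .verify         = foo_verify,           /* optional */
+            .set_pub_key    = foo_set_pub_key,      /* mandatory */
+            .key_size       = foo_key_size,         /* mandatory */
+            .base = {
+                    .cra_name        = "foo(rsa)",
+                    .cra_driver_name = "foo-rsa-hw",
+                    .cra_priority    = 300,
+                    .cra_module      = THIS_MODULE,
+            },
+    };
+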
+/**
* DOC: Generic Public Key Signature API
*
* The Public Key Signature API is used with the algorithms of type
@@ -47,6 +98,21 @@ static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
return &tfm->base;
}
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_sig, base);
+}
+
+static inline struct sig_alg *__crypto_sig_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct sig_alg, base);
+}
+
+static inline struct sig_alg *crypto_sig_alg(struct crypto_sig *tfm)
+{
+ return __crypto_sig_alg(crypto_sig_tfm(tfm)->__crt_alg);
+}
+
/**
* crypto_free_sig() - free signature tfm handle
*
@@ -60,16 +126,55 @@ static inline void crypto_free_sig(struct crypto_sig *tfm)
}
/**
- * crypto_sig_maxsize() - Get len for output buffer
+ * crypto_sig_keysize() - Get key size
*
- * Function returns the dest buffer size required for a given key.
+ * Function returns the key size in bits.
* Function assumes that the key is already set in the transformation. If this
- * function is called without a setkey or with a failed setkey, you will end up
+ * function is called without a setkey or with a failed setkey, you may end up
* in a NULL dereference.
*
* @tfm: signature tfm handle allocated with crypto_alloc_sig()
*/
-int crypto_sig_maxsize(struct crypto_sig *tfm);
+static inline unsigned int crypto_sig_keysize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->key_size(tfm);
+}
+
+/**
+ * crypto_sig_digestsize() - Get maximum digest size
+ *
+ * Function returns the maximum digest size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_digestsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->digest_size(tfm);
+}
+
+/**
+ * crypto_sig_maxsize() - Get maximum signature size
+ *
+ * Function returns the maximum signature size in bytes.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a setkey or with a failed setkey, you may end up
+ * in a NULL dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->max_size(tfm);
+}
/**
* crypto_sig_sign() - Invoke signing operation
@@ -82,11 +187,16 @@ int crypto_sig_maxsize(struct crypto_sig *tfm);
* @dst: destination output buffer
* @dlen: destination length
*
- * Return: zero on success; error code in case of error
+ * Return: signature size on success; error code in case of error
*/
-int crypto_sig_sign(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- void *dst, unsigned int dlen);
+static inline int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->sign(tfm, src, slen, dst, dlen);
+}
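
A caller-side sketch (not part of the patch; the algorithm name and buffers are illustrative, and error handling is abbreviated):

    struct crypto_sig *tfm;
    int ret;

    tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    ret = crypto_sig_set_privkey(tfm, ber_key, ber_key_len);
    if (!ret)
            ret = crypto_sig_sign(tfm, digest, digest_len,
                                  sig, sizeof(sig));
    /* on success, crypto_sig_sign() returns the signature size in bytes */
    crypto_free_sig(tfm);
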
/**
* crypto_sig_verify() - Invoke signature verification
@@ -102,9 +212,14 @@ int crypto_sig_sign(struct crypto_sig *tfm,
*
* Return: zero on verification success; error code in case of error.
*/
-int crypto_sig_verify(struct crypto_sig *tfm,
- const void *src, unsigned int slen,
- const void *digest, unsigned int dlen);
+static inline int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->verify(tfm, src, slen, digest, dlen);
+}
/**
* crypto_sig_set_pubkey() - Invoke set public key operation
@@ -119,8 +234,13 @@ int crypto_sig_verify(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_pubkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_pub_key(tfm, key, keylen);
+}
/**
* crypto_sig_set_privkey() - Invoke set private key operation
@@ -135,6 +255,11 @@ int crypto_sig_set_pubkey(struct crypto_sig *tfm,
*
* Return: zero on success; error code in case of error
*/
-int crypto_sig_set_privkey(struct crypto_sig *tfm,
- const void *key, unsigned int keylen);
+static inline int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_priv_key(tfm, key, keylen);
+}
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 18a86e0af016..9e5853464345 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -214,16 +214,17 @@ struct lskcipher_alg {
#define MAX_SYNC_SKCIPHER_REQSIZE 384
/*
- * This performs a type-check against the "tfm" argument to make sure
+ * This performs a type-check against the "_tfm" argument to make sure
* all users have the correct skcipher tfm for doing on-stack requests.
*/
-#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- MAX_SYNC_SKCIPHER_REQSIZE + \
- (!(sizeof((struct crypto_sync_skcipher *)1 == \
- (typeof(tfm))1))) \
+ MAX_SYNC_SKCIPHER_REQSIZE \
] CRYPTO_MINALIGN_ATTR; \
- struct skcipher_request *name = (void *)__##name##_desc
+ struct skcipher_request *name = \
+ (((struct skcipher_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_skcipher_tfm((_tfm)), \
+ (void *)__##name##_desc)
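
Typical use of the macro, for reference (not part of the patch; the src/dst scatterlists and iv are illustrative). With the rewrite, req->base.tfm is pre-assigned, so the type check now happens through crypto_sync_skcipher_tfm():

    struct crypto_sync_skcipher *tfm;
    int err;

    tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
    skcipher_request_set_sync_tfm(req, tfm);
    skcipher_request_set_callback(req, 0, NULL, NULL);
    skcipher_request_set_crypt(req, src, dst, len, iv);
    err = crypto_skcipher_encrypt(req);
    skcipher_request_zero(req);
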
/**
* DOC: Symmetric Key Cipher API
@@ -311,6 +312,12 @@ static inline struct crypto_tfm *crypto_lskcipher_tfm(
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_skcipher_tfm(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_tfm(&tfm->base);
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
deleted file mode 100644
index 04a92c1013c8..000000000000
--- a/include/crypto/sm2.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * sm2.h - SM2 asymmetric public-key algorithm
- * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
- * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
- *
- * Copyright (c) 2020, Alibaba Group.
- * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#ifndef _CRYPTO_SM2_H
-#define _CRYPTO_SM2_H
-
-struct shash_desc;
-
-#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
-int sm2_compute_z_digest(struct shash_desc *desc,
- const void *key, unsigned int keylen, void *dgst);
-#else
-static inline int sm2_compute_z_digest(struct shash_desc *desc,
- const void *key, unsigned int keylen,
- void *dgst)
-{
- return -ENOTSUPP;
-}
-#endif
-
-#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 1f021ad0533f..c8d02c86c298 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -14,6 +14,7 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
+#define SM3_STATE_SIZE 40
#define SM3_T1 0x79CC4519
#define SM3_T2 0x7A879D8A
@@ -58,7 +59,6 @@ static inline void sm3_init(struct sm3_state *sctx)
sctx->count = 0;
}
-void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len);
-void sm3_final(struct sm3_state *sctx, u8 *out);
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
#endif
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 2f3a32ab97bb..7c53570bc05e 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -11,87 +11,59 @@
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
-#include <linux/crypto.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
static inline int sm3_base_init(struct shash_desc *desc)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-
+ sm3_init(shash_desc_ctx(desc));
return 0;
}
-static inline int sm3_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ sm3_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
struct sm3_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SM3_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SM3_BLOCK_SIZE);
+ return remain;
}
-static inline int sm3_base_do_finalize(struct shash_desc *desc,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sm3_block_fn *block_fn)
{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
struct sm3_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ union {
+ __be64 b64[SM3_BLOCK_SIZE / 4];
+ u8 u8[SM3_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SM3_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buffer, 1);
+ remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SM3_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
@@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
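With partial-block buffering moved out of these helpers (sm3_base_do_update_blocks() now only consumes whole blocks and returns the leftover byte count, while sm3_base_do_finup() pads and finishes in one step), a driver wires them up roughly as below; a minimal sketch using the sm3_block_generic() prototype from sm3.h above, with illustrative wrapper names:

static int sm3_example_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	/* Processes full blocks only; returns the number of leftover bytes. */
	return sm3_base_do_update_blocks(desc, data, len, sm3_block_generic);
}

static int sm3_example_finup(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *out)
{
	/* Pads the trailing data, appends the bit count, runs final blocks. */
	sm3_base_do_finup(desc, data, len, sm3_block_generic);
	return sm3_base_finish(desc, out);
}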
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index cae1b4a01971..570f720a113b 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,15 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- union {
- u8 buffer[STREEBOG_BLOCK_SIZE];
- struct streebog_uint512 m;
- };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
struct streebog_uint512 Sigma;
- size_t fillsize;
};
#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
index acbb917a00c6..2594f45777b5 100644
--- a/include/crypto/utils.h
+++ b/include/crypto/utils.h
@@ -7,7 +7,7 @@
#ifndef _CRYPTO_UTILS_H
#define _CRYPTO_UTILS_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/compiler_attributes.h>
#include <linux/types.h>
diff --git a/include/linux/einj-cxl.h b/include/cxl/einj.h
index 624ff6ff41f9..624ff6ff41f9 100644
--- a/include/linux/einj-cxl.h
+++ b/include/cxl/einj.h
diff --git a/include/cxl/event.h b/include/cxl/event.h
new file mode 100644
index 000000000000..6fd90f9cc203
--- /dev/null
+++ b/include/cxl/event.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/workqueue_types.h>
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 maint_op_sub_class;
+ __le16 ld_id;
+ u8 head_id;
+ u8 reserved[11];
+} __packed;
+
+struct cxl_event_media_hdr {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ /*
+ * The meaning of Validity Flags from bit 2 is
+ * different across DRAM and General Media records
+ */
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_media_hdr media_hdr;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 cme_threshold_ev_flags;
+ u8 cme_count[3];
+ u8 sub_type;
+ u8 reserved[41];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_media_hdr media_hdr;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 sub_channel;
+ u8 cme_threshold_ev_flags;
+ u8 cvme_count[3];
+ u8 sub_type;
+ u8 reserved;
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 validity_flags[2];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 event_sub_type;
+ u8 reserved[0x2a];
+} __packed;
+
+/*
+ * Memory Sparing Event Record - MSER
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+struct cxl_event_mem_sparing {
+ struct cxl_event_record_hdr hdr;
+ /*
+	 * The maintenance operation class and subclass fields defined in the
+	 * Memory Sparing Event Record duplicate those in the common event
+	 * record header, so they are defined as reserved here and will be
+	 * removed once the spec is corrected.
+ */
+ u8 rsv1;
+ u8 rsv2;
+ u8 flags;
+ u8 result;
+ __le16 validity_flags;
+ u8 reserved1[6];
+ __le16 res_avail;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 sub_channel;
+ u8 reserved2[0x25];
+} __packed;
+
+union cxl_event {
+ struct cxl_event_generic generic;
+ struct cxl_event_gen_media gen_media;
+ struct cxl_event_dram dram;
+ struct cxl_event_mem_module mem_module;
+ struct cxl_event_mem_sparing mem_sparing;
+ /* dram & gen_media event header */
+ struct cxl_event_media_hdr media_hdr;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+ uuid_t id;
+ union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+ CXL_CPER_EVENT_GENERIC,
+ CXL_CPER_EVENT_GEN_MEDIA,
+ CXL_CPER_EVENT_DRAM,
+ CXL_CPER_EVENT_MEM_MODULE,
+ CXL_CPER_EVENT_MEM_SPARING,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
+struct cxl_cper_event_rec {
+ struct {
+ u32 length;
+ u64 validation_bits;
+ struct cper_cxl_event_devid {
+ u16 vendor_id;
+ u16 device_id;
+ u8 func_num;
+ u8 device_num;
+ u8 bus_num;
+ u16 segment_num;
+ u16 slot_num; /* bits 2:0 reserved */
+ u8 reserved;
+ } __packed device_id;
+ struct cper_cxl_event_sn {
+ u32 lower_dw;
+ u32 upper_dw;
+ } __packed dev_serial_num;
+ } __packed hdr;
+
+ union cxl_event event;
+} __packed;
+
+struct cxl_cper_work_data {
+ enum cxl_event_type event_type;
+ struct cxl_cper_event_rec rec;
+};
+
+#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+
+/*
+ * The layout and values of this enumeration match the CXL Agent Type
+ * field in UEFI 2.10, Section N.2.13.
+ */
+enum {
+ RCD, /* Restricted CXL Device */
+ RCH_DP, /* Restricted CXL Host Downstream Port */
+ DEVICE, /* CXL Device */
+ LD, /* CXL Logical Device */
+ FMLD, /* CXL Fabric Manager managed Logical Device */
+ RP, /* CXL Root Port */
+ DSP, /* CXL Downstream Switch Port */
+ USP, /* CXL Upstream Switch Port */
+};
+
+#pragma pack(1)
+
+/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+struct cxl_cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+
+ /*
+ * Except for RCH Downstream Port, all the remaining CXL Agent
+ * types are uniquely identified by the PCIe compatible SBDF number.
+ */
+ union {
+ u64 rcrb_base_addr;
+ struct {
+ u8 function;
+ u8 device;
+ u8 bus;
+ u16 segment;
+ u8 reserved_1[3];
+ };
+ } agent_addr;
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u8 class_code[2];
+ u16 slot;
+ u8 reserved_1[4];
+ } device_id;
+
+ struct {
+ u32 lower_dw;
+ u32 upper_dw;
+ } dev_serial_num;
+
+ u8 capability[60];
+ u16 dvsec_len;
+ u16 err_len;
+ u8 reserved_2[4];
+};
+
+#pragma pack()
+
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
+struct cxl_cper_prot_err_work_data {
+ struct cxl_cper_sec_prot_err prot_err;
+ struct cxl_ras_capability_regs ras_cap;
+ int severity;
+};
+
+#ifdef CONFIG_ACPI_APEI_GHES
+int cxl_cper_register_work(struct work_struct *work);
+int cxl_cper_unregister_work(struct work_struct *work);
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+int cxl_cper_register_prot_err_work(struct work_struct *work);
+int cxl_cper_unregister_prot_err_work(struct work_struct *work);
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd);
+#else
+static inline int cxl_cper_register_work(struct work_struct *work)
+{
+ return 0;
+}
+
+static inline int cxl_cper_unregister_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return 0;
+}
+static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return 0;
+}
+#endif
+
+#endif /* _LINUX_CXL_EVENT_H */
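A hedged sketch of a consumer of the CPER work interface declared above: register a work item once, then drain the kfifo from its work function. It assumes the non-stub cxl_cper_kfifo_get() returns nonzero while records remain, as kfifo getters conventionally do; all names below are hypothetical.

static void example_cper_work_fn(struct work_struct *work)
{
	struct cxl_cper_work_data wd;

	while (cxl_cper_kfifo_get(&wd)) {
		/* Dispatch wd.rec according to wd.event_type. */
	}
}

static DECLARE_WORK(example_cper_work, example_cper_work_fn);

static int example_register(void)
{
	return cxl_cper_register_work(&example_cper_work);
}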
diff --git a/include/cxl/features.h b/include/cxl/features.h
new file mode 100644
index 000000000000..b9297693dae7
--- /dev/null
+++ b/include/cxl/features.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024-2025 Intel Corporation. */
+#ifndef __CXL_FEATURES_H__
+#define __CXL_FEATURES_H__
+
+#include <linux/uuid.h>
+#include <linux/fwctl.h>
+#include <uapi/cxl/features.h>
+
+/* Feature UUIDs used by the kernel */
+#define CXL_FEAT_PATROL_SCRUB_UUID \
+ UUID_INIT(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33, 0x75, 0x77, 0x4e, \
+ 0x06, 0xdb, 0x8a)
+
+#define CXL_FEAT_ECS_UUID \
+ UUID_INIT(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba, 0xb9, 0x69, 0x1e, \
+ 0x89, 0x33, 0x86)
+
+#define CXL_FEAT_SPPR_UUID \
+ UUID_INIT(0x892ba475, 0xfad8, 0x474e, 0x9d, 0x3e, 0x69, 0x2c, 0x91, \
+ 0x75, 0x68, 0xbb)
+
+#define CXL_FEAT_HPPR_UUID \
+ UUID_INIT(0x80ea4521, 0x786f, 0x4127, 0xaf, 0xb1, 0xec, 0x74, 0x59, \
+ 0xfb, 0x0e, 0x24)
+
+#define CXL_FEAT_CACHELINE_SPARING_UUID \
+ UUID_INIT(0x96C33386, 0x91dd, 0x44c7, 0x9e, 0xcb, 0xfd, 0xaf, 0x65, \
+ 0x03, 0xba, 0xc4)
+
+#define CXL_FEAT_ROW_SPARING_UUID \
+ UUID_INIT(0x450ebf67, 0xb135, 0x4f97, 0xa4, 0x98, 0xc2, 0xd5, 0x7f, \
+ 0x27, 0x9b, 0xed)
+
+#define CXL_FEAT_BANK_SPARING_UUID \
+ UUID_INIT(0x78b79636, 0x90ac, 0x4b64, 0xa4, 0xef, 0xfa, 0xac, 0x5d, \
+ 0x18, 0xa8, 0x63)
+
+#define CXL_FEAT_RANK_SPARING_UUID \
+ UUID_INIT(0x34dbaff5, 0x0552, 0x4281, 0x8f, 0x76, 0xda, 0x0b, 0x5e, \
+ 0x7a, 0x76, 0xa7)
+
+/* Feature commands capability supported by a device */
+enum cxl_features_capability {
+ CXL_FEATURES_NONE = 0,
+ CXL_FEATURES_RO,
+ CXL_FEATURES_RW,
+};
+
+/**
+ * struct cxl_features_state - The Features state for the device
+ * @cxlds: Pointer to CXL device state
+ * @entries: CXL feature entry context
+ */
+struct cxl_features_state {
+ struct cxl_dev_state *cxlds;
+ struct cxl_feat_entries {
+ int num_features;
+ int num_user_features;
+ struct cxl_feat_entry ent[] __counted_by(num_features);
+ } *entries;
+};
+
+struct cxl_mailbox;
+struct cxl_memdev;
+#ifdef CONFIG_CXL_FEATURES
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds);
+int devm_cxl_setup_features(struct cxl_dev_state *cxlds);
+int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd);
+#else
+static inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+{
+ return NULL;
+}
+
+static inline int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_cxl_setup_fwctl(struct device *host,
+ struct cxl_memdev *cxlmd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
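A minimal probe-path sketch for this interface, with hypothetical names; the stubs above return -EOPNOTSUPP when CONFIG_CXL_FEATURES is disabled, which a caller may choose to treat as non-fatal:

static int example_setup_features(struct device *host,
				  struct cxl_dev_state *cxlds,
				  struct cxl_memdev *cxlmd)
{
	int rc;

	rc = devm_cxl_setup_features(cxlds);
	if (rc)
		return rc;	/* or tolerate -EOPNOTSUPP if features are optional */

	/* Expose user-reachable features through fwctl. */
	return devm_cxl_setup_fwctl(host, cxlmd);
}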
diff --git a/include/cxl/mailbox.h b/include/cxl/mailbox.h
new file mode 100644
index 000000000000..c4e99e2e3a9d
--- /dev/null
+++ b/include/cxl/mailbox.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. */
+#ifndef __CXL_MBOX_H__
+#define __CXL_MBOX_H__
+#include <linux/rcuwait.h>
+#include <cxl/features.h>
+#include <uapi/linux/cxl_mem.h>
+
+/**
+ * struct cxl_mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ * the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ *            (output) Number of bytes generated by the device. For fixed-size
+ *            output commands this is always expected to be deterministic. For
+ * variable sized output commands, it tells the exact number of bytes
+ * written.
+ * @min_out: (input) Minimum number of bytes expected in @payload_out, used for validation
+ * @poll_count: (input) Number of timeouts to attempt.
+ * @poll_interval_ms: (input) Time between mailbox background command polling
+ * interval timeouts.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described
+ * in the Command Register section of CXL 2.0, 8.2.8.4.5. @payload_in and
+ * @payload_out are written to, and read from the Command Payload Registers
+ * defined in CXL 2.0 8.2.8.4.8.
+ */
+struct cxl_mbox_cmd {
+ u16 opcode;
+ void *payload_in;
+ void *payload_out;
+ size_t size_in;
+ size_t size_out;
+ size_t min_out;
+ int poll_count;
+ int poll_interval_ms;
+ u16 return_code;
+};
+
+/**
+ * struct cxl_mailbox - context for CXL mailbox operations
+ * @host: device that hosts the mailbox
+ * @enabled_cmds: mailbox commands that are enabled by the driver
+ * @exclusive_cmds: mailbox commands that are exclusive to the kernel
+ * @payload_size: Size of space for payload
+ * (CXL 3.1 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: mutex protects device mailbox and firmware
+ * @mbox_wait: rcuwait for mailbox
+ * @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @feat_cap: Features capability
+ */
+struct cxl_mailbox {
+ struct device *host;
+ DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
+ DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+ size_t payload_size;
+ struct mutex mbox_mutex; /* lock to protect mailbox context */
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd);
+ enum cxl_features_capability feat_cap;
+};
+
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);
+
+#endif
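A sketch of command submission through this context: initialize the mailbox once with cxl_mailbox_init(), then fill a struct cxl_mbox_cmd and call the transport's mbox_send(). The opcode below is a placeholder, not taken from this patch, and treating any nonzero device return code as -EIO is a simplification:

static int example_mbox_query(struct cxl_mailbox *cxl_mbox,
			      void *out, size_t out_size)
{
	struct cxl_mbox_cmd cmd = {
		.opcode = 0x4000,	/* placeholder command opcode */
		.payload_out = out,	/* caller-allocated, per the kernel-doc */
		.size_out = out_size,
		.min_out = out_size,	/* reject short replies */
	};
	int rc;

	rc = cxl_mbox->mbox_send(cxl_mbox, &cmd);
	if (rc)
		return rc;

	return cmd.return_code ? -EIO : 0;
}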
diff --git a/include/drm/Makefile b/include/drm/Makefile
new file mode 100644
index 000000000000..48fae3f167c7
--- /dev/null
+++ b/include/drm/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find * -name '*.h' 2>/dev/null)
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/include/drm/amd/isp.h b/include/drm/amd/isp.h
new file mode 100644
index 000000000000..ec868288abf2
--- /dev/null
+++ b/include/drm/amd/isp.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_H__
+#define __ISP_H__
+
+#include <linux/types.h>
+
+struct device;
+
+struct isp_platform_data {
+ void *adev;
+ u32 asic_type;
+ resource_size_t base_rmmio_size;
+};
+
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr);
+
+void isp_user_buffer_free(void *buf_obj);
+
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+#endif
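A short sketch pairing the kernel-buffer helpers above; the surrounding driver logic is assumed:

static int example_isp_buffer(struct device *dev, u64 size)
{
	void *buf_obj, *cpu_addr;
	u64 gpu_addr;
	int ret;

	ret = isp_kernel_buffer_alloc(dev, size, &buf_obj, &gpu_addr, &cpu_addr);
	if (ret)
		return ret;

	/* Program gpu_addr into the ISP and fill the buffer via cpu_addr. */

	isp_kernel_buffer_free(&buf_obj, &gpu_addr, &cpu_addr);
	return 0;
}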
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index b0dcc07334a1..cf17646c1310 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -10,16 +10,18 @@
#include <drm/drm_crtc.h>
struct analogix_dp_device;
+struct drm_dp_aux;
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
RK3399_EDP,
+ RK3588_EDP,
};
static inline bool is_rockchip(enum analogix_dp_devtype type)
{
- return type == RK3288_DP || type == RK3399_EDP;
+ return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP;
}
struct analogix_dp_plat_data {
@@ -29,8 +31,7 @@ struct analogix_dp_plat_data {
struct drm_connector *connector;
bool skip_connector;
- int (*power_on_start)(struct analogix_dp_plat_data *);
- int (*power_on_end)(struct analogix_dp_plat_data *);
+ int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
@@ -45,9 +46,11 @@ struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data);
int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev);
void analogix_dp_unbind(struct analogix_dp_device *dp);
-void analogix_dp_remove(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux);
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp);
+
#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h
new file mode 100644
index 000000000000..d05df49fd884
--- /dev/null
+++ b/include/drm/bridge/dw_dp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __DW_DP__
+#define __DW_DP__
+
+#include <linux/device.h>
+
+struct drm_encoder;
+struct dw_dp;
+
+struct dw_dp_plat_data {
+ u32 max_link_rate;
+};
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data);
+#endif /* __DW_DP__ */
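A binding sketch, assuming dw_dp_bind() follows the usual ERR_PTR-on-failure convention; the max_link_rate value is a placeholder:

static int example_dp_bind(struct device *dev, struct drm_encoder *encoder)
{
	static const struct dw_dp_plat_data pdata = {
		.max_link_rate = 810000,	/* placeholder value */
	};
	struct dw_dp *dp;

	dp = dw_dp_bind(dev, encoder, &pdata);
	return PTR_ERR_OR_ZERO(dp);
}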
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 6a46baa0737c..336f062e1f9d 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,9 +143,15 @@ struct dw_hdmi_plat_data {
const struct drm_display_info *info,
const struct drm_display_mode *mode);
+ /*
+	 * priv_audio is used by an additional audio device to retrieve its
+	 * driver data through this dw_hdmi_plat_data.
+ */
+ void *priv_audio;
+
/* Platform-specific audio enable/disable (optional) */
void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
- int width, int rate, int non_pcm);
+ int width, int rate, int non_pcm, int iec958);
void (*disable_audio)(struct dw_hdmi *hdmi);
/* Vendor PHY support */
@@ -179,6 +185,7 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958);
void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
@@ -208,4 +215,6 @@ void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
new file mode 100644
index 000000000000..3f461f6b9bbf
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ */
+
+#ifndef __DW_HDMI_QP__
+#define __DW_HDMI_QP__
+
+struct device;
+struct drm_encoder;
+struct dw_hdmi_qp;
+struct platform_device;
+
+struct dw_hdmi_qp_phy_ops {
+ int (*init)(struct dw_hdmi_qp *hdmi, void *data);
+ void (*disable)(struct dw_hdmi_qp *hdmi, void *data);
+ enum drm_connector_status (*read_hpd)(struct dw_hdmi_qp *hdmi, void *data);
+ void (*setup_hpd)(struct dw_hdmi_qp *hdmi, void *data);
+};
+
+struct dw_hdmi_qp_plat_data {
+ const struct dw_hdmi_qp_phy_ops *phy_ops;
+ void *phy_data;
+ int main_irq;
+ int cec_irq;
+ unsigned long ref_clk_rate;
+ /* Supported output formats: bitmask of @hdmi_colorspace */
+ unsigned int supported_formats;
+ /* Maximum bits per color channel: 8, 10 or 12 */
+ unsigned int max_bpc;
+};
+
+struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_qp_plat_data *plat_data);
+void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi);
+#endif /* __DW_HDMI_QP__ */
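A binding sketch for the QP variant; the phy callbacks and the IRQ name are hypothetical, and dw_hdmi_qp_bind() is assumed to return an ERR_PTR on failure:

static const struct dw_hdmi_qp_phy_ops example_phy_ops = {
	.init      = example_phy_init,	/* hypothetical callbacks */
	.disable   = example_phy_disable,
	.read_hpd  = example_phy_read_hpd,
	.setup_hpd = example_phy_setup_hpd,
};

static int example_qp_bind(struct platform_device *pdev,
			   struct drm_encoder *encoder)
{
	struct dw_hdmi_qp_plat_data pdata = {
		.phy_ops  = &example_phy_ops,
		.main_irq = platform_get_irq_byname(pdev, "main"), /* assumed name */
	};
	struct dw_hdmi_qp *hdmi = dw_hdmi_qp_bind(pdev, encoder, &pdata);

	return PTR_ERR_OR_ZERO(hdmi);
}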
diff --git a/include/drm/bridge/dw_mipi_dsi2.h b/include/drm/bridge/dw_mipi_dsi2.h
new file mode 100644
index 000000000000..c18c49379247
--- /dev/null
+++ b/include/drm/bridge/dw_mipi_dsi2.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Authors: Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#ifndef __DW_MIPI_DSI2__
+#define __DW_MIPI_DSI2__
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+
+struct drm_display_mode;
+struct drm_encoder;
+struct dw_mipi_dsi2;
+struct mipi_dsi_device;
+struct platform_device;
+
+enum dw_mipi_dsi2_phy_type {
+ DW_MIPI_DSI2_DPHY,
+ DW_MIPI_DSI2_CPHY,
+};
+
+struct dw_mipi_dsi2_phy_iface {
+ int ppi_width;
+ enum dw_mipi_dsi2_phy_type phy_type;
+};
+
+struct dw_mipi_dsi2_phy_timing {
+ u32 data_hs2lp;
+ u32 data_lp2hs;
+};
+
+struct dw_mipi_dsi2_phy_ops {
+ int (*init)(void *priv_data);
+ void (*power_on)(void *priv_data);
+ void (*power_off)(void *priv_data);
+ void (*get_interface)(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface);
+ int (*get_lane_mbps)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps);
+ int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing);
+ int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
+};
+
+struct dw_mipi_dsi2_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
+struct dw_mipi_dsi2_plat_data {
+ struct regmap *regmap;
+ unsigned int max_data_lanes;
+
+ enum drm_mode_status (*mode_valid)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
+
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+ const struct dw_mipi_dsi2_phy_ops *phy_ops;
+ const struct dw_mipi_dsi2_host_ops *host_ops;
+
+ void *priv_data;
+};
+
+struct dw_mipi_dsi2 *dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data);
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2);
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder);
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2);
+
+#endif /* __DW_MIPI_DSI2__ */
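The probe/bind split mirrors the first-generation dw-mipi-dsi glue: probe the core with platform data early, then attach it to an encoder at component-bind time. A hedged sketch with hypothetical glue names:

static const struct dw_mipi_dsi2_phy_ops example_dsi2_phy_ops = {
	/* platform phy callbacks go here */
};

static int example_dsi2_probe(struct platform_device *pdev)
{
	static const struct dw_mipi_dsi2_plat_data pdata = {
		.max_data_lanes = 4,	/* assumed lane count */
		.phy_ops = &example_dsi2_phy_ops,
	};
	struct dw_mipi_dsi2 *dsi2 = dw_mipi_dsi2_probe(pdev, &pdata);

	if (IS_ERR(dsi2))
		return PTR_ERR(dsi2);

	platform_set_drvdata(pdev, dsi2);
	return 0;
}

/* At component-bind time: dw_mipi_dsi2_bind(dsi2, encoder); */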
diff --git a/include/drm/bridge/imx.h b/include/drm/bridge/imx.h
new file mode 100644
index 000000000000..b93f719fe0e7
--- /dev/null
+++ b/include/drm/bridge/imx.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ */
+
+#ifndef DRM_IMX_BRIDGE_H
+#define DRM_IMX_BRIDGE_H
+
+struct device;
+struct device_node;
+struct drm_bridge;
+
+struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
+ struct device_node *np,
+ int type);
+
+#endif
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index 9764d6eb5beb..31d7ed589233 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -29,6 +29,7 @@ enum samsung_dsim_type {
DSIM_TYPE_EXYNOS5410,
DSIM_TYPE_EXYNOS5422,
DSIM_TYPE_EXYNOS5433,
+ DSIM_TYPE_EXYNOS7870,
DSIM_TYPE_IMX8MM,
DSIM_TYPE_IMX8MP,
DSIM_TYPE_COUNT,
@@ -53,15 +54,29 @@ struct samsung_dsim_transfer {
struct samsung_dsim_driver_data {
const unsigned int *reg_ofs;
unsigned int plltmr_reg;
+ unsigned int has_legacy_status_reg:1;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int has_sfrctrl:1;
+ struct clk_bulk_data *clk_data;
unsigned int num_clks;
unsigned int min_freq;
unsigned int max_freq;
+ unsigned int wait_for_hdr_fifo;
unsigned int wait_for_reset;
unsigned int num_bits_resol;
+ unsigned int video_mode_bit;
+ unsigned int pll_stable_bit;
+ unsigned int esc_clken_bit;
+ unsigned int byte_clken_bit;
+ unsigned int tx_req_hsclk_bit;
+ unsigned int lane_esc_clk_bit;
+ unsigned int lane_esc_data_offset;
unsigned int pll_p_offset;
+ unsigned int pll_m_offset;
+ unsigned int pll_s_offset;
+ unsigned int main_vsa_offset;
const unsigned int *reg_values;
unsigned int pll_fin_min;
unsigned int pll_fin_max;
@@ -91,7 +106,6 @@ struct samsung_dsim {
void __iomem *reg_base;
struct phy *phy;
- struct clk **clks;
struct clk *pll_clk;
struct regulator_bulk_data supplies[2];
int irq;
diff --git a/include/drm/clients/drm_client_setup.h b/include/drm/clients/drm_client_setup.h
new file mode 100644
index 000000000000..46aab3fb46be
--- /dev/null
+++ b/include/drm/clients/drm_client_setup.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_CLIENT_SETUP_H
+#define DRM_CLIENT_SETUP_H
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_format_info;
+
+#if defined(CONFIG_DRM_CLIENT_SETUP)
+void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format);
+void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc);
+void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode);
+#else
+static inline void drm_client_setup(struct drm_device *dev,
+ const struct drm_format_info *format)
+{ }
+static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
+{ }
+static inline void drm_client_setup_with_color_mode(struct drm_device *dev,
+ unsigned int color_mode)
+{ }
+#endif
+
+#endif
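Drivers invoke this once after registration; a sketch of the probe tail, assuming a NULL format selects a driver default:

static int example_register_and_setup(struct drm_device *drm)
{
	int ret = drm_dev_register(drm, 0);

	if (ret)
		return ret;

	/* Bring up the generic fbdev/client layer with a default format. */
	drm_client_setup(drm, NULL);
	return 0;
}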
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 0b032faa8cf2..e4eebabab975 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -115,6 +115,7 @@
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
@@ -232,6 +233,8 @@
#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
# define DP_ALPM_CAP (1 << 0)
+# define DP_ALPM_PM_STATE_2A_SUPPORT (1 << 1) /* eDP 1.5 */
+# define DP_ALPM_AUX_LESS_CAP (1 << 2) /* eDP 1.5 */
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
@@ -255,6 +258,8 @@
# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT 3 /* DP 2.1a, in units of 2 MPixels/sec */
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK (0x1f << DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT)
#define DP_DSC_RC_BUF_SIZE 0x063
@@ -357,6 +362,7 @@
# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
# define DP_DSC_BITS_PER_PIXEL_1_1 0x4
+# define DP_DSC_BITS_PER_PIXEL_MASK 0x7
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
@@ -544,9 +550,28 @@
/* DFP Capability Extension */
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
-#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
-# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
-# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+#define DP_PANEL_REPLAY_CAP_SUPPORT 0x0b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
+# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+# define DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT (1 << 2) /* eDP 1.5 */
+
+#define DP_PANEL_REPLAY_CAP_SIZE 7
+
+#define DP_PANEL_REPLAY_CAP_CAPABILITY 0xb1
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT 1 /* DP 2.1a */
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK (3 << DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT)
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED 0x00
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY 0x01
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED 0x02
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED 0x03
+# define DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR (1 << 3)
+# define DP_PANEL_REPLAY_DSC_CRC_OF_MULTIPLE_SUS_SUPPORTED (1 << 4)
+# define DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_CAPABILITY_SUPPORTED (1 << 6)
+# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7)
+
+#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2
+#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
@@ -561,6 +586,7 @@
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
+# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
@@ -677,7 +703,8 @@
#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
# define DP_ALPM_ENABLE (1 << 0)
-# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) /* eDP 1.5 */
+# define DP_ALPM_MODE_AUX_LESS (1 << 2) /* eDP 1.5 */
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
@@ -686,6 +713,9 @@
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT 0x119 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED (1 << 0)
+
#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
# define DP_FEC_READY (1 << 0)
# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
@@ -728,10 +758,20 @@
#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */
# define DP_PANEL_REPLAY_ENABLE (1 << 0)
+# define DP_PANEL_REPLAY_VSC_SDP_CRC_EN (1 << 1) /* eDP 1.5 */
# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3)
# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4)
# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5)
# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6)
+# define DP_PANEL_REPLAY_ENABLE_SU_REGION_ET (1 << 7) /* DP 2.1 */
+
+#define PANEL_REPLAY_CONFIG2 0x1b1 /* eDP 1.5 */
+# define DP_PANEL_REPLAY_SINK_REFRESH_RATE_UNLOCK_GRANTED (1 << 0)
+# define DP_PANEL_REPLAY_CRC_VERIFICATION (1 << 1)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_EN (1 << 2)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_SHIFT 3
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_MASK (0xf << 3)
+# define DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE (1 << 7)
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
@@ -764,6 +804,7 @@
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */
#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
@@ -976,6 +1017,7 @@
# define DP_EDP_14 0x03
# define DP_EDP_14a 0x04 /* eDP 1.4a */
# define DP_EDP_14b 0x05 /* eDP 1.4b */
+# define DP_EDP_15 0x06 /* eDP 1.5 */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -1000,6 +1042,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
+# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -1148,6 +1191,15 @@
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST 0x2211 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_MASK 0xff
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS 0x00
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS 0x01
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS 0x02
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS 0x03
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS 0x04
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS 0x05
+
#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0)
@@ -1453,6 +1505,8 @@
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+# define DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK 0x7f
+# define DP_EXTENDED_WAKE_TIMEOUT_GRANT (1 << 7)
#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
@@ -1523,6 +1577,10 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
#define DP_FEC_BASE(dp_phy) \
@@ -1630,8 +1688,9 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
+#define DP_DSC_BRANCH_CAP_SIZE 3
#define EDP_PSR_RECEIVER_CAP_SIZE 2
-#define EDP_DISPLAY_CTL_CAP_SIZE 3
+#define EDP_DISPLAY_CTL_CAP_SIZE 5
#define DP_LTTPR_COMMON_CAP_SIZE 8
#define DP_LTTPR_PHY_CAP_SIZE 3
diff --git a/include/drm/display/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h
index 7ee482265087..7ac6969db935 100644
--- a/include/drm/display/drm_dp_dual_mode_helper.h
+++ b/include/drm/display/drm_dp_dual_mode_helper.h
@@ -117,5 +117,5 @@ const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
- enum drm_lspcon_mode reqd_mode);
+ enum drm_lspcon_mode reqd_mode, int time_out);
#endif
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 8bed890eec2c..df2f24b950e4 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -37,6 +37,7 @@ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]);
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
@@ -112,7 +113,8 @@ struct drm_dp_vsc_sdp {
* @target_rr: Target Refresh
* @duration_incr_ms: Successive frame duration increase
* @duration_decr_ms: Successive frame duration decrease
- * @operation_mode: Adaptive Sync Operation Mode
+ * @target_rr_divider: Target refresh rate divider
+ * @mode: Adaptive Sync Operation Mode
*/
struct drm_dp_as_sdp {
unsigned char sdp_type;
@@ -122,6 +124,7 @@ struct drm_dp_as_sdp {
int target_rr;
int duration_incr_ms;
int duration_decr_ms;
+ bool target_rr_divider;
enum operation_mode mode;
};
@@ -154,6 +157,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x13 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED);
+}
+
+static inline bool
drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x11 &&
@@ -201,6 +211,11 @@ u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
u8 dsc_bpc[3]);
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]);
static inline bool
drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
@@ -516,23 +531,93 @@ struct drm_dp_aux {
* @powered_down: If true then the remote endpoint is powered down.
*/
bool powered_down;
+
+ /**
+	 * @no_zero_sized: Set if the hardware cannot perform zero-sized transfers (NVIDIA)
+ */
+ bool no_zero_sized;
+
+ /**
+ * @dpcd_probe_disabled: If probing before a DPCD access is disabled.
+ */
+ bool dpcd_probe_disabled;
};
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable);
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
/**
+ * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
* drm_dp_dpcd_readb() - read a single byte from the DPCD
* @aux: DisplayPort AUX channel
* @offset: address of the register to read
* @valuep: location where the value of the register will be stored
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most of the cases you should be using
+ * drm_dp_dpcd_read_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
unsigned int offset, u8 *valuep)
@@ -547,7 +632,8 @@ static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
* @value: value to write to the register
*
* Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
+ * error code on failure. In most of the cases you should be using
+ * drm_dp_dpcd_write_byte() instead.
*/
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
unsigned int offset, u8 value)
@@ -555,6 +641,34 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
return drm_dp_dpcd_write(aux, offset, &value, 1);
}
+/**
+ * drm_dp_dpcd_read_byte() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read_data(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_write_byte() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write_data(aux, offset, &value, 1);
+}
+
int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -564,6 +678,13 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
enum drm_dp_phy dp_phy,
u8 link_status[DP_LINK_STATUS_SIZE]);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision);
+
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count);
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux);
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms);
bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
u8 real_edid_checksum);
@@ -623,9 +744,12 @@ int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable);
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count);
int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode);
void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
@@ -656,6 +780,8 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
@@ -707,6 +833,15 @@ enum drm_dp_quirk {
* requires enabling DSC.
*/
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
+ /**
+ * @DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT:
+ *
+ * The device doesn't support DSC decompression at the maximum DSC
+ * pixel throughput and compressed bpp it indicates via its DPCD DSC
+ * capabilities. The compressed bpp must be limited above a device
+ * specific DSC pixel throughput.
+ */
+ DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT,
};
/**
@@ -730,6 +865,7 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
* @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
* @aux_enable: Does the panel support the AUX enable cap?
* @aux_set: Does the panel support setting the brightness through AUX?
+ * @luminance_set: Does the panel support setting the brightness through AUX using luminance values?
*
* This structure contains various data about an eDP backlight, which can be populated by using
* drm_edp_backlight_init().
@@ -737,21 +873,23 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
struct drm_edp_backlight_info {
u8 pwmgen_bit_count;
u8 pwm_freq_pre_divider;
- u16 max;
+ u32 max;
bool lsb_reg_used : 1;
bool aux_enable : 1;
bool aux_set : 1;
+ bool luminance_set : 1;
};
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode);
+ u32 *current_level, u8 *current_mode, bool need_luminance);
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
@@ -869,5 +1007,7 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp);
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst);
#endif /* _DRM_DP_HELPER_H_ */
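The new zero-on-success helpers remove the usual "ret != size" dance around drm_dp_dpcd_read()/drm_dp_dpcd_write(). A minimal sketch using registers already defined in drm_dp.h:

static int example_read_sink_count(struct drm_dp_aux *aux, u8 *count)
{
	u8 val;
	int ret;

	ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &val);
	if (ret < 0)
		return ret;

	*count = DP_GET_SINK_COUNT(val);
	return 0;
}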
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index cfe096389d94..2cfe1d4bfc96 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -222,6 +222,13 @@ struct drm_dp_mst_branch {
*/
struct list_head destroy_next;
+ /**
+ * @rad: Relative Address of the MST branch.
+ * For &drm_dp_mst_topology_mgr.mst_primary, it's rad[8] are all 0,
+	 * For &drm_dp_mst_topology_mgr.mst_primary, all bytes of rad[] are 0,
+	 * unset and unused. For MST branches connected after mst_primary,
+	 * in each element of rad[] the nibbles are ordered with the most
+	 * significant 4 bits first and the least significant 4 bits second.
u8 rad[8];
u8 lct;
int num_ports;
@@ -244,18 +251,18 @@ struct drm_dp_mst_branch {
bool link_address_sent;
/* global unique identifier to identify branch devices */
- u8 guid[16];
+ guid_t guid;
};
struct drm_dp_nak_reply {
- u8 guid[16];
+ guid_t guid;
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
- u8 guid[16];
+ guid_t guid;
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
@@ -265,7 +272,7 @@ struct drm_dp_link_address_ack_reply {
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
- u8 peer_guid[16];
+ guid_t peer_guid;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
@@ -348,7 +355,7 @@ struct drm_dp_allocate_payload_ack_reply {
};
struct drm_dp_connection_status_notify {
- u8 guid[16];
+ guid_t guid;
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
@@ -425,7 +432,7 @@ struct drm_dp_query_payload {
struct drm_dp_resource_status_notify {
u8 port_number;
- u8 guid[16];
+ guid_t guid;
u16 available_pbn;
};
@@ -700,6 +707,13 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+	 * @reset_rx_state: The down request reply and up request message
+	 * receiver state must be reset after the topology manager has been
+	 * removed. Protected by @lock.
+ */
+ bool reset_rx_state : 1;
+
+ /**
* @payload_count: The number of currently active payloads in hardware. This value is only
* intended to be used internally by MST helpers for payload tracking, and is only safe to
* read/write from the atomic commit (not check) context.
@@ -860,8 +874,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count);
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp);
@@ -885,6 +898,8 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
+
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
index 913aa2071232..2c2b9033f60f 100644
--- a/include/drm/display/drm_dsc_helper.h
+++ b/include/drm/display/drm_dsc_helper.h
@@ -17,6 +17,8 @@ enum drm_dsc_params_type {
DRM_DSC_1_2_420,
};
+struct drm_printer;
+
void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
@@ -28,6 +30,7 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc);
u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc);
u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg);
+void drm_dsc_dump_config(struct drm_printer *p, int indent, const struct drm_dsc_config *cfg);
#endif /* _DRM_DSC_HELPER_H_ */
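The new dump helper writes through a drm_printer; a sketch assuming the standard info-printer constructor and a DSC config owned by the caller:

static void example_dump_dsc(struct drm_device *drm,
			     const struct drm_dsc_config *cfg)
{
	struct drm_printer p = drm_info_printer(drm->dev);

	drm_dsc_dump_config(&p, 1, cfg);	/* indent one level */
}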
diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h
new file mode 100644
index 000000000000..44d910bdc72d
--- /dev/null
+++ b/include/drm/display/drm_hdmi_audio_helper.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+#define DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_hdmi_audio_funcs;
+
+struct device;
+
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
+ bool spdif_playback,
+ int sound_dai_port);
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged);
+
+#endif
diff --git a/include/drm/display/drm_hdmi_cec_helper.h b/include/drm/display/drm_hdmi_cec_helper.h
new file mode 100644
index 000000000000..fd8f4d2f02c1
--- /dev/null
+++ b/include/drm/display/drm_hdmi_cec_helper.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_CEC_HELPER
+#define DRM_DISPLAY_HDMI_CEC_HELPER
+
+#include <linux/types.h>
+
+struct drm_connector;
+
+struct cec_msg;
+struct device;
+
+struct drm_connector_hdmi_cec_funcs {
+ /**
+ * @init: perform hardware-specific initialization before registering the CEC adapter
+ */
+ int (*init)(struct drm_connector *connector);
+
+ /**
+ * @uninit: perform hardware-specific teardown for the CEC adapter
+ */
+ void (*uninit)(struct drm_connector *connector);
+
+ /**
+ * @enable: enable or disable CEC adapter
+ */
+ int (*enable)(struct drm_connector *connector, bool enable);
+
+ /**
+ * @log_addr: set adapter's logical address, can be called multiple
+ * times if adapter supports several LAs
+ */
+ int (*log_addr)(struct drm_connector *connector, u8 logical_addr);
+
+ /**
+ * @transmit: start transmission of the specified CEC message
+ */
+ int (*transmit)(struct drm_connector *connector, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status);
+
+#if IS_ENABLED(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER)
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev);
+#else
+static inline int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#endif
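
For illustration, a minimal registration sketch under assumed names: the my_cec_* callbacks, struct my_device and its fields are hypothetical, and only a few hooks are stubbed; a real driver would talk to its CEC block in each of them.

#include <drm/display/drm_hdmi_cec_helper.h>
#include <media/cec.h>

struct my_device {				/* hypothetical driver struct */
	struct drm_connector connector;
	struct device *dev;
};

static int my_cec_enable(struct drm_connector *connector, bool enable)
{
	return 0;	/* hypothetical: toggle the CEC block here */
}

static int my_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
{
	return 0;	/* hypothetical: program the logical address here */
}

static int my_cec_transmit(struct drm_connector *connector, u8 attempts,
			   u32 signal_free_time, struct cec_msg *msg)
{
	return 0;	/* hypothetical: start the hardware transmit here */
}

static const struct drm_connector_hdmi_cec_funcs my_cec_funcs = {
	.enable   = my_cec_enable,
	.log_addr = my_cec_log_addr,
	.transmit = my_cec_transmit,
};

static int my_connector_init(struct my_device *mydev)
{
	/* Registers a CEC adapter whose lifetime follows the DRM device. */
	return drmm_connector_hdmi_cec_register(&mydev->connector,
						&my_cec_funcs, "my-cec",
						CEC_MAX_LOG_ADDRS, mydev->dev);
}

The driver's interrupt handler would then feed results back through drm_connector_hdmi_cec_received_msg() and one of the transmit-done helpers above.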
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
index 76d234826e22..09145c9ee9fc 100644
--- a/include/drm/display/drm_hdmi_helper.h
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -24,4 +24,14 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
+unsigned long long
+drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
+ unsigned int bpc, enum hdmi_colorspace fmt);
+
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts);
+
#endif
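
Since the ACR helper's role may not be obvious from the prototype alone: HDMI audio clock regeneration needs an N/CTS pair derived from the TMDS character rate and the audio sample rate, which is what drm_hdmi_acr_get_n_cts() computes. A sketch, with the register write left hypothetical:

static void my_setup_audio_clock(unsigned long long tmds_char_rate,
				 unsigned int sample_rate)
{
	unsigned int n, cts;

	/* E.g. 48000 Hz audio on a 148.5 MHz TMDS link. */
	drm_hdmi_acr_get_n_cts(tmds_char_rate, sample_rate, &n, &cts);

	/* Program the (hypothetical) ACR N/CTS registers with n and cts. */
}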
diff --git a/include/drm/display/drm_hdmi_state_helper.h b/include/drm/display/drm_hdmi_state_helper.h
new file mode 100644
index 000000000000..2349c0d0f00f
--- /dev/null
+++ b/include/drm/display/drm_hdmi_state_helper.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_STATE_HELPER_H_
+#define DRM_HDMI_STATE_HELPER_H_
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct hdmi_audio_infoframe;
+
+enum drm_connector_status;
+
+void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state);
+
+int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+
+int drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector *connector,
+ struct hdmi_audio_infoframe *frame);
+int drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector);
+int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status);
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector);
+
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
+
+#endif // DRM_HDMI_STATE_HELPER_H_
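
As a rough usage sketch (the my_connector_* names are hypothetical), a connector driver would typically route its .atomic_check and .mode_valid hooks through these helpers:

#include <drm/display/drm_hdmi_state_helper.h>

static int my_connector_atomic_check(struct drm_connector *connector,
				     struct drm_atomic_state *state)
{
	/* Validates TMDS character rate, output format and infoframe state. */
	return drm_atomic_helper_connector_hdmi_check(connector, state);
}

static enum drm_mode_status
my_connector_mode_valid(struct drm_connector *connector,
			const struct drm_display_mode *mode)
{
	return drm_hdmi_connector_mode_valid(connector, mode);
}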
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index f4d3784b1dce..20a665ec6f16 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -28,7 +28,8 @@
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek, \
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerator drivers
@@ -51,14 +52,12 @@
#if IS_ENABLED(CONFIG_DRM_ACCEL)
+extern struct xarray accel_minors_xa;
+
void accel_core_exit(void);
int accel_core_init(void);
-void accel_minor_remove(int index);
-int accel_minor_alloc(void);
-void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
-void accel_debugfs_init(struct drm_device *dev);
void accel_debugfs_register(struct drm_device *dev);
#else
@@ -73,27 +72,10 @@ static inline int __init accel_core_init(void)
return 0;
}
-static inline void accel_minor_remove(int index)
-{
-}
-
-static inline int accel_minor_alloc(void)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void accel_minor_replace(struct drm_minor *minor, int index)
-{
-}
-
static inline void accel_set_device_instance_params(struct device *kdev, int index)
{
}
-static inline void accel_debugfs_init(struct drm_device *dev)
-{
-}
-
static inline void accel_debugfs_register(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
deleted file mode 100644
index cbe33b49fd5d..000000000000
--- a/include/drm/drm_aperture.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-
-#ifndef _DRM_APERTURE_H_
-#define _DRM_APERTURE_H_
-
-#include <linux/types.h>
-
-struct drm_device;
-struct drm_driver;
-struct pci_dev;
-
-int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
- resource_size_t size);
-
-int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
- const struct drm_driver *req_driver);
-
-int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const struct drm_driver *req_driver);
-
-/**
- * drm_aperture_remove_framebuffers - remove all existing framebuffers
- * @req_driver: requesting DRM driver
- *
- * This function removes all graphics device drivers. Use this function on systems
- * that can have their framebuffer located anywhere in memory.
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise
- */
-static inline int
-drm_aperture_remove_framebuffers(const struct drm_driver *req_driver)
-{
- return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1,
- req_driver);
-}
-
-#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 4d7f4c5f2001..43783891d359 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
+#include <drm/drm_colorop.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -157,14 +158,51 @@ struct drm_crtc_commit {
bool abort_completion;
};
+struct __drm_colorops_state {
+ struct drm_colorop *ptr;
+ struct drm_colorop_state *state, *old_state, *new_state;
+};
+
struct __drm_planes_state {
struct drm_plane *ptr;
- struct drm_plane_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_plane_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_plane_state *state_to_destroy;
+
+ struct drm_plane_state *old_state, *new_state;
};
struct __drm_crtcs_state {
struct drm_crtc *ptr;
- struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_crtc_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_crtc_state *state_to_destroy;
+
+ struct drm_crtc_state *old_state, *new_state;
/**
* @commit:
@@ -182,7 +220,24 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
- struct drm_connector_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_connector_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_connector_state *state_to_destroy;
+
+ struct drm_connector_state *old_state, *new_state;
+
/**
* @out_fence_ptr:
*
@@ -342,7 +397,23 @@ struct drm_private_state {
struct __drm_private_objs_state {
struct drm_private_obj *ptr;
- struct drm_private_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_private_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_private_state *state_to_destroy;
+
+ struct drm_private_state *old_state, *new_state;
};
/**
@@ -357,6 +428,37 @@ struct __drm_private_objs_state {
* States are added to an atomic update by calling drm_atomic_get_crtc_state(),
* drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
* private state structures, drm_atomic_get_private_obj_state().
+ *
+ * NOTE: struct drm_atomic_state first started as a single collection of
+ * entity state pointers (drm_plane_state, drm_crtc_state, etc.).
+ *
+ * At atomic_check time, you could get the state about to be committed
+ * from drm_atomic_state, and the one currently running from the
+ * entity's state pointer (drm_crtc.state, for example). After the call
+ * to drm_atomic_helper_swap_state(), the entity's state pointer would
+ * contain the state previously checked, and the drm_atomic_state
+ * structure the old state.
+ *
+ * Over time, and in order to avoid confusion, drm_atomic_state has
+ * grown to have both the old state (i.e., the state we replace) and the
+ * new state (i.e., the state we want to apply). Those names are stable
+ * during the commit process, which makes it easier to reason about.
+ *
+ * You can still find some traces of that evolution through some hooks
+ * or callbacks taking a drm_atomic_state parameter with names like
+ * "old_state". This doesn't necessarily mean that the previous
+ * drm_atomic_state is passed, but rather that this parameter used to be
+ * the state collection being replaced after
+ * drm_atomic_helper_swap_state(), and the variable name was never
+ * updated.
+ *
+ * Some atomic operation implementations followed a similar process. We
+ * first started to pass the entity state only. However, it was pretty
+ * cumbersome for drivers, and especially CRTCs, to retrieve the states
+ * of other components. Thus, we switched to passing the whole
+ * drm_atomic_state as a parameter to those operations. Similarly, the
+ * transition isn't complete yet, and one might still find atomic
+ * operations taking a drm_atomic_state pointer, or a component state
+ * pointer. The former is the preferred form.
*/
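
To make the old/new naming above concrete, a minimal check-path sketch using the preferred accessors (the framebuffer comparison is just an illustrative example):

#include <drm/drm_atomic.h>

static int my_atomic_check_planes(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		/* old_plane_state: what is currently committed;
		 * new_plane_state: what this commit wants to apply. */
		if (old_plane_state->fb != new_plane_state->fb) {
			/* The plane is being flipped; validate the new
			 * framebuffer here. */
		}
	}

	return 0;
}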
struct drm_atomic_state {
/**
@@ -376,8 +478,27 @@ struct drm_atomic_state {
*
* Allow full modeset. This is used by the ATOMIC IOCTL handler to
* implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
- * never consult this flag, instead looking at the output of
- * drm_atomic_crtc_needs_modeset().
+ * generally not consult this flag, but instead look at the output of
+ * drm_atomic_crtc_needs_modeset(). The detailed rules are:
+ *
+ * - Drivers must not consult @allow_modeset in the atomic commit path.
+ * Use drm_atomic_crtc_needs_modeset() instead.
+ *
+ * - Drivers must consult @allow_modeset before adding unrelated struct
+ * drm_crtc_state to this commit by calling
+ * drm_atomic_get_crtc_state(). See also the warning in the
+ * documentation for that function.
+ *
+ * - Drivers must never change this flag, it is under the exclusive
+ * control of userspace.
+ *
+ * - Drivers may consult @allow_modeset in the atomic check path, if
+ * they have the choice between an optimal hardware configuration
+ * which requires a modeset, and a less optimal configuration which
+ * can be committed without a modeset. An example would be suboptimal
+ * scanout FIFO allocation resulting in increased idle power
+ * consumption. This allows userspace to avoid flickering and delays
+ * for the normal composition loop at reasonable cost.
*/
bool allow_modeset : 1;
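
A sketch of the check-path use permitted by the last rule above, assuming a hypothetical FIFO-rebalancing driver:

static int my_crtc_atomic_check(struct drm_crtc *crtc,
				struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (state->allow_modeset) {
		/* Userspace accepts a full modeset, so the (hypothetical)
		 * optimal FIFO layout may be picked, at the cost of
		 * flagging a modeset. */
		crtc_state->mode_changed = true;
	}

	/* Otherwise keep the current, possibly suboptimal, allocation. */
	return 0;
}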
/**
@@ -409,6 +530,40 @@ struct drm_atomic_state {
bool duplicated : 1;
/**
+ * @checked:
+ *
+ * Indicates the state has been checked and thus must no longer
+ * be mutated. For internal use only, do not consult from drivers.
+ */
+ bool checked : 1;
+
+ /**
+ * @plane_color_pipeline:
+ *
+ * Indicates whether this atomic state originated with a client that
+ * set the DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE.
+ *
+ * Drivers and helper functions should use this to ignore legacy
+ * properties that are incompatible with the drm_plane COLOR_PIPELINE
+ * behavior, such as:
+ *
+ * - COLOR_RANGE
+ * - COLOR_ENCODING
+ *
+ * or any other driver-specific properties that might affect pixel
+ * values.
+ */
+ bool plane_color_pipeline : 1;
+
+ /**
+ * @colorops:
+ *
+ * Pointer to array of @drm_colorop and @drm_colorop_state part of this
+ * update.
+ */
+ struct __drm_colorops_state *colorops;
+
+ /**
* @planes:
*
* Pointer to array of @drm_plane and @drm_plane_state part of this
@@ -460,7 +615,7 @@ struct drm_atomic_state {
*
* Used for signaling unbound planes/connectors.
* When a connector or plane is not bound to any CRTC, it's still important
- * to preserve linearity to prevent the atomic states from being freed to early.
+ * to preserve linearity to prevent the atomic states from being freed too early.
*
* This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
@@ -549,6 +704,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
+struct drm_colorop_state *
+drm_atomic_get_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
@@ -575,6 +733,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_connector *
drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_crtc *
drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
@@ -584,24 +745,6 @@ drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
- * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
- * @state: global atomic state object
- * @crtc: CRTC to grab
- *
- * This function returns the CRTC state for the given CRTC, or NULL
- * if the CRTC is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_crtc_state or
- * @drm_atomic_get_new_crtc_state should be used instead.
- */
-static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- return state->crtcs[drm_crtc_index(crtc)].state;
-}
-
-/**
* drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
* @state: global atomic state object
* @crtc: CRTC to grab
@@ -631,24 +774,6 @@ drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_existing_plane_state - get plane state, if it exists
- * @state: global atomic state object
- * @plane: plane to grab
- *
- * This function returns the plane state for the given plane, or NULL
- * if the plane is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_plane_state or
- * @drm_atomic_get_new_plane_state should be used instead.
- */
-static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state,
- struct drm_plane *plane)
-{
- return state->planes[drm_plane_index(plane)].state;
-}
-
-/**
* drm_atomic_get_old_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
@@ -679,26 +804,33 @@ drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_existing_connector_state - get connector state, if it exists
+ * drm_atomic_get_old_colorop_state - get colorop state, if it exists
* @state: global atomic state object
- * @connector: connector to grab
- *
- * This function returns the connector state for the given connector,
- * or NULL if the connector is not part of the global atomic state.
+ * @colorop: colorop to grab
*
- * This function is deprecated, @drm_atomic_get_old_connector_state or
- * @drm_atomic_get_new_connector_state should be used instead.
+ * This function returns the old colorop state for the given colorop, or
+ * NULL if the colorop is not part of the global atomic state.
*/
-static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state,
- struct drm_connector *connector)
+static inline struct drm_colorop_state *
+drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
{
- int index = drm_connector_index(connector);
-
- if (index >= state->num_connector)
- return NULL;
+ return state->colorops[drm_colorop_index(colorop)].old_state;
+}
- return state->connectors[index].state;
+/**
+ * drm_atomic_get_new_colorop_state - get colorop state, if it exists
+ * @state: global atomic state object
+ * @colorop: colorop to grab
+ *
+ * This function returns the new colorop state for the given colorop, or
+ * NULL if the colorop is not part of the global atomic state.
+ */
+static inline struct drm_colorop_state *
+drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
+{
+ return state->colorops[drm_colorop_index(colorop)].new_state;
}
/**
@@ -746,11 +878,11 @@ drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
* @state: global atomic state object
* @plane: plane to grab
*
- * This function returns the plane state for the given plane, either from
- * @state, or if the plane isn't part of the atomic state update, from @plane.
- * This is useful in atomic check callbacks, when drivers need to peek at, but
- * not change, state of other planes, since it avoids threading an error code
- * back up the call chain.
+ * This function returns the plane state for the given plane, either the
+ * new plane state from @state, or if the plane isn't part of the atomic
+ * state update, from @plane. This is useful in atomic check callbacks,
+ * when drivers need to peek at, but not change, state of other planes,
+ * since it avoids threading an error code back up the call chain.
*
* WARNING:
*
@@ -771,9 +903,15 @@ static inline const struct drm_plane_state *
__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
- if (state->planes[drm_plane_index(plane)].state)
- return state->planes[drm_plane_index(plane)].state;
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
+ if (plane_state)
+ return plane_state;
+ /*
+ * If the plane isn't part of the state, fall back to the currently active one.
+ */
return plane->state;
}
@@ -786,6 +924,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_colorops(struct drm_atomic_state *state,
+ struct drm_plane *plane);
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
@@ -926,6 +1067,49 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
+ * for_each_oldnew_colorop_in_state - iterate over all colorops in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @colorop: &struct drm_colorop iteration cursor
+ * @old_colorop_state: &struct drm_colorop_state iteration cursor for the old state
+ * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all colorops in an atomic update, tracking both old and
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_colorop_in_state(__state, colorop, old_colorop_state, \
+ new_colorop_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_colorop; \
+ (__i)++) \
+ for_each_if ((__state)->colorops[__i].ptr && \
+ ((colorop) = (__state)->colorops[__i].ptr, \
+ (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \
+ (old_colorop_state) = (__state)->colorops[__i].old_state,\
+ (new_colorop_state) = (__state)->colorops[__i].new_state, 1))
+
+/**
+ * for_each_new_colorop_in_state - iterate over all colorops in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @colorop: &struct drm_colorop iteration cursor
+ * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all colorops in an atomic update, tracking only the
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_new_colorop_in_state(__state, colorop, new_colorop_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_colorop; \
+ (__i)++) \
+ for_each_if ((__state)->colorops[__i].ptr && \
+ ((colorop) = (__state)->colorops[__i].ptr, \
+ (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \
+ (new_colorop_state) = (__state)->colorops[__i].new_state, 1))
+
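
For example, an atomic check implementation could walk the colorops added to a commit like this (the hardware-limit check is hypothetical):

static int my_check_colorops(struct drm_atomic_state *state)
{
	struct drm_colorop_state *new_colorop_state;
	struct drm_colorop *colorop;
	int i;

	for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
		/* A hypothetical hardware-limit check would go here,
		 * e.g. rejecting curves the pipeline cannot program. */
	}

	return 0;
}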
+/**
* for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9aa0a05aa072..53382fe93537 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -139,6 +139,8 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
index 4c6d39d7bdb2..436315523326 100644
--- a/include/drm/drm_atomic_uapi.h
+++ b/include/drm/drm_atomic_uapi.h
@@ -37,6 +37,7 @@ struct drm_crtc;
struct drm_connector_state;
struct dma_fence;
struct drm_framebuffer;
+struct drm_colorop;
int __must_check
drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
@@ -49,6 +50,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
+void drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state,
+ struct drm_colorop *colorop);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
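
A sketch of how drm_atomic_set_colorop_for_plane() slots into an update: the caller acquires the plane state first, then points it at the head of a colorop chain (the pipeline lookup itself is left hypothetical):

static int my_set_pipeline(struct drm_atomic_state *state,
			   struct drm_plane *plane,
			   struct drm_colorop *first_colorop)
{
	struct drm_plane_state *plane_state =
		drm_atomic_get_plane_state(state, plane);

	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* Associates the colorop chain with this plane update. */
	drm_atomic_set_colorop_for_plane(plane_state, first_colorop);

	return 0;
}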
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 4baca0d9107b..0ff7ab4aa868 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,6 +23,7 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
+#include <linux/cleanup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -32,6 +33,7 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct cec_msg;
struct device_node;
struct drm_bridge;
@@ -41,6 +43,8 @@ struct drm_display_info;
struct drm_minor;
struct drm_panel;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
/**
@@ -71,10 +75,20 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge,
+ int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
/**
+ * @destroy:
+ *
+ * This callback is invoked when the bridge is about to be
+ * deallocated.
+ *
+ * The @destroy callback is optional.
+ */
+ void (*destroy)(struct drm_bridge *bridge);
+
+ /**
* @detach:
*
* This callback is invoked whenever our bridge is being detached from a
@@ -162,17 +176,33 @@ struct drm_bridge_funcs {
/**
* @disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @disable vfunc. If the preceding element is a &drm_encoder
- * it's called right before the &drm_encoder_helper_funcs.disable,
- * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
- * hook.
+ * The @disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
+ *
* The @disable callback is optional.
*
* NOTE:
@@ -185,17 +215,34 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @post_disable function. If the preceding element is a &drm_encoder
- * it's called right after the encoder's
- * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
- * or &drm_encoder_helper_funcs.dpms hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @post_disable callback is optional.
*
@@ -238,18 +285,30 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @pre_enable function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @pre_enable callback is optional.
*
@@ -263,19 +322,31 @@ struct drm_bridge_funcs {
/**
* @enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @enable function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
+ * The @enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
+ *
* The @enable callback is optional.
*
* NOTE:
@@ -288,78 +359,138 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
- * element is a &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @atomic_pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @atomic_enable or @enable function. If the preceding element
- * is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
+ * The @atomic_enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
+ *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_disable or @disable vfunc. If the preceding element
- * is a &drm_encoder it's called right before the
- * &drm_encoder_helper_funcs.atomic_disable hook.
+ * The @atomic_disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
+ *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @atomic_post_disable or @post_disable function. If the preceding
- * element is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_disable hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @atomic_post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_duplicate_state:
@@ -530,7 +661,8 @@ struct drm_bridge_funcs {
*
* drm_connector_status indicating the bridge output status.
*/
- enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @get_modes:
@@ -631,6 +763,228 @@ struct drm_bridge_funcs {
void (*hpd_disable)(struct drm_bridge *bridge);
/**
+ * @hdmi_tmds_char_rate_valid:
+ *
+ * Check whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * This callback is optional and should only be implemented by the
+ * bridges that take part in the HDMI connector implementation. Bridges
+ * that implement it shall set the DRM_BRIDGE_OP_HDMI flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*hdmi_tmds_char_rate_valid)(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
+ /**
+ * @hdmi_clear_infoframe:
+ *
+ * This callback clears the infoframes in the hardware during commit.
+ * It will be called multiple times, once for every disabled infoframe
+ * type.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_clear_infoframe)(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type);
+
+ /**
+ * @hdmi_write_infoframe:
+ *
+ * Program the infoframe into the hardware. It will be called multiple
+ * times, once for every updated infoframe type.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_infoframe)(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_audio_startup:
+ *
+ * Called when ASoC starts an audio stream setup.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_audio_prepare:
+ * Configures the HDMI encoder for the audio stream. Can be called
+ * multiple times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @hdmi_audio_shutdown:
+ *
+ * Shut down the audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*hdmi_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_audio_mute_stream:
+ *
+ * Mute/unmute HDMI audio stream.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ bool enable, int direction);
+
+ /**
+ * @hdmi_cec_init:
+ *
+ * Initialize CEC part of the bridge.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_init)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_cec_enable:
+ *
+ * Enable or disable the CEC adapter inside the bridge.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable);
+
+ /**
+ * @hdmi_cec_log_addr:
+ *
+ * Set the logical address of the CEC adapter inside the bridge.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr);
+
+ /**
+ * @hdmi_cec_transmit:
+ *
+ * Transmit the message using the CEC adapter inside the bridge.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+
+ /**
+ * @dp_audio_startup:
+ *
+ * Called when ASoC starts a DisplayPort audio stream setup.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @dp_audio_prepare:
+ * Configures the DisplayPort audio stream. Can be called multiple
+ * times for each setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @dp_audio_shutdown:
+ *
+ * Shut down the DisplayPort audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*dp_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @dp_audio_mute_stream:
+ *
+ * Mute/unmute DisplayPort audio stream.
+ *
+ * This callback is optional, it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ bool enable, int direction);
+
+ /**
* @debugfs_init:
*
* Allows bridges to create bridge-specific debugfs files.
@@ -705,6 +1059,52 @@ enum drm_bridge_ops {
* this flag shall implement the &drm_bridge_funcs->get_modes callback.
*/
DRM_BRIDGE_OP_MODES = BIT(3),
+ /**
+ * @DRM_BRIDGE_OP_HDMI: The bridge provides HDMI connector operations,
+ * including infoframe support. Bridges that set this flag must
+ * implement the &drm_bridge_funcs->hdmi_write_infoframe callback.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers.
+ */
+ DRM_BRIDGE_OP_HDMI = BIT(4),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->hdmi_audio_prepare and
+ * &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5),
+ /**
+ * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->dp_audio_prepare and
+ * &drm_bridge_funcs->dp_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER: The bridge requires CEC notifier
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires CEC adapter
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
};
/**
@@ -731,6 +1131,18 @@ struct drm_bridge {
const struct drm_bridge_timings *timings;
/** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+ * &struct drm_bridge.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this bridge.
+ */
+ struct kref refcount;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -747,6 +1159,11 @@ struct drm_bridge {
*/
bool interlace_allowed;
/**
+ * @ycbcr_420_allowed: Indicate that the bridge can handle YCbCr 420
+ * output.
+ */
+ bool ycbcr_420_allowed;
+ /**
* @pre_enable_prev_first: The bridge requires that the prev
* bridge @pre_enable function is called before its @pre_enable,
* and conversely for post_disable. This is most frequently a
@@ -755,9 +1172,89 @@ struct drm_bridge {
*/
bool pre_enable_prev_first;
/**
+ * @support_hdcp: Indicate that the bridge supports HDCP.
+ */
+ bool support_hdcp;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
+
+ /**
+ * @vendor: Vendor of the product to be used for the SPD InfoFrame
+ * generation. This is required if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ const char *vendor;
+
+ /**
+ * @product: Name of the product to be used for the SPD InfoFrame
+ * generation. This is required if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ const char *product;
+
+ /**
+ * @supported_formats: Bitmask of @hdmi_colorspace listing supported
+ * output formats. This is only relevant if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ unsigned int supported_formats;
+
+ /**
+ * @max_bpc: Maximum bits per color component the HDMI bridge supports.
+ * Allowed values are 8, 10 and 12. This is only relevant if
+ * @DRM_BRIDGE_OP_HDMI is set.
+ */
+ unsigned int max_bpc;
+
+ /**
+ * @hdmi_cec_dev: device to be used as a containing device for CEC
+ * functions.
+ */
+ struct device *hdmi_cec_dev;
+
+ /**
+ * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
+ * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
+ */
+ struct device *hdmi_audio_dev;
+
+ /**
+ * @hdmi_audio_max_i2s_playback_channels: maximum number of I2S playback
+ * channels for bridges setting @DRM_BRIDGE_OP_HDMI_AUDIO or
+ * @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ int hdmi_audio_max_i2s_playback_channels;
+
+ /**
+ * @hdmi_audio_i2s_formats: supported I2S formats, optional. The
+ * default is to allow all formats supported by the corresponding I2S
+ * bus driver. This is only used for bridges setting
+ * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ u64 hdmi_audio_i2s_formats;
+
+ /**
+ * @hdmi_audio_spdif_playback: set if this bridge has an S/PDIF playback
+ * port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ unsigned int hdmi_audio_spdif_playback : 1;
+
+ /**
+ * @hdmi_audio_dai_port: sound DAI port for either @DRM_BRIDGE_OP_HDMI_AUDIO
+ * or @DRM_BRIDGE_OP_DP_AUDIO; -1 if it is not used.
+ */
+ int hdmi_audio_dai_port;
+
+ /**
+ * @hdmi_cec_adapter_name: the name of the adapter to register
+ */
+ const char *hdmi_cec_adapter_name;
+
+ /**
+ * @hdmi_cec_available_las: number of logical addresses, CEC_MAX_LOG_ADDRS if unset
+ */
+ u8 hdmi_cec_available_las;
+
/** private: */
/**
* @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
@@ -781,6 +1278,33 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
+void drm_bridge_put(struct drm_bridge *bridge);
+
+/* Cleanup action for use with __free() */
+DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs);
+
+/**
+ * devm_drm_bridge_alloc - Allocate and initialize a bridge
+ * @dev: struct device of the bridge device
+ * @type: the type of the struct which contains struct &drm_bridge
+ * @member: the name of the &drm_bridge within @type
+ * @funcs: callbacks for this bridge
+ *
+ * The reference count of the returned bridge is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_bridge_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to new bridge, or ERR_PTR on failure.
+ */
+#define devm_drm_bridge_alloc(dev, type, member, funcs) \
+ ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs))
+
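
The intended usage pattern, sketched with hypothetical my_bridge/my_bridge_funcs names:

struct my_bridge {				/* hypothetical driver struct */
	struct drm_bridge bridge;
	/* driver-private fields */
};

static const struct drm_bridge_funcs my_bridge_funcs = {
	/* hypothetical attach/enable/disable hooks */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_bridge *mybr;

	mybr = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				     &my_bridge_funcs);
	if (IS_ERR(mybr))
		return PTR_ERR(mybr);

	drm_bridge_add(&mybr->bridge);
	return 0;
}

The initial reference is dropped automatically on unbind, so no explicit drm_bridge_put() is needed in the error or remove paths.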
void drm_bridge_add(struct drm_bridge *bridge);
int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
@@ -797,10 +1321,54 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
}
#endif
+static inline bool drm_bridge_is_last(struct drm_bridge *bridge)
+{
+ return list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain);
+}
+
+/**
+ * drm_bridge_get_current_state() - Get the current bridge state
+ * @bridge: bridge object
+ *
+ * This function must be called with the modeset lock held.
+ *
+ * RETURNS:
+ *
+ * The current bridge state, or NULL if there is none.
+ */
+static inline struct drm_bridge_state *
+drm_bridge_get_current_state(struct drm_bridge *bridge)
+{
+ if (!bridge)
+ return NULL;
+
+ /*
+ * Only atomic bridges will have bridge->base initialized by
+ * drm_atomic_private_obj_init(), so we need to make sure we're
+ * working with one before we try to use the lock.
+ */
+ if (!bridge->funcs || !bridge->funcs->atomic_reset)
+ return NULL;
+
+ drm_modeset_lock_assert_held(&bridge->base.lock);
+
+ if (!bridge->base.state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(bridge->base.state);
+}
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the next bridge in the chain after @bridge, or NULL if @bridge is the last.
*/
@@ -810,13 +1378,20 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge)
if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_next_entry(bridge, chain_node);
+ return drm_bridge_get(list_next_entry(bridge, chain_node));
}
/**
* drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the previous bridge in the chain, or NULL if @bridge is the first.
*/
@@ -826,13 +1401,16 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_prev_entry(bridge, chain_node);
+ return drm_bridge_get(list_prev_entry(bridge, chain_node));
}
/**
* drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
* @encoder: encoder object
*
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the first bridge in the chain, or NULL if @encoder has no bridge attached
* to it.
@@ -840,24 +1418,85 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
static inline struct drm_bridge *
drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
{
- return list_first_entry_or_null(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
+}
+
+/**
+ * drm_bridge_chain_get_last_bridge() - Get the last bridge in the chain
+ * @encoder: encoder object
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
+ * RETURNS:
+ * the last bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder)
+{
+ return drm_bridge_get(list_last_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
}
/**
- * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain
+ * and put the previous
+ * @bridge: bridge object
+ *
+ * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge.
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge)
+{
+ struct drm_bridge *next = drm_bridge_get_next_bridge(bridge);
+
+ drm_bridge_put(bridge);
+
+ return next;
+}
+
+/**
+ * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached
+ * to an encoder
* @encoder: the encoder to iterate bridges on
* @bridge: a bridge pointer updated to point to the current bridge at each
* iteration
*
* Iterate over all bridges present in the bridge chain attached to @encoder.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
*/
-#define drm_for_each_bridge_in_chain(encoder, bridge) \
- list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_chain_get_first_bridge(encoder); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
+
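
For instance, the scoped form keeps the per-iteration reference balanced even when leaving the loop early:

static bool my_chain_has_hdmi(struct drm_encoder *encoder)
{
	/* The reference taken on each bridge is dropped automatically,
	 * including on the early return below. */
	drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
		if (bridge->ops & DRM_BRIDGE_OP_HDMI)
			return true;
	}

	return false;
}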
+/**
+ * drm_for_each_bridge_in_chain_from - iterate over all bridges starting
+ * from the given bridge
+ * @first_bridge: the bridge to start from
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges in the encoder chain starting from
+ * @first_bridge, inclusive.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
+ */
+#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_get(first_bridge); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
-bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
@@ -886,7 +1525,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts);
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
@@ -951,4 +1591,9 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge);
+
+void drm_bridge_debugfs_params(struct dentry *root);
+void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
+
#endif
diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h
new file mode 100644
index 000000000000..6c35b479ec2a
--- /dev/null
+++ b/include/drm/drm_bridge_helper.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifndef __DRM_BRIDGE_HELPER_H_
+#define __DRM_BRIDGE_HELPER_H_
+
+struct drm_bridge;
+struct drm_modeset_acquire_ctx;
+
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif // __DRM_BRIDGE_HELPER_H_
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 82570f77e817..b909fa8f810a 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -10,23 +10,16 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/rbtree.h>
-#include <drm/drm_print.h>
-
-#define range_overflows(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ >= max__ || size__ > max__ - start__; \
-})
+struct drm_printer;
#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
#define DRM_BUDDY_CLEARED BIT(4)
+#define DRM_BUDDY_TRIM_DISABLE BIT(5)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -52,12 +45,16 @@ struct drm_buddy_block {
* a list, if so desired. As soon as the block is freed with
* drm_buddy_free* ownership is given back to the mm.
*/
- struct list_head link;
+ union {
+ struct rb_node rb;
+ struct list_head link;
+ };
+
struct list_head tmp_link;
};
-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
/*
* Binary Buddy System.
@@ -67,7 +64,7 @@ struct drm_buddy_block {
*/
struct drm_buddy {
/* Maintain a free list for each order. */
- struct list_head *free_list;
+ struct rb_root **free_trees;
/*
* Maintain explicit binary tree(s) to track the allocation of the
@@ -85,7 +82,7 @@ struct drm_buddy {
unsigned int n_roots;
unsigned int max_order;
- /* Must be at least PAGE_SIZE */
+ /* Must be at least SZ_4K */
u64 chunk_size;
u64 size;
u64 avail;
@@ -93,7 +90,7 @@ struct drm_buddy {
};
static inline u64
-drm_buddy_block_offset(struct drm_buddy_block *block)
+drm_buddy_block_offset(const struct drm_buddy_block *block)
{
return block->header & DRM_BUDDY_HEADER_OFFSET;
}
@@ -155,9 +152,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
unsigned long flags);
int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 *start,
u64 new_size,
struct list_head *blocks);
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
void drm_buddy_free_list(struct drm_buddy *mm,
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index bc0e66f9c425..c972a8a3385b 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -29,6 +29,16 @@ struct drm_client_funcs {
struct module *owner;
/**
+ * @free:
+ *
+ * Called when the client gets unregistered. Implementations should
+ * release all client-specific data and free the memory.
+ *
+ * This callback is optional.
+ */
+ void (*free)(struct drm_client_dev *client);
+
+ /**
* @unregister:
*
* Called when &drm_device is unregistered. The client should respond by
@@ -47,12 +57,14 @@ struct drm_client_funcs {
*
* Note that the core does not guarantee exclusion against concurrent
* drm_open(). Clients need to ensure this themselves, for example by
- * using drm_master_internal_acquire() and
- * drm_master_internal_release().
+ * using drm_master_internal_acquire() and drm_master_internal_release().
+ *
+ * If the caller passes force, the client should ignore any present DRM
+ * master and restore the display anyway.
*
* This callback is optional.
*/
- int (*restore)(struct drm_client_dev *client);
+ int (*restore)(struct drm_client_dev *client, bool force);
/**
* @hotplug:
@@ -63,6 +75,24 @@ struct drm_client_funcs {
* This callback is optional.
*/
int (*hotplug)(struct drm_client_dev *client);
+
+ /**
+ * @suspend:
+ *
+ * Called when suspending the device.
+ *
+ * This callback is optional.
+ */
+ int (*suspend)(struct drm_client_dev *client);
+
+ /**
+ * @resume:
+ *
+ * Called when resuming the device from suspend.
+ *
+ * This callback is optional.
+ */
+ int (*resume)(struct drm_client_dev *client);
};
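/*
 * Editor's example (not part of this patch): a client wiring up the new
 * callbacks. The foo_client_* helpers are hypothetical; note that the
 * @restore callback now takes the force flag:
 */
static const struct drm_client_funcs foo_client_funcs = {
	.owner		= THIS_MODULE,
	.free		= foo_client_free,
	.restore	= foo_client_restore,
	.hotplug	= foo_client_hotplug,
	.suspend	= foo_client_suspend,
	.resume		= foo_client_resume,
};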
/**
@@ -108,6 +138,21 @@ struct drm_client_dev {
struct drm_mode_set *modesets;
/**
+ * @suspended:
+ *
+ * The client has been suspended.
+ */
+ bool suspended;
+
+ /**
+ * @hotplug_pending:
+ *
+ * A hotplug event has been received while the client was suspended.
+ * Try again on resume.
+ */
+ bool hotplug_pending;
+
+ /**
* @hotplug_failed:
*
* Set by client hotplug helpers if the hotplugging failed
@@ -121,10 +166,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
void drm_client_release(struct drm_client_dev *client);
void drm_client_register(struct drm_client_dev *client);
-void drm_client_dev_unregister(struct drm_device *dev);
-void drm_client_dev_hotplug(struct drm_device *dev);
-void drm_client_dev_restore(struct drm_device *dev);
-
/**
* struct drm_client_buffer - DRM client buffer
*/
@@ -135,19 +176,11 @@ struct drm_client_buffer {
struct drm_client_dev *client;
/**
- * @pitch: Buffer pitch
- */
- u32 pitch;
-
- /**
* @gem: GEM object backing this buffer
*
- * FIXME: The dependency on GEM here isn't required, we could
- * convert the driver handle to a dma-buf instead and use the
- * backend-agnostic dma-buf vmap support instead. This would
- * require that the handle2fd prime ioctl is reworked to pull the
- * fd_install step out of the driver backend hooks, to make that
- * final step optional for internal users.
+ * FIXME: The DRM framebuffer holds a reference on its GEM
+ * buffer objects. Do not use this field in new code and
+ * update existing users.
*/
struct drm_gem_object *gem;
@@ -163,9 +196,9 @@ struct drm_client_buffer {
};
struct drm_client_buffer *
-drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
-void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
-int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
+drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_buffer_delete(struct drm_client_buffer *buffer);
+int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
struct iosys_map *map_copy);
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer);
@@ -181,6 +214,7 @@ int drm_client_modeset_check(struct drm_client_dev *client);
int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index);
/**
* drm_client_for_each_modeset() - Iterate over client modesets
@@ -205,6 +239,4 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-void drm_client_debugfs_init(struct drm_device *dev);
-
#endif
diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h
new file mode 100644
index 000000000000..79369c755bc9
--- /dev/null
+++ b/include/drm/drm_client_event.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef _DRM_CLIENT_EVENT_H_
+#define _DRM_CLIENT_EVENT_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+
+#if defined(CONFIG_DRM_CLIENT)
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev, bool force);
+void drm_client_dev_suspend(struct drm_device *dev);
+void drm_client_dev_resume(struct drm_device *dev);
+#else
+static inline void drm_client_dev_unregister(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_hotplug(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_restore(struct drm_device *dev, bool force)
+{ }
+static inline void drm_client_dev_suspend(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_resume(struct drm_device *dev)
+{ }
+#endif
+
+#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index ed81741036d7..5140691f476a 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -50,6 +50,22 @@ static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
(1 << 16) - 1);
}
+/**
+ * drm_color_lut32_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract U0.bit_precision from a U0.32 LUT value.
+ */
+static inline u32 drm_color_lut32_extract(u32 user_input, int bit_precision)
+{
+ u64 max = (bit_precision >= 64) ? ~0ULL : (1ULL << bit_precision) - 1;
+
+ return DIV_ROUND_CLOSEST_ULL((u64)user_input * max,
+ (1ULL << 32) - 1);
+}
+
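/*
 * Editor's example (not part of this patch): mapping a U0.32 LUT entry
 * to a hypothetical 10-bit hardware LUT. 0xffffffff maps to 0x3ff and
 * 0x80000000 rounds to 0x200:
 */
static u32 lut32_to_hw10(u32 user_input)
{
	return drm_color_lut32_extract(user_input, 10);
}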
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
@@ -72,6 +88,18 @@ static inline int drm_color_lut_size(const struct drm_property_blob *blob)
return blob->length / sizeof(struct drm_color_lut);
}
+/**
+ * drm_color_lut32_size - calculate the number of entries in the extended LUT
+ * @blob: blob containing the LUT
+ *
+ * Returns:
+ * The number of entries in the color LUT stored in @blob.
+ */
+static inline int drm_color_lut32_size(const struct drm_property_blob *blob)
+{
+ return blob->length / sizeof(struct drm_color_lut32);
+}
+
enum drm_color_encoding {
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_BT709,
@@ -118,4 +146,33 @@ enum drm_color_lut_tests {
};
int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
+
+/*
+ * Gamma-LUT programming
+ */
+
+typedef void (*drm_crtc_set_lut_func)(struct drm_crtc *, unsigned int, u16, u16, u16);
+
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+
+/*
+ * Color-LUT programming
+ */
+
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette);
+
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+
+int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests);
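/*
 * Editor's example (not part of this patch): a driver supplies a
 * drm_crtc_set_lut_func callback that writes one LUT entry to hardware
 * and lets the helpers walk or synthesize the gamma ramp. foo_device,
 * FOO_GAMMA_BASE and the register layout are hypothetical:
 */
static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
			  u16 r, u16 g, u16 b)
{
	struct foo_device *fdev = to_foo_device(crtc->dev);

	/* Truncate each 16-bit channel to 8 bits and pack as XRGB. */
	writel((r >> 8) << 16 | (g >> 8) << 8 | (b >> 8),
	       fdev->mmio + FOO_GAMMA_BASE + index * 4);
}

static void foo_load_gamma(struct drm_crtc *crtc,
			   const struct drm_crtc_state *crtc_state)
{
	if (crtc_state->gamma_lut)
		drm_crtc_load_gamma_888(crtc, crtc_state->gamma_lut->data,
					foo_set_gamma);
	else
		drm_crtc_fill_gamma_888(crtc, foo_set_gamma);
}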
#endif
diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h
new file mode 100644
index 000000000000..a3a32f9f918c
--- /dev/null
+++ b/include/drm/drm_colorop.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DRM_COLOROP_H__
+#define __DRM_COLOROP_H__
+
+#include <drm/drm_mode_object.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+
+/* DRM colorop flags */
+#define DRM_COLOROP_FLAG_ALLOW_BYPASS (1<<0) /* Allow bypass on the drm_colorop */
+
+/**
+ * enum drm_colorop_curve_1d_type - type of 1D curve
+ *
+ * Describes a 1D curve to be applied by the DRM_COLOROP_1D_CURVE colorop.
+ */
+enum drm_colorop_curve_1d_type {
+ /**
+ * @DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ *
+ * enum string "sRGB EOTF"
+ *
+ * sRGB piece-wise electro-optical transfer function. Transfer
+ * characteristics as defined by IEC 61966-2-1 sRGB. Equivalent
+ * to H.273 TransferCharacteristics code point 13 with
+ * MatrixCoefficients set to 0.
+ */
+ DRM_COLOROP_1D_CURVE_SRGB_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ *
+ * enum string "sRGB Inverse EOTF"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_SRGB_EOTF
+ */
+ DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ *
+ * enum string "PQ 125 EOTF"
+ *
+ * The PQ transfer function, scaled by 125.0f, so that 10,000
+ * nits correspond to 125.0f.
+ *
+ * Transfer characteristics of the PQ function as defined by
+ * SMPTE ST 2084 (2014) for 10-, 12-, 14-, and 16-bit systems
+ * and Rec. ITU-R BT.2100-2 perceptual quantization (PQ) system,
+ * represented by H.273 TransferCharacteristics code point 16.
+ */
+ DRM_COLOROP_1D_CURVE_PQ_125_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ *
+ * enum string "PQ 125 Inverse EOTF"
+ *
+ * The inverse of DRM_COLOROP_1D_CURVE_PQ_125_EOTF.
+ */
+ DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ *
+ * enum string "BT.2020 Inverse OETF"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_BT2020_OETF
+ */
+ DRM_COLOROP_1D_CURVE_BT2020_INV_OETF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ *
+ * enum string "BT.2020 OETF"
+ *
+	 * The BT.2020/BT.709 transfer function. The BT.709 and BT.2020
+	 * transfer functions are the same; the only difference is that
+	 * BT.2020 is defined with more precision for 10- and 12-bit
+	 * encodings.
+	 */
+ DRM_COLOROP_1D_CURVE_BT2020_OETF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_GAMMA22:
+ *
+ * enum string "Gamma 2.2"
+ *
+ * A gamma 2.2 power function. This applies a power curve with
+ * gamma value of 2.2 to the input values.
+ */
+ DRM_COLOROP_1D_CURVE_GAMMA22,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ *
+ * enum string "Gamma 2.2 Inverse"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_GAMMA22
+ */
+ DRM_COLOROP_1D_CURVE_GAMMA22_INV,
+ /**
+ * @DRM_COLOROP_1D_CURVE_COUNT:
+ *
+ * enum value denoting the size of the enum
+ */
+ DRM_COLOROP_1D_CURVE_COUNT
+};
+
+/**
+ * struct drm_colorop_state - mutable colorop state
+ */
+struct drm_colorop_state {
+ /** @colorop: backpointer to the colorop */
+ struct drm_colorop *colorop;
+
+ /*
+ * Color properties
+ *
+ * The following fields are not always valid, their usage depends
+ * on the colorop type. See their associated comment for more
+ * information.
+ */
+
+ /**
+ * @bypass:
+ *
+ * When the property BYPASS exists on this colorop, this stores
+ * the requested bypass state: true if colorop shall be bypassed,
+ * false if colorop is enabled.
+ */
+ bool bypass;
+
+ /**
+ * @curve_1d_type:
+ *
+ * Type of 1D curve.
+ */
+ enum drm_colorop_curve_1d_type curve_1d_type;
+
+ /**
+ * @multiplier:
+ *
+ * Multiplier to 'gain' the plane. Format is S31.32 sign-magnitude.
+ */
+ uint64_t multiplier;
+
+ /**
+ * @data:
+ *
+ * Data blob for any TYPE that requires such a blob. The
+ * interpretation of the blob is TYPE-specific.
+ *
+	 * See the &drm_colorop_type documentation for how the blob is
+	 * laid out.
+ */
+ struct drm_property_blob *data;
+
+ /** @state: backpointer to global drm_atomic_state */
+ struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_colorop - DRM color operation control structure
+ *
+ * A colorop represents one color operation. Colorops can be chained via
+ * the 'next' pointer to build a color pipeline.
+ *
+ * Since colorops cannot stand alone and are used to describe color
+ * operations on a plane, they don't have their own locking mechanism but
+ * are locked and programmed along with their associated &drm_plane.
+ */
+struct drm_colorop {
+ /** @dev: parent DRM device */
+ struct drm_device *dev;
+
+ /**
+ * @head:
+ *
+ * List of all colorops on @dev, linked from &drm_mode_config.colorop_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
+ struct list_head head;
+
+ /**
+	 * @index: Position inside the mode_config.colorop_list, can be used
+	 * as an array index. It is invariant over the lifetime of the colorop.
+ */
+ unsigned int index;
+
+ /** @base: base mode object */
+ struct drm_mode_object base;
+
+ /**
+ * @plane:
+ *
+ * The plane on which the colorop sits. A drm_colorop is always unique
+ * to a plane.
+ */
+ struct drm_plane *plane;
+
+ /**
+ * @state:
+ *
+ * Current atomic state for this colorop.
+ *
+ * This is protected by @mutex. Note that nonblocking atomic commits
+ * access the current colorop state without taking locks.
+ */
+ struct drm_colorop_state *state;
+
+ /*
+ * Color properties
+ *
+ * The following fields are not always valid, their usage depends
+ * on the colorop type. See their associated comment for more
+ * information.
+ */
+
+ /** @properties: property tracking for this colorop */
+ struct drm_object_properties properties;
+
+ /**
+ * @type:
+ *
+ * Read-only
+ * Type of color operation
+ */
+ enum drm_colorop_type type;
+
+ /**
+ * @next:
+ *
+ * Read-only
+ * Pointer to next drm_colorop in pipeline
+ */
+ struct drm_colorop *next;
+
+ /**
+ * @type_property:
+ *
+ * Read-only "TYPE" property for specifying the type of
+ * this color operation. The type is enum drm_colorop_type.
+ */
+ struct drm_property *type_property;
+
+ /**
+ * @bypass_property:
+ *
+ * Boolean property to control enablement of the color
+ * operation. Only present if DRM_COLOROP_FLAG_ALLOW_BYPASS
+ * flag is set. When present, setting bypass to "true" shall
+ * always be supported to allow compositors to quickly fall
+ * back to alternate methods of color processing. This is
+ * important since setting color operations can fail due to
+ * unique HW constraints.
+ */
+ struct drm_property *bypass_property;
+
+ /**
+ * @size:
+ *
+	 * Number of entries in the custom LUT. This should be read-only.
+ */
+ uint32_t size;
+
+ /**
+ * @lut1d_interpolation:
+ *
+ * Read-only
+ * Interpolation for DRM_COLOROP_1D_LUT
+ */
+ enum drm_colorop_lut1d_interpolation_type lut1d_interpolation;
+
+ /**
+ * @lut3d_interpolation:
+ *
+ * Read-only
+ * Interpolation for DRM_COLOROP_3D_LUT
+ */
+ enum drm_colorop_lut3d_interpolation_type lut3d_interpolation;
+
+ /**
+ * @lut1d_interpolation_property:
+ *
+ * Read-only property for DRM_COLOROP_1D_LUT interpolation
+ */
+ struct drm_property *lut1d_interpolation_property;
+
+ /**
+ * @curve_1d_type_property:
+ *
+ * Sub-type for DRM_COLOROP_1D_CURVE type.
+ */
+ struct drm_property *curve_1d_type_property;
+
+ /**
+ * @multiplier_property:
+ *
+ * Multiplier property for plane gain
+ */
+ struct drm_property *multiplier_property;
+
+ /**
+ * @size_property:
+ *
+ * Size property for custom LUT from userspace.
+ */
+ struct drm_property *size_property;
+
+ /**
+ * @lut3d_interpolation_property:
+ *
+ * Read-only property for DRM_COLOROP_3D_LUT interpolation
+ */
+ struct drm_property *lut3d_interpolation_property;
+
+ /**
+ * @data_property:
+ *
+ * blob property for any TYPE that requires a blob of data,
+ * such as 1DLUT, CTM, 3DLUT, etc.
+ *
+ * The way this blob is interpreted depends on the TYPE of
+	 * this colorop.
+ */
+ struct drm_property *data_property;
+
+ /**
+ * @next_property:
+ *
+ * Read-only property to next colorop in the pipeline
+ */
+ struct drm_property *next_property;
+};
+
+#define obj_to_colorop(x) container_of(x, struct drm_colorop, base)
+
+/**
+ * drm_colorop_find - look up a colorop object from its ID
+ * @dev: DRM device
+ * @file_priv: drm file to check for lease against.
+ * @id: &drm_mode_object ID
+ *
+ * This can be used to look up a colorop from its userspace ID. Only used by
+ * drivers for legacy IOCTLs and interfaces; nowadays extensions to the KMS
+ * userspace interface should be done using &drm_property.
+ */
+static inline struct drm_colorop *drm_colorop_find(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_COLOROP);
+ return mo ? obj_to_colorop(mo) : NULL;
+}
+
+void drm_colorop_pipeline_destroy(struct drm_device *dev);
+void drm_colorop_cleanup(struct drm_colorop *colorop);
+
+int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, u64 supported_tfs, uint32_t flags);
+int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t lut_size,
+ enum drm_colorop_lut1d_interpolation_type interpolation,
+ uint32_t flags);
+int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags);
+int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags);
+int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane,
+ uint32_t lut_size,
+ enum drm_colorop_lut3d_interpolation_type interpolation,
+ uint32_t flags);
+
+struct drm_colorop_state *
+drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop);
+
+void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state);
+
+/**
+ * drm_colorop_reset - reset colorop atomic state
+ * @colorop: drm colorop
+ *
+ * Resets the atomic state for @colorop by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_colorop_reset(struct drm_colorop *colorop);
+
+/**
+ * drm_colorop_index - find the index of a registered colorop
+ * @colorop: colorop to find index for
+ *
+ * Given a registered colorop, return the index of that colorop within a DRM
+ * device's list of colorops.
+ */
+static inline unsigned int drm_colorop_index(const struct drm_colorop *colorop)
+{
+ return colorop->index;
+}
+
+#define drm_for_each_colorop(colorop, dev) \
+ list_for_each_entry(colorop, &(dev)->mode_config.colorop_list, head)
+
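/*
 * Editor's example (not part of this patch): walking every colorop
 * registered on a device, e.g. for debug output:
 */
static void dump_colorops(struct drm_device *dev, struct drm_printer *p)
{
	struct drm_colorop *colorop;

	drm_for_each_colorop(colorop, dev)
		drm_printf(p, "colorop %u: %s\n", colorop->base.id,
			   drm_get_colorop_type_name(colorop->type));
}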
+/**
+ * drm_get_colorop_type_name - return a string for colorop type
+ * @type: colorop type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_type_name(enum drm_colorop_type type);
+
+/**
+ * drm_get_colorop_curve_1d_type_name - return a string for 1D curve type
+ * @type: 1d curve type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type);
+
+const char *
+drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type);
+
+const char *
+drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type);
+
+void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next);
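/*
 * Editor's example (not part of this patch): building a two-stage plane
 * color pipeline, a 1D transfer-function curve followed by a 3x4 CTM,
 * linked via the NEXT property. The colorop allocations are assumed to
 * be done by the (hypothetical) caller:
 */
static int foo_plane_init_color_pipeline(struct drm_device *dev,
					 struct drm_plane *plane,
					 struct drm_colorop *tf,
					 struct drm_colorop *ctm)
{
	u64 tfs = BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
		  BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF);
	int ret;

	ret = drm_plane_colorop_curve_1d_init(dev, tf, plane, tfs,
					      DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	ret = drm_plane_colorop_ctm_3x4_init(dev, ctm, plane,
					     DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret)
		return ret;

	drm_colorop_set_next_property(tf, ctm);
	return 0;
}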
+
+#endif /* __DRM_COLOROP_H__ */
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index fe88d7fc6b8f..8f34f4b8183d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -38,13 +38,17 @@ struct drm_connector_helper_funcs;
struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
+struct drm_display_mode;
struct drm_encoder;
struct drm_panel;
struct drm_property;
struct drm_property_blob;
struct drm_printer;
struct drm_privacy_screen;
+struct drm_edid;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
enum drm_connector_force {
@@ -201,6 +205,13 @@ enum drm_connector_tv_mode {
DRM_MODE_TV_MODE_SECAM,
/**
+ * @DRM_MODE_TV_MODE_MONOCHROME: Use timings appropriate to
+ * the DRM mode, including equalizing pulses for a 525-line
+ * or 625-line mode, with no pedestal or color encoding.
+ */
+ DRM_MODE_TV_MODE_MONOCHROME,
+
+ /**
* @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes.
*
* Internal implementation detail; this is not uABI.
@@ -369,6 +380,32 @@ enum drm_panel_orientation {
};
/**
+ * enum drm_hdmi_broadcast_rgb - Broadcast RGB selection for an HDMI &drm_connector
+ */
+enum drm_hdmi_broadcast_rgb {
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_AUTO: The RGB range is selected
+ * automatically based on the mode.
+ */
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_FULL: Full range RGB is forced.
+ */
+ DRM_HDMI_BROADCAST_RGB_FULL,
+
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_LIMITED: Limited range RGB is forced.
+ */
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+const char *
+drm_hdmi_connector_get_broadcast_rgb_name(enum drm_hdmi_broadcast_rgb broadcast_rgb);
+const char *
+drm_hdmi_connector_get_output_format_name(enum hdmi_colorspace fmt);
+
+/**
* struct drm_monitor_range_info - Panel's Monitor range in EDID for
* &drm_display_info
*
@@ -437,14 +474,6 @@ enum drm_privacy_screen_status {
*
* DP definitions come from the DP v2.0 spec
* HDMI definitions come from the CTA-861-H spec
- *
- * A note on YCC and RGB variants:
- *
- * Since userspace is not aware of the encoding on the wire
- * (RGB or YCbCr), drivers are free to pick the appropriate
- * variant, regardless of what userspace selects. E.g., if
- * BT2020_RGB is selected by userspace a driver will pick
- * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
*
* @DRM_MODE_COLORIMETRY_DEFAULT:
* Driver specific behavior.
@@ -771,6 +800,11 @@ struct drm_display_info {
struct drm_hdmi_info hdmi;
/**
+ * @hdr_sink_metadata: HDR Metadata Information read from sink
+ */
+ struct hdr_sink_metadata hdr_sink_metadata;
+
+ /**
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
@@ -814,7 +848,9 @@ struct drm_display_info {
int vics_len;
/**
- * @quirks: EDID based quirks. Internal to EDID parsing.
+ * @quirks: EDID based quirks. DRM core and drivers can query the
+	 * &drm_edid_quirk quirks using drm_edid_has_quirk(); the rest of
+ * the quirks also tracked here are internal to EDID parsing.
*/
u32 quirks;
@@ -888,6 +924,82 @@ struct drm_tv_connector_state {
};
/**
+ * struct drm_connector_hdmi_infoframe - HDMI Infoframe container
+ */
+struct drm_connector_hdmi_infoframe {
+ /**
+ * @data: HDMI Infoframe structure
+ */
+ union hdmi_infoframe data;
+
+ /**
+ * @set: Is the content of @data valid?
+ */
+ bool set;
+};
+
+/**
+ * struct drm_connector_hdmi_state - HDMI state container
+ */
+struct drm_connector_hdmi_state {
+ /**
+ * @broadcast_rgb: Connector property to pass the
+ * Broadcast RGB selection value.
+ */
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+
+ /**
+ * @infoframes: HDMI Infoframes matching that state
+ */
+ struct {
+ /**
+ * @avi: AVI Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe avi;
+
+ /**
+ * @hdr_drm: DRM (Dynamic Range and Mastering)
+ * Infoframes structure matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdr_drm;
+
+ /**
+ * @spd: SPD Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe spd;
+
+ /**
+		 * @hdmi: HDMI Vendor Infoframes structure
+ * matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdmi;
+ } infoframes;
+
+ /**
+ * @is_limited_range: Is the output supposed to use a limited
+ * RGB Quantization Range or not?
+ */
+ bool is_limited_range;
+
+ /**
+ * @output_bpc: Bits per color channel to output.
+ */
+ unsigned int output_bpc;
+
+ /**
+ * @output_format: Pixel format to output in.
+ */
+ enum hdmi_colorspace output_format;
+
+ /**
+ * @tmds_char_rate: TMDS Character Rate, in Hz.
+ */
+ unsigned long long tmds_char_rate;
+};
+
+/**
* struct drm_connector_state - mutable connector state
*/
struct drm_connector_state {
@@ -1031,6 +1143,156 @@ struct drm_connector_state {
* DRM blob property for HDR output metadata
*/
struct drm_property_blob *hdr_output_metadata;
+
+ /**
+	 * @hdmi: HDMI-related variables and properties. Filled by
+	 * drm_atomic_helper_connector_hdmi_check().
+ */
+ struct drm_connector_hdmi_state hdmi;
+};
+
+struct drm_connector_hdmi_audio_funcs {
+ /**
+ * @startup:
+ *
+	 * Called when ASoC starts an audio stream setup. The
+	 * @startup callback is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*startup)(struct drm_connector *connector);
+
+ /**
+ * @prepare:
+	 * Configures the HDMI encoder for the audio stream. Can be
+	 * called multiple times for each setup. Mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*prepare)(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @shutdown:
+ *
+	 * Shut down the audio stream. The @shutdown callback is
+	 * mandatory.
+ */
+ void (*shutdown)(struct drm_connector *connector);
+
+ /**
+ * @mute_stream:
+ *
+ * Mute/unmute HDMI audio stream. The @mute_stream callback is
+ * optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*mute_stream)(struct drm_connector *connector,
+ bool enable, int direction);
+};
+
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector);
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector);
+
+/**
+ * struct drm_connector_cec_funcs - drm_connector CEC control functions
+ */
+struct drm_connector_cec_funcs {
+ /**
+ * @phys_addr_invalidate: mark CEC physical address as invalid
+ *
+ * The callback to mark CEC physical address as invalid, abstracting
+ * the operation.
+ */
+ void (*phys_addr_invalidate)(struct drm_connector *connector);
+
+ /**
+ * @phys_addr_set: set CEC physical address
+ *
+ * The callback to set CEC physical address, abstracting the operation.
+ */
+ void (*phys_addr_set)(struct drm_connector *connector, u16 addr);
+};
+
+/**
+ * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_hdmi_funcs {
+ /**
+ * @tmds_char_rate_valid:
+ *
+ * This callback is invoked at atomic_check time to figure out
+ * whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * The @tmds_char_rate_valid callback is optional.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*tmds_char_rate_valid)(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
+ /**
+ * @clear_infoframe:
+ *
+ * This callback is invoked through
+	 * drm_atomic_helper_connector_hdmi_update_infoframes() during a
+	 * commit to clear the infoframes in the hardware. It will be
+ * called multiple times, once for every disabled infoframe
+ * type.
+ *
+ * The @clear_infoframe callback is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*clear_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type);
+
+ /**
+ * @write_infoframe:
+ *
+ * This callback is invoked through
+	 * drm_atomic_helper_connector_hdmi_update_infoframes() during a
+ * commit to program the infoframes into the hardware. It will
+ * be called multiple times, once for every updated infoframe
+ * type.
+ *
+ * The @write_infoframe callback is mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*write_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @read_edid:
+ *
+ * This callback is used by the framework as a replacement for reading
+ * the EDID from connector->ddc. It is still recommended to provide
+ * connector->ddc instead of implementing this callback. Returned EDID
+	 * should be freed via drm_edid_free().
+ *
+ * The @read_edid callback is optional.
+ *
+ * Returns:
+ * Valid EDID on success, NULL in case of failure.
+ */
+ const struct drm_edid *(*read_edid)(struct drm_connector *connector);
};
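/*
 * Editor's example (not part of this patch): a minimal
 * drm_connector_hdmi_funcs implementation. Only @write_infoframe is
 * mandatory; foo_hdmi and its packet helper are hypothetical:
 */
static int foo_hdmi_write_infoframe(struct drm_connector *connector,
				    enum hdmi_infoframe_type type,
				    const u8 *buffer, size_t len)
{
	struct foo_hdmi *hdmi = connector_to_foo_hdmi(connector);

	return foo_hdmi_push_packet(hdmi, type, buffer, len);
}

static const struct drm_connector_hdmi_funcs foo_hdmi_funcs = {
	.write_infoframe = foo_hdmi_write_infoframe,
};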
/**
@@ -1494,6 +1756,133 @@ struct drm_cmdline_mode {
};
/**
+ * struct drm_connector_hdmi_audio - DRM generic HDMI Codec-related structure
+ *
+ * HDMI drivers usually incorporate an HDMI Codec. This structure expresses the
+ * generic HDMI Codec as used by the DRM HDMI Codec framework.
+ */
+struct drm_connector_hdmi_audio {
+ /**
+ * @funcs:
+ *
+ * Implementation of the HDMI codec functionality to be used by the DRM
+ * HDMI Codec framework.
+ */
+ const struct drm_connector_hdmi_audio_funcs *funcs;
+
+ /**
+ * @codec_pdev:
+ *
+ * Platform device created to hold the HDMI Codec. It will be
+ * automatically unregistered during drm_connector_cleanup().
+ */
+ struct platform_device *codec_pdev;
+
+ /**
+ * @lock:
+ *
+ * Mutex to protect @last_state, @plugged_cb and @plugged_cb_dev.
+ */
+ struct mutex lock;
+
+ /**
+ * @plugged_cb:
+ *
+	 * Callback to be called when the HDMI sink gets plugged to or unplugged
+ * from this connector. This is assigned by the framework when
+ * requested by the ASoC code.
+ */
+ void (*plugged_cb)(struct device *dev, bool plugged);
+
+ /**
+ * @plugged_cb_dev:
+ *
+	 * The data for @plugged_cb(). It is provided by the ASoC code.
+ */
+ struct device *plugged_cb_dev;
+
+ /**
+ * @last_state:
+ *
+	 * Last plugged state recorded by the framework. It is used to correctly
+ * report the state to @plugged_cb().
+ */
+ bool last_state;
+
+ /**
+ * @dai_port:
+ *
+ * The port in DT that is used for the Codec DAI.
+ */
+ int dai_port;
+};
+
+/**
+ * struct drm_connector_hdmi - DRM Connector HDMI-related structure
+ */
+struct drm_connector_hdmi {
+#define DRM_CONNECTOR_HDMI_VENDOR_LEN 8
+ /**
+ * @vendor: HDMI Controller Vendor Name
+ */
+ unsigned char vendor[DRM_CONNECTOR_HDMI_VENDOR_LEN] __nonstring;
+
+#define DRM_CONNECTOR_HDMI_PRODUCT_LEN 16
+ /**
+ * @product: HDMI Controller Product Name
+ */
+ unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring;
+
+ /**
+ * @supported_formats: Bitmask of @hdmi_colorspace
+ * supported by the controller.
+ */
+ unsigned long supported_formats;
+
+ /**
+ * @funcs: HDMI connector Control Functions
+ */
+ const struct drm_connector_hdmi_funcs *funcs;
+
+ /**
+ * @infoframes: Current Infoframes output by the connector
+ */
+ struct {
+ /**
+ * @lock: Mutex protecting against concurrent access to
+ * the infoframes, most notably between KMS and ALSA.
+ */
+ struct mutex lock;
+
+ /**
+ * @audio: Current Audio Infoframes structure. Protected
+ * by @lock.
+ */
+ struct drm_connector_hdmi_infoframe audio;
+ } infoframes;
+};
+
+/**
+ * struct drm_connector_cec - DRM Connector CEC-related structure
+ */
+struct drm_connector_cec {
+ /**
+ * @mutex: protects all fields in this structure.
+ */
+ struct mutex mutex;
+
+ /**
+ * @funcs: CEC Control Functions
+ */
+ const struct drm_connector_cec_funcs *funcs;
+
+ /**
+ * @data: CEC implementation-specific data
+ */
+ void *data;
+};
+
+/**
* struct drm_connector - central DRM connector control structure
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
@@ -1636,8 +2025,12 @@ struct drm_connector {
/**
* @edid_blob_ptr: DRM property containing EDID if present. Protected by
- * &drm_mode_config.mutex. This should be updated only by calling
+ * &drm_mode_config.mutex.
+ *
+ * This must be updated only by calling drm_edid_connector_update() or
* drm_connector_update_edid_property().
+ *
+ * This must not be used by drivers directly.
*/
struct drm_property_blob *edid_blob_ptr;
@@ -1676,6 +2069,11 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
/**
+ * @max_bpc: Maximum bits per color channel the connector supports.
+ */
+ unsigned int max_bpc;
+
+ /**
* @max_bpc_property: Default connector property for the max bpc to be
* driven out of the connector.
*/
@@ -1699,6 +2097,12 @@ struct drm_connector {
*/
struct drm_property *privacy_screen_hw_state_property;
+ /**
+ * @broadcast_rgb_property: Connector property to set the
+ * Broadcast RGB selection to output with.
+ */
+ struct drm_property *broadcast_rgb_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1774,8 +2178,11 @@ struct drm_connector {
struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
- /** @eld: EDID-like data, if present */
+ /** @eld: EDID-like data, if present, protected by @eld_mutex */
uint8_t eld[MAX_ELD_BYTES];
+	/** @eld_mutex: protection for concurrent access to @eld */
+ struct mutex eld_mutex;
+
/** @latency_present: AV delay info from ELD, if found */
bool latency_present[2];
/**
@@ -1884,8 +2291,20 @@ struct drm_connector {
*/
struct llist_node free_node;
- /** @hdr_sink_metadata: HDR Metadata Information read from sink */
- struct hdr_sink_metadata hdr_sink_metadata;
+ /**
+	 * @hdmi: HDMI-related variables and properties.
+ */
+ struct drm_connector_hdmi hdmi;
+
+ /**
+ * @hdmi_audio: HDMI codec properties and non-DRM state.
+ */
+ struct drm_connector_hdmi_audio hdmi_audio;
+
+ /**
+ * @cec: CEC-related data.
+ */
+ struct drm_connector_cec cec;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -1894,6 +2313,11 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
int drm_connector_init_with_ddc(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
@@ -1904,8 +2328,18 @@ int drmm_connector_init(struct drm_device *dev,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc);
+int drmm_connector_hdmi_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const char *vendor, const char *product,
+ const struct drm_connector_funcs *funcs,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ int connector_type,
+ struct i2c_adapter *ddc,
+ unsigned long supported_formats,
+ unsigned int max_bpc);
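/*
 * Editor's example (not part of this patch): registering an HDMI
 * connector with the managed helper. foo_device, the vendor/product
 * strings, the ddc adapter and a funcs table like the sketch earlier
 * are driver-specific assumptions:
 */
static int foo_connector_create(struct foo_device *foo)
{
	return drmm_connector_hdmi_init(&foo->drm, &foo->connector,
					"ACME", "Foo HDMI",
					&foo_connector_funcs, &foo_hdmi_funcs,
					DRM_MODE_CONNECTOR_HDMIA, foo->ddc,
					BIT(HDMI_COLORSPACE_RGB), 8);
}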
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
+int drm_connector_dynamic_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -2014,6 +2448,7 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector);
int drm_connector_attach_colorspace_property(struct drm_connector *connector);
int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8b48a1974da3..66278ffeebd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -186,7 +186,7 @@ struct drm_crtc_state {
* this case the driver will send the VBLANK event on its own when the
* writeback job is complete.
*/
- bool no_vblank : 1;
+ bool no_vblank;
/**
* @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
@@ -318,6 +318,17 @@ struct drm_crtc_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @sharpness_strength:
+ *
+	 * Sharpness intensity requested by userspace. The value ranges
+	 * from 0 to 255. The default value of 0 disables the sharpness
+	 * feature; any value greater than 0 enables sharpening with the
+	 * specified strength.
+ */
+ u8 sharpness_strength;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1089,6 +1100,12 @@ struct drm_crtc {
struct drm_property *scaling_filter_property;
/**
+	 * @sharpness_strength_property: property to set the
+	 * intensity of the requested sharpness.
+ */
+ struct drm_property *sharpness_strength_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1323,5 +1340,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters);
-
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc);
#endif /* __DRM_CRTC_H__ */
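/*
 * Editor's example (not part of this patch): a driver creates the
 * property once at init with drm_crtc_create_sharpness_strength_property()
 * and consumes the value during the atomic flush. foo_device and the
 * FOO_SHARPNESS register are hypothetical:
 */
static void foo_crtc_update_sharpness(struct drm_crtc *crtc,
				      const struct drm_crtc_state *state)
{
	struct foo_device *fdev = to_foo_device(crtc->dev);

	/* 0 disables sharpening, 1-255 selects the strength. */
	writel(state->sharpness_strength, fdev->mmio + FOO_SHARPNESS);
}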
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index effda42cce31..a58cbcd11276 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -78,7 +78,7 @@ bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
struct drm_rect *rect);
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect);
#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index cf06cee4343f..ea8cba94208a 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -153,6 +153,9 @@ void drm_debugfs_add_files(struct drm_device *dev,
int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuvm *gpuvm);
+
+void drm_debugfs_clients_add(struct drm_file *file);
+void drm_debugfs_clients_remove(struct drm_file *file);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -181,6 +184,14 @@ static inline int drm_debugfs_gpuva_info(struct seq_file *m,
{
return 0;
}
+
+static inline void drm_debugfs_clients_add(struct drm_file *file)
+{
+}
+
+static inline void drm_debugfs_clients_remove(struct drm_file *file)
+{
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 63767cf24371..5af49c5c3778 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/sched.h>
#include <drm/drm_mode_config.h>
@@ -21,6 +22,28 @@ struct inode;
struct pci_dev;
struct pci_controller;
+/*
+ * Recovery methods for a wedged device, in order of least to most side effects.
+ * To be used with drm_dev_wedged_event() as recovery @method. Callers can
+ * use any one, multiple (or'd) or none depending on their needs.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
+ */
+#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
+#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
+#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
+#define DRM_WEDGE_RECOVERY_VENDOR BIT(3) /* vendor specific recovery method */
+
+/**
+ * struct drm_wedge_task_info - information about the guilty task of a wedged device
+ */
+struct drm_wedge_task_info {
+ /** @pid: pid of the task */
+ pid_t pid;
+ /** @comm: command name of the task */
+ char comm[TASK_COMM_LEN];
+};
/**
* enum switch_power_state - power state of drm device
@@ -57,6 +80,28 @@ struct drm_device {
struct device *dev;
/**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral busses also cannot perform
+	 * DMA by themselves. The @dma_dev field should point to the bus
+	 * controller that does DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
* @managed:
*
* Managed resources linked to the lifetime of this &drm_device as
@@ -148,16 +193,6 @@ struct drm_device {
char *unique;
/**
- * @struct_mutex:
- *
- * Lock for others (not &drm_minor.master and &drm_file.is_master)
- *
- * TODO: This lock used to be the BKL of the DRM subsystem. Move the
- * lock into i915, which is the only remaining user.
- */
- struct mutex struct_mutex;
-
- /**
* @master_mutex:
*
* Lock for &drm_minor.master and &drm_file.is_master
@@ -204,6 +239,14 @@ struct drm_device {
struct list_head clientlist;
/**
+ * @client_sysrq_list:
+ *
+ * Entry into list of devices registered for sysrq. Allows in-kernel
+ * clients on this device to handle sysrq keys.
+ */
+ struct list_head client_sysrq_list;
+
+ /**
* @vblank_disable_immediate:
*
* If true, vblank interrupt will be disabled immediately when the
@@ -213,8 +256,9 @@ struct drm_device {
* This can be set to true if the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
@@ -318,4 +362,23 @@ struct drm_device {
struct dentry *debugfs_root;
};
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
+
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this is
+ * the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
+
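/*
 * Editor's example (not part of this patch): dma-buf imports must use
 * the DMA-capable device rather than the (possibly virtual) DRM device
 * itself:
 */
static int foo_attach_dmabuf(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;

	attach = dma_buf_attach(buf, drm_dev_dma_dev(dev));
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* ... map and use the attachment, then detach when done ... */
	dma_buf_detach(buf, attach);
	return 0;
}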
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 8878260d7529..42fc085f986d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -34,6 +34,9 @@
#include <drm/drm_device.h>
+struct dmem_cgroup_region;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
struct drm_file;
struct drm_gem_object;
struct drm_master;
@@ -229,34 +232,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
/**
- * @lastclose:
- *
- * Called when the last &struct drm_file has been closed and there's
- * currently no userspace client for the &struct drm_device.
- *
- * Modern drivers should only use this to force-restore the fbdev
- * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
- * Anything else would indicate there's something seriously wrong.
- * Modern drivers can also use this to execute delayed power switching
- * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
- * infrastructure.
- *
- * This is called after @postclose hook has been called.
- *
- * NOTE:
- *
- * All legacy drivers use this callback to de-initialize the hardware.
- * This is purely because of the shadow-attach model, where the DRM
- * kernel driver does not really own the hardware. Instead ownershipe is
- * handled with the help of userspace through an inheritedly racy dance
- * to set/unset the VT into raw mode.
- *
- * Legacy drivers initialize the hardware in the @firstopen callback,
- * which isn't even called for modern drivers.
- */
- void (*lastclose) (struct drm_device *);
-
- /**
* @unload:
*
* Reverse the effects of the driver load callback. Ideally,
@@ -395,6 +370,22 @@ struct drm_driver {
uint64_t *offset);
/**
+ * @fbdev_probe:
+ *
+	 * Allocates and initializes the fb_info structure for fbdev emulation.
+ * Furthermore it also needs to allocate the DRM framebuffer used to
+ * back the fbdev.
+ *
+ * This callback is mandatory for fbdev support.
+ *
+ * Returns:
+ *
+	 * 0 on success or a negative error code otherwise.
+ */
+ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+ /**
* @show_fdinfo:
*
* Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
@@ -411,8 +402,6 @@ struct drm_driver {
char *name;
/** @desc: driver description */
char *desc;
- /** @date: driver date */
- char *date;
/**
* @driver_features:
@@ -448,6 +437,10 @@ void *__devm_drm_dev_alloc(struct device *parent,
const struct drm_driver *driver,
size_t size, size_t offset);
+struct dmem_cgroup_region *
+drmm_cgroup_register_region(struct drm_device *dev,
+ const char *region_name, u64 size);
+
/**
* devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
* @parent: Parent device object
@@ -480,6 +473,11 @@ void *__devm_drm_dev_alloc(struct device *parent,
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
+
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset);
+
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
@@ -489,6 +487,8 @@ void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info);
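/*
 * Editor's example (not part of this patch): reporting a wedged device
 * after a fatal hang, suggesting a driver rebind or, failing that, a
 * bus reset, and naming the guilty task:
 */
static int foo_report_wedged(struct drm_device *dev, struct task_struct *task)
{
	struct drm_wedge_task_info info = {
		.pid = task_pid_nr(task),
	};

	get_task_comm(info.comm, task);

	return drm_dev_wedged_event(dev,
				    DRM_WEDGE_RECOVERY_REBIND |
				    DRM_WEDGE_RECOVERY_BUS_RESET,
				    &info);
}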
/**
* drm_dev_is_unplugged - is a DRM device unplugged
@@ -572,9 +572,24 @@ static inline bool drm_firmware_drivers_only(void)
}
#if defined(CONFIG_DEBUG_FS)
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+void drm_debugfs_dev_init(struct drm_device *dev);
+void drm_debugfs_init_root(void);
+void drm_debugfs_remove_root(void);
+void drm_debugfs_bridge_params(void);
#else
-static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+static inline void drm_debugfs_dev_init(struct drm_device *dev)
+{
+}
+
+static inline void drm_debugfs_init_root(void)
+{
+}
+
+static inline void drm_debugfs_remove_root(void)
+{
+}
+
+static inline void drm_debugfs_bridge_params(void)
{
}
#endif
diff --git a/include/drm/drm_dumb_buffers.h b/include/drm/drm_dumb_buffers.h
new file mode 100644
index 000000000000..1f3a8236fb3d
--- /dev/null
+++ b/include/drm/drm_dumb_buffers.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __DRM_DUMB_BUFFERS_H__
+#define __DRM_DUMB_BUFFERS_H__
+
+struct drm_device;
+struct drm_mode_create_dumb;
+
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align);
+
+#endif
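/*
 * Editor's example (not part of this patch): a .dumb_create
 * implementation validates and fills args->pitch and args->size via
 * drm_mode_size_dumb() before allocating the buffer object. The 64-byte
 * pitch alignment, page-sized allocation granularity and the GEM helper
 * are hypothetical:
 */
static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, SZ_64, PAGE_SIZE);
	if (ret)
		return ret;

	return foo_gem_create_with_handle(file, dev, args);
}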
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b085525e53e2..04f7a7f1f108 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -109,6 +109,13 @@ struct detailed_data_string {
#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
+enum drm_edid_quirk {
+ /* Do a dummy read before DPCD accesses, to prevent corruption. */
+ DRM_EDID_QUIRK_DP_DPCD_PROBE,
+
+ DRM_EDID_QUIRK_NUM,
+};
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
@@ -333,6 +340,12 @@ struct drm_edid_ident {
const char *name;
};
+#define DRM_EDID_IDENT_INIT(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id, _name) \
+{ \
+ .panel_id = drm_edid_encode_panel_id(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id), \
+ .name = _name, \
+}
+
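/*
 * Editor's example (not part of this patch): describing a known panel
 * for drm_edid_match(). The vendor code, product id and name below are
 * made up:
 */
static const struct drm_edid_ident foo_panel_ident =
	DRM_EDID_IDENT_INIT('F', 'O', 'O', 0x1234, "Foo 13\" panel");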
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
/* Short Audio Descriptor */
@@ -423,10 +436,6 @@ static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *pro
}
bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_do_get_edid(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
@@ -441,11 +450,9 @@ bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
+ unsigned int hdisplay, unsigned int vdisplay);
int drm_edid_header_is_valid(const void *edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
bool drm_edid_is_valid(struct edid *edid);
void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
@@ -482,5 +489,6 @@ void drm_edid_print_product_id(struct drm_printer *p,
u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
bool drm_edid_match(const struct drm_edid *drm_edid,
const struct drm_edid_ident *ident);
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk);
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
deleted file mode 100644
index 49172166a164..000000000000
--- a/include/drm/drm_encoder_slave.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_ENCODER_SLAVE_H__
-#define __DRM_ENCODER_SLAVE_H__
-
-#include <linux/i2c.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-
-/**
- * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
- *
- * Most of its members are analogous to the function pointers in
- * &drm_encoder_helper_funcs and they can optionally be used to
- * initialize the latter. Connector-like methods (e.g. @get_modes and
- * @set_property) will typically be wrapped around and only be called
- * if the encoder is the currently selected one for the connector.
- */
-struct drm_encoder_slave_funcs {
- /**
- * @set_config: Initialize any encoder-specific modesetting parameters.
- * The meaning of the @params parameter is implementation dependent. It
- * will usually be a structure with DVO port data format settings or
- * timings. It's not required for the new parameters to take effect
- * until the next mode is set.
- */
- void (*set_config)(struct drm_encoder *encoder,
- void *params);
-
- /**
- * @destroy: Analogous to &drm_encoder_funcs @destroy callback.
- */
- void (*destroy)(struct drm_encoder *encoder);
-
- /**
- * @dpms: Analogous to &drm_encoder_helper_funcs @dpms callback. Wrapped
- * by drm_i2c_encoder_dpms().
- */
- void (*dpms)(struct drm_encoder *encoder, int mode);
-
- /**
- * @save: Save state. Wrapped by drm_i2c_encoder_save().
- */
- void (*save)(struct drm_encoder *encoder);
-
- /**
- * @restore: Restore state. Wrapped by drm_i2c_encoder_restore().
- */
- void (*restore)(struct drm_encoder *encoder);
-
- /**
- * @mode_fixup: Analogous to &drm_encoder_helper_funcs @mode_fixup
- * callback. Wrapped by drm_i2c_encoder_mode_fixup().
- */
- bool (*mode_fixup)(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /**
- * @mode_valid: Analogous to &drm_encoder_helper_funcs @mode_valid.
- */
- int (*mode_valid)(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
- /**
- * @mode_set: Analogous to &drm_encoder_helper_funcs @mode_set
- * callback. Wrapped by drm_i2c_encoder_mode_set().
- */
- void (*mode_set)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- /**
- * @detect: Analogous to &drm_encoder_helper_funcs @detect
- * callback. Wrapped by drm_i2c_encoder_detect().
- */
- enum drm_connector_status (*detect)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @get_modes: Get modes.
- */
- int (*get_modes)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @create_resources: Create resources.
- */
- int (*create_resources)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- /**
- * @set_property: Set property.
- */
- int (*set_property)(struct drm_encoder *encoder,
- struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
-};
-
-/**
- * struct drm_encoder_slave - Slave encoder struct
- *
- * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
- * ones in @base. The former are never actually called by the common
- * CRTC code, it's just a convenience for splitting the encoder
- * functions in an upper, GPU-specific layer and a (hopefully)
- * GPU-agnostic lower layer: It's the GPU driver responsibility to
- * call the slave methods when appropriate.
- *
- * drm_i2c_encoder_init() provides a way to get an implementation of
- * this.
- */
-struct drm_encoder_slave {
- /**
- * @base: DRM encoder object.
- */
- struct drm_encoder base;
-
- /**
- * @slave_funcs: Slave encoder callbacks.
- */
- const struct drm_encoder_slave_funcs *slave_funcs;
-
- /**
- * @slave_priv: Slave encoder private data.
- */
- void *slave_priv;
-
- /**
- * @bus_priv: Bus specific data.
- */
- void *bus_priv;
-};
-#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
-
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info);
-
-
-/**
- * struct drm_i2c_encoder_driver
- *
- * Describes a device driver for an encoder connected to the GPU through an I2C
- * bus.
- */
-struct drm_i2c_encoder_driver {
- /**
- * @i2c_driver: I2C device driver description.
- */
- struct i2c_driver i2c_driver;
-
- /**
- * @encoder_init: Callback to allocate any per-encoder data structures
- * and to initialize the @slave_funcs and (optionally) @slave_priv
- * members of @encoder.
- */
- int (*encoder_init)(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder);
-
-};
-#define to_drm_i2c_encoder_driver(x) container_of((x), \
- struct drm_i2c_encoder_driver, \
- i2c_driver)
-
-/**
- * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
- * @encoder: The encoder
- */
-static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
-{
- return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv;
-}
-
-/**
- * drm_i2c_encoder_register - Register an I2C encoder driver
- * @owner: Module containing the driver.
- * @driver: Driver to be registered.
- */
-static inline int drm_i2c_encoder_register(struct module *owner,
- struct drm_i2c_encoder_driver *driver)
-{
- return i2c_register_driver(owner, &driver->i2c_driver);
-}
-
-/**
- * drm_i2c_encoder_unregister - Unregister an I2C encoder driver
- * @driver: Driver to be unregistered.
- */
-static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver)
-{
- i2c_del_driver(&driver->i2c_driver);
-}
-
-void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
-
-
-/*
- * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
- */
-
-void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
-bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
-void drm_i2c_encoder_commit(struct drm_encoder *encoder);
-void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
-void drm_i2c_encoder_save(struct drm_encoder *encoder);
-void drm_i2c_encoder_restore(struct drm_encoder *encoder);
-
-
-#endif
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 375737fd6c36..dd9a18f8de5a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -70,23 +70,6 @@ struct drm_fb_helper_surface_size {
*/
struct drm_fb_helper_funcs {
/**
- * @fb_probe:
- *
- * Driver callback to allocate and initialize the fbdev info structure.
- * Furthermore it also needs to allocate the DRM framebuffer used to
- * back the fbdev.
- *
- * This callback is mandatory.
- *
- * RETURNS:
- *
- * The driver should return 0 on success and a negative error code on
- * failure.
- */
- int (*fb_probe)(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-
- /**
* @fb_dirty:
*
* Driver callback to update the framebuffer memory. If set, fbdev
@@ -99,6 +82,33 @@ struct drm_fb_helper_funcs {
* 0 on success, or an error code otherwise.
*/
int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
+
+ /**
+ * @fb_restore:
+ *
+ * Driver callback to restore internal fbdev state. If set, fbdev
+ * emulation will invoke this callback after restoring the display
+ * mode.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_restore)(struct drm_fb_helper *helper);
+
+ /**
+ * @fb_set_suspend:
+ *
+	 * Driver callback to suspend or resume. If set, fbdev emulation will
+	 * invoke this callback during suspend and resume. Drivers should call
+	 * fb_set_suspend() from their implementation. If not set, fbdev
+	 * emulation will invoke fb_set_suspend() directly.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_set_suspend)(struct drm_fb_helper *helper, bool suspend);
};
/**
@@ -244,10 +254,9 @@ int drm_fb_helper_set_par(struct fb_info *info);
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force);
-struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
@@ -256,7 +265,9 @@ void drm_fb_helper_fill_info(struct fb_info *info,
void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len);
void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height);
+#ifdef CONFIG_FB_DEFERRED_IO
void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
+#endif
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
@@ -271,9 +282,6 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-
-void drm_fb_helper_lastclose(struct drm_device *dev);
-void drm_fb_helper_output_poll_changed(struct drm_device *dev);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -330,16 +338,6 @@ drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
}
-static inline struct fb_info *
-drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
-{
- return NULL;
-}
-
-static inline void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
-{
-}
-
static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
}
@@ -363,10 +361,12 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
+#ifdef CONFIG_FB_DEFERRED_IO
static inline void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
}
+#endif
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
bool suspend)
@@ -397,14 +397,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
{
return 0;
}
-
-static inline void drm_fb_helper_lastclose(struct drm_device *dev)
-{
-}
-
-static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
-}
#endif
#endif
diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h
index 2da7ee784133..fb3f2a9aa01a 100644
--- a/include/drm/drm_fbdev_dma.h
+++ b/include/drm/drm_fbdev_dma.h
@@ -3,13 +3,18 @@
#ifndef DRM_FBDEV_DMA_H
#define DRM_FBDEV_DMA_H
-struct drm_device;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp);
+int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_DMA_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe
#else
-static inline void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{ }
+#define DRM_FBDEV_DMA_DRIVER_OPS \
+ .fbdev_probe = NULL
#endif
#endif
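
With the setup function gone, DMA-based drivers plug the probe callback into &drm_driver via the new macro; a minimal sketch in which everything except the macro and header is hypothetical:

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* Expands to .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe,
	 * or to a NULL .fbdev_probe without CONFIG_DRM_FBDEV_EMULATION. */
	DRM_FBDEV_DMA_DRIVER_OPS,
	/* ... .name, .fops, .dumb_create, etc. ... */
};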
diff --git a/include/drm/drm_fbdev_generic.h b/include/drm/drm_fbdev_generic.h
deleted file mode 100644
index 75799342098d..000000000000
--- a/include/drm/drm_fbdev_generic.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-
-#ifndef DRM_FBDEV_GENERIC_H
-#define DRM_FBDEV_GENERIC_H
-
-struct drm_device;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
-#else
-static inline void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{ }
-#endif
-
-#endif
diff --git a/include/drm/drm_fbdev_shmem.h b/include/drm/drm_fbdev_shmem.h
new file mode 100644
index 000000000000..2fc708964d75
--- /dev/null
+++ b/include/drm/drm_fbdev_shmem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_SHMEM_H
+#define DRM_FBDEV_SHMEM_H
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_SHMEM_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_shmem_driver_fbdev_probe
+#else
+#define DRM_FBDEV_SHMEM_DRIVER_OPS \
+ .fbdev_probe = NULL
+#endif
+
+#endif
diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h
new file mode 100644
index 000000000000..ad4a10bb4c78
--- /dev/null
+++ b/include/drm/drm_fbdev_ttm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_TTM_H
+#define DRM_FBDEV_TTM_H
+
+#include <linux/stddef.h>
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_TTM_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe
+#else
+#define DRM_FBDEV_TTM_DRIVER_OPS \
+ .fbdev_probe = NULL
+#endif
+
+#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index ab230d3af138..1a3018e4a537 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -45,6 +45,8 @@ struct drm_printer;
struct device;
struct file;
+extern struct xarray drm_minors_xa;
+
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
* header include loops we need it here for now.
@@ -205,6 +207,13 @@ struct drm_file {
bool writeback_connectors;
/**
+ * @plane_color_pipeline:
+ *
+ * True if client understands plane color pipelines
+ */
+ bool plane_color_pipeline;
+
+ /**
* @was_master:
*
* This client has or had, master capability. Protected by struct
@@ -298,6 +307,9 @@ struct drm_file {
*
* Mapping of mm object handles to object pointers. Used by the GEM
* subsystem. Protected by @table_lock.
+ *
+ * Note that allocated entries might be NULL as a transient state when
+ * creating or deleting a handle.
*/
struct idr object_idr;
@@ -386,6 +398,25 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
+
+ /**
+ * @client_name:
+ *
+ * Userspace-provided name; useful for accounting and debugging.
+ */
+ const char *client_name;
+
+ /**
+ * @client_name_lock: Protects @client_name.
+ */
+ struct mutex client_name_lock;
+
+ /**
+ * @debugfs_client:
+ *
+ * debugfs directory for each client under a drm node.
+ */
+ struct dentry *debugfs_client;
};
/**
@@ -432,8 +463,14 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+__printf(2, 3)
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...);
+
void drm_file_update_pid(struct drm_file *);
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
@@ -477,6 +514,12 @@ struct drm_memory_stats {
enum drm_gem_object_status;
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats);
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz);
void drm_print_memory_stats(struct drm_printer *p,
const struct drm_memory_stats *stats,
enum drm_gem_object_status supported_status,
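
A brief sketch of the new fdinfo size printer from this hunk; the strings are examples, assuming the helper emits the usual "drm-<stat>-<region>:" key with a scaled unit.

/* Would emit something like "drm-total-vram0: 8192 KiB" into the
 * fdinfo printer p (SZ_1M comes from linux/sizes.h). */
drm_fdinfo_print_size(p, "drm", "total", "vram0", 8 * SZ_1M);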
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 81572d32db0c..33de514a5221 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -25,8 +25,9 @@
#ifndef DRM_FIXED_H
#define DRM_FIXED_H
-#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
typedef union dfixed {
u32 full;
@@ -77,6 +78,23 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
#define DRM_FIXED_EPSILON 1LL
#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+/**
+ * drm_sm2fixp - Convert a 1.31.32 signed-magnitude fixed point to a
+ * 32.32 two's-complement fixed point
+ * @a: the 1.31.32 signed-magnitude value to convert
+ *
+ * Return: the value as an s64 two's-complement fixed point
+ */
+static inline s64 drm_sm2fixp(__u64 a)
+{
+ if ((a & (1LL << 63))) {
+ return -(a & 0x7fffffffffffffffll);
+ } else {
+ return a;
+ }
+}
+
static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
@@ -214,4 +232,27 @@ static inline s64 drm_fixp_exp(s64 x)
return sum;
}
+static inline int fxp_q4_from_int(int val_int)
+{
+ return val_int << 4;
+}
+
+static inline int fxp_q4_to_int(int val_q4)
+{
+ return val_q4 >> 4;
+}
+
+static inline int fxp_q4_to_int_roundup(int val_q4)
+{
+ return (val_q4 + 0xf) >> 4;
+}
+
+static inline int fxp_q4_to_frac(int val_q4)
+{
+ return val_q4 & 0xf;
+}
+
+#define FXP_Q4_FMT "%d.%04d"
+#define FXP_Q4_ARGS(val_q4) fxp_q4_to_int(val_q4), (fxp_q4_to_frac(val_q4) * 625)
+
#endif
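
To illustrate the Q4 helpers with made-up numbers: a Q4 value stores sixteenths, and the format macros scale the low nibble by 625 to print four fractional digits (1/16 = 0.0625).

int bpp_q4 = fxp_q4_from_int(6) + 4;	/* 6 + 4/16 = 6.25 in Q4 */

/* Prints "bpp: 6.2500" (0x4 * 625 == 2500). */
printk(KERN_INFO "bpp: " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_q4));

/* 6.25 truncates to 6 and rounds up to 7. */
WARN_ON(fxp_q4_to_int(bpp_q4) != 6);
WARN_ON(fxp_q4_to_int_roundup(bpp_q4) != 7);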
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 428d81afe215..2b5c1aef80b0 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -82,8 +82,10 @@ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pi
const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
@@ -96,9 +98,21 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip,
@@ -110,17 +124,16 @@ void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *d
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
-
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
- const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out);
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
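
A hedged example of calling one of these converters during a damage update; the surrounding setup is assumed, and passing a NULL dst_pitch selects the destination's default line length.

#include <drm/drm_format_helper.h>

/* Convert the damaged clip of an XRGB8888 shadow buffer into a
 * little-endian RGB565 scanout buffer (names are hypothetical). */
static void my_flush_rect(struct iosys_map *dst, const struct iosys_map *src,
			  struct drm_framebuffer *fb, struct drm_rect *clip,
			  struct drm_format_conv_state *state)
{
	drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, state);
}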
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index ccf91daa4307..471784426857 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -54,7 +54,6 @@
#endif
struct drm_device;
-struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
@@ -309,10 +308,11 @@ const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ u32 pixel_format, u64 modifier);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
+uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode);
unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 668077009fce..38b24fc8978d 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -23,6 +23,7 @@
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
unsigned num_clips);
};
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))
+
/**
* struct drm_framebuffer - frame buffer object
*
@@ -189,6 +192,10 @@ struct drm_framebuffer {
*/
int flags;
/**
+ * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+ */
+ unsigned int internal_flags;
+ /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bae4865b2101..8d48d2af2649 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,6 +35,7 @@
*/
#include <linux/kref.h>
+#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -48,19 +49,21 @@ struct drm_gem_object;
* enum drm_gem_object_status - bitmask of object state for fdinfo reporting
* @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned)
* @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
*
* Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
- * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if
- * it still active or not resident, in which case drm_show_fdinfo() will not
+ * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
+ * and be active or not resident, in which case drm_show_fdinfo() will not
* account for it as purgeable. So drivers do not need to check if the buffer
- * is idle and resident to return this bit. (Ie. userspace can mark a buffer
- * as purgeable even while it is still busy on the GPU.. it does not _actually_
- * become puregeable until it becomes idle. The status gem object func does
- * not need to consider this.)
+ * is idle and resident to return this bit, i.e. userspace can mark a buffer as
+ * purgeable even while it is still busy on the GPU. It will not get reported in
+ * purgeable stats until it becomes idle. The status gem object func does
+ * not need to consider this.
*/
enum drm_gem_object_status {
DRM_GEM_OBJECT_RESIDENT = BIT(0),
DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+ DRM_GEM_OBJECT_ACTIVE = BIT(2),
};
/**
@@ -123,7 +126,8 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
+ * Pin backing buffer in memory, such that dma-buf importers can
+ * access it. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -156,7 +160,8 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -166,7 +171,8 @@ struct drm_gem_object_funcs {
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
@@ -189,7 +195,8 @@ struct drm_gem_object_funcs {
* @evict:
*
* Evicts gem object out from memory. Used by the drm_gem_object_evict()
- * helper. Returns 0 on success, -errno otherwise.
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
*
* This callback is optional.
*/
@@ -391,19 +398,34 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
- * @gpuva:
+ * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
*
- * Provides the list of GPU VAs attached to this GEM object.
+	 * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
+	 * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected
+	 * by the GEM's &dma_resv lock.
*
- * Drivers should lock list accesses with the GEMs &dma_resv lock
- * (&drm_gem_object.resv) or a custom lock if one is provided.
+ * Note that all entries in this list must agree on whether
+ * DRM_GPUVM_IMMEDIATE_MODE is set.
*/
struct {
+ /**
+ * @gpuva.list: list of GPUVM mappings attached to this GEM object.
+ *
+ * Drivers should lock list accesses with either the GEMs
+ * &dma_resv lock (&drm_gem_object.resv) or the
+ * &drm_gem_object.gpuva.lock mutex.
+ */
struct list_head list;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map *lock_dep_map;
-#endif
+ /**
+ * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
+ * when DRM_GPUVM_IMMEDIATE_MODE is used.
+ *
+ * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
+ * safe to take this mutex during the fence signalling path, so
+ * do not allocate memory while holding this lock. Otherwise,
+ * the &dma_resv lock should be used.
+ */
+ struct mutex lock;
} gpuva;
/**
@@ -447,7 +469,8 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
- .mmap = drm_gem_mmap
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
@@ -472,6 +495,9 @@ void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+int drm_gem_object_init_with_mnt(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size,
+ struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
@@ -530,8 +556,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
-int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
-void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map);
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
@@ -549,12 +575,14 @@ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
- unsigned int nr_to_scan,
- unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj));
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket);
-int drm_gem_evict(struct drm_gem_object *obj);
+int drm_gem_evict_locked(struct drm_gem_object *obj);
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
@@ -569,27 +597,25 @@ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_obje
return (obj->handle_count > 1) || obj->dma_buf;
}
-#ifdef CONFIG_LOCKDEP
/**
- * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
- * @obj: the &drm_gem_object
- * @lock: the lock used to protect the gpuva list. The locking primitive
- * must contain a dep_map field.
+ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
+ * @obj: the GEM object
*
- * Call this if you're not proctecting access to the gpuva list with the
- * dma-resv lock, but with a custom lock.
+ * Returns:
+ * True if the GEM object's buffer has been imported, false otherwise
*/
-#define drm_gem_gpuva_set_lock(obj, lock) \
- if (!WARN((obj)->gpuva.lock_dep_map, \
- "GEM GPUVA lock should be set only once.")) \
- (obj)->gpuva.lock_dep_map = &(lock)->dep_map
-#define drm_gem_gpuva_assert_lock_held(obj) \
- lockdep_assert((obj)->gpuva.lock_dep_map ? \
- lock_is_held((obj)->gpuva.lock_dep_map) : \
+static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
+{
+ return !!obj->import_attach;
+}
+
+#ifdef CONFIG_LOCKDEP
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
+ lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
+ lockdep_is_held(&(obj)->gpuva.lock) : \
dma_resv_held((obj)->resv))
#else
-#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
-#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
#endif
/**
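
Two hedged usage notes on the helpers reworked above (the driver context is hypothetical): drm_gem_is_imported() replaces open-coded import_attach checks, and the lock assertion now picks the expected lock from the GPUVM's mode.

/* Skip work the dma-buf exporter is responsible for. */
if (drm_gem_is_imported(obj))
	return 0;

/* When walking obj->gpuva.list, assert the lock matching the VM's
 * mode: gpuva.lock under DRM_GPUVM_IMMEDIATE_MODE, dma-resv otherwise. */
drm_gem_gpuva_assert_lock_held(gpuvm, obj);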
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index a827bde494f6..f2678e7ecb98 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -267,6 +267,7 @@ unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
.read = drm_read,\
.llseek = noop_llseek,\
.mmap = drm_gem_mmap,\
+ .fop_flags = FOP_UNSIGNED_OFFSET, \
DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
}
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index d302521f3dd4..24f1fd40d553 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -8,6 +8,7 @@ struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
@@ -24,17 +25,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
@@ -47,6 +52,7 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi
(((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index efbc9f27312b..589f7bfe7506 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -37,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+ * Pages are hard-pinned and reside in memory if count
+ * greater than zero. Otherwise, when count is zero, the pages are
+ * allowed to be evicted and purged by memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
@@ -71,7 +82,7 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
* The address are un-mapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
/**
* @pages_mark_dirty_on_put:
@@ -96,31 +107,36 @@ struct drm_gem_shmem_object {
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+ size_t size,
+ struct vfsmount *gemfs);
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
- struct iosys_map *map);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
- !shmem->base.dma_buf && !shmem->base.import_attach;
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
+ !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -211,12 +227,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_
}
/*
- * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
* @obj: GEM object
* @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
*
- * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vmap handler.
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -226,7 +242,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- return drm_gem_shmem_vmap(shmem, map);
+ return drm_gem_shmem_vmap_locked(shmem, map);
}
/*
@@ -234,15 +250,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
* @obj: GEM object
* @map: Kernel virtual address where the SHMEM GEM object was mapped
*
- * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
- * use it as their &drm_gem_object_funcs.vunmap handler.
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
*/
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
struct iosys_map *map)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- drm_gem_shmem_vunmap(shmem, map);
+ drm_gem_shmem_vunmap_locked(shmem, map);
}
/**
@@ -273,15 +289,18 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *buf);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
*
- * This macro provides a shortcut for setting the shmem GEM operations in
- * the &drm_driver structure.
+ * This macro provides a shortcut for setting the shmem GEM operations
+ * in the &drm_driver structure. Drivers that do not require an s/g table
+ * for imported buffers should use this.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .gem_prime_import = drm_gem_shmem_prime_import_no_map, \
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
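
Because the mapping helpers are now explicitly _locked, callers outside paths that already hold the reservation lock must take it themselves; a minimal sketch under that assumption:

struct iosys_map map;
int ret;

dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_vmap_locked(shmem, &map);
dma_resv_unlock(shmem->base.resv);
if (ret)
	return ret;

/* ... access the buffer through the mapping ... */

dma_resv_lock(shmem->base.resv, NULL);
drm_gem_shmem_vunmap_locked(shmem, &map);
dma_resv_unlock(shmem->base.resv);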
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 9a73f786f4ad..2dd42bed679d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -17,7 +17,6 @@
struct drm_mode_create_dumb;
struct drm_plane;
struct drm_plane_state;
-struct drm_simple_display_pipe;
struct filp;
struct vm_area_struct;
@@ -95,8 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
struct iosys_map *map);
@@ -137,18 +134,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
.prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
-/*
- * Helpers for struct drm_simple_display_pipe_funcs
- */
-
-int drm_gem_vram_simple_display_pipe_prepare_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state);
-
-void drm_gem_vram_simple_display_pipe_cleanup_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
-
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for
* &struct drm_driver
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
new file mode 100644
index 000000000000..632e100e6efb
--- /dev/null
+++ b/include/drm/drm_gpusvm.h
@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __DRM_GPUSVM_H__
+#define __DRM_GPUSVM_H__
+
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
+
+struct dev_pagemap_ops;
+struct drm_device;
+struct drm_gpusvm;
+struct drm_gpusvm_notifier;
+struct drm_gpusvm_ops;
+struct drm_gpusvm_range;
+struct drm_pagemap;
+struct drm_pagemap_addr;
+
+/**
+ * struct drm_gpusvm_ops - Operations structure for GPU SVM
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM).
+ * These operations are provided by the GPU driver to manage SVM ranges and
+ * notifiers.
+ */
+struct drm_gpusvm_ops {
+ /**
+ * @notifier_alloc: Allocate a GPU SVM notifier (optional)
+ *
+ * Allocate a GPU SVM notifier.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
+ */
+ struct drm_gpusvm_notifier *(*notifier_alloc)(void);
+
+ /**
+ * @notifier_free: Free a GPU SVM notifier (optional)
+ * @notifier: Pointer to the GPU SVM notifier to be freed
+ *
+ * Free a GPU SVM notifier.
+ */
+ void (*notifier_free)(struct drm_gpusvm_notifier *notifier);
+
+ /**
+ * @range_alloc: Allocate a GPU SVM range (optional)
+ * @gpusvm: Pointer to the GPU SVM
+ *
+ * Allocate a GPU SVM range.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
+ */
+ struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);
+
+ /**
+ * @range_free: Free a GPU SVM range (optional)
+ * @range: Pointer to the GPU SVM range to be freed
+ *
+ * Free a GPU SVM range.
+ */
+ void (*range_free)(struct drm_gpusvm_range *range);
+
+ /**
+ * @invalidate: Invalidate GPU SVM notifier (required)
+ * @gpusvm: Pointer to the GPU SVM
+ * @notifier: Pointer to the GPU SVM notifier
+ * @mmu_range: Pointer to the mmu_notifier_range structure
+ *
+ * Invalidate the GPU page tables. It can safely walk the notifier range
+ * RB tree/list in this function. Called while holding the notifier lock.
+ */
+ void (*invalidate)(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range);
+};
+
+/**
+ * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: MMU interval notifier
+ * @itree: Interval tree node for the notifier (inserted in GPU SVM)
+ * @entry: List entry for fast interval tree traversal
+ * @root: Cached root node of the RB tree containing ranges
+ * @range_list: List head containing ranges in the same order they appear
+ *              in the interval tree. This is useful for iterating over
+ *              ranges while modifying the RB tree.
+ * @flags: Flags for notifier
+ * @flags.removed: Flag indicating whether the MMU interval notifier has been
+ * removed
+ *
+ * This structure represents a GPU SVM notifier.
+ */
+struct drm_gpusvm_notifier {
+ struct drm_gpusvm *gpusvm;
+ struct mmu_interval_notifier notifier;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct rb_root_cached root;
+ struct list_head range_list;
+ struct {
+ u32 removed : 1;
+ } flags;
+};
+
+/**
+ * struct drm_gpusvm_pages_flags - Structure representing a GPU SVM pages flags
+ *
+ * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @unmapped: Flag indicating if the pages have been unmapped
+ * @partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @has_devmem_pages: Flag indicating if the pages have devmem pages
+ * @has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ * @__flags: Flags for pages in u16 form (used for READ_ONCE)
+ */
+struct drm_gpusvm_pages_flags {
+ union {
+ struct {
+ /* All flags below must be set upon creation */
+ u16 migrate_devmem : 1;
+ /* All flags below must be set / cleared under notifier lock */
+ u16 unmapped : 1;
+ u16 partial_unmap : 1;
+ u16 has_devmem_pages : 1;
+ u16 has_dma_mapping : 1;
+ };
+ u16 __flags;
+ };
+};
+
+/**
+ * struct drm_gpusvm_pages - Structure representing a GPU SVM mapped pages
+ *
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this is assuming only one drm_pagemap per range is allowed.
+ * @notifier_seq: Notifier sequence number of the range's pages
+ * @flags: Flags for range
+ * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
+ * @flags.unmapped: Flag indicating if the range has been unmapped
+ * @flags.partial_unmap: Flag indicating if the range has been partially unmapped
+ * @flags.has_devmem_pages: Flag indicating if the range has devmem pages
+ * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping
+ */
+struct drm_gpusvm_pages {
+ struct drm_pagemap_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ unsigned long notifier_seq;
+ struct drm_gpusvm_pages_flags flags;
+};
+
+/**
+ * struct drm_gpusvm_range - Structure representing a GPU SVM range
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier
+ * @refcount: Reference count for the range
+ * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
+ * @entry: List entry for fast interval tree traversal
+ * @pages: The pages for this range.
+ *
+ * This structure represents a GPU SVM range used for tracking memory ranges
+ * mapped in a DRM device.
+ */
+struct drm_gpusvm_range {
+ struct drm_gpusvm *gpusvm;
+ struct drm_gpusvm_notifier *notifier;
+ struct kref refcount;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct drm_gpusvm_pages pages;
+};
+
+/**
+ * struct drm_gpusvm - GPU SVM structure
+ *
+ * @name: Name of the GPU SVM
+ * @drm: Pointer to the DRM device structure
+ * @mm: Pointer to the mm_struct for the address space
+ * @mm_start: Start address of GPU SVM
+ * @mm_range: Range of the GPU SVM
+ * @notifier_size: Size of individual notifiers
+ * @ops: Pointer to the operations structure for GPU SVM
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order.
+ * @num_chunks: Number of chunks
+ * @notifier_lock: Read-write semaphore for protecting notifier operations
+ * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
+ * @notifier_list: list head containing notifiers in the same order they
+ *                 appear in the interval tree. This is useful for iterating
+ *                 over notifiers while modifying the RB tree.
+ *
+ * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
+ * memory ranges mapped in a DRM (Direct Rendering Manager) device.
+ *
+ * No reference counting is provided, as this is expected to be embedded in the
+ * driver VM structure along with the struct drm_gpuvm, which handles reference
+ * counting.
+ */
+struct drm_gpusvm {
+ const char *name;
+ struct drm_device *drm;
+ struct mm_struct *mm;
+ unsigned long mm_start;
+ unsigned long mm_range;
+ unsigned long notifier_size;
+ const struct drm_gpusvm_ops *ops;
+ const unsigned long *chunk_sizes;
+ int num_chunks;
+ struct rw_semaphore notifier_lock;
+ struct rb_root_cached root;
+ struct list_head notifier_list;
+#ifdef CONFIG_LOCKDEP
+ /**
+ * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove with a driver provided lock.
+ */
+ struct lockdep_map *lock_dep_map;
+#endif
+};
+
+/**
+ * struct drm_gpusvm_ctx - DRM GPU SVM context
+ *
+ * @device_private_page_owner: The device-private page owner to use for
+ * this operation
+ * @check_pages_threshold: Check CPU pages for presence if the chunk is less
+ *                         than or equal to this threshold. If not present,
+ *                         reduce the chunk size.
+ * @timeslice_ms: The minimum time, in milliseconds, for which a piece of
+ *                memory remains with either exclusive GPU or CPU access.
+ * @in_notifier: entering from an MMU notifier
+ * @read_only: operating on read-only memory
+ * @devmem_possible: possible to use device memory
+ * @devmem_only: use only device memory
+ * @allow_mixed: Allow mixed mappings in get pages. Mixing between system
+ *               memory and a single dpagemap is supported; mixing between
+ *               multiple dpagemaps is unsupported.
+ *
+ * Context that DRM GPUSVM is operating in (i.e. user arguments).
+ */
+struct drm_gpusvm_ctx {
+ void *device_private_page_owner;
+ unsigned long check_pages_threshold;
+ unsigned long timeslice_ms;
+ unsigned int in_notifier :1;
+ unsigned int read_only :1;
+ unsigned int devmem_possible :1;
+ unsigned int devmem_only :1;
+ unsigned int allow_mixed :1;
+};
+
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks);
+
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
+
+void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range);
+
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range);
+
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end);
+
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range);
+
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages);
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this to annotate drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove.
+ */
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
+ do { \
+ if (!WARN((gpusvm)->lock_dep_map, \
+ "GPUSVM range lock should be set only once."))\
+ (gpusvm)->lock_dep_map = &(lock)->dep_map; \
+ } while (0)
+#else
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
+#endif
+
+/**
+ * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstracts the client's usage of the GPU SVM notifier lock; takes the lock.
+ */
+#define drm_gpusvm_notifier_lock(gpusvm__) \
+ down_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstracts the client's usage of the GPU SVM notifier lock; drops the lock.
+ */
+#define drm_gpusvm_notifier_unlock(gpusvm__) \
+ up_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_range_start() - GPU SVM range start address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range start address
+ */
+static inline unsigned long
+drm_gpusvm_range_start(struct drm_gpusvm_range *range)
+{
+ return range->itree.start;
+}
+
+/**
+ * drm_gpusvm_range_end() - GPU SVM range end address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range end address
+ */
+static inline unsigned long
+drm_gpusvm_range_end(struct drm_gpusvm_range *range)
+{
+ return range->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_range_size() - GPU SVM range size
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range size
+ */
+static inline unsigned long
+drm_gpusvm_range_size(struct drm_gpusvm_range *range)
+{
+ return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
+}
+
+/**
+ * drm_gpusvm_notifier_start() - GPU SVM notifier start address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier start address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.start;
+}
+
+/**
+ * drm_gpusvm_notifier_end() - GPU SVM notifier end address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier end address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_notifier_size() - GPU SVM notifier size
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier size
+ */
+static inline unsigned long
+drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
+{
+ return drm_gpusvm_notifier_end(notifier) -
+ drm_gpusvm_notifier_start(notifier);
+}
+
+/**
+ * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
+ * @range: a pointer to the current GPU SVM range
+ *
+ * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
+ * current range is the last one or if the input range is NULL.
+ */
+static inline struct drm_gpusvm_range *
+__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
+{
+ if (range && !list_is_last(&range->entry,
+ &range->notifier->range_list))
+ return list_next_entry(range, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges. If set, it indicates the start
+ *           of the iterator. If NULL, drm_gpusvm_range_find() is called to
+ *           find the first range.
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
+ * to use while holding the driver SVM lock or the notifier lock.
+ */
+#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \
+ for ((range__) = (range__) ?: \
+ drm_gpusvm_range_find((notifier__), (start__), (end__)); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for temporary storage of the next range
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static inline struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the iteration range
+ * @end__: End address of the iteration range
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for temporary storage of the next notifier
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the iteration range
+ * @end__: End address of the iteration range
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
+#endif /* __DRM_GPUSVM_H__ */
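For illustration, a minimal sketch of walking the interval trees above under
the notifier lock; gpusvm, start and end are assumed to be supplied by the
caller::

    struct drm_gpusvm_notifier *notifier;
    struct drm_gpusvm_range *range;

    drm_gpusvm_notifier_lock(gpusvm);
    drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
        range = NULL;
        drm_gpusvm_for_each_range(range, notifier, start, end)
            pr_debug("range [%lx, %lx)\n",
                     drm_gpusvm_range_start(range),
                     drm_gpusvm_range_end(range));
    }
    drm_gpusvm_notifier_unlock(gpusvm);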
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 00d4e43b76b6..fdfc575b2603 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -27,6 +27,7 @@
#include <linux/dma-resv.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/types.h>
@@ -103,7 +104,7 @@ struct drm_gpuva {
} va;
/**
- * @gem: structure containing the &drm_gem_object and it's offset
+ * @gem: structure containing the &drm_gem_object and its offset
*/
struct {
/**
@@ -152,6 +153,7 @@ void drm_gpuva_remove(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);
+void drm_gpuva_unlink_defer(struct drm_gpuva *va);
struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
@@ -160,15 +162,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
-static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
-{
- va->va.addr = addr;
- va->va.range = range;
- va->gem.obj = obj;
- va->gem.offset = offset;
-}
-
/**
* drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
* invalidated
@@ -206,9 +199,19 @@ enum drm_gpuvm_flags {
DRM_GPUVM_RESV_PROTECTED = BIT(0),
/**
+ * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
+ * for modifying the GPUVM during the fence signalling path
+ *
+ * When set, gpuva.lock is used to protect gpuva.list in all GEM
+ * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is
+ * used.
+ */
+ DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
+
+ /**
* @DRM_GPUVM_USERBITS: user defined bits
*/
- DRM_GPUVM_USERBITS = BIT(1),
+ DRM_GPUVM_USERBITS = BIT(2),
};
/**
@@ -330,6 +333,11 @@ struct drm_gpuvm {
*/
spinlock_t lock;
} evict;
+
+ /**
+ * @bo_defer: structure holding vm_bos that need to be destroyed
+ */
+ struct llist_head bo_defer;
};
void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
@@ -379,6 +387,19 @@ drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
}
/**
+ * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
+}
+
+/**
* drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
* @gpuvm__: the &drm_gpuvm
*
@@ -700,6 +721,12 @@ struct drm_gpuvm_bo {
* &drm_gpuvms evict list.
*/
struct list_head evict;
+
+ /**
+ * @list.entry.bo_defer: List entry to attach to
+ * the &drm_gpuvms bo_defer list.
+ */
+ struct llist_node bo_defer;
} entry;
} list;
};
@@ -732,6 +759,9 @@ drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+bool drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo);
+void drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm);
+
struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
@@ -751,9 +781,10 @@ drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
- drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
drm_gpuvm_bo_evict(vm_bo, evict);
+ }
}
void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
@@ -812,6 +843,11 @@ enum drm_gpuva_op_type {
* @DRM_GPUVA_OP_PREFETCH: the prefetch op type
*/
DRM_GPUVA_OP_PREFETCH,
+
+ /**
+ * @DRM_GPUVA_OP_DRIVER: the driver defined op type
+ */
+ DRM_GPUVA_OP_DRIVER,
};
/**
@@ -838,7 +874,7 @@ struct drm_gpuva_op_map {
} va;
/**
- * @gem: structure containing the &drm_gem_object and it's offset
+ * @gem: structure containing the &drm_gem_object and its offset
*/
struct {
/**
@@ -1053,10 +1089,23 @@ struct drm_gpuva_ops {
*/
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+ /**
+ * @map: struct drm_gpuva_op_map
+ */
+ struct drm_gpuva_op_map map;
+};
+
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req);
+
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
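For illustration, a sketch of the new request-based calling convention,
assuming addr, range, obj and offset come from the driver's bind path::

    struct drm_gpuvm_map_req req = {
        .map = {
            .va.addr = addr,
            .va.range = range,
            .gem.obj = obj,
            .gem.offset = offset,
        },
    };
    struct drm_gpuva_ops *ops;

    ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
    if (IS_ERR(ops))
        return PTR_ERR(ops);
    /* ... process the ops list ... */
    drm_gpuva_ops_free(gpuvm, ops);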
@@ -1074,8 +1123,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
struct drm_gpuva_op_map *op)
{
- drm_gpuva_init(va, op->va.addr, op->va.range,
- op->gem.obj, op->gem.offset);
+ va->va.addr = op->va.addr;
+ va->va.range = op->va.range;
+ va->gem.obj = op->gem.obj;
+ va->gem.offset = op->gem.offset;
}
/**
@@ -1184,11 +1235,11 @@ struct drm_gpuvm_ops {
/**
* @sm_step_unmap: called from &drm_gpuvm_sm_map and
- * &drm_gpuvm_sm_unmap to unmap an existent mapping
+ * &drm_gpuvm_sm_unmap to unmap an existing mapping
*
- * This callback is called when existent mapping needs to be unmapped.
+	 * This callback is called when an existing mapping needs to be unmapped.
* This is the case when either a newly requested mapping encloses an
- * existent mapping or an unmap of an existent mapping is requested.
+ * existing mapping or an unmap of an existing mapping is requested.
*
* The &priv pointer matches the one the driver passed to
* &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
@@ -1200,12 +1251,18 @@ struct drm_gpuvm_ops {
};
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
+int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ struct drm_gpuvm_map_req *req);
+
+int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range);
+
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op);
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index e7cc17ee4934..4948379237e9 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -9,6 +9,7 @@
#include <kunit/test.h>
+struct drm_connector;
struct drm_crtc_funcs;
struct drm_crtc_helper_funcs;
struct drm_device;
@@ -95,8 +96,6 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
-struct drm_modeset_acquire_ctx *
-drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
struct drm_atomic_state *
drm_kunit_helper_atomic_state_alloc(struct kunit *test,
@@ -120,4 +119,18 @@ drm_kunit_helper_create_crtc(struct kunit *test,
const struct drm_crtc_funcs *funcs,
const struct drm_crtc_helper_funcs *helper_funcs);
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx);
+
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode);
+
+struct drm_display_mode *
+drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ u8 video_code);
+
#endif // DRM_KUNIT_HELPERS_H_
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index f547b09ca023..72bfac002c06 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -127,4 +127,27 @@ void __drmm_mutex_release(struct drm_device *dev, void *res);
drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \
}) \
+void __drmm_workqueue_release(struct drm_device *device, void *wq);
+
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue(). The
+ * allocated workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: pointer to the workqueue on success, ERR_PTR() encoding a negative errno otherwise.
+ */
+#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
+ ({ \
+ struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \
+ wq ? ({ \
+ int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \
+ ret ? ERR_PTR(ret) : wq; \
+ }) : ERR_PTR(-ENOMEM); \
+ })
+
#endif
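For illustration, a sketch of the managed workqueue helper; the workqueue
name and flags are arbitrary and drm is the driver's struct drm_device::

    struct workqueue_struct *wq;

    wq = drmm_alloc_ordered_workqueue(drm, "my-driver-wq", WQ_MEM_RECLAIM);
    if (IS_ERR(wq))
        return PTR_ERR(wq);
    /* No destroy_workqueue() needed: released on the final drm_dev_put(). */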
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index e8e0f8d39f3a..f45f9612c0bc 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -57,6 +57,11 @@ struct mipi_dbi {
struct spi_device *spi;
/**
+ * @write_memory_bpw: Bits per word used on a MIPI_DCS_WRITE_MEMORY_START transfer
+ */
+ unsigned int write_memory_bpw;
+
+ /**
* @dc: Optional D/C gpio.
*/
struct gpio_desc *dc;
@@ -97,6 +102,11 @@ struct mipi_dbi_dev {
struct drm_display_mode mode;
/**
+ * @pixel_format: Native pixel format (DRM_FORMAT\_\*)
+ */
+ u32 pixel_format;
+
+ /**
* @tx_buf: Buffer used for transfer (copy clip rect area)
*/
u16 *tx_buf;
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 82b1cc434ea3..3aba7b380c8d 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -10,6 +10,7 @@
#define __DRM_MIPI_DSI_H__
#include <linux/device.h>
+#include <linux/delay.h>
struct mipi_dsi_host;
struct mipi_dsi_device;
@@ -129,8 +130,6 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
-/* flush display FIFO on vsync pulse */
-#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
@@ -197,10 +196,34 @@ struct mipi_dsi_device {
struct drm_dsc_config *dsc;
};
+/**
+ * struct mipi_dsi_multi_context - Context to call multiple MIPI DSI funcs in a row
+ */
+struct mipi_dsi_multi_context {
+ /**
+ * @dsi: Pointer to the MIPI DSI device
+ */
+ struct mipi_dsi_device *dsi;
+
+ /**
+ * @accum_err: Storage for the accumulated error over the multiple calls
+ *
+ * Init to 0. If a function encounters an error then the error code
+ * will be stored here. If you call a function and this points to a
+ * non-zero value then the function will be a noop. This allows calling
+ * a function many times in a row and just checking the error at the
+ * end to see if any of them failed.
+ */
+ int accum_err;
+};
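For illustration, a sketch of the accumulated-error pattern; dsi and the
120 ms sleep-out delay are assumptions taken from a typical panel::

    struct mipi_dsi_multi_context ctx = { .dsi = dsi };

    mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
    mipi_dsi_msleep(&ctx, 120);
    mipi_dsi_dcs_set_display_on_multi(&ctx);

    return ctx.accum_err; /* 0 on success, first error otherwise */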
+
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev)
+extern const struct bus_type mipi_dsi_bus_type;
+#define dev_is_mipi_dsi(dev) ((dev)->bus == &mipi_dsi_bus_type)
+
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
* given pixel format defined by the MIPI DSI
@@ -254,10 +277,38 @@ int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable,
int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
const struct drm_dsc_picture_parameter_set *pps);
+void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx,
+ bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector);
+void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx,
+ bool enable);
+void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx,
+ const struct drm_dsc_picture_parameter_set *pps);
+
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
+void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ const void *payload, size_t size);
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
+
+#define mipi_dsi_msleep(ctx, delay) \
+ do { \
+ if (!(ctx)->accum_err) \
+ msleep(delay); \
+ } while (0)
+
+#define mipi_dsi_usleep_range(ctx, min, max) \
+ do { \
+ if (!(ctx)->accum_err) \
+ usleep_range(min, max); \
+ } while (0)
/**
* enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
@@ -279,10 +330,20 @@ enum mipi_dsi_dcs_tear_mode {
ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
const void *data, size_t len);
+int mipi_dsi_dcs_write_buffer_chatty(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ const void *data, size_t len);
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len);
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
@@ -295,7 +356,6 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
@@ -309,42 +369,176 @@ int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
u16 *brightness);
+void mipi_dsi_dcs_nop_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_enter_sleep_mode_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_exit_sleep_mode_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
+ enum mipi_dsi_dcs_tear_mode mode);
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness);
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format);
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline);
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
+
+/**
+ * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
/**
- * mipi_dsi_generic_write_seq - transmit data using a generic write packet
- * @dsi: DSI peripheral device
+ * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a
+ * generic write packet
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
* @seq: buffer containing the payload
*/
-#define mipi_dsi_generic_write_seq(dsi, seq...) \
- do { \
- static const u8 d[] = { seq }; \
- struct device *dev = &dsi->dev; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) { \
- dev_err_ratelimited(dev, "transmit data failed: %d\n", \
- ret); \
- return ret; \
- } \
+#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \
+ do { \
+ const u8 d[] = { seq }; \
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
/**
- * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
- * @dsi: DSI peripheral device
+ * mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
* @cmd: Command
* @seq: buffer containing data to be transmitted
*/
-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
- do { \
- static const u8 d[] = { cmd, seq }; \
- struct device *dev = &dsi->dev; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) { \
- dev_err_ratelimited( \
- dev, "sending command %#02x failed: %d\n", \
- cmd, ret); \
- return ret; \
- } \
+#define mipi_dsi_dcs_write_seq_multi(ctx, cmd, seq...) \
+ do { \
+ static const u8 d[] = { cmd, seq }; \
+ mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant
+ * payload
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \
+ do { \
+ const u8 d[] = { cmd, seq }; \
+ mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual - send the same MIPI DSI command to two interfaces
+ *
+ * This macro will send the specified MIPI DSI command twice, once to each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that the _func parameter cannot accept a macro such as
+ * mipi_dsi_generic_write_multi() or mipi_dsi_dcs_write_buffer_multi(). See
+ * mipi_dsi_dual_generic_write_multi() and
+ * mipi_dsi_dual_dcs_write_buffer_multi() instead.
+ *
+ * WARNING: This macro reuses the _func argument and the optional trailing
+ * arguments twice each, which may cause unintended side effects. For example,
+ * adding the postfix increment ++ operator to one of the arguments to be
+ * passed to _func will cause the variable to be incremented twice instead of
+ * once and the variable will be its original value + 1 when sent to _dsi2.
+ *
+ * @_func: MIPI DSI function to pass context and arguments into
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of the MIPI DSI command
+ * @_dsi2: Second DSI interface to act as recipient of the MIPI DSI command
+ * @...: Arguments to pass to MIPI DSI function or macro
+ */
+
+#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...) \
+ do { \
+ struct mipi_dsi_multi_context *_ctxcpy = (_ctx); \
+ _ctxcpy->dsi = (_dsi1); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ _ctxcpy->dsi = (_dsi2); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ } while (0)
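For illustration, a sketch of driving both links of a dual-link panel;
ctx, dsi_a and dsi_b are assumed to be set up by the caller::

    mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi, &ctx, dsi_a, dsi_b);
    mipi_dsi_msleep(&ctx, 120);
    mipi_dsi_dual(mipi_dsi_dcs_set_display_on_multi, &ctx, dsi_a, dsi_b);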
+
+/**
+ * mipi_dsi_dual_generic_write_seq_multi - transmit data using a generic write
+ * packet to two DSI interfaces, one after the other
+ *
+ * This macro will send the specified generic packet twice, once to each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the packet to the first DSI
+ * interface, the packet will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_generic_write_seq_multi(_ctx, _dsi1, _dsi2, _seq...) \
+ do { \
+ static const u8 d[] = { _seq }; \
+ mipi_dsi_dual_generic_write_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual_dcs_write_seq_multi - transmit a DCS command with payload to
+ * two DSI interfaces, one after the other
+ *
+ * This macro will send the specified DCS command with payload twice, once to
+ * each of the two interfaces supplied. This is useful for reducing duplication
+ * of code in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the payload to the first DSI
+ * interface, the payload will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_cmd: Command
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) \
+ do { \
+ static const u8 d[] = { _cmd, _seq }; \
+ mipi_dsi_dual_dcs_write_buffer_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
} while (0)
/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index ac33ba1b18bc..16ce0e8f36a6 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -48,7 +48,7 @@
#endif
#include <linux/types.h>
-#include <drm/drm_print.h>
+struct drm_printer;
#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
@@ -463,7 +463,6 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
}
void drm_mm_remove_node(struct drm_mm_node *node);
-void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 8de3c9a5f61b..895fb820dba0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -82,6 +82,7 @@ struct drm_mode_config_funcs {
*/
struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
/**
@@ -95,23 +96,7 @@ struct drm_mode_config_funcs {
* The format information specific to the given fb metadata, or
* NULL if none is found.
*/
- const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
-
- /**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation use drm_kms_helper_hotplug_event()
- * to call this hook to inform the fbdev helper of output changes.
- *
- * This hook is deprecated, drivers should instead use
- * drm_fbdev_generic_setup() which takes care of any necessary
- * hotplug event forwarding already without further involvement by
- * the driver.
- */
- void (*output_poll_changed)(struct drm_device *dev);
+ const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier);
/**
* @mode_valid:
@@ -516,6 +501,24 @@ struct drm_mode_config {
struct raw_spinlock panic_lock;
/**
+ * @num_colorop:
+ *
+ * Number of colorop objects on this device.
+ * This is invariant over the lifetime of a device and hence doesn't
+ * need any locks.
+ */
+ int num_colorop;
+
+ /**
+ * @colorop_list:
+ *
+ * List of colorop objects linked with &drm_colorop.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
+ struct list_head colorop_list;
+
+ /**
* @num_crtc:
*
* Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime
@@ -548,8 +551,8 @@ struct drm_mode_config {
*/
struct list_head privobj_list;
- int min_width, min_height;
- int max_width, max_height;
+ unsigned int min_width, min_height;
+ unsigned int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
/* output poll support */
@@ -953,6 +956,12 @@ struct drm_mode_config {
struct drm_property *modifiers_property;
/**
+	 * @async_modifiers_property: Plane property to list supported modifier/format
+	 * combinations for asynchronous flips.
+ */
+ struct drm_property *async_modifiers_property;
+
+ /**
* @size_hints_property: Plane SIZE_HINTS property.
*/
struct drm_property *size_hints_property;
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 08d7a7f0188f..c68edbd126d0 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -35,7 +35,7 @@ struct drm_file;
* @id: userspace visible identifier
* @type: type of the object, one of DRM_MODE_OBJECT\_\*
* @properties: properties attached to this object, including values
- * @refcount: reference count for objects which with dynamic lifetime
+ * @refcount: reference count for objects with dynamic lifetime
* @free_cb: free function callback, only set for objects with dynamic lifetime
*
* Base structure for modeset objects visible to userspace. Objects can be
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index 995fd981cab0..7e3d4c5a7f66 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -26,6 +26,7 @@
struct drm_crtc;
struct drm_crtc_funcs;
struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_mode_fb_cmd2;
@@ -33,6 +34,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *);
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index ec59015aec3c..fe32854b7ffe 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -490,6 +490,18 @@ struct drm_crtc_helper_funcs {
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+
+ /**
+ * @handle_vblank_timeout: Handles timeouts of the vblank timer.
+ *
+	 * Called by the CRTC's vblank timer on each timeout. The semantics are
+	 * equivalent to drm_crtc_handle_vblank(). Implementations should
+ * invoke drm_crtc_handle_vblank() as part of processing the timeout.
+ *
+ * This callback is optional. If unset, the vblank timer invokes
+ * drm_crtc_handle_vblank() directly.
+ */
+ bool (*handle_vblank_timeout)(struct drm_crtc *crtc);
};
/**
@@ -967,7 +979,7 @@ struct drm_connector_helper_funcs {
* drm_mode_status.
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/**
* @mode_valid_ctx:
@@ -1006,7 +1018,7 @@ struct drm_connector_helper_funcs {
*
*/
int (*mode_valid_ctx)(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
@@ -1400,13 +1412,18 @@ struct drm_plane_helper_funcs {
* given update can be committed asynchronously, that is, if it can
* jump ahead of the state currently queued for update.
*
+ * This function is also used by drm_atomic_set_property() to determine
+	 * whether the plane can be flipped asynchronously. The @flip flag
+	 * distinguishes whether the function is checking just the plane state
+	 * or an actual flip.
+ *
* RETURNS:
*
* Return 0 on success and any error returned indicates that the update
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *state, bool flip);
/**
* @atomic_async_update:
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 02d1cdd7f798..7f0256dae3f1 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -52,6 +52,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_bridge **bridge);
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
const struct device_node *port2);
+int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1,
+ struct device_node *port2);
int drm_of_lvds_get_data_mapping(const struct device_node *port);
int drm_of_get_data_lanes_count(const struct device_node *endpoint,
const unsigned int min, const unsigned int max);
@@ -110,6 +112,13 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
}
static inline int
+drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1,
+ struct device_node *port2)
+{
+ return -EINVAL;
+}
+
+static inline int
drm_of_lvds_get_data_mapping(const struct device_node *port)
{
return -EINVAL;
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
new file mode 100644
index 000000000000..f6e7e234c089
--- /dev/null
+++ b/include/drm/drm_pagemap.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _DRM_PAGEMAP_H_
+#define _DRM_PAGEMAP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+
+#define NR_PAGES(order) (1U << (order))
+
+struct drm_pagemap;
+struct drm_pagemap_zdd;
+struct device;
+
+/**
+ * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
+ *
+ * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
+ * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
+ */
+enum drm_interconnect_protocol {
+ DRM_INTERCONNECT_SYSTEM,
+ DRM_INTERCONNECT_DRIVER,
+ /* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
+};
+
+/**
+ * struct drm_pagemap_addr - Address representation.
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Note: There is room for improvement here. We should be able to pack into
+ * 64 bits.
+ */
+struct drm_pagemap_addr {
+ dma_addr_t addr;
+ u64 proto : 54;
+ u64 order : 8;
+ u64 dir : 2;
+};
+
+/**
+ * drm_pagemap_addr_encode() - Encode a dma address with metadata
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Return: A struct drm_pagemap_addr encoding the above information.
+ */
+static inline struct drm_pagemap_addr
+drm_pagemap_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ return (struct drm_pagemap_addr) {
+ .addr = addr,
+ .proto = proto,
+ .order = order,
+ .dir = dir,
+ };
+}
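For illustration, a sketch pairing the encode helper with a conventional DMA
mapping; dev, page and order are assumed, and DMA_BIDIRECTIONAL is just an
example direction::

    dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
                                  DMA_BIDIRECTIONAL);
    struct drm_pagemap_addr addr =
        drm_pagemap_addr_encode(dma, DRM_INTERCONNECT_SYSTEM,
                                order, DMA_BIDIRECTIONAL);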
+
+/**
+ * struct drm_pagemap_ops: Ops for a drm-pagemap.
+ */
+struct drm_pagemap_ops {
+ /**
+	 * @device_map: Map a page for device access, or provide an address
+	 * suitable for a driver-private interconnect.
+ *
+ * @dpagemap: The struct drm_pagemap for the page.
+ * @dev: The device mapper.
+ * @page: The page to map.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The transfer direction.
+ */
+ struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
+
+ /**
+ * @device_unmap: Unmap a device address previously obtained using @device_map.
+ *
+ * @dpagemap: The struct drm_pagemap for the mapping.
+ * @dev: The device unmapper.
+ * @addr: The device address obtained when mapping.
+ */
+ void (*device_unmap)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct drm_pagemap_addr addr);
+
+ /**
+ * @populate_mm: Populate part of the mm with @dpagemap memory,
+ * migrating existing data.
+ * @dpagemap: The struct drm_pagemap managing the memory.
+ * @start: The virtual start address in @mm
+ * @end: The virtual end address in @mm
+ * @mm: Pointer to a live mm. The caller must have an mmget()
+ * reference.
+ *
+ * The caller will have the mm lock at least in read mode.
+ * Note that there is no guarantee that the memory is resident
+ * after the function returns, it's best effort only.
+ * When the mm is not using the memory anymore,
+ * it will be released. The struct drm_pagemap might have a
+ * mechanism in place to reclaim the memory and the data will
+ * then be migrated. Typically to system memory.
+ * The implementation should hold sufficient runtime power-
+ * references while pages are used in an address space and
+ * should ideally guard against hardware device unbind in
+ * a way such that device pages are migrated back to system
+ * followed by device page removal. The implementation should
+ * return -ENODEV after device removal.
+ *
+ * Return: 0 if successful. Negative error code on error.
+ */
+ int (*populate_mm)(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+};
+
+/**
+ * struct drm_pagemap: Additional information for a struct dev_pagemap
+ * used for device p2p handshaking.
+ * @ops: The struct drm_pagemap_ops.
+ * @dev: The struct device owning the device-private memory.
+ */
+struct drm_pagemap {
+ const struct drm_pagemap_ops *ops;
+ struct device *dev;
+};
+
+struct drm_pagemap_devmem;
+
+/**
+ * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_pagemap_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @pagemap_addr: Pointer to array of DMA information (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @pagemap_addr: Pointer to array of DMA information (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: Completion signalled once the device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_pagemap_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
+};
+
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner);
+
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+
+#endif
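For illustration, a sketch of the driver-side wiring, assuming a
driver-defined my_devmem_ops and caller-supplied mm, start, end, dpagemap,
size and pgmap_owner::

    struct drm_pagemap_devmem *dm = kzalloc(sizeof(*dm), GFP_KERNEL);
    int err;

    if (!dm)
        return -ENOMEM;

    drm_pagemap_devmem_init(dm, drm->dev, mm, &my_devmem_ops,
                            dpagemap, size);
    err = drm_pagemap_migrate_to_devmem(dm, mm, start, end,
                                        5 /* timeslice_ms */, pgmap_owner);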
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 10015891b056..2407bfa60236 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,12 +28,12 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kref.h>
struct backlight_device;
struct dentry;
struct device_node;
struct drm_connector;
-struct drm_device;
struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -160,6 +160,20 @@ struct drm_panel_follower_funcs {
* Called before the panel is powered off.
*/
int (*panel_unpreparing)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_enabled:
+ *
+ * Called after the panel and the backlight have been enabled.
+ */
+ int (*panel_enabled)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_disabling:
+ *
+ * Called before the panel and the backlight are disabled.
+ */
+ int (*panel_disabling)(struct drm_panel_follower *follower);
};
struct drm_panel_follower {
@@ -267,20 +281,60 @@ struct drm_panel {
* If true then the panel has been enabled.
*/
bool enabled;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+	 * &struct drm_panel.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this panel.
+ */
+ struct kref refcount;
};
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
+
+/**
+ * devm_drm_panel_alloc - Allocate and initialize a refcounted panel.
+ *
+ * @dev: struct device of the panel device
+ * @type: the type of the struct which contains struct &drm_panel
+ * @member: the name of the &drm_panel within @type
+ * @funcs: callbacks for this panel
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
+ *
+ * The reference count of the returned panel is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_panel_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to container structure embedding the panel, ERR_PTR on failure.
+ */
+#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \
+ ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ connector_type))
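For illustration, a probe-time sketch; my_panel and my_panel_funcs are
hypothetical driver types::

    struct my_panel {
        struct drm_panel panel;
        /* driver-private state */
    };

    static int my_probe(struct mipi_dsi_device *dsi)
    {
        struct my_panel *p;

        p = devm_drm_panel_alloc(&dsi->dev, struct my_panel, panel,
                                 &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);
        if (IS_ERR(p))
            return PTR_ERR(p);

        mipi_dsi_set_drvdata(dsi, p);
        return 0;
    }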
+
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
+struct drm_panel *drm_panel_get(struct drm_panel *panel);
+void drm_panel_put(struct drm_panel *panel);
+
void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_prepare(struct drm_panel *panel);
-int drm_panel_unprepare(struct drm_panel *panel);
+void drm_panel_prepare(struct drm_panel *panel);
+void drm_panel_unprepare(struct drm_panel *panel);
-int drm_panel_enable(struct drm_panel *panel);
-int drm_panel_disable(struct drm_panel *panel);
+void drm_panel_enable(struct drm_panel *panel);
+void drm_panel_disable(struct drm_panel *panel);
int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 822dbb1aa9d6..ac0e46b73436 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -1,4 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+/*
+ * Copyright (c) 2024 Intel
+ * Copyright (c) 2024 Red Hat
+ */
+
#ifndef __DRM_PANIC_H__
#define __DRM_PANIC_H__
@@ -8,9 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
-/*
- * Copyright (c) 2024 Intel
- */
/**
* struct drm_scanout_buffer - DRM scanout buffer
@@ -37,6 +40,16 @@ struct drm_scanout_buffer {
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
/**
+ * @pages: Optional, if the scanout buffer is not mapped, set this field
+ * to the array of pages of the scanout buffer. The panic code will use
+ * kmap_local_page_try_from_panic() to map one page at a time to write
+ * all the pixels. This array shouldn't be allocated from the
+	 * get_scanout_buffer() callback.
+ * The scanout buffer should be in linear format.
+ */
+ struct page **pages;
+
+ /**
* @width: Width of the scanout buffer, in pixels.
*/
unsigned int width;
@@ -50,8 +63,25 @@ struct drm_scanout_buffer {
* @pitch: Length in bytes between the start of two consecutive lines.
*/
unsigned int pitch[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @set_pixel: Optional function, to set a pixel color on the
+	 * framebuffer. It allows the driver to handle special tiling formats
+	 * internally. It takes precedence over the @map and @pages fields.
+ */
+ void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color);
+
+ /**
+	 * @private: private pointer that you can use in the set_pixel()
+	 * callback
+ */
+ void *private;
+
};
+#ifdef CONFIG_DRM_PANIC
+
/**
* drm_panic_trylock - try to enter the panic printing critical section
* @dev: struct drm_device
@@ -137,16 +167,23 @@ struct drm_scanout_buffer {
#define drm_panic_unlock(dev, flags) \
raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
-#ifdef CONFIG_DRM_PANIC
+#else
-void drm_panic_register(struct drm_device *dev);
-void drm_panic_unregister(struct drm_device *dev);
+static inline bool drm_panic_trylock(struct drm_device *dev, unsigned long flags)
+{
+ return true;
+}
-#else
+static inline void drm_panic_lock(struct drm_device *dev, unsigned long flags) {}
+static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags) {}
+
+#endif
-static inline void drm_panic_register(struct drm_device *dev) {}
-static inline void drm_panic_unregister(struct drm_device *dev) {}
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+ u8 *tmp, size_t tmp_size);
#endif
#endif /* __DRM_PANIC_H__ */
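For illustration, a sketch of the @set_pixel path for a tiled framebuffer;
my_device and my_write_tiled_pixel() are hypothetical::

    static void my_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
                             unsigned int y, u32 color)
    {
        struct my_device *mdev = sb->private; /* set by the driver */

        my_write_tiled_pixel(mdev, x, y, color);
    }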
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 9507542121fa..703ef4d1bbbc 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -244,6 +244,14 @@ struct drm_plane_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @color_pipeline:
+ *
+ * The first colorop of the active color pipeline, or NULL, if no
+ * color pipeline is active.
+ */
+ struct drm_colorop *color_pipeline;
+
+ /**
* @commit: Tracks the pending commit to prevent use-after-free conditions,
* and for async plane updates.
*
@@ -549,6 +557,23 @@ struct drm_plane_funcs {
*/
bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format,
uint64_t modifier);
+ /**
+ * @format_mod_supported_async:
+ *
+	 * This optional hook is used by the DRM core to determine whether the
+	 * given format/modifier combination is valid for asynchronous flips on
+	 * the plane. This allows the DRM core to generate the correct format
+ * bitmask (which formats apply to which modifier), and to validate
+ * modifiers at atomic_check time.
+ *
+ * Returns:
+ *
+ * True if the given modifier is valid for that format on the plane.
+ * False otherwise.
+ */
+ bool (*format_mod_supported_async)(struct drm_plane *plane,
+ u32 format, u64 modifier);
+
};
/**
@@ -767,6 +792,14 @@ struct drm_plane {
struct drm_property *color_range_property;
/**
+ * @color_pipeline_property:
+ *
+ * Optional "COLOR_PIPELINE" enum property for specifying
+ * a color pipeline to use on the plane.
+ */
+ struct drm_property *color_pipeline_property;
+
+ /**
* @scaling_filter_property: property to apply a particular filter while
* scaling.
*/
@@ -972,6 +1005,8 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_plane_has_format(struct drm_plane *plane,
+ u32 format, u64 modifier);
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier);
@@ -987,4 +1022,7 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane,
const struct drm_plane_size_hint *hints,
int num_hints);
+int drm_plane_create_color_pipeline_property(struct drm_plane *plane,
+ const struct drm_prop_enum_list *pipelines,
+ int num_pipelines);
#endif
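For illustration, a sketch of registering the property; how the enum entries
reference the driver-built colorops is an assumption here, and first_colorop
is hypothetical::

    struct drm_prop_enum_list pipelines[] = {
        { .type = first_colorop->base.id, .name = "Color Pipeline" },
    };
    int ret;

    ret = drm_plane_create_color_pipeline_property(plane, pipelines,
                                                   ARRAY_SIZE(pipelines));
    if (ret)
        return ret;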
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..f50f862f0d8b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
@@ -97,6 +100,9 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
/* helper functions for importing */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 089950ad8681..ab017b05e175 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -32,6 +32,7 @@
#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+#include <drm/drm_device.h>
struct debugfs_regset32;
struct drm_device;
@@ -175,7 +176,12 @@ struct drm_printer {
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
void (*puts)(struct drm_printer *p, const char *str);
void *arg;
+ const void *origin;
const char *prefix;
+ struct {
+ unsigned int series;
+ unsigned int counter;
+ } line;
enum drm_debug_category category;
};
@@ -186,6 +192,7 @@ void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
@@ -193,6 +200,8 @@ void drm_puts(struct drm_printer *p, const char *str);
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
void drm_print_bits(struct drm_printer *p, unsigned long value,
const char * const bits[], unsigned int nbits);
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len);
__printf(2, 0)
/**
@@ -220,7 +229,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
- * @data: Pointer to the devcoredump output buffer
+ * @data: Pointer to the devcoredump output buffer, can be NULL if using
+ * drm_coredump_printer() to determine the size of the devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -265,6 +275,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
+ * The above example has a time complexity of O(N^2), where N is the size of the
+ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
+ * larger ones.
+ *
+ * Another use case for drm_coredump_printer is to capture the devcoredump into
+ * a saved buffer before the dev_coredump() callback. This involves two passes:
+ * one to determine the size of the devcoredump and another to print it to a
+ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
+ * devcoredump read buffer.
+ *
+ * For example::
+ *
+ * char *devcoredump_saved_buffer;
+ *
+ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = 0;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ * drm_printf(p, "foo=%d\n", foo);
+ * ...
+ * return count - iter.remain;
+ * }
+ *
+ * void coredump_print(...)
+ * {
+ * ssize_t count;
+ *
+ * count = __coredump_print(NULL, INT_MAX, ...);
+ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
+ * __coredump_print(devcoredump_saved_buffer, count, ...);
+ * }
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * ...
+ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
+ * ...
+ * }
+ *
+ * The above example has a time complexity of O(2N), where N is the size of the
+ * devcoredump. This scales better than the previous example for larger
+ * devcoredumps.
+ *
* RETURNS:
* The &drm_printer object
*/
@@ -284,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter)
}
/**
+ * drm_coredump_printer_is_full() - DRM coredump printer output is full
+ * @p: DRM coredump printer
+ *
+ * Check whether the DRM coredump printer output is full; useful to
+ * short-circuit coredump printing once the printer is full.
+ *
+ * RETURNS:
+ * True if the DRM coredump printer output buffer is full, false otherwise
+ */
+static inline bool drm_coredump_printer_is_full(struct drm_printer *p)
+{
+ struct drm_print_iterator *iterator = p->arg;
+
+ if (p->printfn != __drm_printfn_coredump)
+ return true;
+
+ return !iterator->remain;
+}
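For illustration, a sketch that stops an expensive capture loop early;
dump_object(), objects and num_objects are hypothetical::

    for (i = 0; i < num_objects; i++) {
        if (drm_coredump_printer_is_full(&p))
            break;
        dump_object(&p, &objects[i]);
    }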
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -332,6 +413,7 @@ static inline struct drm_printer drm_dbg_printer(struct drm_device *drm,
struct drm_printer p = {
.printfn = __drm_printfn_dbg,
.arg = drm,
+ .origin = (const void *)_THIS_IP_, /* it's fine as we will be inlined */
.prefix = prefix,
.category = category,
};
@@ -357,6 +439,65 @@ static inline struct drm_printer drm_err_printer(struct drm_device *drm,
return p;
}
+/**
+ * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers
+ * @p: the &struct drm_printer which actually generates the output
+ * @prefix: optional output prefix, or NULL for no prefix
+ * @series: optional unique series identifier, or 0 to omit identifier in the output
+ *
+ * This printer can be used to increase the robustness of the captured output
+ * to make sure we didn't lose any intermediate lines of the output. Helpful
+ * while capturing some crash data.
+ *
+ * Example 1::
+ *
+ * void crash_dump(struct drm_device *drm)
+ * {
+ * static unsigned int id;
+ * struct drm_printer p = drm_err_printer(drm, "crash");
+ * struct drm_printer lp = drm_line_printer(&p, "dump", ++id);
+ *
+ * drm_printf(&lp, "foo");
+ * drm_printf(&lp, "bar");
+ * }
+ *
+ * Above code will print into the dmesg something like::
+ *
+ * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo
+ * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar
+ *
+ * Example 2::
+ *
+ * void line_dump(struct device *dev)
+ * {
+ * struct drm_printer p = drm_info_printer(dev);
+ * struct drm_printer lp = drm_line_printer(&p, NULL, 0);
+ *
+ * drm_printf(&lp, "foo");
+ * drm_printf(&lp, "bar");
+ * }
+ *
+ * Above code will print::
+ *
+ * [ ] 0000:00:00.0: [drm] 1: foo
+ * [ ] 0000:00:00.0: [drm] 2: bar
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_line_printer(struct drm_printer *p,
+ const char *prefix,
+ unsigned int series)
+{
+ struct drm_printer lp = {
+ .printfn = __drm_printfn_line,
+ .arg = p,
+ .prefix = prefix,
+ .line = { .series = series, },
+ };
+ return lp;
+}
+
/*
* struct device based logging
*
@@ -463,9 +604,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
* Prefer drm_device based logging over device or prink based logging.
*/
+/* Helper to enforce struct drm_device type */
+static inline struct device *__drm_to_dev(const struct drm_device *drm)
+{
+ return drm ? drm->dev : NULL;
+}
+
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
@@ -499,25 +646,25 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg_driver(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
@@ -527,17 +674,15 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
 * Prefer drm_device based logging over device or printk based logging.
*/
-__printf(3, 4)
-void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...);
__printf(1, 2)
void __drm_err(const char *format, ...);
#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
-#define __drm_dbg(cat, fmt, ...) ___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
+#define __drm_dbg(cat, fmt, ...) __drm_dev_dbg(NULL, NULL, cat, fmt, ##__VA_ARGS__)
#else
#define __drm_dbg(cat, fmt, ...) \
- _dynamic_func_call_cls(cat, fmt, ___drm_dbg, \
- cat, fmt, ##__VA_ARGS__)
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ NULL, cat, fmt, ##__VA_ARGS__)
#endif
/* Macros to make printk easier */
@@ -608,10 +753,9 @@ void __drm_err(const char *format, ...);
#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
- const struct drm_device *drm_ = (drm); \
\
if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
- drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
+ drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
#define drm_dbg_ratelimited(drm, fmt, ...) \
@@ -632,14 +776,14 @@ void __drm_err(const char *format, ...);
/* Helper for struct drm_device based WARNs */
#define drm_WARN(drm, condition, format, arg...) \
- WARN(condition, "%s %s: " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ WARN(condition, "%s %s: [drm] " format, \
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ONCE(drm, condition, format, arg...) \
- WARN_ONCE(condition, "%s %s: " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ WARN_ONCE(condition, "%s %s: [drm] " format, \
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ON(drm, x) \
drm_WARN((drm), (x), "%s", \
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index d6ce7b218b77..840ae5f798c2 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
-int drmm_kms_helper_poll_init(struct drm_device *dev);
+void drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 73fcb899a01d..46f09cf68458 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -238,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst,
drm_rect_height(src) >> 16);
}
+/**
+ * drm_rect_overlap - Check if two rectangles overlap
+ * @a: first rectangle
+ * @b: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles overlap, %false otherwise.
+ */
+static inline bool drm_rect_overlap(const struct drm_rect *a,
+ const struct drm_rect *b)
+{
+ return (a->x2 > b->x1 && b->x2 > a->x1 &&
+ a->y2 > b->y1 && b->y2 > a->y1);
+}
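A short sketch of the intended use; DRM_RECT_INIT() takes x, y, width and
height:

	struct drm_rect a = DRM_RECT_INIT(0, 0, 100, 100);
	struct drm_rect b = DRM_RECT_INIT(50, 50, 100, 100);

	/* true: the rectangles share the region (50,50)-(100,100) */
	bool hit = drm_rect_overlap(&a, &b);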
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 79952d8c4bba..440199618620 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -36,6 +36,7 @@
#include <linux/kgdb.h>
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/util_macros.h>
/*
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
@@ -48,21 +49,6 @@
#endif
/**
- * for_each_if - helper for handling conditionals in various for_each macros
- * @condition: The condition to check
- *
- * Typical use::
- *
- * #define for_each_foo_bar(x, y) \'
- * list_for_each_entry(x, y->list, head) \'
- * for_each_if(x->something == SOMETHING)
- *
- * The for_each_if() macro makes the use of for_each_foo_bar() less error
- * prone.
- */
-#define for_each_if(condition) if (!(condition)) {} else
-
-/**
* drm_can_sleep - returns true if currently okay to sleep
*
* This function shall not be used in new code.
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
index 70775748d243..6a46f755daba 100644
--- a/include/drm/drm_utils.h
+++ b/include/drm/drm_utils.h
@@ -12,8 +12,18 @@
#include <linux/types.h>
+struct drm_edid;
+
int drm_get_panel_orientation_quirk(int width, int height);
+struct drm_panel_backlight_quirk {
+ u16 min_brightness;
+ u32 brightness_mask;
+};
+
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid);
+
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c8f829b4307c..ffa564d79638 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -25,6 +25,7 @@
#define _DRM_VBLANK_H_
#include <linux/seqlock.h>
+#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/kthread.h>
@@ -79,6 +80,53 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+ * Additionally, this tracks the disable immediate value per crtc, just
+ * in case it needs to differ from the default value for a given device.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
+
+/**
+ * struct drm_vblank_crtc_timer - vblank timer for a CRTC
+ */
+struct drm_vblank_crtc_timer {
+ /**
+ * @timer: The vblank's high-resolution timer
+ */
+ struct hrtimer timer;
+ /**
+ * @interval_lock: Protects @interval
+ */
+ spinlock_t interval_lock;
+ /**
+ * @interval: Duration between two vblanks
+ */
+ ktime_t interval;
+ /**
+ * @crtc: The timer's CRTC
+ */
+ struct drm_crtc *crtc;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +147,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +247,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -223,6 +277,11 @@ struct drm_vblank_crtc {
* cancelled.
*/
wait_queue_head_t work_wait_queue;
+
+ /**
+ * @vblank_timer: Holds the state of the vblank timer
+ */
+ struct drm_vblank_crtc_timer vblank_timer;
};
struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc);
@@ -247,6 +306,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
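A hedged sketch of enabling vblanks with a custom configuration, assuming
the documented offdelay semantics (0 means the interrupt is never
disabled):

	struct drm_vblank_crtc_config config = {
		.offdelay_ms = 0,	/* never disable the vblank interrupt */
		.disable_immediate = false,
	};

	drm_crtc_vblank_on_config(crtc, &config);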
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
@@ -257,6 +318,10 @@ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time);
+
/*
* Helpers for struct drm_crtc_funcs
*/
diff --git a/include/drm/drm_vblank_helper.h b/include/drm/drm_vblank_helper.h
new file mode 100644
index 000000000000..fcd8a9b35846
--- /dev/null
+++ b/include/drm/drm_vblank_helper.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DRM_VBLANK_HELPER_H_
+#define _DRM_VBLANK_HELPER_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+
+/*
+ * VBLANK helpers
+ */
+
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *crtc_state);
+
+/**
+ * DRM_CRTC_HELPER_VBLANK_FUNCS - Default implementation for VBLANK helpers
+ *
+ * This macro initializes struct &drm_crtc_helper_funcs to default helpers
+ * for VBLANK handling.
+ */
+#define DRM_CRTC_HELPER_VBLANK_FUNCS \
+ .atomic_flush = drm_crtc_vblank_atomic_flush, \
+ .atomic_enable = drm_crtc_vblank_atomic_enable, \
+ .atomic_disable = drm_crtc_vblank_atomic_disable
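For illustration, a driver expands the macro inside its helper funcs
table; my_crtc_mode_valid is a hypothetical driver callback:

	static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
		.mode_valid = my_crtc_mode_valid,
		DRM_CRTC_HELPER_VBLANK_FUNCS,
	};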
+
+/*
+ * VBLANK timer
+ */
+
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc);
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+/**
+ * DRM_CRTC_VBLANK_TIMER_FUNCS - Default implementation for VBLANK timers
+ *
+ * This macro initializes struct &drm_crtc_funcs to default helpers for
+ * VBLANK timers.
+ */
+#define DRM_CRTC_VBLANK_TIMER_FUNCS \
+ .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \
+ .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp_from_timer
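Analogously, a sketch of wiring the timer-based callbacks into a driver's
&struct drm_crtc_funcs alongside the usual atomic helpers:

	static const struct drm_crtc_funcs my_crtc_funcs = {
		.reset = drm_atomic_helper_crtc_reset,
		.set_config = drm_atomic_helper_set_config,
		.page_flip = drm_atomic_helper_page_flip,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
		DRM_CRTC_VBLANK_TIMER_FUNCS,
	};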
+
+#endif
diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h
index eb41d0810c4f..e04d436b7297 100644
--- a/include/drm/drm_vblank_work.h
+++ b/include/drm/drm_vblank_work.h
@@ -17,6 +17,7 @@ struct drm_crtc;
* drm_vblank_work_init()
* drm_vblank_work_cancel_sync()
* drm_vblank_work_flush()
+ * drm_vblank_work_flush_all()
*/
struct drm_vblank_work {
/**
@@ -67,5 +68,6 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
void (*func)(struct kthread_work *work));
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
void drm_vblank_work_flush(struct drm_vblank_work *work);
+void drm_vblank_work_flush_all(struct drm_crtc *crtc);
#endif /* !_DRM_VBLANK_WORK_H_ */
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 17e576c80169..c380a7b8f55a 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -161,6 +161,12 @@ int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
const struct drm_connector_funcs *con_funcs, const u32 *formats,
int n_formats);
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats);
+
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5acc64954a88..fb88301b3c45 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -33,11 +33,11 @@
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
/**
- * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
*
* Setting this flag on a scheduler fence prevents pipelining of jobs depending
* on this fence. In other words we always insert a full CPU round trip before
- * dependen jobs are pushed to the hw queue.
+ * dependent jobs are pushed to the hw queue.
*/
#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
@@ -71,12 +71,6 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_COUNT
};
-/* Used to chose between FIFO and RR jobs scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR 0
-#define DRM_SCHED_POLICY_FIFO 1
-
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
@@ -97,13 +91,21 @@ struct drm_sched_entity {
struct list_head list;
/**
+ * @lock:
+ *
+ * Lock protecting the run-queue (@rq) to which this entity belongs,
+ * @priority and the list of schedulers (@sched_list, @num_sched_list).
+ */
+ spinlock_t lock;
+
+ /**
* @rq:
*
* Runqueue on which this entity is currently scheduled.
*
* FIXME: Locking is very unclear for this. Writers are protected by
- * @rq_lock, but readers are generally lockless and seem to just race
- * with not even a READ_ONCE.
+ * @lock, but readers are generally lockless and seem to just race with
+ * not even a READ_ONCE.
*/
struct drm_sched_rq *rq;
@@ -136,18 +138,11 @@ struct drm_sched_entity {
* @priority:
*
* Priority of the entity. This can be modified by calling
- * drm_sched_entity_set_priority(). Protected by &rq_lock.
+ * drm_sched_entity_set_priority(). Protected by @lock.
*/
enum drm_sched_priority priority;
/**
- * @rq_lock:
- *
- * Lock to modify the runqueue to which this entity belongs.
- */
- spinlock_t rq_lock;
-
- /**
* @job_queue: the list of jobs of this entity.
*/
struct spsc_queue job_queue;
@@ -197,8 +192,8 @@ struct drm_sched_entity {
* @last_scheduled:
*
* Points to the finished fence of the last scheduled job. Only written
- * by the scheduler thread, can be accessed locklessly from
- * drm_sched_job_arm() iff the queue is empty.
+ * by drm_sched_entity_pop_job(). Can be accessed locklessly from
+ * drm_sched_job_arm() if the queue is empty.
*/
struct dma_fence __rcu *last_scheduled;
@@ -243,21 +238,23 @@ struct drm_sched_entity {
/**
* struct drm_sched_rq - queue of entities to be scheduled.
*
- * @lock: to modify the entities list.
* @sched: the scheduler to which this rq belongs to.
- * @entities: list of the entities to be scheduled.
+ * @lock: protects @entities, @rb_tree_root and @current_entity.
* @current_entity: the entity which is to be scheduled.
- * @rb_tree_root: root of time based priory queue of entities for FIFO scheduling
+ * @entities: list of the entities to be scheduled.
+ * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
*/
struct drm_sched_rq {
- spinlock_t lock;
struct drm_gpu_scheduler *sched;
- struct list_head entities;
+
+ spinlock_t lock;
+ /* Following members are protected by the @lock: */
struct drm_sched_entity *current_entity;
+ struct list_head entities;
struct rb_root_cached rb_tree_root;
};
@@ -308,6 +305,13 @@ struct drm_sched_fence {
* @owner: job owner for debugging
*/
void *owner;
+
+ /**
+ * @drm_client_id:
+ *
+ * The client_id of the drm_file which owns the job.
+ */
+ uint64_t drm_client_id;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
@@ -321,8 +325,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
* @credits: the number of credits this job contributes to the scheduler
- * @work: Helper to reschdeule job kill to different context.
- * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @work: Helper to reschedule job kill to different context.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
* be scheduled further.
@@ -335,27 +338,45 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
- struct list_head list;
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
+
+ /**
+ * @sched:
+ *
+ * The scheduler this job is or will be scheduled on. Gets set by
+ * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
+ * has finished.
+ */
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
+ struct drm_sched_entity *entity;
+ enum drm_sched_priority s_priority;
u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
*/
union {
- struct dma_fence_cb finish_cb;
- struct work_struct work;
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
};
- uint64_t id;
- atomic_t karma;
- enum drm_sched_priority s_priority;
- struct drm_sched_entity *entity;
struct dma_fence_cb cb;
+
/**
* @dependencies:
*
@@ -364,28 +385,22 @@ struct drm_sched_job {
* drm_sched_job_add_implicit_dependencies().
*/
struct xarray dependencies;
-
- /** @last_dependency: tracks @dependencies as they signal */
- unsigned long last_dependency;
-
- /**
- * @submit_ts:
- *
- * When the job was pushed into the entity queue.
- */
- ktime_t submit_ts;
};
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
-
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and was successfully reset.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to scheduler's assumption, the GPU
+ * did not hang and is still running.
+ */
enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
- DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_NONE,
+ DRM_GPU_SCHED_STAT_RESET,
DRM_GPU_SCHED_STAT_ENODEV,
+ DRM_GPU_SCHED_STAT_NO_HANG,
};
/**
@@ -409,10 +424,36 @@ struct drm_sched_backend_ops {
struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Using this is discouraged because it violates
+ * dma_fence rules, notably by requiring dma_fence_init() to be called
+ * a second time on already initialized fences. Moreover, this is
+ * dangerous because attempts to allocate memory might deadlock with
+ * memory management code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * this fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
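A hedged sketch of the reference-counting contract described above; the
my_job wrapper and my_hw_submit() are hypothetical driver code:

	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = container_of(sched_job, struct my_job, base);
		struct dma_fence *fence;

		/* my_hw_submit() returns a new reference, which the scheduler
		 * inherits from this callback.
		 */
		fence = my_hw_submit(job);
		if (IS_ERR(fence))
			return fence;

		/* the driver keeps its own, additional reference */
		job->hw_fence = dma_fence_get(fence);

		return fence;
	}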
@@ -420,43 +461,52 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
- * This method is called in a workqueue context.
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+ * corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
+ *
*
- * Drivers typically issue a reset to recover from GPU hangs, and this
- * procedure usually follows the following workflow:
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that all entities
+ * associated with the affected scheduler cannot be torn down, because
+ * doing so would also affect innocent userspace processes which did
+ * not submit any faulty jobs.
*
- * 1. Stop the scheduler using drm_sched_stop(). This will park the
- * scheduler thread and cancel the timeout work, guaranteeing that
- * nothing is queued while we reset the hardware queue
- * 2. Try to gracefully stop non-faulty jobs (optional)
- * 3. Issue a GPU reset (driver-specific)
- * 4. Re-submit jobs using drm_sched_resubmit_jobs()
- * 5. Restart the scheduler using drm_sched_start(). At that point, new
- * jobs can be queued, and the scheduler thread is unblocked
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+ * 4. Re-submit jobs on all impacted schedulers by pushing them to the
+ * entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
*
* Note that some GPUs have distinct hardware queues but need to reset
* the GPU globally, which requires extra synchronization between the
- * timeout handler of the different &drm_gpu_scheduler. One way to
- * achieve this synchronization is to create an ordered workqueue
- * (using alloc_ordered_workqueue()) at the driver level, and pass this
- * queue to drm_sched_init(), to guarantee that timeout handlers are
- * executed sequentially. The above workflow needs to be slightly
- * adjusted in that case:
- *
- * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
- * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
- * the reset (optional)
- * 3. Issue a GPU reset on all faulty queues (driver-specific)
- * 4. Re-submit jobs on all schedulers impacted by the reset using
- * drm_sched_resubmit_jobs()
- * 5. Restart all schedulers that were stopped in step #1 using
- * drm_sched_start()
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
*
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
*
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
*/
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
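A hedged sketch of the hardware-scheduler recovery sequence described
above; the my_*() helpers, including the re-submission step, are
hypothetical and driver-specific:

	static enum drm_gpu_sched_stat
	my_timedout_job(struct drm_sched_job *bad)
	{
		struct drm_gpu_scheduler *sched = bad->sched;

		drm_sched_stop(sched, bad);		/* step 1 */
		my_kill_entity(bad->entity);		/* step 2 */
		my_gpu_reset(sched);			/* step 3 */
		my_resubmit_surviving_jobs(sched);	/* step 4 */
		drm_sched_start(sched, 0);		/* step 5 */

		return DRM_GPU_SCHED_STAT_RESET;
	}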
@@ -467,17 +517,22 @@ struct drm_sched_backend_ops {
void (*free_job)(struct drm_sched_job *sched_job);
/**
- * @update_job_credits: Called when the scheduler is considering this
- * job for execution.
+ * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences
+ * get signaled in drm_sched_fini().
+ *
+ * Used by the scheduler to cancel all jobs that have not been executed
+ * with &struct drm_sched_backend_ops.run_job by the time
+ * drm_sched_fini() gets invoked.
*
- * This callback returns the number of credits the job would take if
- * pushed to the hardware. Drivers may use this to dynamically update
- * the job's credit count. For instance, deduct the number of credits
- * for already signalled native fences.
+ * Drivers need to signal the passed job's hardware fence with an
+ * appropriate error code (e.g., -ECANCELED) in this callback. They
+ * must not free the job.
*
- * This callback is optional.
+ * The scheduler will only call this callback after it has stopped
+ * calling all other callbacks forever, with the exception of &struct
+ * drm_sched_backend_ops.free_job.
*/
- u32 (*update_job_credits)(struct drm_sched_job *sched_job);
+ void (*cancel_job)(struct drm_sched_job *sched_job);
};
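A minimal cancel_job() sketch matching the contract above (the my_job
wrapper and its hw_fence member are hypothetical); the job's fence is
signaled with an error, but the job itself is not freed:

	static void my_cancel_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = container_of(sched_job, struct my_job, base);

		dma_fence_set_error(job->hw_fence, -ECANCELED);
		dma_fence_signal(job->hw_fence);
		/* freeing happens later, in free_job() */
	}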
/**
@@ -491,7 +546,7 @@ struct drm_sched_backend_ops {
* @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
* as there's usually one run-queue per priority, but could be less.
* @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
* @job_id_count: used to assign unique id to the each job.
@@ -542,18 +597,67 @@ struct drm_gpu_scheduler {
struct device *dev;
};
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ * This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging
+ * @dev: associated device. Used for debugging
+ */
+struct drm_sched_init_args {
+ const struct drm_sched_backend_ops *ops;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ u32 num_rqs;
+ u32 credit_limit;
+ unsigned int hang_limit;
+ long timeout;
+ atomic_t *score;
+ const char *name;
+ struct device *dev;
+};
+
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- struct workqueue_struct *submit_wq,
- u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
- long timeout, struct workqueue_struct *timeout_wq,
- atomic_t *score, const char *name, struct device *dev);
+ const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
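A sketch of the new initialization style with illustrative values;
my_sched_ops, the credit limit and the timeout are hypothetical:

	const struct drm_sched_init_args args = {
		.ops = &my_sched_ops,
		.submit_wq = NULL,	/* scheduler allocates an ordered wq */
		.timeout_wq = NULL,	/* timeout work runs on system_wq */
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 64,
		.hang_limit = 0,	/* deprecated mechanism, keep at 0 */
		.timeout = msecs_to_jiffies(500),
		.name = "my-ring",
		.dev = dev,
	};
	int ret = drm_sched_init(&sched, &args);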
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner);
+ u32 credits, void *owner,
+ u64 drm_client_id);
void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
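The common submission flow, as a hedged sketch (one credit, hypothetical
owner and client id); the finished fence is grabbed before the push since
the job may complete immediately afterwards:

	ret = drm_sched_job_init(&job->base, entity, 1, owner, drm_client_id);
	if (ret)
		return ret;

	drm_sched_job_arm(&job->base);
	fence = dma_fence_get(&job->base.s_fence->finished);
	drm_sched_entity_push_job(&job->base);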
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
@@ -566,34 +670,18 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write);
-
-
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-
-void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
-bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
-void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
-void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
-void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_reset_karma(struct drm_sched_job *bad);
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
-void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
-void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -603,29 +691,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_alloc(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
- struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
- struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
deleted file mode 100644
index 5305b9797f93..000000000000
--- a/include/drm/i2c/ch7006.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_CH7006_H__
-#define __DRM_I2C_CH7006_H__
-
-/**
- * struct ch7006_encoder_params
- *
- * Describes how the ch7006 is wired up with the GPU. It should be
- * used as the @params parameter of its @set_config method.
- *
- * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
- * meaning.
- */
-struct ch7006_encoder_params {
- /* private: FIXME: document the members */
- enum {
- CH7006_FORMAT_RGB16 = 0,
- CH7006_FORMAT_YCrCb24m16,
- CH7006_FORMAT_RGB24m16,
- CH7006_FORMAT_RGB15,
- CH7006_FORMAT_RGB24m12C,
- CH7006_FORMAT_RGB24m12I,
- CH7006_FORMAT_RGB24m8,
- CH7006_FORMAT_RGB16m8,
- CH7006_FORMAT_RGB15m8,
- CH7006_FORMAT_YCrCb24m8,
- } input_format;
-
- enum {
- CH7006_CLOCK_SLAVE = 0,
- CH7006_CLOCK_MASTER,
- } clock_mode;
-
- enum {
- CH7006_CLOCK_EDGE_NEG = 0,
- CH7006_CLOCK_EDGE_POS,
- } clock_edge;
-
- int xcm, pcm;
-
- enum {
- CH7006_SYNC_SLAVE = 0,
- CH7006_SYNC_MASTER,
- } sync_direction;
-
- enum {
- CH7006_SYNC_SEPARATED = 0,
- CH7006_SYNC_EMBEDDED,
- } sync_encoding;
-
- enum {
- CH7006_POUT_1_8V = 0,
- CH7006_POUT_3_3V,
- } pout_level;
-
- enum {
- CH7006_ACTIVE_HSYNC = 0,
- CH7006_ACTIVE_DSTART,
- } active_detect;
-};
-
-#endif
diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h
deleted file mode 100644
index ddf248693c8b..000000000000
--- a/include/drm/i2c/sil164.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2010 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_SIL164_H__
-#define __DRM_I2C_SIL164_H__
-
-/**
- * struct sil164_encoder_params
- *
- * Describes how the sil164 is connected to the GPU. It should be used
- * as the @params parameter of its @set_config method.
- *
- * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
- */
-struct sil164_encoder_params {
- /* private: FIXME: document the members */
- enum {
- SIL164_INPUT_EDGE_FALLING = 0,
- SIL164_INPUT_EDGE_RISING
- } input_edge;
-
- enum {
- SIL164_INPUT_WIDTH_12BIT = 0,
- SIL164_INPUT_WIDTH_24BIT
- } input_width;
-
- enum {
- SIL164_INPUT_SINGLE_EDGE = 0,
- SIL164_INPUT_DUAL_EDGE
- } input_dual;
-
- enum {
- SIL164_PLL_FILTER_ON = 0,
- SIL164_PLL_FILTER_OFF,
- } pll_filter;
-
- int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */
- int duallink_skew; /** < Allowed range [-4, 3]. */
-};
-
-#endif
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
deleted file mode 100644
index 3cb25ccbe5e6..000000000000
--- a/include/drm/i2c/tda998x.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_I2C_TDA998X_H__
-#define __DRM_I2C_TDA998X_H__
-
-#include <linux/hdmi.h>
-#include <dt-bindings/display/tda998x.h>
-
-enum {
- AFMT_UNUSED = 0,
- AFMT_SPDIF = TDA998x_SPDIF,
- AFMT_I2S = TDA998x_I2S,
-};
-
-struct tda998x_audio_params {
- u8 config;
- u8 format;
- unsigned sample_width;
- unsigned sample_rate;
- struct hdmi_audio_infoframe cea;
- u8 status[5];
-};
-
-struct tda998x_encoder_params {
- u8 swap_b:3;
- u8 mirr_b:1;
- u8 swap_a:3;
- u8 mirr_a:1;
- u8 swap_d:3;
- u8 mirr_d:1;
- u8 swap_c:3;
- u8 mirr_c:1;
- u8 swap_f:3;
- u8 mirr_f:1;
- u8 swap_e:3;
- u8 mirr_e:1;
-
- struct tda998x_audio_params audio_params;
-};
-
-#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
deleted file mode 100644
index 85ce33ad6e26..000000000000
--- a/include/drm/i915_pciids.h
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright 2013 Intel Corporation
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _I915_PCIIDS_H
-#define _I915_PCIIDS_H
-
-/*
- * A pci_device_id struct {
- * __u32 vendor, device;
- * __u32 subvendor, subdevice;
- * __u32 class, class_mask;
- * kernel_ulong_t driver_data;
- * };
- * Don't use C99 here because "class" is reserved and we want to
- * give userspace flexibility.
- */
-#define INTEL_VGA_DEVICE(id, info) { \
- 0x8086, id, \
- ~0, ~0, \
- 0x030000, 0xff0000, \
- (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- 0x8086, 0x16a, \
- 0x152d, 0x8990, \
- 0x030000, 0xff0000, \
- (unsigned long) info }
-
-#define INTEL_I810_IDS(info) \
- INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \
- INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \
- INTEL_VGA_DEVICE(0x7125, info) /* I810_E */
-
-#define INTEL_I815_IDS(info) \
- INTEL_VGA_DEVICE(0x1132, info) /* I815*/
-
-#define INTEL_I830_IDS(info) \
- INTEL_VGA_DEVICE(0x3577, info)
-
-#define INTEL_I845G_IDS(info) \
- INTEL_VGA_DEVICE(0x2562, info)
-
-#define INTEL_I85X_IDS(info) \
- INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
- INTEL_VGA_DEVICE(0x358e, info)
-
-#define INTEL_I865G_IDS(info) \
- INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
-
-#define INTEL_I915G_IDS(info) \
- INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
- INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
-
-#define INTEL_I915GM_IDS(info) \
- INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
-
-#define INTEL_I945G_IDS(info) \
- INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
-
-#define INTEL_I945GM_IDS(info) \
- INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
- INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
-
-#define INTEL_I965G_IDS(info) \
- INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
- INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
- INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
- INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
-
-#define INTEL_G33_IDS(info) \
- INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
- INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
- INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
-
-#define INTEL_I965GM_IDS(info) \
- INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
- INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
-
-#define INTEL_GM45_IDS(info) \
- INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
-
-#define INTEL_G45_IDS(info) \
- INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
- INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
- INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
- INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
- INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
- INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
-
-#define INTEL_PINEVIEW_G_IDS(info) \
- INTEL_VGA_DEVICE(0xa001, info)
-
-#define INTEL_PINEVIEW_M_IDS(info) \
- INTEL_VGA_DEVICE(0xa011, info)
-
-#define INTEL_IRONLAKE_D_IDS(info) \
- INTEL_VGA_DEVICE(0x0042, info)
-
-#define INTEL_IRONLAKE_M_IDS(info) \
- INTEL_VGA_DEVICE(0x0046, info)
-
-#define INTEL_SNB_D_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0102, info), \
- INTEL_VGA_DEVICE(0x010A, info)
-
-#define INTEL_SNB_D_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0112, info), \
- INTEL_VGA_DEVICE(0x0122, info)
-
-#define INTEL_SNB_D_IDS(info) \
- INTEL_SNB_D_GT1_IDS(info), \
- INTEL_SNB_D_GT2_IDS(info)
-
-#define INTEL_SNB_M_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0106, info)
-
-#define INTEL_SNB_M_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0116, info), \
- INTEL_VGA_DEVICE(0x0126, info)
-
-#define INTEL_SNB_M_IDS(info) \
- INTEL_SNB_M_GT1_IDS(info), \
- INTEL_SNB_M_GT2_IDS(info)
-
-#define INTEL_IVB_M_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
-
-#define INTEL_IVB_M_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
-
-#define INTEL_IVB_M_IDS(info) \
- INTEL_IVB_M_GT1_IDS(info), \
- INTEL_IVB_M_GT2_IDS(info)
-
-#define INTEL_IVB_D_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
-
-#define INTEL_IVB_D_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
-
-#define INTEL_IVB_D_IDS(info) \
- INTEL_IVB_D_GT1_IDS(info), \
- INTEL_IVB_D_GT2_IDS(info)
-
-#define INTEL_IVB_Q_IDS(info) \
- INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
-
-#define INTEL_HSW_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */
-
-#define INTEL_HSW_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
-
-#define INTEL_HSW_GT1_IDS(info) \
- INTEL_HSW_ULT_GT1_IDS(info), \
- INTEL_HSW_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
- INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
- INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
- INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
- INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */
-
-#define INTEL_HSW_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \
-
-#define INTEL_HSW_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
-
-#define INTEL_HSW_GT2_IDS(info) \
- INTEL_HSW_ULT_GT2_IDS(info), \
- INTEL_HSW_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
- INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
- INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
- INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */
-
-#define INTEL_HSW_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
- INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
-
-#define INTEL_HSW_GT3_IDS(info) \
- INTEL_HSW_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
- INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
- INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
- INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
- INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
- INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
-
-#define INTEL_HSW_IDS(info) \
- INTEL_HSW_GT1_IDS(info), \
- INTEL_HSW_GT2_IDS(info), \
- INTEL_HSW_GT3_IDS(info)
-
-#define INTEL_VLV_IDS(info) \
- INTEL_VGA_DEVICE(0x0f30, info), \
- INTEL_VGA_DEVICE(0x0f31, info), \
- INTEL_VGA_DEVICE(0x0f32, info), \
- INTEL_VGA_DEVICE(0x0f33, info)
-
-#define INTEL_BDW_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
- INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */
-
-#define INTEL_BDW_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */
-
-#define INTEL_BDW_GT1_IDS(info) \
- INTEL_BDW_ULT_GT1_IDS(info), \
- INTEL_BDW_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
- INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
- INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
-
-#define INTEL_BDW_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */
-
-#define INTEL_BDW_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
-
-#define INTEL_BDW_GT2_IDS(info) \
- INTEL_BDW_ULT_GT2_IDS(info), \
- INTEL_BDW_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
- INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
- INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
-
-#define INTEL_BDW_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \
-
-#define INTEL_BDW_ULX_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x162E, info) /* ULX */
-
-#define INTEL_BDW_GT3_IDS(info) \
- INTEL_BDW_ULT_GT3_IDS(info), \
- INTEL_BDW_ULX_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
- INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
-
-#define INTEL_BDW_ULT_RSVD_IDS(info) \
- INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x163B, info) /* Iris */
-
-#define INTEL_BDW_ULX_RSVD_IDS(info) \
- INTEL_VGA_DEVICE(0x163E, info) /* ULX */
-
-#define INTEL_BDW_RSVD_IDS(info) \
- INTEL_BDW_ULT_RSVD_IDS(info), \
- INTEL_BDW_ULX_RSVD_IDS(info), \
- INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
- INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
-
-#define INTEL_BDW_IDS(info) \
- INTEL_BDW_GT1_IDS(info), \
- INTEL_BDW_GT2_IDS(info), \
- INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVD_IDS(info)
-
-#define INTEL_CHV_IDS(info) \
- INTEL_VGA_DEVICE(0x22b0, info), \
- INTEL_VGA_DEVICE(0x22b1, info), \
- INTEL_VGA_DEVICE(0x22b2, info), \
- INTEL_VGA_DEVICE(0x22b3, info)
-
-#define INTEL_SKL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */
-
-#define INTEL_SKL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
- INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */
-
-#define INTEL_SKL_GT1_IDS(info) \
- INTEL_SKL_ULT_GT1_IDS(info), \
- INTEL_SKL_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
- INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */
-
-#define INTEL_SKL_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
- INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */
-
-#define INTEL_SKL_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */
-
-#define INTEL_SKL_GT2_IDS(info) \
- INTEL_SKL_ULT_GT2_IDS(info), \
- INTEL_SKL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
-
-#define INTEL_SKL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
- INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */
-
-#define INTEL_SKL_GT3_IDS(info) \
- INTEL_SKL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
- INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */
-
-#define INTEL_SKL_GT4_IDS(info) \
- INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
- INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
- INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
- INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */
-
-#define INTEL_SKL_IDS(info) \
- INTEL_SKL_GT1_IDS(info), \
- INTEL_SKL_GT2_IDS(info), \
- INTEL_SKL_GT3_IDS(info), \
- INTEL_SKL_GT4_IDS(info)
-
-#define INTEL_BXT_IDS(info) \
- INTEL_VGA_DEVICE(0x0A84, info), \
- INTEL_VGA_DEVICE(0x1A84, info), \
- INTEL_VGA_DEVICE(0x1A85, info), \
- INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
- INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
-
-#define INTEL_GLK_IDS(info) \
- INTEL_VGA_DEVICE(0x3184, info), \
- INTEL_VGA_DEVICE(0x3185, info)
-
-#define INTEL_KBL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */
-
-#define INTEL_KBL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
- INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */
-
-#define INTEL_KBL_GT1_IDS(info) \
- INTEL_KBL_ULT_GT1_IDS(info), \
- INTEL_KBL_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
- INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */
-
-#define INTEL_KBL_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
- INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */
-
-#define INTEL_KBL_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */
-
-#define INTEL_KBL_GT2_IDS(info) \
- INTEL_KBL_ULT_GT2_IDS(info), \
- INTEL_KBL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
-
-#define INTEL_KBL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */
-
-#define INTEL_KBL_GT3_IDS(info) \
- INTEL_KBL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
-
-#define INTEL_KBL_GT4_IDS(info) \
- INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
-
-/* AML/KBL Y GT2 */
-#define INTEL_AML_KBL_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
- INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
-
-/* AML/CFL Y GT2 */
-#define INTEL_AML_CFL_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x87CA, info)
-
-/* CML GT1 */
-#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9BA2, info), \
- INTEL_VGA_DEVICE(0x9BA4, info), \
- INTEL_VGA_DEVICE(0x9BA5, info), \
- INTEL_VGA_DEVICE(0x9BA8, info)
-
-#define INTEL_CML_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9B21, info), \
- INTEL_VGA_DEVICE(0x9BAA, info), \
- INTEL_VGA_DEVICE(0x9BAC, info)
-
-/* CML GT2 */
-#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9BC2, info), \
- INTEL_VGA_DEVICE(0x9BC4, info), \
- INTEL_VGA_DEVICE(0x9BC5, info), \
- INTEL_VGA_DEVICE(0x9BC6, info), \
- INTEL_VGA_DEVICE(0x9BC8, info), \
- INTEL_VGA_DEVICE(0x9BE6, info), \
- INTEL_VGA_DEVICE(0x9BF6, info)
-
-#define INTEL_CML_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9B41, info), \
- INTEL_VGA_DEVICE(0x9BCA, info), \
- INTEL_VGA_DEVICE(0x9BCC, info)
-
-#define INTEL_KBL_IDS(info) \
- INTEL_KBL_GT1_IDS(info), \
- INTEL_KBL_GT2_IDS(info), \
- INTEL_KBL_GT3_IDS(info), \
- INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_KBL_GT2_IDS(info)
-
-/* CFL S */
-#define INTEL_CFL_S_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
-
-#define INTEL_CFL_S_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
-
-/* CFL H */
-#define INTEL_CFL_H_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3E9C, info)
-
-#define INTEL_CFL_H_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */
-
-/* CFL U GT2 */
-#define INTEL_CFL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA9, info)
-
-/* CFL U GT3 */
-#define INTEL_CFL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
-
-/* WHL/CFL U GT1 */
-#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
-
-/* WHL/CFL U GT2 */
-#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info), \
- INTEL_VGA_DEVICE(0x3EA3, info)
-
-/* WHL/CFL U GT3 */
-#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info)
-
-#define INTEL_CFL_IDS(info) \
- INTEL_CFL_S_GT1_IDS(info), \
- INTEL_CFL_S_GT2_IDS(info), \
- INTEL_CFL_H_GT1_IDS(info), \
- INTEL_CFL_H_GT2_IDS(info), \
- INTEL_CFL_U_GT2_IDS(info), \
- INTEL_CFL_U_GT3_IDS(info), \
- INTEL_WHL_U_GT1_IDS(info), \
- INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info), \
- INTEL_AML_CFL_GT2_IDS(info), \
- INTEL_CML_GT1_IDS(info), \
- INTEL_CML_GT2_IDS(info), \
- INTEL_CML_U_GT1_IDS(info), \
- INTEL_CML_U_GT2_IDS(info)
-
-/* CNL */
-#define INTEL_CNL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x5A44, info), \
- INTEL_VGA_DEVICE(0x5A4C, info), \
- INTEL_VGA_DEVICE(0x5A54, info), \
- INTEL_VGA_DEVICE(0x5A5C, info)
-
-#define INTEL_CNL_IDS(info) \
- INTEL_CNL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x5A40, info), \
- INTEL_VGA_DEVICE(0x5A41, info), \
- INTEL_VGA_DEVICE(0x5A42, info), \
- INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A4A, info), \
- INTEL_VGA_DEVICE(0x5A50, info), \
- INTEL_VGA_DEVICE(0x5A51, info), \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A59, info), \
- INTEL_VGA_DEVICE(0x5A5A, info)
-
-/* ICL */
-#define INTEL_ICL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x8A50, info), \
- INTEL_VGA_DEVICE(0x8A52, info), \
- INTEL_VGA_DEVICE(0x8A53, info), \
- INTEL_VGA_DEVICE(0x8A54, info), \
- INTEL_VGA_DEVICE(0x8A56, info), \
- INTEL_VGA_DEVICE(0x8A57, info), \
- INTEL_VGA_DEVICE(0x8A58, info), \
- INTEL_VGA_DEVICE(0x8A59, info), \
- INTEL_VGA_DEVICE(0x8A5A, info), \
- INTEL_VGA_DEVICE(0x8A5B, info), \
- INTEL_VGA_DEVICE(0x8A5C, info), \
- INTEL_VGA_DEVICE(0x8A70, info), \
- INTEL_VGA_DEVICE(0x8A71, info)
-
-#define INTEL_ICL_11_IDS(info) \
- INTEL_ICL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x8A51, info), \
- INTEL_VGA_DEVICE(0x8A5D, info)
-
-/* EHL */
-#define INTEL_EHL_IDS(info) \
- INTEL_VGA_DEVICE(0x4541, info), \
- INTEL_VGA_DEVICE(0x4551, info), \
- INTEL_VGA_DEVICE(0x4555, info), \
- INTEL_VGA_DEVICE(0x4557, info), \
- INTEL_VGA_DEVICE(0x4570, info), \
- INTEL_VGA_DEVICE(0x4571, info)
-
-/* JSL */
-#define INTEL_JSL_IDS(info) \
- INTEL_VGA_DEVICE(0x4E51, info), \
- INTEL_VGA_DEVICE(0x4E55, info), \
- INTEL_VGA_DEVICE(0x4E57, info), \
- INTEL_VGA_DEVICE(0x4E61, info), \
- INTEL_VGA_DEVICE(0x4E71, info)
-
-/* TGL */
-#define INTEL_TGL_12_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9A60, info), \
- INTEL_VGA_DEVICE(0x9A68, info), \
- INTEL_VGA_DEVICE(0x9A70, info)
-
-#define INTEL_TGL_12_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9A40, info), \
- INTEL_VGA_DEVICE(0x9A49, info), \
- INTEL_VGA_DEVICE(0x9A59, info), \
- INTEL_VGA_DEVICE(0x9A78, info), \
- INTEL_VGA_DEVICE(0x9AC0, info), \
- INTEL_VGA_DEVICE(0x9AC9, info), \
- INTEL_VGA_DEVICE(0x9AD9, info), \
- INTEL_VGA_DEVICE(0x9AF8, info)
-
-#define INTEL_TGL_12_IDS(info) \
- INTEL_TGL_12_GT1_IDS(info), \
- INTEL_TGL_12_GT2_IDS(info)
-
-/* RKL */
-#define INTEL_RKL_IDS(info) \
- INTEL_VGA_DEVICE(0x4C80, info), \
- INTEL_VGA_DEVICE(0x4C8A, info), \
- INTEL_VGA_DEVICE(0x4C8B, info), \
- INTEL_VGA_DEVICE(0x4C8C, info), \
- INTEL_VGA_DEVICE(0x4C90, info), \
- INTEL_VGA_DEVICE(0x4C9A, info)
-
-/* DG1 */
-#define INTEL_DG1_IDS(info) \
- INTEL_VGA_DEVICE(0x4905, info), \
- INTEL_VGA_DEVICE(0x4906, info), \
- INTEL_VGA_DEVICE(0x4907, info), \
- INTEL_VGA_DEVICE(0x4908, info), \
- INTEL_VGA_DEVICE(0x4909, info)
-
-/* ADL-S */
-#define INTEL_ADLS_IDS(info) \
- INTEL_VGA_DEVICE(0x4680, info), \
- INTEL_VGA_DEVICE(0x4682, info), \
- INTEL_VGA_DEVICE(0x4688, info), \
- INTEL_VGA_DEVICE(0x468A, info), \
- INTEL_VGA_DEVICE(0x468B, info), \
- INTEL_VGA_DEVICE(0x4690, info), \
- INTEL_VGA_DEVICE(0x4692, info), \
- INTEL_VGA_DEVICE(0x4693, info)
-
-/* ADL-P */
-#define INTEL_ADLP_IDS(info) \
- INTEL_VGA_DEVICE(0x46A0, info), \
- INTEL_VGA_DEVICE(0x46A1, info), \
- INTEL_VGA_DEVICE(0x46A2, info), \
- INTEL_VGA_DEVICE(0x46A3, info), \
- INTEL_VGA_DEVICE(0x46A6, info), \
- INTEL_VGA_DEVICE(0x46A8, info), \
- INTEL_VGA_DEVICE(0x46AA, info), \
- INTEL_VGA_DEVICE(0x462A, info), \
- INTEL_VGA_DEVICE(0x4626, info), \
- INTEL_VGA_DEVICE(0x4628, info), \
- INTEL_VGA_DEVICE(0x46B0, info), \
- INTEL_VGA_DEVICE(0x46B1, info), \
- INTEL_VGA_DEVICE(0x46B2, info), \
- INTEL_VGA_DEVICE(0x46B3, info), \
- INTEL_VGA_DEVICE(0x46C0, info), \
- INTEL_VGA_DEVICE(0x46C1, info), \
- INTEL_VGA_DEVICE(0x46C2, info), \
- INTEL_VGA_DEVICE(0x46C3, info)
-
-/* ADL-N */
-#define INTEL_ADLN_IDS(info) \
- INTEL_VGA_DEVICE(0x46D0, info), \
- INTEL_VGA_DEVICE(0x46D1, info), \
- INTEL_VGA_DEVICE(0x46D2, info), \
- INTEL_VGA_DEVICE(0x46D3, info), \
- INTEL_VGA_DEVICE(0x46D4, info)
-
-/* RPL-S */
-#define INTEL_RPLS_IDS(info) \
- INTEL_VGA_DEVICE(0xA780, info), \
- INTEL_VGA_DEVICE(0xA781, info), \
- INTEL_VGA_DEVICE(0xA782, info), \
- INTEL_VGA_DEVICE(0xA783, info), \
- INTEL_VGA_DEVICE(0xA788, info), \
- INTEL_VGA_DEVICE(0xA789, info), \
- INTEL_VGA_DEVICE(0xA78A, info), \
- INTEL_VGA_DEVICE(0xA78B, info)
-
-/* RPL-U */
-#define INTEL_RPLU_IDS(info) \
- INTEL_VGA_DEVICE(0xA721, info), \
- INTEL_VGA_DEVICE(0xA7A1, info), \
- INTEL_VGA_DEVICE(0xA7A9, info), \
- INTEL_VGA_DEVICE(0xA7AC, info), \
- INTEL_VGA_DEVICE(0xA7AD, info)
-
-/* RPL-P */
-#define INTEL_RPLP_IDS(info) \
- INTEL_RPLU_IDS(info), \
- INTEL_VGA_DEVICE(0xA720, info), \
- INTEL_VGA_DEVICE(0xA7A0, info), \
- INTEL_VGA_DEVICE(0xA7A8, info), \
- INTEL_VGA_DEVICE(0xA7AA, info), \
- INTEL_VGA_DEVICE(0xA7AB, info)
-
-/* DG2 */
-#define INTEL_DG2_G10_IDS(info) \
- INTEL_VGA_DEVICE(0x5690, info), \
- INTEL_VGA_DEVICE(0x5691, info), \
- INTEL_VGA_DEVICE(0x5692, info), \
- INTEL_VGA_DEVICE(0x56A0, info), \
- INTEL_VGA_DEVICE(0x56A1, info), \
- INTEL_VGA_DEVICE(0x56A2, info), \
- INTEL_VGA_DEVICE(0x56BE, info), \
- INTEL_VGA_DEVICE(0x56BF, info)
-
-#define INTEL_DG2_G11_IDS(info) \
- INTEL_VGA_DEVICE(0x5693, info), \
- INTEL_VGA_DEVICE(0x5694, info), \
- INTEL_VGA_DEVICE(0x5695, info), \
- INTEL_VGA_DEVICE(0x56A5, info), \
- INTEL_VGA_DEVICE(0x56A6, info), \
- INTEL_VGA_DEVICE(0x56B0, info), \
- INTEL_VGA_DEVICE(0x56B1, info), \
- INTEL_VGA_DEVICE(0x56BA, info), \
- INTEL_VGA_DEVICE(0x56BB, info), \
- INTEL_VGA_DEVICE(0x56BC, info), \
- INTEL_VGA_DEVICE(0x56BD, info)
-
-#define INTEL_DG2_G12_IDS(info) \
- INTEL_VGA_DEVICE(0x5696, info), \
- INTEL_VGA_DEVICE(0x5697, info), \
- INTEL_VGA_DEVICE(0x56A3, info), \
- INTEL_VGA_DEVICE(0x56A4, info), \
- INTEL_VGA_DEVICE(0x56B2, info), \
- INTEL_VGA_DEVICE(0x56B3, info)
-
-#define INTEL_DG2_IDS(info) \
- INTEL_DG2_G10_IDS(info), \
- INTEL_DG2_G11_IDS(info), \
- INTEL_DG2_G12_IDS(info)
-
-#define INTEL_ATS_M150_IDS(info) \
- INTEL_VGA_DEVICE(0x56C0, info), \
- INTEL_VGA_DEVICE(0x56C2, info)
-
-#define INTEL_ATS_M75_IDS(info) \
- INTEL_VGA_DEVICE(0x56C1, info)
-
-#define INTEL_ATS_M_IDS(info) \
- INTEL_ATS_M150_IDS(info), \
- INTEL_ATS_M75_IDS(info)
-
-/* MTL */
-#define INTEL_MTL_IDS(info) \
- INTEL_VGA_DEVICE(0x7D40, info), \
- INTEL_VGA_DEVICE(0x7D41, info), \
- INTEL_VGA_DEVICE(0x7D45, info), \
- INTEL_VGA_DEVICE(0x7D51, info), \
- INTEL_VGA_DEVICE(0x7D55, info), \
- INTEL_VGA_DEVICE(0x7D60, info), \
- INTEL_VGA_DEVICE(0x7D67, info), \
- INTEL_VGA_DEVICE(0x7DD1, info), \
- INTEL_VGA_DEVICE(0x7DD5, info)
-
-#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/intel/display_member.h b/include/drm/intel/display_member.h
new file mode 100644
index 000000000000..0319ea560b60
--- /dev/null
+++ b/include/drm/intel/display_member.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DRM_INTEL_DISPLAY_MEMBER_H__
+#define __DRM_INTEL_DISPLAY_MEMBER_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+#include <drm/drm_device.h>
+
+struct intel_display;
+
+/*
+ * A dummy device struct to define the relative offsets of drm and display
+ * members. With the members identically placed in struct drm_i915_private and
+ * struct xe_device, this allows figuring out the struct intel_display pointer
+ * without the definition of either driver specific structure.
+ */
+struct __intel_generic_device {
+ struct drm_device drm;
+ struct intel_display *display;
+};
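+
+/*
+ * A sketch of the lookup this layout enables (hypothetical helper, not part
+ * of this header; it relies only on the relative offsets asserted below):
+ *
+ *	static inline struct intel_display *
+ *	__to_intel_display(struct drm_device *drm)
+ *	{
+ *		return container_of(drm, struct __intel_generic_device, drm)->display;
+ *	}
+ */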
+
+/**
+ * INTEL_DISPLAY_MEMBER_STATIC_ASSERT() - ensure correct placing of drm and display members
+ * @type: The struct to check
+ * @drm_member: Name of the struct drm_device member
+ * @display_member: Name of the struct intel_display * member.
+ *
+ * Use this static assert macro to ensure that the struct drm_device and
+ * struct intel_display * members of struct drm_i915_private and struct
+ * xe_device are at the same relative offsets.
+ */
+#define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
+ static_assert( \
+ offsetof(struct __intel_generic_device, display) - offsetof(struct __intel_generic_device, drm) == \
+ offsetof(type, display_member) - offsetof(type, drm_member), \
+ __stringify(type) " " __stringify(drm_member) " and " __stringify(display_member) " members at invalid offsets")
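+
+/*
+ * Usage sketch (hypothetical driver structure, for illustration only):
+ *
+ *	struct my_device {
+ *		struct drm_device drm;
+ *		struct intel_display *display;
+ *	};
+ *
+ *	INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct my_device, drm, display);
+ */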
+
+#endif
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
new file mode 100644
index 000000000000..26bedc360044
--- /dev/null
+++ b/include/drm/intel/display_parent_interface.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DISPLAY_PARENT_INTERFACE_H__
+#define __DISPLAY_PARENT_INTERFACE_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct ref_tracker;
+
+struct intel_display_rpm_interface {
+ struct ref_tracker *(*get)(const struct drm_device *drm);
+ struct ref_tracker *(*get_raw)(const struct drm_device *drm);
+ struct ref_tracker *(*get_if_in_use)(const struct drm_device *drm);
+ struct ref_tracker *(*get_noresume)(const struct drm_device *drm);
+
+ void (*put)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_raw)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_unchecked)(const struct drm_device *drm);
+
+ bool (*suspended)(const struct drm_device *drm);
+ void (*assert_held)(const struct drm_device *drm);
+ void (*assert_block)(const struct drm_device *drm);
+ void (*assert_unblock)(const struct drm_device *drm);
+};
+
+/**
+ * struct intel_display_parent_interface - services parent driver provides to display
+ *
+ * The parent, or core, driver provides a pointer to this structure to display
+ * driver when calling intel_display_device_probe(). The display driver uses it
+ * to access services provided by the parent driver. The structure may contain
+ * sub-struct pointers to group function pointers by functionality.
+ *
+ * All function and sub-struct pointers must be initialized and callable unless
+ * explicitly marked as "optional" below. The display driver will only
+ * NULL-check the optional pointers.
+ */
+struct intel_display_parent_interface {
+ /** @rpm: Runtime PM functions */
+ const struct intel_display_rpm_interface *rpm;
+};
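+
+/*
+ * Call sketch (hypothetical display-side code; the exact wakeref handling
+ * is up to the parent driver's runtime PM implementation):
+ *
+ *	struct ref_tracker *wakeref;
+ *
+ *	wakeref = parent->rpm->get(drm);
+ *	... access hardware ...
+ *	parent->rpm->put(drm, wakeref);
+ */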
+
+#endif
diff --git a/include/drm/i915_component.h b/include/drm/intel/i915_component.h
index 4ea3b17aa143..8082db222e00 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/intel/i915_component.h
@@ -31,6 +31,7 @@ enum i915_component_type {
I915_COMPONENT_HDCP,
I915_COMPONENT_PXP,
I915_COMPONENT_GSC_PROXY,
+ INTEL_COMPONENT_LB,
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/intel/i915_drm.h
index adff68538484..adff68538484 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/intel/i915_gsc_proxy_mei_interface.h
index 850dfbf40607..850dfbf40607 100644
--- a/include/drm/i915_gsc_proxy_mei_interface.h
+++ b/include/drm/intel/i915_gsc_proxy_mei_interface.h
diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/intel/i915_hdcp_interface.h
index d776ed7dcd00..d776ed7dcd00 100644
--- a/include/drm/i915_hdcp_interface.h
+++ b/include/drm/intel/i915_hdcp_interface.h
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/intel/i915_pxp_tee_interface.h
index a532d32f58f3..a532d32f58f3 100644
--- a/include/drm/i915_pxp_tee_interface.h
+++ b/include/drm/intel/i915_pxp_tee_interface.h
diff --git a/include/drm/intel-gtt.h b/include/drm/intel/intel-gtt.h
index cb0d5b7200c7..f53bcff01f22 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel/intel-gtt.h
@@ -28,6 +28,8 @@ void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local);
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
new file mode 100644
index 000000000000..d65be2cba2ab
--- /dev/null
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Intel Corporation
+ */
+
+#ifndef _INTEL_LB_MEI_INTERFACE_H_
+#define _INTEL_LB_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * define INTEL_LB_FLAG_IS_PERSISTENT - Mark the payload as persistent
+ *
+ * This flag indicates that the late binding payload should be stored
+ * persistently in flash across warm resets.
+ */
+#define INTEL_LB_FLAG_IS_PERSISTENT BIT(0)
+
+/**
+ * enum intel_lb_type - enum to determine late binding payload type
+ * @INTEL_LB_TYPE_FAN_CONTROL: Fan controller configuration
+ */
+enum intel_lb_type {
+ INTEL_LB_TYPE_FAN_CONTROL = 1,
+};
+
+/**
+ * enum intel_lb_status - Status codes returned on late binding transmissions
+ * @INTEL_LB_STATUS_SUCCESS: Operation completed successfully
+ * @INTEL_LB_STATUS_4ID_MISMATCH: Mismatch in the expected 4ID (firmware identity/token)
+ * @INTEL_LB_STATUS_ARB_FAILURE: Arbitration failure (e.g. conflicting access or state)
+ * @INTEL_LB_STATUS_GENERAL_ERROR: General firmware error not covered by other codes
+ * @INTEL_LB_STATUS_INVALID_PARAMS: One or more input parameters are invalid
+ * @INTEL_LB_STATUS_INVALID_SIGNATURE: Payload has an invalid or untrusted signature
+ * @INTEL_LB_STATUS_INVALID_PAYLOAD: Payload contents are not accepted by firmware
+ * @INTEL_LB_STATUS_TIMEOUT: Operation timed out before completion
+ */
+enum intel_lb_status {
+ INTEL_LB_STATUS_SUCCESS = 0,
+ INTEL_LB_STATUS_4ID_MISMATCH = 1,
+ INTEL_LB_STATUS_ARB_FAILURE = 2,
+ INTEL_LB_STATUS_GENERAL_ERROR = 3,
+ INTEL_LB_STATUS_INVALID_PARAMS = 4,
+ INTEL_LB_STATUS_INVALID_SIGNATURE = 5,
+ INTEL_LB_STATUS_INVALID_PAYLOAD = 6,
+ INTEL_LB_STATUS_TIMEOUT = 7,
+};
+
+/**
+ * struct intel_lb_component_ops - Ops for late binding services
+ */
+struct intel_lb_component_ops {
+ /**
+ * push_payload - Sends a payload to the authentication firmware
+ * @dev: Device struct corresponding to the mei device
+ * @type: Payload type (see &enum intel_lb_type)
+	 * @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAG_IS_PERSISTENT)
+ * @payload: Pointer to payload buffer
+ * @payload_size: Payload buffer size in bytes
+ *
+	 * Return: 0 on success, negative errno value on transport failure,
+ * positive status returned by firmware
+ */
+ int (*push_payload)(struct device *dev, u32 type, u32 flags,
+ const void *payload, size_t payload_size);
+};
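+
+/*
+ * Call sketch (hypothetical caller; the ops pointer is obtained through
+ * component binding, and the payload buffer is caller-provided):
+ *
+ *	ret = ops->push_payload(dev, INTEL_LB_TYPE_FAN_CONTROL,
+ *				INTEL_LB_FLAG_IS_PERSISTENT,
+ *				payload, payload_size);
+ *
+ * A positive return value is a firmware status from enum intel_lb_status,
+ * not an errno.
+ */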
+
+#endif /* _INTEL_LB_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel/intel_lpe_audio.h
index b6121c8fe539..b6121c8fe539 100644
--- a/include/drm/intel_lpe_audio.h
+++ b/include/drm/intel/intel_lpe_audio.h
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
new file mode 100644
index 000000000000..52520e684ab1
--- /dev/null
+++ b/include/drm/intel/pciids.h
@@ -0,0 +1,903 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __PCIIDS_H__
+#define __PCIIDS_H__
+
+#ifdef __KERNEL__
+#define INTEL_PCI_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
+#define INTEL_VGA_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
+#define INTEL_QUANTA_VGA_DEVICE(_info) { \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x16a, \
+ .subvendor = 0x152d, .subdevice = 0x8990, \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+#endif
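+
+/*
+ * Each *_IDS() list below expands MACRO__ once per PCI device ID, passing
+ * any extra arguments through unchanged. Device table sketch (hypothetical
+ * driver data pointer):
+ *
+ *	static const struct pci_device_id pciidlist[] = {
+ *		INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_info),
+ *		{ }
+ *	};
+ */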
+
+#define INTEL_I810_IDS(MACRO__, ...) \
+ MACRO__(0x7121, ## __VA_ARGS__), /* I810 */ \
+ MACRO__(0x7123, ## __VA_ARGS__), /* I810_DC100 */ \
+ MACRO__(0x7125, ## __VA_ARGS__) /* I810_E */
+
+#define INTEL_I815_IDS(MACRO__, ...) \
+ MACRO__(0x1132, ## __VA_ARGS__) /* I815*/
+
+#define INTEL_I830_IDS(MACRO__, ...) \
+ MACRO__(0x3577, ## __VA_ARGS__)
+
+#define INTEL_I845G_IDS(MACRO__, ...) \
+ MACRO__(0x2562, ## __VA_ARGS__)
+
+#define INTEL_I85X_IDS(MACRO__, ...) \
+ MACRO__(0x3582, ## __VA_ARGS__), /* I855_GM */ \
+ MACRO__(0x358e, ## __VA_ARGS__)
+
+#define INTEL_I865G_IDS(MACRO__, ...) \
+ MACRO__(0x2572, ## __VA_ARGS__) /* I865_G */
+
+#define INTEL_I915G_IDS(MACRO__, ...) \
+ MACRO__(0x2582, ## __VA_ARGS__), /* I915_G */ \
+ MACRO__(0x258a, ## __VA_ARGS__) /* E7221_G */
+
+#define INTEL_I915GM_IDS(MACRO__, ...) \
+ MACRO__(0x2592, ## __VA_ARGS__) /* I915_GM */
+
+#define INTEL_I945G_IDS(MACRO__, ...) \
+ MACRO__(0x2772, ## __VA_ARGS__) /* I945_G */
+
+#define INTEL_I945GM_IDS(MACRO__, ...) \
+ MACRO__(0x27a2, ## __VA_ARGS__), /* I945_GM */ \
+ MACRO__(0x27ae, ## __VA_ARGS__) /* I945_GME */
+
+#define INTEL_I965G_IDS(MACRO__, ...) \
+ MACRO__(0x2972, ## __VA_ARGS__), /* I946_GZ */ \
+ MACRO__(0x2982, ## __VA_ARGS__), /* G35_G */ \
+ MACRO__(0x2992, ## __VA_ARGS__), /* I965_Q */ \
+ MACRO__(0x29a2, ## __VA_ARGS__) /* I965_G */
+
+#define INTEL_G33_IDS(MACRO__, ...) \
+ MACRO__(0x29b2, ## __VA_ARGS__), /* Q35_G */ \
+ MACRO__(0x29c2, ## __VA_ARGS__), /* G33_G */ \
+ MACRO__(0x29d2, ## __VA_ARGS__) /* Q33_G */
+
+#define INTEL_I965GM_IDS(MACRO__, ...) \
+ MACRO__(0x2a02, ## __VA_ARGS__), /* I965_GM */ \
+ MACRO__(0x2a12, ## __VA_ARGS__) /* I965_GME */
+
+#define INTEL_GM45_IDS(MACRO__, ...) \
+ MACRO__(0x2a42, ## __VA_ARGS__) /* GM45_G */
+
+#define INTEL_G45_IDS(MACRO__, ...) \
+ MACRO__(0x2e02, ## __VA_ARGS__), /* IGD_E_G */ \
+ MACRO__(0x2e12, ## __VA_ARGS__), /* Q45_G */ \
+ MACRO__(0x2e22, ## __VA_ARGS__), /* G45_G */ \
+ MACRO__(0x2e32, ## __VA_ARGS__), /* G41_G */ \
+ MACRO__(0x2e42, ## __VA_ARGS__), /* B43_G */ \
+ MACRO__(0x2e92, ## __VA_ARGS__) /* B43_G.1 */
+
+#define INTEL_PNV_G_IDS(MACRO__, ...) \
+ MACRO__(0xa001, ## __VA_ARGS__)
+
+#define INTEL_PNV_M_IDS(MACRO__, ...) \
+ MACRO__(0xa011, ## __VA_ARGS__)
+
+#define INTEL_PNV_IDS(MACRO__, ...) \
+ INTEL_PNV_G_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_PNV_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_ILK_D_IDS(MACRO__, ...) \
+ MACRO__(0x0042, ## __VA_ARGS__)
+
+#define INTEL_ILK_M_IDS(MACRO__, ...) \
+ MACRO__(0x0046, ## __VA_ARGS__)
+
+#define INTEL_ILK_IDS(MACRO__, ...) \
+ INTEL_ILK_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ILK_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0102, ## __VA_ARGS__), \
+ MACRO__(0x010A, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0112, ## __VA_ARGS__), \
+ MACRO__(0x0122, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_IDS(MACRO__, ...) \
+ INTEL_SNB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0106, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0116, ## __VA_ARGS__), \
+ MACRO__(0x0126, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_IDS(MACRO__, ...) \
+ INTEL_SNB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_IDS(MACRO__, ...) \
+ INTEL_SNB_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0156, ## __VA_ARGS__) /* GT1 mobile */
+
+#define INTEL_IVB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0166, ## __VA_ARGS__) /* GT2 mobile */
+
+#define INTEL_IVB_M_IDS(MACRO__, ...) \
+ INTEL_IVB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0152, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x015a, ## __VA_ARGS__) /* GT1 server */
+
+#define INTEL_IVB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0162, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x016a, ## __VA_ARGS__) /* GT2 server */
+
+#define INTEL_IVB_D_IDS(MACRO__, ...) \
+ INTEL_IVB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_IDS(MACRO__, ...) \
+ INTEL_IVB_M_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_Q_IDS(MACRO__, ...) \
+ INTEL_QUANTA_VGA_DEVICE(__VA_ARGS__) /* Quanta transcode */
+
+#define INTEL_HSW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A02, ## __VA_ARGS__), /* ULT GT1 desktop */ \
+ MACRO__(0x0A06, ## __VA_ARGS__), /* ULT GT1 mobile */ \
+ MACRO__(0x0A0A, ## __VA_ARGS__), /* ULT GT1 server */ \
+ MACRO__(0x0A0B, ## __VA_ARGS__) /* ULT GT1 reserved */
+
+#define INTEL_HSW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A0E, ## __VA_ARGS__) /* ULX GT1 mobile */
+
+#define INTEL_HSW_GT1_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0402, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x0406, ## __VA_ARGS__), /* GT1 mobile */ \
+ MACRO__(0x040A, ## __VA_ARGS__), /* GT1 server */ \
+ MACRO__(0x040B, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x040E, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x0C02, ## __VA_ARGS__), /* SDV GT1 desktop */ \
+ MACRO__(0x0C06, ## __VA_ARGS__), /* SDV GT1 mobile */ \
+ MACRO__(0x0C0A, ## __VA_ARGS__), /* SDV GT1 server */ \
+ MACRO__(0x0C0B, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0C0E, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0D02, ## __VA_ARGS__), /* CRW GT1 desktop */ \
+ MACRO__(0x0D06, ## __VA_ARGS__), /* CRW GT1 mobile */ \
+ MACRO__(0x0D0A, ## __VA_ARGS__), /* CRW GT1 server */ \
+ MACRO__(0x0D0B, ## __VA_ARGS__), /* CRW GT1 reserved */ \
+ MACRO__(0x0D0E, ## __VA_ARGS__) /* CRW GT1 reserved */
+
+#define INTEL_HSW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0A12, ## __VA_ARGS__), /* ULT GT2 desktop */ \
+ MACRO__(0x0A16, ## __VA_ARGS__), /* ULT GT2 mobile */ \
+ MACRO__(0x0A1A, ## __VA_ARGS__), /* ULT GT2 server */ \
+	MACRO__(0x0A1B, ## __VA_ARGS__) /* ULT GT2 reserved */
+
+#define INTEL_HSW_ULX_GT2_IDS(MACRO__, ...) \
+	MACRO__(0x0A1E, ## __VA_ARGS__) /* ULX GT2 mobile */
+
+#define INTEL_HSW_GT2_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0412, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x0416, ## __VA_ARGS__), /* GT2 mobile */ \
+ MACRO__(0x041A, ## __VA_ARGS__), /* GT2 server */ \
+ MACRO__(0x041B, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x041E, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x0C12, ## __VA_ARGS__), /* SDV GT2 desktop */ \
+ MACRO__(0x0C16, ## __VA_ARGS__), /* SDV GT2 mobile */ \
+ MACRO__(0x0C1A, ## __VA_ARGS__), /* SDV GT2 server */ \
+ MACRO__(0x0C1B, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0C1E, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0D12, ## __VA_ARGS__), /* CRW GT2 desktop */ \
+ MACRO__(0x0D16, ## __VA_ARGS__), /* CRW GT2 mobile */ \
+ MACRO__(0x0D1A, ## __VA_ARGS__), /* CRW GT2 server */ \
+ MACRO__(0x0D1B, ## __VA_ARGS__), /* CRW GT2 reserved */ \
+ MACRO__(0x0D1E, ## __VA_ARGS__) /* CRW GT2 reserved */
+
+#define INTEL_HSW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x0A22, ## __VA_ARGS__), /* ULT GT3 desktop */ \
+ MACRO__(0x0A26, ## __VA_ARGS__), /* ULT GT3 mobile */ \
+ MACRO__(0x0A2A, ## __VA_ARGS__), /* ULT GT3 server */ \
+ MACRO__(0x0A2B, ## __VA_ARGS__), /* ULT GT3 reserved */ \
+ MACRO__(0x0A2E, ## __VA_ARGS__) /* ULT GT3 reserved */
+
+#define INTEL_HSW_GT3_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0422, ## __VA_ARGS__), /* GT3 desktop */ \
+ MACRO__(0x0426, ## __VA_ARGS__), /* GT3 mobile */ \
+ MACRO__(0x042A, ## __VA_ARGS__), /* GT3 server */ \
+ MACRO__(0x042B, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x042E, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x0C22, ## __VA_ARGS__), /* SDV GT3 desktop */ \
+ MACRO__(0x0C26, ## __VA_ARGS__), /* SDV GT3 mobile */ \
+ MACRO__(0x0C2A, ## __VA_ARGS__), /* SDV GT3 server */ \
+ MACRO__(0x0C2B, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0C2E, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0D22, ## __VA_ARGS__), /* CRW GT3 desktop */ \
+ MACRO__(0x0D26, ## __VA_ARGS__), /* CRW GT3 mobile */ \
+ MACRO__(0x0D2A, ## __VA_ARGS__), /* CRW GT3 server */ \
+ MACRO__(0x0D2B, ## __VA_ARGS__), /* CRW GT3 reserved */ \
+ MACRO__(0x0D2E, ## __VA_ARGS__) /* CRW GT3 reserved */
+
+#define INTEL_HSW_IDS(MACRO__, ...) \
+ INTEL_HSW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_VLV_IDS(MACRO__, ...) \
+ MACRO__(0x0f30, ## __VA_ARGS__), \
+ MACRO__(0x0f31, ## __VA_ARGS__), \
+ MACRO__(0x0f32, ## __VA_ARGS__), \
+ MACRO__(0x0f33, ## __VA_ARGS__)
+
+#define INTEL_BDW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1606, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160B, ## __VA_ARGS__) /* GT1 Iris */
+
+#define INTEL_BDW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x160E, ## __VA_ARGS__) /* GT1 ULX */
+
+#define INTEL_BDW_GT1_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1602, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160A, ## __VA_ARGS__), /* GT1 Server */ \
+ MACRO__(0x160D, ## __VA_ARGS__) /* GT1 Workstation */
+
+#define INTEL_BDW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1616, ## __VA_ARGS__), /* GT2 ULT */ \
+ MACRO__(0x161B, ## __VA_ARGS__) /* GT2 ULT */
+
+#define INTEL_BDW_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x161E, ## __VA_ARGS__) /* GT2 ULX */
+
+#define INTEL_BDW_GT2_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1612, ## __VA_ARGS__), /* GT2 Halo */ \
+ MACRO__(0x161A, ## __VA_ARGS__), /* GT2 Server */ \
+ MACRO__(0x161D, ## __VA_ARGS__) /* GT2 Workstation */
+
+#define INTEL_BDW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1626, ## __VA_ARGS__), /* ULT */ \
+	MACRO__(0x162B, ## __VA_ARGS__) /* Iris */
+
+#define INTEL_BDW_ULX_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x162E, ## __VA_ARGS__) /* ULX */
+
+#define INTEL_BDW_GT3_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1622, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x162A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x162D, ## __VA_ARGS__) /* Workstation */
+
+#define INTEL_BDW_ULT_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x1636, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163B, ## __VA_ARGS__) /* Iris */
+
+#define INTEL_BDW_ULX_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x163E, ## __VA_ARGS__) /* ULX */
+
+#define INTEL_BDW_RSVD_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1632, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x163D, ## __VA_ARGS__) /* Workstation */
+
+#define INTEL_BDW_IDS(MACRO__, ...) \
+ INTEL_BDW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_RSVD_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_CHV_IDS(MACRO__, ...) \
+ MACRO__(0x22b0, ## __VA_ARGS__), \
+ MACRO__(0x22b1, ## __VA_ARGS__), \
+ MACRO__(0x22b2, ## __VA_ARGS__), \
+ MACRO__(0x22b3, ## __VA_ARGS__)
+
+#define INTEL_SKL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x1913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+#define INTEL_SKL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x190E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x1915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+#define INTEL_SKL_GT1_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x190A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x190B, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x1917, ## __VA_ARGS__) /* DT GT1.5 */
+
+#define INTEL_SKL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x1921, ## __VA_ARGS__) /* ULT GT2F */
+
+#define INTEL_SKL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x191E, ## __VA_ARGS__) /* ULX GT2 */
+
+#define INTEL_SKL_GT2_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x191A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x191B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x191D, ## __VA_ARGS__) /* WKS GT2 */
+
+#define INTEL_SKL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x1926, ## __VA_ARGS__), /* ULT GT3e */ \
+ MACRO__(0x1927, ## __VA_ARGS__) /* ULT GT3e */
+
+#define INTEL_SKL_GT3_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x192A, ## __VA_ARGS__), /* SRV GT3 */ \
+ MACRO__(0x192B, ## __VA_ARGS__), /* Halo GT3e */ \
+ MACRO__(0x192D, ## __VA_ARGS__) /* SRV GT3e */
+
+#define INTEL_SKL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x1932, ## __VA_ARGS__), /* DT GT4 */ \
+ MACRO__(0x193A, ## __VA_ARGS__), /* SRV GT4e */ \
+ MACRO__(0x193B, ## __VA_ARGS__), /* Halo GT4e */ \
+ MACRO__(0x193D, ## __VA_ARGS__) /* WKS GT4e */
+
+#define INTEL_SKL_IDS(MACRO__, ...) \
+ INTEL_SKL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT4_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_BXT_IDS(MACRO__, ...) \
+ MACRO__(0x0A84, ## __VA_ARGS__), \
+ MACRO__(0x1A84, ## __VA_ARGS__), \
+ MACRO__(0x1A85, ## __VA_ARGS__), \
+ MACRO__(0x5A84, ## __VA_ARGS__), /* APL HD Graphics 505 */ \
+ MACRO__(0x5A85, ## __VA_ARGS__) /* APL HD Graphics 500 */
+
+#define INTEL_GLK_IDS(MACRO__, ...) \
+ MACRO__(0x3184, ## __VA_ARGS__), \
+ MACRO__(0x3185, ## __VA_ARGS__)
+
+#define INTEL_KBL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x5906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x5913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+#define INTEL_KBL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x590E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x5915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+#define INTEL_KBL_GT1_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x5908, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x590A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x590B, ## __VA_ARGS__) /* Halo GT1 */
+
+#define INTEL_KBL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x5916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x5921, ## __VA_ARGS__) /* ULT GT2F */
+
+#define INTEL_KBL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591E, ## __VA_ARGS__) /* ULX GT2 */
+
+#define INTEL_KBL_GT2_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x5917, ## __VA_ARGS__), /* Mobile GT2 */ \
+ MACRO__(0x591A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x591B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x591D, ## __VA_ARGS__) /* WKS GT2 */
+
+#define INTEL_KBL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x5926, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_KBL_GT3_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x5927, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_KBL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x593B, ## __VA_ARGS__) /* Halo GT4 */
+
+/* AML/KBL Y GT2 */
+#define INTEL_AML_KBL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591C, ## __VA_ARGS__), /* ULX GT2 */ \
+ MACRO__(0x87C0, ## __VA_ARGS__) /* ULX GT2 */
+
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x87CA, ## __VA_ARGS__)
+
+/* CML GT1 */
+#define INTEL_CML_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9BA2, ## __VA_ARGS__), \
+ MACRO__(0x9BA4, ## __VA_ARGS__), \
+ MACRO__(0x9BA5, ## __VA_ARGS__), \
+ MACRO__(0x9BA8, ## __VA_ARGS__)
+
+#define INTEL_CML_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9B21, ## __VA_ARGS__), \
+ MACRO__(0x9BAA, ## __VA_ARGS__), \
+ MACRO__(0x9BAC, ## __VA_ARGS__)
+
+/* CML GT2 */
+#define INTEL_CML_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9BC2, ## __VA_ARGS__), \
+ MACRO__(0x9BC4, ## __VA_ARGS__), \
+ MACRO__(0x9BC5, ## __VA_ARGS__), \
+ MACRO__(0x9BC6, ## __VA_ARGS__), \
+ MACRO__(0x9BC8, ## __VA_ARGS__), \
+ MACRO__(0x9BE6, ## __VA_ARGS__), \
+ MACRO__(0x9BF6, ## __VA_ARGS__)
+
+#define INTEL_CML_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9B41, ## __VA_ARGS__), \
+ MACRO__(0x9BCA, ## __VA_ARGS__), \
+ MACRO__(0x9BCC, ## __VA_ARGS__)
+
+#define INTEL_CML_IDS(MACRO__, ...) \
+ INTEL_CML_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_KBL_IDS(MACRO__, ...) \
+ INTEL_KBL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT4_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* CFL S */
+#define INTEL_CFL_S_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E90, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E93, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E99, ## __VA_ARGS__) /* SRV GT1 */
+
+#define INTEL_CFL_S_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E91, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E92, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E96, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E98, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E9A, ## __VA_ARGS__) /* SRV GT2 */
+
+/* CFL H */
+#define INTEL_CFL_H_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E9C, ## __VA_ARGS__)
+
+#define INTEL_CFL_H_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E94, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x3E9B, ## __VA_ARGS__) /* Halo GT2 */
+
+/* CFL U GT2 */
+#define INTEL_CFL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA9, ## __VA_ARGS__)
+
+/* CFL U GT3 */
+#define INTEL_CFL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA5, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA6, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA7, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA8, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_CFL_IDS(MACRO__, ...) \
+ INTEL_CFL_S_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_S_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_CFL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3EA1, ## __VA_ARGS__), \
+ MACRO__(0x3EA4, ## __VA_ARGS__)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA0, ## __VA_ARGS__), \
+ MACRO__(0x3EA3, ## __VA_ARGS__)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA2, ## __VA_ARGS__)
+
+#define INTEL_WHL_IDS(MACRO__, ...) \
+ INTEL_WHL_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+/* CNL */
+#define INTEL_CNL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x5A44, ## __VA_ARGS__), \
+ MACRO__(0x5A4C, ## __VA_ARGS__), \
+ MACRO__(0x5A54, ## __VA_ARGS__), \
+ MACRO__(0x5A5C, ## __VA_ARGS__)
+
+#define INTEL_CNL_IDS(MACRO__, ...) \
+ INTEL_CNL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5A40, ## __VA_ARGS__), \
+ MACRO__(0x5A41, ## __VA_ARGS__), \
+ MACRO__(0x5A42, ## __VA_ARGS__), \
+ MACRO__(0x5A49, ## __VA_ARGS__), \
+ MACRO__(0x5A4A, ## __VA_ARGS__), \
+ MACRO__(0x5A50, ## __VA_ARGS__), \
+ MACRO__(0x5A51, ## __VA_ARGS__), \
+ MACRO__(0x5A52, ## __VA_ARGS__), \
+ MACRO__(0x5A59, ## __VA_ARGS__), \
+ MACRO__(0x5A5A, ## __VA_ARGS__)
+
+/* ICL */
+#define INTEL_ICL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x8A50, ## __VA_ARGS__), \
+ MACRO__(0x8A52, ## __VA_ARGS__), \
+ MACRO__(0x8A53, ## __VA_ARGS__), \
+ MACRO__(0x8A54, ## __VA_ARGS__), \
+ MACRO__(0x8A56, ## __VA_ARGS__), \
+ MACRO__(0x8A57, ## __VA_ARGS__), \
+ MACRO__(0x8A58, ## __VA_ARGS__), \
+ MACRO__(0x8A59, ## __VA_ARGS__), \
+ MACRO__(0x8A5A, ## __VA_ARGS__), \
+ MACRO__(0x8A5B, ## __VA_ARGS__), \
+ MACRO__(0x8A5C, ## __VA_ARGS__), \
+ MACRO__(0x8A70, ## __VA_ARGS__), \
+ MACRO__(0x8A71, ## __VA_ARGS__)
+
+#define INTEL_ICL_IDS(MACRO__, ...) \
+ INTEL_ICL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x8A51, ## __VA_ARGS__), \
+ MACRO__(0x8A5D, ## __VA_ARGS__)
+
+/* EHL */
+#define INTEL_EHL_IDS(MACRO__, ...) \
+ MACRO__(0x4541, ## __VA_ARGS__), \
+ MACRO__(0x4551, ## __VA_ARGS__), \
+ MACRO__(0x4555, ## __VA_ARGS__), \
+ MACRO__(0x4557, ## __VA_ARGS__), \
+ MACRO__(0x4570, ## __VA_ARGS__), \
+ MACRO__(0x4571, ## __VA_ARGS__)
+
+/* JSL */
+#define INTEL_JSL_IDS(MACRO__, ...) \
+ MACRO__(0x4E51, ## __VA_ARGS__), \
+ MACRO__(0x4E55, ## __VA_ARGS__), \
+ MACRO__(0x4E57, ## __VA_ARGS__), \
+ MACRO__(0x4E61, ## __VA_ARGS__), \
+ MACRO__(0x4E71, ## __VA_ARGS__)
+
+/* TGL */
+#define INTEL_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define INTEL_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define INTEL_TGL_IDS(MACRO__, ...) \
+ INTEL_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define INTEL_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define INTEL_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define INTEL_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define INTEL_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define INTEL_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__), \
+ MACRO__(0x46D3, ## __VA_ARGS__), \
+ MACRO__(0x46D4, ## __VA_ARGS__)
+
+/* RPL-S */
+#define INTEL_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define INTEL_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define INTEL_RPLP_IDS(MACRO__, ...) \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define INTEL_DG2_G10_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_E_IDS(MACRO__, ...) \
+ MACRO__(0x56BE, ## __VA_ARGS__), \
+ MACRO__(0x56BF, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_M_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_E_IDS(MACRO__, ...) \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_M_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_IDS(MACRO__, ...) \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_M_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_IDS(MACRO__, ...) \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_D_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define INTEL_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define INTEL_ATS_M_IDS(MACRO__, ...) \
+ INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* ARL */
+#define INTEL_ARL_H_IDS(MACRO__, ...) \
+ MACRO__(0x7D51, ## __VA_ARGS__), \
+ MACRO__(0x7DD1, ## __VA_ARGS__)
+
+#define INTEL_ARL_U_IDS(MACRO__, ...) \
+	MACRO__(0x7D41, ## __VA_ARGS__)
+
+#define INTEL_ARL_S_IDS(MACRO__, ...) \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0xB640, ## __VA_ARGS__)
+
+#define INTEL_ARL_IDS(MACRO__, ...) \
+ INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL */
+#define INTEL_MTL_U_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__)
+
+#define INTEL_MTL_IDS(MACRO__, ...) \
+ INTEL_MTL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+/* PVC */
+#define INTEL_PVC_IDS(MACRO__, ...) \
+ MACRO__(0x0B69, ## __VA_ARGS__), \
+ MACRO__(0x0B6E, ## __VA_ARGS__), \
+ MACRO__(0x0BD4, ## __VA_ARGS__), \
+ MACRO__(0x0BD5, ## __VA_ARGS__), \
+ MACRO__(0x0BD6, ## __VA_ARGS__), \
+ MACRO__(0x0BD7, ## __VA_ARGS__), \
+ MACRO__(0x0BD8, ## __VA_ARGS__), \
+ MACRO__(0x0BD9, ## __VA_ARGS__), \
+ MACRO__(0x0BDA, ## __VA_ARGS__), \
+ MACRO__(0x0BDB, ## __VA_ARGS__), \
+ MACRO__(0x0BE0, ## __VA_ARGS__), \
+ MACRO__(0x0BE1, ## __VA_ARGS__), \
+ MACRO__(0x0BE5, ## __VA_ARGS__)
+
+/* LNL */
+#define INTEL_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+/* BMG */
+#define INTEL_BMG_G21_IDS(MACRO__, ...) \
+ MACRO__(0xE202, ## __VA_ARGS__), \
+ MACRO__(0xE209, ## __VA_ARGS__), \
+ MACRO__(0xE20B, ## __VA_ARGS__), \
+ MACRO__(0xE20C, ## __VA_ARGS__), \
+ MACRO__(0xE20D, ## __VA_ARGS__), \
+ MACRO__(0xE210, ## __VA_ARGS__), \
+ MACRO__(0xE211, ## __VA_ARGS__), \
+ MACRO__(0xE212, ## __VA_ARGS__), \
+ MACRO__(0xE216, ## __VA_ARGS__)
+
+#define INTEL_BMG_IDS(MACRO__, ...) \
+ INTEL_BMG_G21_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xE220, ## __VA_ARGS__), \
+ MACRO__(0xE221, ## __VA_ARGS__), \
+ MACRO__(0xE222, ## __VA_ARGS__), \
+ MACRO__(0xE223, ## __VA_ARGS__)
+
+/* PTL */
+#define INTEL_PTL_IDS(MACRO__, ...) \
+ MACRO__(0xB080, ## __VA_ARGS__), \
+ MACRO__(0xB081, ## __VA_ARGS__), \
+ MACRO__(0xB082, ## __VA_ARGS__), \
+ MACRO__(0xB083, ## __VA_ARGS__), \
+ MACRO__(0xB084, ## __VA_ARGS__), \
+ MACRO__(0xB085, ## __VA_ARGS__), \
+ MACRO__(0xB086, ## __VA_ARGS__), \
+ MACRO__(0xB087, ## __VA_ARGS__), \
+ MACRO__(0xB08F, ## __VA_ARGS__), \
+ MACRO__(0xB090, ## __VA_ARGS__), \
+ MACRO__(0xB0A0, ## __VA_ARGS__), \
+ MACRO__(0xB0B0, ## __VA_ARGS__)
+
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
+ MACRO__(0xFD80, ## __VA_ARGS__), \
+ MACRO__(0xFD81, ## __VA_ARGS__)
+
+/* NVL-S */
+#define INTEL_NVLS_IDS(MACRO__, ...) \
+ MACRO__(0xD740, ## __VA_ARGS__), \
+ MACRO__(0xD741, ## __VA_ARGS__), \
+ MACRO__(0xD742, ## __VA_ARGS__), \
+ MACRO__(0xD743, ## __VA_ARGS__), \
+ MACRO__(0xD744, ## __VA_ARGS__), \
+ MACRO__(0xD745, ## __VA_ARGS__)
+
+/* CRI */
+#define INTEL_CRI_IDS(MACRO__, ...) \
+ MACRO__(0x674C, ## __VA_ARGS__)
+
+#endif /* __PCIIDS_H__ */
diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h
new file mode 100644
index 000000000000..e9814e8149fd
--- /dev/null
+++ b/include/drm/intel/xe_sriov_vfio.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VFIO_H_
+#define _XE_SRIOV_VFIO_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct xe_device;
+
+/**
+ * xe_sriov_vfio_get_pf() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);
+
+/**
+ * xe_sriov_vfio_migration_supported() - Check if migration is supported.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ *
+ * Return: true if migration is supported, false otherwise.
+ */
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
+
+/**
+ * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will wait until VF FLR is processed by PF on all tiles (or
+ * until timeout occurs).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_suspend_device() - Suspend VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will pause VF on all tiles/GTs.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_device() - Resume VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will resume VF on all tiles.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);
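+
+/*
+ * Typical save-side sequence (sketch; error handling omitted, buf and len
+ * are hypothetical userspace parameters):
+ *
+ *	ssize_t ret;
+ *
+ *	xe_sriov_vfio_stop_copy_enter(xe, vfid);
+ *	do {
+ *		ret = xe_sriov_vfio_data_read(xe, vfid, buf, len);
+ *	} while (ret > 0);
+ *	xe_sriov_vfio_stop_copy_exit(xe, vfid);
+ */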
+
+/**
+ * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_error() - Move VF device to error state.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Reset is needed to move it out of error state.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_data_read() - Read migration data from the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ * 0 if no more migration data is available, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+			char __user *buf, size_t len);
+
+/**
+ * xe_sriov_vfio_data_write() - Write migration data to the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len);
+
+/**
+ * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
index 125f096c88cb..ee9df8cc67b7 100644
--- a/include/drm/spsc_queue.h
+++ b/include/drm/spsc_queue.h
@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
preempt_disable();
+ atomic_inc(&queue->job_count);
+ smp_mb__after_atomic();
+
tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
WRITE_ONCE(*tail, node);
- atomic_inc(&queue->job_count);
/*
* In case of first element verify new node will be visible to the consumer
diff --git a/include/drm/ttm/ttm_allocation.h b/include/drm/ttm/ttm_allocation.h
new file mode 100644
index 000000000000..655d1e44aba7
--- /dev/null
+++ b/include/drm/ttm/ttm_allocation.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_ALLOCATION_H_
+#define _TTM_ALLOCATION_H_
+
+#define TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(n) ((n) & 0xff) /* Max order which caller can benefit from */
+#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC BIT(8) /* Use coherent DMA allocations. */
+#define TTM_ALLOCATION_POOL_USE_DMA32 BIT(9) /* Use GFP_DMA32 allocations. */
+#define TTM_ALLOCATION_PROPAGATE_ENOSPC BIT(10) /* Do not convert ENOSPC from resource managers to ENOMEM. */
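+
+/*
+ * Example (sketch): a caller that benefits from allocations up to order 4
+ * and needs coherent DMA memory would pass
+ *
+ *	TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(4) | TTM_ALLOCATION_POOL_USE_DMA_ALLOC
+ *
+ * in its allocation flags.
+ */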
+
+#endif
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
new file mode 100644
index 000000000000..c33cba111171
--- /dev/null
+++ b/include/drm/ttm/ttm_backup.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _TTM_BACKUP_H_
+#define _TTM_BACKUP_H_
+
+#include <linux/mm_types.h>
+#include <linux/shmem_fs.h>
+
+/**
+ * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
+ * @handle: The handle to convert.
+ *
+ * Converts an opaque handle received from the
+ * ttm_backup_backup_page() function to an (invalid)
+ * struct page pointer suitable for a struct page array.
+ *
+ * Return: An (invalid) struct page pointer.
+ */
+static inline struct page *
+ttm_backup_handle_to_page_ptr(unsigned long handle)
+{
+ return (struct page *)(handle << 1 | 1);
+}
+
+/**
+ * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
+ * @page: The struct page pointer to check.
+ *
+ * Return: true if the struct page pointer is a handle returned from
+ * ttm_backup_handle_to_page_ptr(). False otherwise.
+ */
+static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
+{
+ return (unsigned long)page & 1;
+}
+
+/**
+ * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
+ * @page: The struct page pointer to convert
+ *
+ * Return: The handle that was previously passed to
+ * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
+ * for use as an argument to the ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
+ */
+static inline unsigned long
+ttm_backup_page_ptr_to_handle(const struct page *page)
+{
+ WARN_ON(!ttm_backup_page_ptr_is_handle(page));
+ return (unsigned long)page >> 1;
+}
+
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
+
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+ pgoff_t handle, bool intr);
+
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp);
+
+void ttm_backup_fini(struct file *backup);
+
+u64 ttm_backup_bytes_avail(void);
+
+struct file *ttm_backup_shmem_create(loff_t size);
+
+#endif
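The three inline helpers above are classic low-bit pointer tagging: real struct page pointers are word-aligned, so bit 0 is always clear and can flag an array slot that holds a backup handle instead. A standalone round-trip check (plain C, mirroring the shift-and-tag logic above):

#include <assert.h>

int main(void)
{
	unsigned long handle = 42;	/* as returned by ttm_backup_backup_page() */

	/* Encode: shift left and set bit 0 to mark "not a real page". */
	unsigned long tagged = (handle << 1) | 1;

	/* ttm_backup_page_ptr_is_handle() equivalent: test bit 0. */
	assert(tagged & 1);

	/* ttm_backup_page_ptr_to_handle() equivalent: shift the tag back out. */
	assert((tagged >> 1) == handle);
	return 0;
}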
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 6ccf96c91f3a..bca3a8849d47 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -172,7 +172,5 @@ struct ttm_bo_kmap_obj {
* @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
* @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
* BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
- * faults. Should only be used by TTM internally.
* @resv: Reservation object to allow reserved evictions with.
* @bytes_moved: Statistics on how many bytes have been moved.
@@ -185,38 +184,73 @@ struct ttm_operation_ctx {
bool no_wait_gpu;
bool gfp_retry_mayfail;
bool allow_res_evict;
- bool force_alloc;
struct dma_resv *resv;
uint64_t bytes_moved;
};
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for an LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+ * Return: Negative error code on error; a user-defined positive value
+ * (typically, but not always, the size of the processed bo) on success.
+ * On success, the returned values are summed by the walk and the
+ * walk exits when its target is met.
+ * 0 also indicates success; -EBUSY means this bo was skipped.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
/**
- * ttm_bo_get - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
+ * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
*/
-static inline void ttm_bo_get(struct ttm_buffer_object *bo)
-{
- kref_get(&bo->kref);
-}
+struct ttm_lru_walk_arg {
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
/**
- * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
- * its refcount has already reached zero.
- * @bo: The buffer object.
- *
- * Used to reference a TTM buffer object in lookups where the object is removed
- * from the lookup structure during the destructor and for RCU lookups.
- *
- * Returns: @bo if the referencing was successful, NULL otherwise.
+ * struct ttm_lru_walk - Structure describing an LRU walk.
*/
-static inline __must_check struct ttm_buffer_object *
-ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
-{
- if (!kref_get_unless_zero(&bo->kref))
- return NULL;
- return bo;
-}
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @arg: Common bo LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
+
+/**
+ * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
+ * @purge: Purge the content rather than backing it up.
+ * @writeback: Attempt to immediately write content to swap space.
+ * @allow_move: Allow moving to system before shrinking. This is typically
+ * not desired for zombie or ghost objects (a zombie object being one
+ * whose gem object refcount is zero).
+ */
+struct ttm_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+ u32 allow_move : 1;
+};
+
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags);
+
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+
+bool ttm_bo_shrink_avoid_wait(void);
/**
* ttm_bo_reserve:
@@ -357,7 +391,7 @@ int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx);
-void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_fini(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -375,18 +409,20 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
@@ -428,5 +464,83 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+
+/* Driver LRU walk helpers initially targeted for shrinking. */
+
+/**
+ * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
+ */
+struct ttm_bo_lru_cursor {
+ /** @res_curs: Embedded struct ttm_resource_cursor. */
+ struct ttm_resource_cursor res_curs;
+ /**
+ * @bo: Buffer object pointer if a buffer object is refcounted,
+ * NULL otherwise.
+ */
+ struct ttm_buffer_object *bo;
+ /**
+ * @needs_unlock: Valid iff @bo != NULL. The bo resv needs to be
+ * unlocked before the next iteration or after loop exit.
+ */
+ bool needs_unlock;
+ /** @arg: Pointer to common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg *arg;
+};
+
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);
+
+/*
+ * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
+ */
+DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
+ if (_T) { ttm_bo_lru_cursor_fini(_T); },
+ ttm_bo_lru_cursor_init(curs, man, arg),
+ struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg);
+static inline void *
+class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
+{ return *_T; }
+#define class_ttm_bo_lru_cursor_is_conditional false
+
+/**
+ * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
+ * resources on LRU lists.
+ * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
+ * @_man: The resource manager whose LRU lists to iterate over.
+ * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
+ * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
+ * for the current iteration.
+ *
+ * Iterate over all resources of @_man and for each resource, attempt to
+ * reference and lock (using the locking mode detailed in @_arg) the buffer
+ * object it points to. If successful, assign @_bo to the address of the
+ * buffer object and update @_cursor. The iteration is guarded in the
+ * sense that @_cursor will be initialized before the loop starts and
+ * cleaned up at loop termination, even if the loop is terminated
+ * prematurely by, for example, a return or break statement. Exiting the
+ * loop will also unlock (if needed) and unreference @_bo.
+ *
+ * Return: If locking of a bo returns an error, then iteration is terminated
+ * and @_bo is set to a corresponding error pointer. It's illegal to
+ * dereference @_bo after loop exit.
+ */
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); \
+ !IS_ERR_OR_NULL(_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
#endif
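Taken together, the guarded iteration macro, struct ttm_bo_shrink_flags and ttm_bo_shrink() suggest the shape of a driver shrinker. The sketch below is an assumed usage pattern, not code from this patch; sketch_shrink and its reclaim accounting are invented for illustration.

static s64 sketch_shrink(struct ttm_resource_manager *man,
			 struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk_arg arg = {
		.ctx = ctx,
		.trylock_only = true,	/* shrinkers should not sleep on resv */
	};
	const struct ttm_bo_shrink_flags flags = {
		.writeback = true,	/* push contents straight to swap */
		.allow_move = true,
	};
	struct ttm_bo_lru_cursor curs;
	struct ttm_buffer_object *bo;
	s64 progress = 0;

	ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, bo) {
		long ret;

		if (!ttm_bo_shrink_suitable(bo, ctx))
			continue;
		ret = ttm_bo_shrink(ctx, bo, flags);
		if (ret > 0)
			progress += ret;	/* unit is implementation-defined */
		if (progress >= target)
			break;
	}
	return IS_ERR(bo) ? PTR_ERR(bo) : progress;
}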
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index c22f30535c84..5618aef462f2 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>
@@ -220,6 +221,11 @@ struct ttm_device {
struct list_head device_list;
/**
+ * @alloc_flags: TTM_ALLOCATION_* flags.
+ */
+ unsigned int alloc_flags;
+
+ /**
* @funcs: Function table for the device.
* Constant after bo device init
*/
@@ -252,9 +258,10 @@ struct ttm_device {
spinlock_t lru_lock;
/**
- * @pinned: Buffer objects which are pinned and so not on any LRU list.
+ * @unevictable: Buffer objects which are pinned or swapped and as such
+ * not on an LRU list.
*/
- struct list_head pinned;
+ struct list_head unevictable;
/**
* @dev_mapping: A pointer to the struct address_space for invalidating
@@ -271,6 +278,7 @@ struct ttm_device {
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
+int ttm_device_prepare_hibernation(struct ttm_device *bdev);
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
@@ -290,7 +298,7 @@ static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32);
+ unsigned int alloc_flags);
void ttm_device_fini(struct ttm_device *bdev);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
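With alloc_flags in place, the two positional booleans at every ttm_device_init() call site become one self-describing word built from the TTM_ALLOCATION_* defines in ttm_allocation.h above. A hedged before/after sketch of a call-site conversion (my_funcs and the surrounding variables are invented):

/* Before: positional booleans, easy to transpose. */
ret = ttm_device_init(&bdev, &my_funcs, dev, mapping, vma_manager,
		      true /* use_dma_alloc */, false /* use_dma32 */);

/* After: explicit flags. */
ret = ttm_device_init(&bdev, &my_funcs, dev, mapping, vma_manager,
		      TTM_ALLOCATION_POOL_USE_DMA_ALLOC);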
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 160d954a261e..233581670e78 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -33,6 +33,7 @@
struct device;
struct seq_file;
+struct ttm_backup_flags;
struct ttm_operation_ctx;
struct ttm_pool;
struct ttm_tt;
@@ -63,16 +64,14 @@ struct ttm_pool_type {
*
* @dev: the device we allocate pages for
* @nid: which numa node to use
- * @use_dma_alloc: if coherent DMA allocations should be used
- * @use_dma32: if GFP_DMA32 should be used
+ * @alloc_flags: TTM_ALLOCATION_POOL_* flags
* @caching: pools for each caching/order
*/
struct ttm_pool {
struct device *dev;
int nid;
- bool use_dma_alloc;
- bool use_dma32;
+ unsigned int alloc_flags;
struct {
struct ttm_pool_type orders[NR_PAGE_ORDERS];
@@ -84,11 +83,18 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- int nid, bool use_dma_alloc, bool use_dma32);
+ int nid, unsigned int alloc_flags);
void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+void ttm_pool_drop_backed_up(struct ttm_tt *tt);
+
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm,
+ const struct ttm_backup_flags *flags);
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
int ttm_pool_mgr_init(unsigned long num_pages);
void ttm_pool_mgr_fini(void);
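ttm_pool_init() follows the same conversion, and the low byte of the flags word can carry the maximum allocation order the caller benefits from via TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(). A sketch under the assumption that order 4 (2^4 pages) is the device's largest useful allocation size:

#include <linux/numa.h>	/* NUMA_NO_NODE */

/* Coherent DMA pages; nothing to gain above order 4. */
unsigned int flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
		     TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(4);

ttm_pool_init(&pool, dev, NUMA_NO_NODE, flags);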
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 69769355139f..33e80f30b8b8 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -31,13 +31,15 @@
#include <linux/iosys-map.h>
#include <linux/dma-fence.h>
-#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
-#define TTM_NUM_MEM_TYPES 8
+#define TTM_NUM_MEM_TYPES 9
+struct dentry;
+struct dmem_cgroup_device;
+struct drm_printer;
struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
@@ -49,6 +51,52 @@ struct io_mapping;
struct sg_table;
struct scatterlist;
+/**
+ * define TTM_NUM_MOVE_FENCES - How many entities can be used for evictions
+ *
+ * Pipelined evictions can be spread across multiple entities. This
+ * is the max number of entities that can be used by the driver
+ * for that purpose.
+ */
+#define TTM_NUM_MOVE_FENCES 8
+
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
+
struct ttm_resource_manager_func {
/**
* struct ttm_resource_manager_func member alloc
@@ -142,8 +190,8 @@ struct ttm_resource_manager_func {
* @size: Size of the managed region.
* @bdev: ttm device this manager belongs to
* @func: structure pointer implementing the range manager. See above
- * @move_lock: lock for move fence
- * @move: The fence of the last pipelined move operation.
+ * @eviction_lock: lock for eviction fences
+ * @eviction_fences: The fences of the last pipelined move operation.
* @lru: The lru list for this memory type.
*
* This structure is used to identify and manage memory types for a device.
@@ -157,12 +205,12 @@ struct ttm_resource_manager {
struct ttm_device *bdev;
uint64_t size;
const struct ttm_resource_manager_func *func;
- spinlock_t move_lock;
- /*
- * Protected by @move_lock.
+ /* This is very similar to a dma_resv object, but locking rules make
+ * it difficult to use one in this context.
*/
- struct dma_fence *move;
+ spinlock_t eviction_lock;
+ struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES];
/*
* Protected by the bdev->lru_lock.
@@ -174,6 +222,11 @@ struct ttm_resource_manager {
* bdev->lru_lock.
*/
uint64_t usage;
+
+ /**
+ * @cg: &dmem_cgroup_region used for memory accounting, if not NULL.
+ */
+ struct dmem_cgroup_region *cg;
};
/**
@@ -202,6 +255,7 @@ struct ttm_bus_placement {
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
* @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ * @css: cgroup state this resource is charged to
*
* Structure indicating the placement and space resources used by a
* buffer object.
@@ -214,22 +268,25 @@ struct ttm_resource {
struct ttm_bus_placement bus;
struct ttm_buffer_object *bo;
+ struct dmem_cgroup_pool_state *css;
+
/**
* @lru: Least recently used list, see &ttm_resource_manager.lru
*/
- struct list_head lru;
+ struct ttm_lru_item lru;
};
/**
- * struct ttm_resource_cursor
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
*
- * @priority: the current priority
- *
- * Cursor to iterate over the resources in a manager.
+ * Return: Pointer to the embedding struct ttm_resource
*/
-struct ttm_resource_cursor {
- unsigned int priority;
-};
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
/**
* struct ttm_lru_bulk_move_pos
@@ -246,8 +303,9 @@ struct ttm_lru_bulk_move_pos {
/**
* struct ttm_lru_bulk_move
- *
 * @pos: first/last lru entry for resources in each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
*
* Container for the current bulk move state. Should be used with
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -257,9 +315,41 @@ struct ttm_lru_bulk_move_pos {
*/
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
};
/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man);
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
* struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
* struct sg_table backed struct ttm_resource.
* @base: Embedded struct ttm_kmap_iter providing the usage interface.
@@ -341,12 +431,18 @@ static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
static inline void
ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
{
- dma_fence_put(man->move);
- man->move = NULL;
+ int i;
+
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ dma_fence_put(man->eviction_fences[i]);
+ man->eviction_fences[i] = NULL;
+ }
}
void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
void ttm_resource_add_bulk_move(struct ttm_resource *res,
struct ttm_buffer_object *bo);
@@ -362,7 +458,8 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource **res);
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
bool ttm_resource_intersects(struct ttm_device *bdev,
struct ttm_resource *res,
@@ -386,24 +483,23 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
struct ttm_resource *
-ttm_resource_manager_first(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor);
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
struct ttm_resource *
-ttm_resource_manager_next(struct ttm_resource_manager *man,
- struct ttm_resource_cursor *cursor,
- struct ttm_resource *res);
+ttm_lru_first_res_or_null(struct list_head *head);
/**
* ttm_resource_manager_for_each_res - iterate over all resources
- * @man: the resource manager
* @cursor: struct ttm_resource_cursor for the current position
* @res: the current resource
*
* Iterate over all the evictable resources in a resource manager.
*/
-#define ttm_resource_manager_for_each_res(man, cursor, res) \
- for (res = ttm_resource_manager_first(man, cursor); res; \
- res = ttm_resource_manager_next(man, cursor, res))
+#define ttm_resource_manager_for_each_res(cursor, res) \
+ for (res = ttm_resource_manager_first(cursor); res; \
+ res = ttm_resource_manager_next(cursor))
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
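The iteration rework moves the manager and all walk state into the cursor, so the convenience macro now takes only the cursor and the result variable. A sketch of the converted idiom (LRU locking elided; the lru_lock rules themselves are unchanged by this patch):

struct ttm_resource_cursor cursor;
struct ttm_resource *res;

ttm_resource_cursor_init(&cursor, man);
ttm_resource_manager_for_each_res(&cursor, res) {
	/* inspect, evict or account @res here */
}
ttm_resource_cursor_fini(&cursor);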
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 2b9d856ff388..406437ad674b 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -32,11 +32,13 @@
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
+struct ttm_backup;
struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
+struct ttm_pool_tt_restore;
/**
* struct ttm_tt - This is a structure holding the pages, caching- and aperture
@@ -85,17 +87,22 @@ struct ttm_tt {
* fault handling abuses the DMA api a bit and dma_map_attrs can't be
* used to assure pgprot always matches.
*
+ * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the
+ * struct ttm_tt has been (possibly partially) backed up.
+ *
* TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
* set by TTM after ttm_tt_populate() has successfully returned, and is
* then unset when TTM calls ttm_tt_unpopulate().
+ *
*/
#define TTM_TT_FLAG_SWAPPED BIT(0)
#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
#define TTM_TT_FLAG_EXTERNAL BIT(2)
#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
#define TTM_TT_FLAG_DECRYPTED BIT(4)
+#define TTM_TT_FLAG_BACKED_UP BIT(5)
-#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(6)
uint32_t page_flags;
/** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
@@ -106,10 +113,19 @@ struct ttm_tt {
/** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
/**
+ * @backup: Pointer to the backup struct file for backed-up tts.
+ * Could be unified with @swap_storage. Meanwhile, the driver's
+ * ttm_tt_create() callback is responsible for assigning
+ * this field.
+ */
+ struct file *backup;
+ /**
* @caching: The current caching state of the pages, see enum
* ttm_caching.
*/
enum ttm_caching caching;
+ /** @restore: Partial restoration from backup state. TTM private */
+ struct ttm_pool_tt_restore *restore;
};
/**
@@ -130,6 +146,40 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
}
/**
+ * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if swapped or backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
+{
+ return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP);
+}
+
+/**
+ * ttm_tt_is_backed_up() - Whether the ttm_tt is backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
+ * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status
+ * @tt: The struct ttm_tt.
+ *
+ * Drivers can use this function to clear the backed-up status,
+ * for example before destroying or re-validating a purged tt.
+ */
+static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt)
+{
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
* ttm_tt_create
*
* @bo: pointer to a struct ttm_buffer_object
@@ -230,6 +280,26 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
struct ttm_tt *tt);
unsigned long ttm_tt_pages_limit(void);
+
+/**
+ * struct ttm_backup_flags - Flags to govern backup behaviour.
+ * @purge: Free pages without backing up. Bypass pools.
+ * @writeback: Attempt to copy contents directly to swap space, even
+ * if that means blocking on writes to external memory.
+ */
+struct ttm_backup_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags);
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
+int ttm_tt_setup_backup(struct ttm_tt *tt);
+
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
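A short sketch connecting struct ttm_backup_flags to the entry points above; whether a tt is purged (contents dropped) or written back is the caller's policy choice, and the wrapper below is an assumption about usage, not an API from this header:

static long sketch_backup(struct ttm_device *bdev, struct ttm_tt *tt,
			  bool purge)
{
	const struct ttm_backup_flags flags = {
		.purge = purge,		/* free pages without backing up */
		.writeback = !purge,	/* otherwise push straight to swap */
	};

	return ttm_tt_backup(bdev, tt, flags);
}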
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
deleted file mode 100644
index adb37bc541e4..000000000000
--- a/include/drm/xe_pciids.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_PCIIDS_H_
-#define _XE_PCIIDS_H_
-
-/*
- * Lists below can be turned into initializers for a struct pci_device_id
- * by defining INTEL_VGA_DEVICE:
- *
- * #define INTEL_VGA_DEVICE(id, info) { \
- * 0x8086, id, \
- * ~0, ~0, \
- * 0x030000, 0xff0000, \
- * (unsigned long) info }
- *
- * And then calling like:
- *
- * XE_TGL_12_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
- *
- * To turn them into something else, just provide a different macro passed as
- * first argument.
- */
-
-/* TGL */
-#define XE_TGL_GT1_IDS(MACRO__, ...) \
- MACRO__(0x9A60, ## __VA_ARGS__), \
- MACRO__(0x9A68, ## __VA_ARGS__), \
- MACRO__(0x9A70, ## __VA_ARGS__)
-
-#define XE_TGL_GT2_IDS(MACRO__, ...) \
- MACRO__(0x9A40, ## __VA_ARGS__), \
- MACRO__(0x9A49, ## __VA_ARGS__), \
- MACRO__(0x9A59, ## __VA_ARGS__), \
- MACRO__(0x9A78, ## __VA_ARGS__), \
- MACRO__(0x9AC0, ## __VA_ARGS__), \
- MACRO__(0x9AC9, ## __VA_ARGS__), \
- MACRO__(0x9AD9, ## __VA_ARGS__), \
- MACRO__(0x9AF8, ## __VA_ARGS__)
-
-#define XE_TGL_IDS(MACRO__, ...) \
- XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
- XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
-
-/* RKL */
-#define XE_RKL_IDS(MACRO__, ...) \
- MACRO__(0x4C80, ## __VA_ARGS__), \
- MACRO__(0x4C8A, ## __VA_ARGS__), \
- MACRO__(0x4C8B, ## __VA_ARGS__), \
- MACRO__(0x4C8C, ## __VA_ARGS__), \
- MACRO__(0x4C90, ## __VA_ARGS__), \
- MACRO__(0x4C9A, ## __VA_ARGS__)
-
-/* DG1 */
-#define XE_DG1_IDS(MACRO__, ...) \
- MACRO__(0x4905, ## __VA_ARGS__), \
- MACRO__(0x4906, ## __VA_ARGS__), \
- MACRO__(0x4907, ## __VA_ARGS__), \
- MACRO__(0x4908, ## __VA_ARGS__), \
- MACRO__(0x4909, ## __VA_ARGS__)
-
-/* ADL-S */
-#define XE_ADLS_IDS(MACRO__, ...) \
- MACRO__(0x4680, ## __VA_ARGS__), \
- MACRO__(0x4682, ## __VA_ARGS__), \
- MACRO__(0x4688, ## __VA_ARGS__), \
- MACRO__(0x468A, ## __VA_ARGS__), \
- MACRO__(0x468B, ## __VA_ARGS__), \
- MACRO__(0x4690, ## __VA_ARGS__), \
- MACRO__(0x4692, ## __VA_ARGS__), \
- MACRO__(0x4693, ## __VA_ARGS__)
-
-/* ADL-P */
-#define XE_ADLP_IDS(MACRO__, ...) \
- MACRO__(0x46A0, ## __VA_ARGS__), \
- MACRO__(0x46A1, ## __VA_ARGS__), \
- MACRO__(0x46A2, ## __VA_ARGS__), \
- MACRO__(0x46A3, ## __VA_ARGS__), \
- MACRO__(0x46A6, ## __VA_ARGS__), \
- MACRO__(0x46A8, ## __VA_ARGS__), \
- MACRO__(0x46AA, ## __VA_ARGS__), \
- MACRO__(0x462A, ## __VA_ARGS__), \
- MACRO__(0x4626, ## __VA_ARGS__), \
- MACRO__(0x4628, ## __VA_ARGS__), \
- MACRO__(0x46B0, ## __VA_ARGS__), \
- MACRO__(0x46B1, ## __VA_ARGS__), \
- MACRO__(0x46B2, ## __VA_ARGS__), \
- MACRO__(0x46B3, ## __VA_ARGS__), \
- MACRO__(0x46C0, ## __VA_ARGS__), \
- MACRO__(0x46C1, ## __VA_ARGS__), \
- MACRO__(0x46C2, ## __VA_ARGS__), \
- MACRO__(0x46C3, ## __VA_ARGS__)
-
-/* ADL-N */
-#define XE_ADLN_IDS(MACRO__, ...) \
- MACRO__(0x46D0, ## __VA_ARGS__), \
- MACRO__(0x46D1, ## __VA_ARGS__), \
- MACRO__(0x46D2, ## __VA_ARGS__)
-
-/* RPL-S */
-#define XE_RPLS_IDS(MACRO__, ...) \
- MACRO__(0xA780, ## __VA_ARGS__), \
- MACRO__(0xA781, ## __VA_ARGS__), \
- MACRO__(0xA782, ## __VA_ARGS__), \
- MACRO__(0xA783, ## __VA_ARGS__), \
- MACRO__(0xA788, ## __VA_ARGS__), \
- MACRO__(0xA789, ## __VA_ARGS__), \
- MACRO__(0xA78A, ## __VA_ARGS__), \
- MACRO__(0xA78B, ## __VA_ARGS__)
-
-/* RPL-U */
-#define XE_RPLU_IDS(MACRO__, ...) \
- MACRO__(0xA721, ## __VA_ARGS__), \
- MACRO__(0xA7A1, ## __VA_ARGS__), \
- MACRO__(0xA7A9, ## __VA_ARGS__), \
- MACRO__(0xA7AC, ## __VA_ARGS__), \
- MACRO__(0xA7AD, ## __VA_ARGS__)
-
-/* RPL-P */
-#define XE_RPLP_IDS(MACRO__, ...) \
- XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \
- MACRO__(0xA720, ## __VA_ARGS__), \
- MACRO__(0xA7A0, ## __VA_ARGS__), \
- MACRO__(0xA7A8, ## __VA_ARGS__), \
- MACRO__(0xA7AA, ## __VA_ARGS__), \
- MACRO__(0xA7AB, ## __VA_ARGS__)
-
-/* DG2 */
-#define XE_DG2_G10_IDS(MACRO__, ...) \
- MACRO__(0x5690, ## __VA_ARGS__), \
- MACRO__(0x5691, ## __VA_ARGS__), \
- MACRO__(0x5692, ## __VA_ARGS__), \
- MACRO__(0x56A0, ## __VA_ARGS__), \
- MACRO__(0x56A1, ## __VA_ARGS__), \
- MACRO__(0x56A2, ## __VA_ARGS__), \
- MACRO__(0x56BE, ## __VA_ARGS__), \
- MACRO__(0x56BF, ## __VA_ARGS__)
-
-#define XE_DG2_G11_IDS(MACRO__, ...) \
- MACRO__(0x5693, ## __VA_ARGS__), \
- MACRO__(0x5694, ## __VA_ARGS__), \
- MACRO__(0x5695, ## __VA_ARGS__), \
- MACRO__(0x56A5, ## __VA_ARGS__), \
- MACRO__(0x56A6, ## __VA_ARGS__), \
- MACRO__(0x56B0, ## __VA_ARGS__), \
- MACRO__(0x56B1, ## __VA_ARGS__), \
- MACRO__(0x56BA, ## __VA_ARGS__), \
- MACRO__(0x56BB, ## __VA_ARGS__), \
- MACRO__(0x56BC, ## __VA_ARGS__), \
- MACRO__(0x56BD, ## __VA_ARGS__)
-
-#define XE_DG2_G12_IDS(MACRO__, ...) \
- MACRO__(0x5696, ## __VA_ARGS__), \
- MACRO__(0x5697, ## __VA_ARGS__), \
- MACRO__(0x56A3, ## __VA_ARGS__), \
- MACRO__(0x56A4, ## __VA_ARGS__), \
- MACRO__(0x56B2, ## __VA_ARGS__), \
- MACRO__(0x56B3, ## __VA_ARGS__)
-
-#define XE_DG2_IDS(MACRO__, ...) \
- XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
- XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
- XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
-
-#define XE_ATS_M150_IDS(MACRO__, ...) \
- MACRO__(0x56C0, ## __VA_ARGS__), \
- MACRO__(0x56C2, ## __VA_ARGS__)
-
-#define XE_ATS_M75_IDS(MACRO__, ...) \
- MACRO__(0x56C1, ## __VA_ARGS__)
-
-#define XE_ATS_M_IDS(MACRO__, ...) \
- XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
- XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
-
-/* MTL / ARL */
-#define XE_MTL_IDS(MACRO__, ...) \
- MACRO__(0x7D40, ## __VA_ARGS__), \
- MACRO__(0x7D41, ## __VA_ARGS__), \
- MACRO__(0x7D45, ## __VA_ARGS__), \
- MACRO__(0x7D51, ## __VA_ARGS__), \
- MACRO__(0x7D55, ## __VA_ARGS__), \
- MACRO__(0x7D60, ## __VA_ARGS__), \
- MACRO__(0x7D67, ## __VA_ARGS__), \
- MACRO__(0x7DD1, ## __VA_ARGS__), \
- MACRO__(0x7DD5, ## __VA_ARGS__)
-
-#define XE_LNL_IDS(MACRO__, ...) \
- MACRO__(0x6420, ## __VA_ARGS__), \
- MACRO__(0x64A0, ## __VA_ARGS__), \
- MACRO__(0x64B0, ## __VA_ARGS__)
-
-#endif
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index d040033dc8ee..8776844e0eeb 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -175,6 +175,7 @@
#define QCOM_ID_SDA630 327
#define QCOM_ID_MSM8905 331
#define QCOM_ID_SDX202 333
+#define QCOM_ID_SDM670 336
#define QCOM_ID_SDM450 338
#define QCOM_ID_SM8150 339
#define QCOM_ID_SDA845 341
@@ -233,11 +234,14 @@
#define QCOM_ID_SA8540P 461
#define QCOM_ID_QCM4290 469
#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM7325 475
#define QCOM_ID_SM8450_2 480
#define QCOM_ID_SM8450_3 482
#define QCOM_ID_SC7280 487
#define QCOM_ID_SC7180P 495
#define QCOM_ID_QCM6490 497
+#define QCOM_ID_QCS6490 498
+#define QCOM_ID_SM7325P 499
#define QCOM_ID_IPQ5000 503
#define QCOM_ID_IPQ0509 504
#define QCOM_ID_IPQ0518 505
@@ -252,8 +256,10 @@
#define QCOM_ID_IPQ9510 521
#define QCOM_ID_QRB4210 523
#define QCOM_ID_QRB2210 524
+#define QCOM_ID_SAR2130P 525
#define QCOM_ID_SM8475 530
#define QCOM_ID_SM8475P 531
+#define QCOM_ID_SA8255P 532
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_SM8475_2 540
@@ -261,6 +267,7 @@
#define QCOM_ID_X1E80100 555
#define QCOM_ID_SM8650 557
#define QCOM_ID_SM4450 568
+#define QCOM_ID_SAR1130P 579
#define QCOM_ID_QDU1010 587
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
@@ -271,7 +278,22 @@
#define QCOM_ID_IPQ5302 595
#define QCOM_ID_QCS8550 603
#define QCOM_ID_QCM8550 604
+#define QCOM_ID_SM8750 618
#define QCOM_ID_IPQ5300 624
+#define QCOM_ID_SM7635 636
+#define QCOM_ID_SM6650 640
+#define QCOM_ID_SM6650P 641
+#define QCOM_ID_IPQ5321 650
+#define QCOM_ID_IPQ5424 651
+#define QCOM_ID_QCM6690 657
+#define QCOM_ID_QCS6690 658
+#define QCOM_ID_SM8850 660
+#define QCOM_ID_QCS9100 667
+#define QCOM_ID_IPQ5404 671
+#define QCOM_ID_QCS8300 674
+#define QCOM_ID_QCS8275 675
+#define QCOM_ID_QCS9075 676
+#define QCOM_ID_QCS615 680
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
index 06f198ee7623..2ce1a06dc735 100644
--- a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
+++ b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
@@ -164,5 +164,6 @@
#define CLKID_DMC_SEL 151
#define CLKID_DMC_DIV 152
#define CLKID_DMC_SEL2 153
+#define CLKID_SYS_PLL_DIV16 154
#endif /* __A1_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
index 2b660c0f2c9f..0dfc5e78a2d5 100644
--- a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
+++ b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
@@ -21,5 +21,6 @@
#define CLKID_FCLK_DIV5 8
#define CLKID_FCLK_DIV7 9
#define CLKID_HIFI_PLL 10
+#define CLKID_SYS_PLL 11
#endif /* __A1_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h
new file mode 100644
index 000000000000..d115c741c255
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H
+
+#define CLKID_RTC_XTAL_CLKIN 0
+#define CLKID_RTC_32K_DIV 1
+#define CLKID_RTC_32K_MUX 2
+#define CLKID_RTC_32K 3
+#define CLKID_RTC_CLK 4
+#define CLKID_SYS_RESET_CTRL 5
+#define CLKID_SYS_PWR_CTRL 6
+#define CLKID_SYS_PAD_CTRL 7
+#define CLKID_SYS_CTRL 8
+#define CLKID_SYS_TS_PLL 9
+#define CLKID_SYS_DEV_ARB 10
+#define CLKID_SYS_MMC_PCLK 11
+#define CLKID_SYS_CPU_CTRL 12
+#define CLKID_SYS_JTAG_CTRL 13
+#define CLKID_SYS_IR_CTRL 14
+#define CLKID_SYS_IRQ_CTRL 15
+#define CLKID_SYS_MSR_CLK 16
+#define CLKID_SYS_ROM 17
+#define CLKID_SYS_UART_F 18
+#define CLKID_SYS_CPU_ARB 19
+#define CLKID_SYS_RSA 20
+#define CLKID_SYS_SAR_ADC 21
+#define CLKID_SYS_STARTUP 22
+#define CLKID_SYS_SECURE 23
+#define CLKID_SYS_SPIFC 24
+#define CLKID_SYS_NNA 25
+#define CLKID_SYS_ETH_MAC 26
+#define CLKID_SYS_GIC 27
+#define CLKID_SYS_RAMA 28
+#define CLKID_SYS_BIG_NIC 29
+#define CLKID_SYS_RAMB 30
+#define CLKID_SYS_AUDIO_PCLK 31
+#define CLKID_SYS_PWM_KL 32
+#define CLKID_SYS_PWM_IJ 33
+#define CLKID_SYS_USB 34
+#define CLKID_SYS_SD_EMMC_A 35
+#define CLKID_SYS_SD_EMMC_C 36
+#define CLKID_SYS_PWM_AB 37
+#define CLKID_SYS_PWM_CD 38
+#define CLKID_SYS_PWM_EF 39
+#define CLKID_SYS_PWM_GH 40
+#define CLKID_SYS_SPICC_1 41
+#define CLKID_SYS_SPICC_0 42
+#define CLKID_SYS_UART_A 43
+#define CLKID_SYS_UART_B 44
+#define CLKID_SYS_UART_C 45
+#define CLKID_SYS_UART_D 46
+#define CLKID_SYS_UART_E 47
+#define CLKID_SYS_I2C_M_A 48
+#define CLKID_SYS_I2C_M_B 49
+#define CLKID_SYS_I2C_M_C 50
+#define CLKID_SYS_I2C_M_D 51
+#define CLKID_SYS_I2S_S_A 52
+#define CLKID_SYS_RTC 53
+#define CLKID_SYS_GE2D 54
+#define CLKID_SYS_ISP 55
+#define CLKID_SYS_GPV_ISP_NIC 56
+#define CLKID_SYS_GPV_CVE_NIC 57
+#define CLKID_SYS_MIPI_DSI_HOST 58
+#define CLKID_SYS_MIPI_DSI_PHY 59
+#define CLKID_SYS_ETH_PHY 60
+#define CLKID_SYS_ACODEC 61
+#define CLKID_SYS_DWAP 62
+#define CLKID_SYS_DOS 63
+#define CLKID_SYS_CVE 64
+#define CLKID_SYS_VOUT 65
+#define CLKID_SYS_VC9000E 66
+#define CLKID_SYS_PWM_MN 67
+#define CLKID_SYS_SD_EMMC_B 68
+#define CLKID_AXI_SYS_NIC 69
+#define CLKID_AXI_ISP_NIC 70
+#define CLKID_AXI_CVE_NIC 71
+#define CLKID_AXI_RAMB 72
+#define CLKID_AXI_RAMA 73
+#define CLKID_AXI_CPU_DMC 74
+#define CLKID_AXI_NIC 75
+#define CLKID_AXI_DMA 76
+#define CLKID_AXI_MUX_NIC 77
+#define CLKID_AXI_CVE 78
+#define CLKID_AXI_DEV1_DMC 79
+#define CLKID_AXI_DEV0_DMC 80
+#define CLKID_AXI_DSP_DMC 81
+#define CLKID_12_24M_IN 82
+#define CLKID_12M_24M 83
+#define CLKID_FCLK_25M_DIV 84
+#define CLKID_FCLK_25M 85
+#define CLKID_GEN_SEL 86
+#define CLKID_GEN_DIV 87
+#define CLKID_GEN 88
+#define CLKID_SARADC_SEL 89
+#define CLKID_SARADC_DIV 90
+#define CLKID_SARADC 91
+#define CLKID_PWM_A_SEL 92
+#define CLKID_PWM_A_DIV 93
+#define CLKID_PWM_A 94
+#define CLKID_PWM_B_SEL 95
+#define CLKID_PWM_B_DIV 96
+#define CLKID_PWM_B 97
+#define CLKID_PWM_C_SEL 98
+#define CLKID_PWM_C_DIV 99
+#define CLKID_PWM_C 100
+#define CLKID_PWM_D_SEL 101
+#define CLKID_PWM_D_DIV 102
+#define CLKID_PWM_D 103
+#define CLKID_PWM_E_SEL 104
+#define CLKID_PWM_E_DIV 105
+#define CLKID_PWM_E 106
+#define CLKID_PWM_F_SEL 107
+#define CLKID_PWM_F_DIV 108
+#define CLKID_PWM_F 109
+#define CLKID_PWM_G_SEL 110
+#define CLKID_PWM_G_DIV 111
+#define CLKID_PWM_G 112
+#define CLKID_PWM_H_SEL 113
+#define CLKID_PWM_H_DIV 114
+#define CLKID_PWM_H 115
+#define CLKID_PWM_I_SEL 116
+#define CLKID_PWM_I_DIV 117
+#define CLKID_PWM_I 118
+#define CLKID_PWM_J_SEL 119
+#define CLKID_PWM_J_DIV 120
+#define CLKID_PWM_J 121
+#define CLKID_PWM_K_SEL 122
+#define CLKID_PWM_K_DIV 123
+#define CLKID_PWM_K 124
+#define CLKID_PWM_L_SEL 125
+#define CLKID_PWM_L_DIV 126
+#define CLKID_PWM_L 127
+#define CLKID_PWM_M_SEL 128
+#define CLKID_PWM_M_DIV 129
+#define CLKID_PWM_M 130
+#define CLKID_PWM_N_SEL 131
+#define CLKID_PWM_N_DIV 132
+#define CLKID_PWM_N 133
+#define CLKID_SPICC_A_SEL 134
+#define CLKID_SPICC_A_DIV 135
+#define CLKID_SPICC_A 136
+#define CLKID_SPICC_B_SEL 137
+#define CLKID_SPICC_B_DIV 138
+#define CLKID_SPICC_B 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC 142
+#define CLKID_SD_EMMC_A_SEL 143
+#define CLKID_SD_EMMC_A_DIV 144
+#define CLKID_SD_EMMC_A 145
+#define CLKID_SD_EMMC_B_SEL 146
+#define CLKID_SD_EMMC_B_DIV 147
+#define CLKID_SD_EMMC_B 148
+#define CLKID_SD_EMMC_C_SEL 149
+#define CLKID_SD_EMMC_C_DIV 150
+#define CLKID_SD_EMMC_C 151
+#define CLKID_TS_DIV 152
+#define CLKID_TS 153
+#define CLKID_ETH_125M_DIV 154
+#define CLKID_ETH_125M 155
+#define CLKID_ETH_RMII_DIV 156
+#define CLKID_ETH_RMII 157
+#define CLKID_MIPI_DSI_MEAS_SEL 158
+#define CLKID_MIPI_DSI_MEAS_DIV 159
+#define CLKID_MIPI_DSI_MEAS 160
+#define CLKID_DSI_PHY_SEL 161
+#define CLKID_DSI_PHY_DIV 162
+#define CLKID_DSI_PHY 163
+#define CLKID_VOUT_MCLK_SEL 164
+#define CLKID_VOUT_MCLK_DIV 165
+#define CLKID_VOUT_MCLK 166
+#define CLKID_VOUT_ENC_SEL 167
+#define CLKID_VOUT_ENC_DIV 168
+#define CLKID_VOUT_ENC 169
+#define CLKID_HCODEC_0_SEL 170
+#define CLKID_HCODEC_0_DIV 171
+#define CLKID_HCODEC_0 172
+#define CLKID_HCODEC_1_SEL 173
+#define CLKID_HCODEC_1_DIV 174
+#define CLKID_HCODEC_1 175
+#define CLKID_HCODEC 176
+#define CLKID_VC9000E_ACLK_SEL 177
+#define CLKID_VC9000E_ACLK_DIV 178
+#define CLKID_VC9000E_ACLK 179
+#define CLKID_VC9000E_CORE_SEL 180
+#define CLKID_VC9000E_CORE_DIV 181
+#define CLKID_VC9000E_CORE 182
+#define CLKID_CSI_PHY0_SEL 183
+#define CLKID_CSI_PHY0_DIV 184
+#define CLKID_CSI_PHY0 185
+#define CLKID_DEWARPA_SEL 186
+#define CLKID_DEWARPA_DIV 187
+#define CLKID_DEWARPA 188
+#define CLKID_ISP0_SEL 189
+#define CLKID_ISP0_DIV 190
+#define CLKID_ISP0 191
+#define CLKID_NNA_CORE_SEL 192
+#define CLKID_NNA_CORE_DIV 193
+#define CLKID_NNA_CORE 194
+#define CLKID_GE2D_SEL 195
+#define CLKID_GE2D_DIV 196
+#define CLKID_GE2D 197
+#define CLKID_VAPB_SEL 198
+#define CLKID_VAPB_DIV 199
+#define CLKID_VAPB 200
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-pll-clkc.h b/include/dt-bindings/clock/amlogic,c3-pll-clkc.h
new file mode 100644
index 000000000000..fcdc558715e8
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-pll-clkc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H
+
+#define CLKID_FCLK_50M_EN 0
+#define CLKID_FCLK_50M 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV2 3
+#define CLKID_FCLK_DIV2P5_DIV 4
+#define CLKID_FCLK_DIV2P5 5
+#define CLKID_FCLK_DIV3_DIV 6
+#define CLKID_FCLK_DIV3 7
+#define CLKID_FCLK_DIV4_DIV 8
+#define CLKID_FCLK_DIV4 9
+#define CLKID_FCLK_DIV5_DIV 10
+#define CLKID_FCLK_DIV5 11
+#define CLKID_FCLK_DIV7_DIV 12
+#define CLKID_FCLK_DIV7 13
+#define CLKID_GP0_PLL_DCO 14
+#define CLKID_GP0_PLL 15
+#define CLKID_HIFI_PLL_DCO 16
+#define CLKID_HIFI_PLL 17
+#define CLKID_MCLK_PLL_DCO 18
+#define CLKID_MCLK_PLL_OD 19
+#define CLKID_MCLK_PLL 20
+#define CLKID_MCLK0_SEL 21
+#define CLKID_MCLK0_SEL_EN 22
+#define CLKID_MCLK0_DIV 23
+#define CLKID_MCLK0 24
+#define CLKID_MCLK1_SEL 25
+#define CLKID_MCLK1_SEL_EN 26
+#define CLKID_MCLK1_DIV 27
+#define CLKID_MCLK1 28
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h b/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h
new file mode 100644
index 000000000000..663c9b349275
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef __AMLOGIC_C3_SCMI_CLKC_H
+#define __AMLOGIC_C3_SCMI_CLKC_H
+
+#define CLKID_DDR_PLL_OSC 0
+#define CLKID_DDR_PHY 1
+#define CLKID_TOP_PLL_OSC 2
+#define CLKID_USB_PLL_OSC 3
+#define CLKID_MIPIISP_VOUT 4
+#define CLKID_MCLK_PLL_OSC 5
+#define CLKID_USB_CTRL 6
+#define CLKID_ETH_PLL_OSC 7
+#define CLKID_OSC 8
+#define CLKID_SYS_CLK 9
+#define CLKID_AXI_CLK 10
+#define CLKID_CPU_CLK 11
+#define CLKID_FIXED_PLL_OSC 12
+#define CLKID_GP1_PLL_OSC 13
+#define CLKID_SYS_PLL_DIV16 14
+#define CLKID_CPU_CLK_DIV16 15
+
+#endif /* __AMLOGIC_C3_SCMI_CLKC_H */
diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h
new file mode 100644
index 000000000000..bacf712e8e04
--- /dev/null
+++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for AST2700 clock controller.
+ *
+ * Copyright (c) 2024 Aspeed Technology Inc.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_AST2700_H
+#define __DT_BINDINGS_CLOCK_AST2700_H
+
+/* SOC0 clk */
+#define SCU0_CLKIN 0
+#define SCU0_CLK_24M 1
+#define SCU0_CLK_192M 2
+#define SCU0_CLK_UART 3
+#define SCU0_CLK_UART_DIV13 3
+#define SCU0_CLK_PSP 4
+#define SCU0_CLK_HPLL 5
+#define SCU0_CLK_HPLL_DIV2 6
+#define SCU0_CLK_HPLL_DIV4 7
+#define SCU0_CLK_HPLL_DIV_AHB 8
+#define SCU0_CLK_DPLL 9
+#define SCU0_CLK_MPLL 10
+#define SCU0_CLK_MPLL_DIV2 11
+#define SCU0_CLK_MPLL_DIV4 12
+#define SCU0_CLK_MPLL_DIV8 13
+#define SCU0_CLK_MPLL_DIV_AHB 14
+#define SCU0_CLK_D0 15
+#define SCU0_CLK_D1 16
+#define SCU0_CLK_CRT0 17
+#define SCU0_CLK_CRT1 18
+#define SCU0_CLK_MPHY 19
+#define SCU0_CLK_AXI0 20
+#define SCU0_CLK_AXI1 21
+#define SCU0_CLK_AHB 22
+#define SCU0_CLK_APB 23
+#define SCU0_CLK_UART4 24
+#define SCU0_CLK_EMMCMUX 25
+#define SCU0_CLK_EMMC 26
+#define SCU0_CLK_U2PHY_CLK12M 27
+#define SCU0_CLK_U2PHY_REFCLK 28
+
+/* SOC0 clk-gate */
+#define SCU0_CLK_GATE_MCLK 29
+#define SCU0_CLK_GATE_ECLK 30
+#define SCU0_CLK_GATE_2DCLK 31
+#define SCU0_CLK_GATE_VCLK 32
+#define SCU0_CLK_GATE_BCLK 33
+#define SCU0_CLK_GATE_VGA0CLK 34
+#define SCU0_CLK_GATE_REFCLK 35
+#define SCU0_CLK_GATE_PORTBUSB2CLK 36
+#define SCU0_CLK_GATE_UHCICLK 37
+#define SCU0_CLK_GATE_VGA1CLK 38
+#define SCU0_CLK_GATE_DDRPHYCLK 39
+#define SCU0_CLK_GATE_E2M0CLK 40
+#define SCU0_CLK_GATE_HACCLK 41
+#define SCU0_CLK_GATE_PORTAUSB2CLK 42
+#define SCU0_CLK_GATE_UART4CLK 43
+#define SCU0_CLK_GATE_SLICLK 44
+#define SCU0_CLK_GATE_DACCLK 45
+#define SCU0_CLK_GATE_DP 46
+#define SCU0_CLK_GATE_E2M1CLK 47
+#define SCU0_CLK_GATE_CRT0CLK 48
+#define SCU0_CLK_GATE_CRT1CLK 49
+#define SCU0_CLK_GATE_ECDSACLK 50
+#define SCU0_CLK_GATE_RSACLK 51
+#define SCU0_CLK_GATE_RVAS0CLK 52
+#define SCU0_CLK_GATE_UFSCLK 53
+#define SCU0_CLK_GATE_EMMCCLK 54
+#define SCU0_CLK_GATE_RVAS1CLK 55
+#define SCU0_CLK_U2PHY_REFCLKSRC 56
+#define SCU0_CLK_AHBMUX 57
+#define SCU0_CLK_MPHYSRC 58
+
+/* SOC1 clk */
+#define SCU1_CLKIN 0
+#define SCU1_CLK_HPLL 1
+#define SCU1_CLK_APLL 2
+#define SCU1_CLK_APLL_DIV2 3
+#define SCU1_CLK_APLL_DIV4 4
+#define SCU1_CLK_DPLL 5
+#define SCU1_CLK_UXCLK 6
+#define SCU1_CLK_HUXCLK 7
+#define SCU1_CLK_UARTX 8
+#define SCU1_CLK_HUARTX 9
+#define SCU1_CLK_AHB 10
+#define SCU1_CLK_APB 11
+#define SCU1_CLK_UART0 12
+#define SCU1_CLK_UART1 13
+#define SCU1_CLK_UART2 14
+#define SCU1_CLK_UART3 15
+#define SCU1_CLK_UART5 16
+#define SCU1_CLK_UART6 17
+#define SCU1_CLK_UART7 18
+#define SCU1_CLK_UART8 19
+#define SCU1_CLK_UART9 20
+#define SCU1_CLK_UART10 21
+#define SCU1_CLK_UART11 22
+#define SCU1_CLK_UART12 23
+#define SCU1_CLK_UART13 24
+#define SCU1_CLK_UART14 25
+#define SCU1_CLK_APLL_DIVN 26
+#define SCU1_CLK_SDMUX 27
+#define SCU1_CLK_SDCLK 28
+#define SCU1_CLK_RMII 29
+#define SCU1_CLK_RGMII 30
+#define SCU1_CLK_MACHCLK 31
+#define SCU1_CLK_MAC0RCLK 32
+#define SCU1_CLK_MAC1RCLK 33
+#define SCU1_CLK_CAN 34
+
+/* SOC1 clk gate */
+#define SCU1_CLK_GATE_LCLK0 35
+#define SCU1_CLK_GATE_LCLK1 36
+#define SCU1_CLK_GATE_ESPI0CLK 37
+#define SCU1_CLK_GATE_ESPI1CLK 38
+#define SCU1_CLK_GATE_SDCLK 39
+#define SCU1_CLK_GATE_IPEREFCLK 40
+#define SCU1_CLK_GATE_REFCLK 41
+#define SCU1_CLK_GATE_LPCHCLK 42
+#define SCU1_CLK_GATE_MAC0CLK 43
+#define SCU1_CLK_GATE_MAC1CLK 44
+#define SCU1_CLK_GATE_MAC2CLK 45
+#define SCU1_CLK_GATE_UART0CLK 46
+#define SCU1_CLK_GATE_UART1CLK 47
+#define SCU1_CLK_GATE_UART2CLK 48
+#define SCU1_CLK_GATE_UART3CLK 49
+#define SCU1_CLK_GATE_I2CCLK 50
+#define SCU1_CLK_GATE_I3C0CLK 51
+#define SCU1_CLK_GATE_I3C1CLK 52
+#define SCU1_CLK_GATE_I3C2CLK 53
+#define SCU1_CLK_GATE_I3C3CLK 54
+#define SCU1_CLK_GATE_I3C4CLK 55
+#define SCU1_CLK_GATE_I3C5CLK 56
+#define SCU1_CLK_GATE_I3C6CLK 57
+#define SCU1_CLK_GATE_I3C7CLK 58
+#define SCU1_CLK_GATE_I3C8CLK 59
+#define SCU1_CLK_GATE_I3C9CLK 60
+#define SCU1_CLK_GATE_I3C10CLK 61
+#define SCU1_CLK_GATE_I3C11CLK 62
+#define SCU1_CLK_GATE_I3C12CLK 63
+#define SCU1_CLK_GATE_I3C13CLK 64
+#define SCU1_CLK_GATE_I3C14CLK 65
+#define SCU1_CLK_GATE_I3C15CLK 66
+#define SCU1_CLK_GATE_UART5CLK 67
+#define SCU1_CLK_GATE_UART6CLK 68
+#define SCU1_CLK_GATE_UART7CLK 69
+#define SCU1_CLK_GATE_UART8CLK 70
+#define SCU1_CLK_GATE_UART9CLK 71
+#define SCU1_CLK_GATE_UART10CLK 72
+#define SCU1_CLK_GATE_UART11CLK 73
+#define SCU1_CLK_GATE_UART12CLK 74
+#define SCU1_CLK_GATE_FSICLK 75
+#define SCU1_CLK_GATE_LTPIPHYCLK 76
+#define SCU1_CLK_GATE_LTPICLK 77
+#define SCU1_CLK_GATE_VGALCLK 78
+#define SCU1_CLK_GATE_UHCICLK 79
+#define SCU1_CLK_GATE_CANCLK 80
+#define SCU1_CLK_GATE_PCICLK 81
+#define SCU1_CLK_GATE_SLICLK 82
+#define SCU1_CLK_GATE_E2MCLK 83
+#define SCU1_CLK_GATE_PORTCUSB2CLK 84
+#define SCU1_CLK_GATE_PORTDUSB2CLK 85
+#define SCU1_CLK_GATE_LTPI1TXCLK 86
+#define SCU1_CLK_I3C 87
+
+#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 7ae96c7bd72f..f60fff261130 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -122,6 +122,8 @@
#define ASPEED_RESET_PCIE_DEV_OEN 20
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
+#define ASPEED_RESET_MAC2 12
+#define ASPEED_RESET_MAC1 11
#define ASPEED_RESET_PCI_DP 5
#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 3e3972a814c1..f2a7b7d39c0d 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -38,6 +38,14 @@
#define PMC_CPU (PMC_MAIN + 9)
#define PMC_MCK1 (PMC_MAIN + 10)
+/* SAM9X7 */
+#define PMC_PLLADIV2 (PMC_MAIN + 11)
+#define PMC_LVDSPLL (PMC_MAIN + 12)
+
+/* SAMA7D65 */
+#define PMC_MCK3 (PMC_MAIN + 13)
+#define PMC_MCK5 (PMC_MAIN + 14)
+
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
#define AT91_PMC_LOCKA 1 /* PLLA Lock */
@@ -51,4 +59,8 @@
#define AT91_PMC_GCKRDY 24 /* Generated Clocks */
#endif
+/* Slow clock. */
+#define SCKC_MD_SLCK 0
+#define SCKC_TD_SLCK 1
+
#endif
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 08c82c22fa5f..607f23b83fa7 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -155,5 +155,12 @@
#define AUD_CLKID_SYSCLK_B_DIV 175
#define AUD_CLKID_SYSCLK_A_EN 176
#define AUD_CLKID_SYSCLK_B_EN 177
+#define AUD_CLKID_EARCRX 178
+#define AUD_CLKID_EARCRX_CMDC_SEL 179
+#define AUD_CLKID_EARCRX_CMDC_DIV 180
+#define AUD_CLKID_EARCRX_CMDC 181
+#define AUD_CLKID_EARCRX_DMAC_SEL 182
+#define AUD_CLKID_EARCRX_DMAC_DIV 183
+#define AUD_CLKID_EARCRX_DMAC 184
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axis,artpec8-clk.h b/include/dt-bindings/clock/axis,artpec8-clk.h
new file mode 100644
index 000000000000..1e6e1409dd94
--- /dev/null
+++ b/include/dt-bindings/clock/axis,artpec8-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Device Tree binding constants for ARTPEC-8 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_ARTPEC8_H
+#define _DT_BINDINGS_CLOCK_ARTPEC8_H
+
+/* CMU_CMU */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_DOUT_SHARED0_DIV2 2
+#define CLK_DOUT_SHARED0_DIV3 3
+#define CLK_DOUT_SHARED0_DIV4 4
+#define CLK_FOUT_SHARED1_PLL 5
+#define CLK_DOUT_SHARED1_DIV2 6
+#define CLK_DOUT_SHARED1_DIV3 7
+#define CLK_DOUT_SHARED1_DIV4 8
+#define CLK_FOUT_AUDIO_PLL 9
+#define CLK_DOUT_CMU_BUS 10
+#define CLK_DOUT_CMU_BUS_DLP 11
+#define CLK_DOUT_CMU_CDC_CORE 12
+#define CLK_DOUT_CMU_OTP 13
+#define CLK_DOUT_CMU_CORE_MAIN 14
+#define CLK_DOUT_CMU_CORE_DLP 15
+#define CLK_DOUT_CMU_CPUCL_SWITCH 16
+#define CLK_DOUT_CMU_DLP_CORE 17
+#define CLK_DOUT_CMU_FSYS_BUS 18
+#define CLK_DOUT_CMU_FSYS_IP 19
+#define CLK_DOUT_CMU_FSYS_SCAN0 20
+#define CLK_DOUT_CMU_FSYS_SCAN1 21
+#define CLK_DOUT_CMU_GPU_3D 22
+#define CLK_DOUT_CMU_GPU_2D 23
+#define CLK_DOUT_CMU_IMEM_ACLK 24
+#define CLK_DOUT_CMU_IMEM_JPEG 25
+#define CLK_DOUT_CMU_MIF_SWITCH 26
+#define CLK_DOUT_CMU_MIF_BUSP 27
+#define CLK_DOUT_CMU_PERI_DISP 28
+#define CLK_DOUT_CMU_PERI_IP 29
+#define CLK_DOUT_CMU_PERI_AUDIO 30
+#define CLK_DOUT_CMU_RSP_CORE 31
+#define CLK_DOUT_CMU_TRFM_CORE 32
+#define CLK_DOUT_CMU_VCA_ACE 33
+#define CLK_DOUT_CMU_VCA_OD 34
+#define CLK_DOUT_CMU_VIO_CORE 35
+#define CLK_DOUT_CMU_VIO_AUDIO 36
+#define CLK_DOUT_CMU_VIP0_CORE 37
+#define CLK_DOUT_CMU_VIP1_CORE 38
+#define CLK_DOUT_CMU_VPP_CORE 39
+
+/* CMU_BUS */
+#define CLK_MOUT_BUS_ACLK_USER 1
+#define CLK_MOUT_BUS_DLP_USER 2
+#define CLK_DOUT_BUS_PCLK 3
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_ACLK_USER 1
+#define CLK_MOUT_CORE_DLP_USER 2
+#define CLK_DOUT_CORE_PCLK 3
+
+/* CMU_CPUCL */
+#define CLK_FOUT_CPUCL_PLL 1
+#define CLK_MOUT_CPUCL_PLL 2
+#define CLK_MOUT_CPUCL_SWITCH_USER 3
+#define CLK_DOUT_CPUCL_CPU 4
+#define CLK_DOUT_CPUCL_CLUSTER_ACLK 5
+#define CLK_DOUT_CPUCL_CLUSTER_PCLKDBG 6
+#define CLK_DOUT_CPUCL_CLUSTER_CNTCLK 7
+#define CLK_DOUT_CPUCL_CLUSTER_ATCLK 8
+#define CLK_DOUT_CPUCL_PCLK 9
+#define CLK_DOUT_CPUCL_CMUREF 10
+#define CLK_DOUT_CPUCL_DBG 11
+#define CLK_DOUT_CPUCL_PCLKDBG 12
+#define CLK_GOUT_CPUCL_CLUSTER_CPU 13
+#define CLK_GOUT_CPUCL_SHORTSTOP 14
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG 15
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK 16
+
+/* CMU_FSYS */
+#define CLK_FOUT_FSYS_PLL 1
+#define CLK_MOUT_FSYS_SCAN0_USER 2
+#define CLK_MOUT_FSYS_SCAN1_USER 3
+#define CLK_MOUT_FSYS_BUS_USER 4
+#define CLK_MOUT_FSYS_MMC_USER 5
+#define CLK_DOUT_FSYS_PCIE_PIPE 6
+#define CLK_DOUT_FSYS_ADC 7
+#define CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL 8
+#define CLK_DOUT_FSYS_EQOS_INT125 9
+#define CLK_DOUT_FSYS_OTP_MEM 10
+#define CLK_DOUT_FSYS_SCLK_UART 11
+#define CLK_DOUT_FSYS_EQOS_25 12
+#define CLK_DOUT_FSYS_EQOS_2p5 13
+#define CLK_DOUT_FSYS_BUS300 14
+#define CLK_DOUT_FSYS_BUS_QSPI 15
+#define CLK_DOUT_FSYS_MMC_CARD0 16
+#define CLK_DOUT_FSYS_MMC_CARD1 17
+#define CLK_DOUT_SCAN_CLK_FSYS_125 18
+#define CLK_DOUT_FSYS_QSPI 19
+#define CLK_DOUT_FSYS_SFMC_NAND 20
+#define CLK_DOUT_FSYS_SCAN_CLK_MMC 21
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 22
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 23
+#define CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK 24
+#define CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK 25
+#define CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK 26
+#define CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK 27
+#define CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0 28
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 29
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG 30
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 31
+#define CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK 32
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I 33
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I 34
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5 35
+#define CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND 36
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN 37
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK 38
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN 39
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK 40
+#define CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN 41
+#define CLK_GOUT_FSYS_UART0_PCLK 42
+#define CLK_GOUT_FSYS_UART0_SCLK_UART 43
+#define CLK_GOUT_FSYS_BUS_QSPI 44
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK 45
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK 46
+
+/* CMU_IMEM */
+#define CLK_MOUT_IMEM_ACLK_USER 1
+#define CLK_MOUT_IMEM_GIC_CA53 2
+#define CLK_MOUT_IMEM_GIC_CA5 3
+#define CLK_MOUT_IMEM_JPEG_USER 4
+#define CLK_GOUT_IMEM_MCT_PCLK 5
+#define CLK_GOUT_IMEM_PCLK_TMU0_APBIF 6
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_IP_USER 1
+#define CLK_MOUT_PERI_AUDIO_USER 2
+#define CLK_MOUT_PERI_I2S0 3
+#define CLK_MOUT_PERI_I2S1 4
+#define CLK_MOUT_PERI_DISP_USER 5
+#define CLK_DOUT_PERI_SPI 6
+#define CLK_DOUT_PERI_UART1 7
+#define CLK_DOUT_PERI_UART2 8
+#define CLK_DOUT_PERI_PCLK 9
+#define CLK_DOUT_PERI_I2S0 10
+#define CLK_DOUT_PERI_I2S1 11
+#define CLK_DOUT_PERI_DSIM 12
+#define CLK_GOUT_PERI_UART1_PCLK 13
+#define CLK_GOUT_PERI_UART1_SCLK_UART 14
+#define CLK_GOUT_PERI_UART2_PCLK 15
+#define CLK_GOUT_PERI_UART2_SCLK_UART 16
+#define CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK 17
+#define CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK 18
+#define CLK_GOUT_PERI_SPI0_PCLK 19
+#define CLK_GOUT_PERI_SPI0_SCLK_SPI 20
+#define CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 21
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST 22
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST 23
+#define CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK 24
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK 25
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK 26
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK 27
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK 28
+
+#endif /* _DT_BINDINGS_CLOCK_ARTPEC8_H */
diff --git a/include/dt-bindings/clock/cirrus,ep9301-syscon.h b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
new file mode 100644
index 000000000000..6bb8f532e7d0
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+#define DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+
+#define EP93XX_CLK_PLL1 0
+#define EP93XX_CLK_PLL2 1
+
+#define EP93XX_CLK_FCLK 2
+#define EP93XX_CLK_HCLK 3
+#define EP93XX_CLK_PCLK 4
+
+#define EP93XX_CLK_UART 5
+#define EP93XX_CLK_SPI 6
+#define EP93XX_CLK_PWM 7
+#define EP93XX_CLK_USB 8
+
+#define EP93XX_CLK_M2M0 9
+#define EP93XX_CLK_M2M1 10
+
+#define EP93XX_CLK_M2P0 11
+#define EP93XX_CLK_M2P1 12
+#define EP93XX_CLK_M2P2 13
+#define EP93XX_CLK_M2P3 14
+#define EP93XX_CLK_M2P4 15
+#define EP93XX_CLK_M2P5 16
+#define EP93XX_CLK_M2P6 17
+#define EP93XX_CLK_M2P7 18
+#define EP93XX_CLK_M2P8 19
+#define EP93XX_CLK_M2P9 20
+
+#define EP93XX_CLK_UART1 21
+#define EP93XX_CLK_UART2 22
+#define EP93XX_CLK_UART3 23
+
+#define EP93XX_CLK_ADC 24
+#define EP93XX_CLK_ADC_EN 25
+
+#define EP93XX_CLK_KEYPAD 26
+
+#define EP93XX_CLK_VIDEO 27
+
+#define EP93XX_CLK_I2S_MCLK 28
+#define EP93XX_CLK_I2S_SCLK 29
+#define EP93XX_CLK_I2S_LRCLK 30
+
+#endif /* DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H */
diff --git a/include/dt-bindings/clock/cix,sky1.h b/include/dt-bindings/clock/cix,sky1.h
new file mode 100644
index 000000000000..9245ebd1e80a
--- /dev/null
+++ b/include/dt-bindings/clock/cix,sky1.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2024-2025 Cix Technology Group Co., Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_CIX_SKY1_H
+#define _DT_BINDINGS_CLK_CIX_SKY1_H
+
+#define CLK_TREE_CPU_GICxCLK 0
+#define CLK_TREE_CPU_PPUCLK 1
+#define CLK_TREE_CPU_PERIPHCLK 2
+#define CLK_TREE_DSU_CLK 3
+#define CLK_TREE_DSU_PCLK 4
+#define CLK_TREE_CPU_CLK_BC0 5
+#define CLK_TREE_CPU_CLK_BC1 6
+#define CLK_TREE_CPU_CLK_BC2 7
+#define CLK_TREE_CPU_CLK_BC3 8
+#define CLK_TREE_CPU_CLK_MC0 9
+#define CLK_TREE_CPU_CLK_MC1 10
+#define CLK_TREE_CPU_CLK_MC2 11
+#define CLK_TREE_CPU_CLK_MC3 12
+#define CLK_TREE_CPU_CLK_LC0 13
+#define CLK_TREE_CPU_CLK_LC1 14
+#define CLK_TREE_CPU_CLK_LC2 15
+#define CLK_TREE_CPU_CLK_LC3 16
+#define CLK_TREE_CSI_CTRL0_PCLK 17
+#define CLK_TREE_CSI_CTRL1_PCLK 18
+#define CLK_TREE_CSI_CTRL2_PCLK 19
+#define CLK_TREE_CSI_CTRL3_PCLK 20
+#define CLK_TREE_CSI_DMA0_PCLK 21
+#define CLK_TREE_CSI_DMA1_PCLK 22
+#define CLK_TREE_CSI_DMA2_PCLK 23
+#define CLK_TREE_CSI_DMA3_PCLK 24
+#define CLK_TREE_CSI_PHY0_PSM 25
+#define CLK_TREE_CSI_PHY1_PSM 26
+#define CLK_TREE_CSI_PHY0_APBCLK 27
+#define CLK_TREE_CSI_PHY1_APBCLK 28
+#define CLK_TREE_FCH_APB_CLK 29
+#define CLK_TREE_GPU_CLK_400M 30
+#define CLK_TREE_GPU_CLK_CORE 31
+#define CLK_TREE_GPU_CLK_STACKS 32
+#define CLK_TREE_DP0_PIXEL0 33
+#define CLK_TREE_DP0_PIXEL1 34
+#define CLK_TREE_DP1_PIXEL0 35
+#define CLK_TREE_DP1_PIXEL1 36
+#define CLK_TREE_DP2_PIXEL0 37
+#define CLK_TREE_DP2_PIXEL1 38
+#define CLK_TREE_DP3_PIXEL0 39
+#define CLK_TREE_DP3_PIXEL1 40
+#define CLK_TREE_DP4_PIXEL0 41
+#define CLK_TREE_DP4_PIXEL1 42
+#define CLK_TREE_DPU_CLK 43
+#define CLK_TREE_DPU0_ACLK 44
+#define CLK_TREE_DPU1_ACLK 45
+#define CLK_TREE_DPU2_ACLK 46
+#define CLK_TREE_DPU3_ACLK 47
+#define CLK_TREE_DPU4_ACLK 48
+#define CLK_TREE_DPC0_VIDCLK0 49
+#define CLK_TREE_DPC0_VIDCLK1 50
+#define CLK_TREE_DPC1_VIDCLK0 51
+#define CLK_TREE_DPC1_VIDCLK1 52
+#define CLK_TREE_DPC2_VIDCLK0 53
+#define CLK_TREE_DPC2_VIDCLK1 54
+#define CLK_TREE_DPC3_VIDCLK0 55
+#define CLK_TREE_DPC3_VIDCLK1 56
+#define CLK_TREE_DPC4_VIDCLK0 57
+#define CLK_TREE_DPC4_VIDCLK1 58
+#define CLK_TREE_DPC0_APBCLK 59
+#define CLK_TREE_DPC1_APBCLK 60
+#define CLK_TREE_DPC2_APBCLK 61
+#define CLK_TREE_DPC3_APBCLK 62
+#define CLK_TREE_DPC4_APBCLK 63
+#define CLK_TREE_NPU_MEMCLK 64
+#define CLK_TREE_NPU_SYSCLK 65
+#define CLK_TREE_NPU_DBGCLK 66
+#define CLK_TREE_VPU_APBCLK 67
+#define CLK_TREE_ISP_ACLK 68
+#define CLK_TREE_ISP_SCLK 69
+#define CLK_TREE_AUDIO_CLK4 70
+#define CLK_TREE_AUDIO_CLK5 71
+#define CLK_TREE_CAMERA_MCLK0 72
+#define CLK_TREE_CAMERA_MCLK1 73
+#define CLK_TREE_CAMERA_MCLK2 74
+#define CLK_TREE_CAMERA_MCLK3 75
+#define CLK_TREE_AUDIO_CLK0 76
+#define CLK_TREE_AUDIO_CLK1 77
+#define CLK_TREE_AUDIO_CLK2 78
+#define CLK_TREE_AUDIO_CLK3 79
+#define CLK_TREE_MM_NI700_CLK 80
+#define CLK_TREE_SYS_NI700_CLK 81
+#define CLK_TREE_GMAC0_ACLK 82
+#define CLK_TREE_GMAC1_ACLK 83
+#define CLK_TREE_GMAC0_DIV_ACLK 84
+#define CLK_TREE_GMAC0_DIV_TXCLK 85
+#define CLK_TREE_GMAC0_RGMII0_TXCLK 86
+#define CLK_TREE_GMAC1_DIV_ACLK 87
+#define CLK_TREE_GMAC1_DIV_TXCLK 88
+#define CLK_TREE_GMAC1_RGMII0_TXCLK 89
+#define CLK_TREE_GMAC0_PCLK 90
+#define CLK_TREE_GMAC1_PCLK 91
+#define CLK_TREE_USB2_0_AXI_GATE 92
+#define CLK_TREE_USB2_0_APB_GATE 93
+#define CLK_TREE_USB2_1_AXI_GATE 94
+#define CLK_TREE_USB2_1_APB_GATE 95
+#define CLK_TREE_USB2_2_AXI_GATE 96
+#define CLK_TREE_USB2_2_APB_GATE 97
+#define CLK_TREE_USB2_3_AXI_GATE 98
+#define CLK_TREE_USB2_3_APB_GATE 99
+#define CLK_TREE_USB2_0_PHY_GATE 100
+#define CLK_TREE_USB2_1_PHY_GATE 101
+#define CLK_TREE_USB2_2_PHY_GATE 102
+#define CLK_TREE_USB2_3_PHY_GATE 103
+#define CLK_TREE_USB3C_DRD_AXI_GATE 104
+#define CLK_TREE_USB3C_DRD_APB_GATE 105
+#define CLK_TREE_USB3C_DRD_PHY2_GATE 106
+#define CLK_TREE_USB3C_DRD_PHY3_GATE 107
+#define CLK_TREE_USB3C_0_AXI_GATE 108
+#define CLK_TREE_USB3C_0_APB_GATE 109
+#define CLK_TREE_USB3C_0_PHY2_GATE 110
+#define CLK_TREE_USB3C_0_PHY3_GATE 111
+#define CLK_TREE_USB3C_1_AXI_GATE 112
+#define CLK_TREE_USB3C_1_APB_GATE 113
+#define CLK_TREE_USB3C_1_PHY2_GATE 114
+#define CLK_TREE_USB3C_1_PHY3_GATE 115
+#define CLK_TREE_USB3C_2_AXI_GATE 116
+#define CLK_TREE_USB3C_2_APB_GATE 117
+#define CLK_TREE_USB3C_2_PHY2_GATE 118
+#define CLK_TREE_USB3C_2_PHY3_GATE 119
+#define CLK_TREE_USB3A_0_AXI_GATE 120
+#define CLK_TREE_USB3A_0_APB_GATE 121
+#define CLK_TREE_USB3A_0_PHY2_GATE 122
+#define CLK_TREE_USB3A_1_AXI_GATE 123
+#define CLK_TREE_USB3A_1_APB_GATE 124
+#define CLK_TREE_USB3A_1_PHY2_GATE 125
+#define CLK_TREE_USB3A_PHY3_GATE 126
+#define CLK_TREE_USB2_0_CLK_SOF 127
+#define CLK_TREE_USB2_1_CLK_SOF 128
+#define CLK_TREE_USB2_2_CLK_SOF 129
+#define CLK_TREE_USB2_3_CLK_SOF 130
+#define CLK_TREE_USB3C_DRD_CLK_SOF 131
+#define CLK_TREE_USB3C_H0_CLK_SOF 132
+#define CLK_TREE_USB3C_H1_CLK_SOF 133
+#define CLK_TREE_USB3C_H2_CLK_SOF 134
+#define CLK_TREE_USB3A_H0_CLK_SOF 135
+#define CLK_TREE_USB3A_H1_CLK_SOF 136
+#define CLK_TREE_USB2_0_CLK_LPM 137
+#define CLK_TREE_USB2_1_CLK_LPM 138
+#define CLK_TREE_USB2_2_CLK_LPM 139
+#define CLK_TREE_USB2_3_CLK_LPM 140
+#define CLK_TREE_USB3C_DRD_CLK_LPM 141
+#define CLK_TREE_USB3C_H0_CLK_LPM 142
+#define CLK_TREE_USB3C_H1_CLK_LPM 143
+#define CLK_TREE_USB3C_H2_CLK_LPM 144
+#define CLK_TREE_USB3A_H0_CLK_LPM 145
+#define CLK_TREE_USB3A_H1_CLK_LPM 146
+#define CLK_TREE_USB2_0_PHY_REF 147
+#define CLK_TREE_USB2_1_PHY_REF 148
+#define CLK_TREE_USB2_2_PHY_REF 149
+#define CLK_TREE_USB2_3_PHY_REF 150
+#define CLK_TREE_USB3C_DRD_PHY_REF 151
+#define CLK_TREE_USB3C_H0_PHY_REF 152
+#define CLK_TREE_USB3C_H1_PHY_REF 153
+#define CLK_TREE_USB3C_H2_PHY_REF 154
+#define CLK_TREE_USB3A_H0_PHY_REF 155
+#define CLK_TREE_USB3A_H1_PHY_REF 156
+#define CLK_TREE_USB3C_DRD_PHY_x4_REF 157
+#define CLK_TREE_USB3C_H0_PHY_x4_REF 158
+#define CLK_TREE_USB3C_H1_PHY_x4_REF 159
+#define CLK_TREE_USB3C_H2_PHY_x4_REF 160
+#define CLK_TREE_USB3A_PHY_x2_REF 161
+#define CLK_TREE_PCIE_X8CTRL_APB 162
+#define CLK_TREE_PCIE_X4CTRL_APB 163
+#define CLK_TREE_PCIE_X2CTRL_APB 164
+#define CLK_TREE_PCIE_X1_0CTRL_APB 165
+#define CLK_TREE_PCIE_X1_1CTRL_APB 166
+#define CLK_TREE_PCIE_X8_PHY_APB 167
+#define CLK_TREE_PCIE_X4_PHY_APB 168
+#define CLK_TREE_PCIE_X211_PHY_APB 169
+#define CLK_TREE_PCIE_NI700_CLK 170
+#define CLK_TREE_PCIE_CTRL0_CLK 171
+#define CLK_TREE_PCIE_CTRL1_CLK 172
+#define CLK_TREE_PCIE_CTRL2_CLK 173
+#define CLK_TREE_PCIE_CTRL3_CLK 174
+#define CLK_TREE_PCIE_CTRL4_CLK 175
+#define CLK_TREE_CSI_CTRL0_SYSCLK 176
+#define CLK_TREE_CSI_CTRL1_SYSCLK 177
+#define CLK_TREE_CSI_CTRL2_SYSCLK 178
+#define CLK_TREE_CSI_CTRL3_SYSCLK 179
+#define CLK_TREE_CSI_CTRL0_PIXEL0_CLK 180
+#define CLK_TREE_CSI_CTRL0_PIXEL1_CLK 181
+#define CLK_TREE_CSI_CTRL0_PIXEL2_CLK 182
+#define CLK_TREE_CSI_CTRL0_PIXEL3_CLK 183
+#define CLK_TREE_CSI_CTRL1_PIXEL0_CLK 184
+#define CLK_TREE_CSI_CTRL2_PIXEL0_CLK 185
+#define CLK_TREE_CSI_CTRL2_PIXEL1_CLK 186
+#define CLK_TREE_CSI_CTRL2_PIXEL2_CLK 187
+#define CLK_TREE_CSI_CTRL2_PIXEL3_CLK 188
+#define CLK_TREE_CSI_CTRL3_PIXEL0_CLK 189
+#define CLK_TREE_CI700_GCLK0 190
+#define CLK_TREE_DDRC0_ACLK_CLK 191
+#define CLK_TREE_DDRC1_ACLK_CLK 192
+#define CLK_TREE_DDRC2_ACLK_CLK 193
+#define CLK_TREE_DDRC3_ACLK_CLK 194
+#define CLK_TREE_DDRC0_DFICLK_CLK 195
+#define CLK_TREE_DDRC1_DFICLK_CLK 196
+#define CLK_TREE_DDRC2_DFICLK_CLK 197
+#define CLK_TREE_DDRC3_DFICLK_CLK 198
+#define CLK_TREE_PHY0_SYNC_CLK 199
+#define CLK_TREE_PHY1_SYNC_CLK 200
+#define CLK_TREE_PHY2_SYNC_CLK 201
+#define CLK_TREE_PHY3_SYNC_CLK 202
+#define CLK_TREE_PHY0_BYPASS_CLK 203
+#define CLK_TREE_PHY1_BYPASS_CLK 204
+#define CLK_TREE_PHY2_BYPASS_CLK 205
+#define CLK_TREE_PHY3_BYPASS_CLK 206
+#define CLK_TREE_DDRC_0_APB 207
+#define CLK_TREE_DDRC_1_APB 208
+#define CLK_TREE_DDRC_2_APB 209
+#define CLK_TREE_DDRC_3_APB 210
+#define CLK_TREE_TZC400_0_APB 211
+#define CLK_TREE_TZC400_1_APB 212
+#define CLK_TREE_TZC400_2_APB 213
+#define CLK_TREE_TZC400_3_APB 214
+#define CLK_TREE_S5_SENSOR_HUB_25M 215
+#define CLK_TREE_S5_SENSOR_HUB_400M 216
+#define CLK_TREE_S5_CSS600_100M 217
+#define CLK_TREE_S5_DFD_800M 218
+#define CLK_TREE_S5_CSU_SE_800M 219
+#define CLK_TREE_S5_CSU_PM_800M 220
+#define CLK_TREE_PCIE_REF_B0 221
+#define CLK_TREE_PCIE_REF_B1 222
+#define CLK_TREE_PCIE_REF_B2 223
+#define CLK_TREE_PCIE_REF_B3 224
+#define CLK_TREE_PCIE_REF_B4 225
+#define CLK_TREE_PCIE_REF_PHY_X8 226
+#define CLK_TREE_PCIE_REF_PHY_X4 227
+#define CLK_TREE_PCIE_REF_PHY_X211 228
+#define CLK_TREE_GMAC_REC_CLK 229
+#define CLK_TREE_GPUTOP_PLL 230
+#define CLK_TREE_GPUCORE_PLL 231
+#define CLK_TREE_CPU_PLL_LIT 232
+#define CLK_TREE_CPU_PLL0 233
+#define CLK_TREE_CPU_PLL1 234
+#define CLK_TREE_CPU_PLL2 235
+#define CLK_TREE_CPU_PLL3 236
+#define CLK_TREE_FCH_I3C0_FUNC 237
+#define CLK_TREE_FCH_I3C1_FUNC 238
+#define CLK_TREE_FCH_DMA_ACLK 239
+#define CLK_TREE_FCH_XSPI_FUNC 240
+#define CLK_TREE_FCH_XSPI_MACLK 241
+#define CLK_TREE_FCH_TIMER_FUNC 242
+#define CLK_TREE_FCH_APB_IO_S0 243
+#define CLK_TREE_FCH_I3C0_APB 244
+#define CLK_TREE_FCH_I3C1_APB 245
+#define CLK_TREE_FCH_UART0_APB 246
+#define CLK_TREE_FCH_UART1_APB 247
+#define CLK_TREE_FCH_UART2_APB 248
+#define CLK_TREE_FCH_UART3_APB 249
+#define CLK_TREE_FCH_SPI0_APB 250
+#define CLK_TREE_FCH_SPI1_APB 251
+#define CLK_TREE_FCH_XSPI_APB 252
+#define CLK_TREE_FCH_I2C0_APB 253
+#define CLK_TREE_FCH_I2C1_APB 254
+#define CLK_TREE_FCH_I2C2_APB 255
+#define CLK_TREE_FCH_I2C3_APB 256
+#define CLK_TREE_FCH_I2C4_APB 257
+#define CLK_TREE_FCH_I2C5_APB 258
+#define CLK_TREE_FCH_I2C6_APB 259
+#define CLK_TREE_FCH_I2C7_APB 260
+#define CLK_TREE_FCH_TIMER_APB 261
+#define CLK_TREE_FCH_GPIO_APB 262
+#define CLK_TREE_FCH_UART0_FUNC 263
+#define CLK_TREE_FCH_UART1_FUNC 264
+#define CLK_TREE_FCH_UART2_FUNC 265
+#define CLK_TREE_FCH_UART3_FUNC 266
+/* IDs 267-271 are not used by the AP and are skipped */
+#define CLK_TREE_GPU_CLK_200M 272
+
+#endif
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
index 717d23a5e5ae..edfa64045f52 100644
--- a/include/dt-bindings/clock/en7523-clk.h
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -12,6 +12,6 @@
#define EN7523_CLK_CRYPTO 6
#define EN7523_CLK_PCIE 7
-#define EN7523_NUM_CLOCKS 8
+#define EN7581_CLK_EMMC 8
#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
index 255e3aa94323..cfede84b46b9 100644
--- a/include/dt-bindings/clock/exynos7885.h
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -69,6 +69,8 @@
#define CLK_GOUT_FSYS_MMC_EMBD 58
#define CLK_GOUT_FSYS_MMC_SDIO 59
#define CLK_GOUT_FSYS_USB30DRD 60
+#define CLK_MOUT_SHARED0_PLL 61
+#define CLK_MOUT_SHARED1_PLL 62
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -132,16 +134,24 @@
#define CLK_GOUT_WDT1_PCLK 43
/* CMU_FSYS */
-#define CLK_MOUT_FSYS_BUS_USER 1
-#define CLK_MOUT_FSYS_MMC_CARD_USER 2
-#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
-#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
-#define CLK_MOUT_FSYS_USB30DRD_USER 4
-#define CLK_GOUT_MMC_CARD_ACLK 5
-#define CLK_GOUT_MMC_CARD_SDCLKIN 6
-#define CLK_GOUT_MMC_EMBD_ACLK 7
-#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
-#define CLK_GOUT_MMC_SDIO_ACLK 9
-#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_GOUT_MMC_CARD_ACLK 5
+#define CLK_GOUT_MMC_CARD_SDCLKIN 6
+#define CLK_GOUT_MMC_EMBD_ACLK 7
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+#define CLK_GOUT_MMC_SDIO_ACLK 9
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_USB30DRD_USER 11
+#define CLK_MOUT_USB_PLL 12
+#define CLK_FOUT_USB_PLL 13
+#define CLK_FSYS_USB20PHY_CLKCORE 14
+#define CLK_FSYS_USB30DRD_ACLK_20PHYCTRL 15
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0 16
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1 17
+#define CLK_FSYS_USB30DRD_BUS_CLK_EARLY 18
+#define CLK_FSYS_USB30DRD_REF_CLK 19
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index 7666241520f8..80dacda57229 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -358,6 +358,7 @@
#define CLK_GOUT_UART_PCLK 32
#define CLK_GOUT_WDT0_PCLK 33
#define CLK_GOUT_WDT1_PCLK 34
+#define CLK_GOUT_BUSIF_TMU_PCLK 35
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h
index c8a2af1dd1ad..58fdec8f4c2a 100644
--- a/include/dt-bindings/clock/fsd-clk.h
+++ b/include/dt-bindings/clock/fsd-clk.h
@@ -28,7 +28,6 @@
#define DOUT_CMU_IMEM_ACLK 13
#define DOUT_CMU_IMEM_DMACLK 14
#define GAT_CMU_FSYS0_SHARED0DIV4 15
-#define CMU_NR_CLK 16
/* PERIC */
#define PERIC_SCLK_UART0 1
@@ -76,7 +75,6 @@
#define PERIC_EQOS_PHYRXCLK_MUX 43
#define PERIC_EQOS_PHYRXCLK 44
#define PERIC_DOUT_RGMII_CLK 45
-#define PERIC_NR_CLK 46
/* FSYS0 */
#define UFS0_MPHY_REFCLK_IXTAL24 1
@@ -101,7 +99,6 @@
#define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20
#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21
#define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22
-#define FSYS0_NR_CLK 23
/* FSYS1 */
#define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1
@@ -112,7 +109,6 @@
#define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6
#define PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7
#define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8
-#define FSYS1_NR_CLK 9
/* IMEM */
#define IMEM_DMA0_IPCLKPORT_ACLK 1
@@ -126,11 +122,9 @@
#define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9
#define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10
#define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11
-#define IMEM_NR_CLK 12
/* MFC */
#define MFC_MFC_IPCLKPORT_ACLK 1
-#define MFC_NR_CLK 2
/* CAM_CSI */
#define CAM_CSI0_0_IPCLKPORT_I_ACLK 1
@@ -145,6 +139,18 @@
#define CAM_CSI2_1_IPCLKPORT_I_ACLK 10
#define CAM_CSI2_2_IPCLKPORT_I_ACLK 11
#define CAM_CSI2_3_IPCLKPORT_I_ACLK 12
-#define CAM_CSI_NR_CLK 13
+#define CAM_CSI_PLL 13
+#define CAM_CSI0_0_IPCLKPORT_I_PCLK 14
+#define CAM_CSI0_1_IPCLKPORT_I_PCLK 15
+#define CAM_CSI0_2_IPCLKPORT_I_PCLK 16
+#define CAM_CSI0_3_IPCLKPORT_I_PCLK 17
+#define CAM_CSI1_0_IPCLKPORT_I_PCLK 18
+#define CAM_CSI1_1_IPCLKPORT_I_PCLK 19
+#define CAM_CSI1_2_IPCLKPORT_I_PCLK 20
+#define CAM_CSI1_3_IPCLKPORT_I_PCLK 21
+#define CAM_CSI2_0_IPCLKPORT_I_PCLK 22
+#define CAM_CSI2_1_IPCLKPORT_I_PCLK 23
+#define CAM_CSI2_2_IPCLKPORT_I_PCLK 24
+#define CAM_CSI2_3_IPCLKPORT_I_PCLK 25
#endif /*_DT_BINDINGS_CLOCK_FSD_H */
diff --git a/include/dt-bindings/clock/google,gs101-acpm.h b/include/dt-bindings/clock/google,gs101-acpm.h
new file mode 100644
index 000000000000..e2ba89e09fa6
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101-acpm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Device Tree binding constants for Google gs101 ACPM clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H
+
+#define GS101_CLK_ACPM_DVFS_MIF 0
+#define GS101_CLK_ACPM_DVFS_INT 1
+#define GS101_CLK_ACPM_DVFS_CPUCL0 2
+#define GS101_CLK_ACPM_DVFS_CPUCL1 3
+#define GS101_CLK_ACPM_DVFS_CPUCL2 4
+#define GS101_CLK_ACPM_DVFS_G3D 5
+#define GS101_CLK_ACPM_DVFS_G3DL2 6
+#define GS101_CLK_ACPM_DVFS_TPU 7
+#define GS101_CLK_ACPM_DVFS_INTCAM 8
+#define GS101_CLK_ACPM_DVFS_TNR 9
+#define GS101_CLK_ACPM_DVFS_CAM 10
+#define GS101_CLK_ACPM_DVFS_MFC 11
+#define GS101_CLK_ACPM_DVFS_DISP 12
+#define GS101_CLK_ACPM_DVFS_BO 13
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H */
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
index 827404fadf5c..c62d84d093a9 100644
--- a/include/dt-bindings/clock/imx8ulp-clock.h
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -255,4 +255,9 @@
#define IMX8ULP_CLK_PCC5_END 56
+/* LPAV SIM */
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_CORE 0
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_PBCLK 1
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_PLAT 2
+
#endif
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
index 787c9e74dc96..c393fad3a346 100644
--- a/include/dt-bindings/clock/imx93-clock.h
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -204,6 +204,11 @@
#define IMX93_CLK_A55_SEL 199
#define IMX93_CLK_A55_CORE 200
#define IMX93_CLK_PDM_IPG 201
-#define IMX93_CLK_END 202
+#define IMX91_CLK_ENET1_QOS_TSN 202
+#define IMX91_CLK_ENET_TIMER 203
+#define IMX91_CLK_ENET2_REGULAR 204
+#define IMX91_CLK_ENET2_REGULAR_GATE 205
+#define IMX91_CLK_ENET1_QOS_TSN_GATE 206
+#define IMX93_CLK_SPDIF_IPG 207
#endif
diff --git a/include/dt-bindings/clock/loongson,ls2k-clk.h b/include/dt-bindings/clock/loongson,ls2k-clk.h
index 4279ba595f1e..8cbb86b2cf1e 100644
--- a/include/dt-bindings/clock/loongson,ls2k-clk.h
+++ b/include/dt-bindings/clock/loongson,ls2k-clk.h
@@ -43,4 +43,40 @@
#define LOONGSON2_I2S_CLK 33
#define LOONGSON2_MISC_CLK 34
+#define LS2K0300_CLK_STABLE 0
+#define LS2K0300_NODE_PLL 1
+#define LS2K0300_DDR_PLL 2
+#define LS2K0300_PIX_PLL 3
+#define LS2K0300_CLK_THSENS 4
+#define LS2K0300_CLK_NODE_DIV 5
+#define LS2K0300_CLK_NODE_PLL_GATE 6
+#define LS2K0300_CLK_NODE_SCALE 7
+#define LS2K0300_CLK_NODE_GATE 8
+#define LS2K0300_CLK_GMAC_DIV 9
+#define LS2K0300_CLK_GMAC_GATE 10
+#define LS2K0300_CLK_I2S_DIV 11
+#define LS2K0300_CLK_I2S_SCALE 12
+#define LS2K0300_CLK_I2S_GATE 13
+#define LS2K0300_CLK_DDR_DIV 14
+#define LS2K0300_CLK_DDR_GATE 15
+#define LS2K0300_CLK_NET_DIV 16
+#define LS2K0300_CLK_NET_GATE 17
+#define LS2K0300_CLK_DEV_DIV 18
+#define LS2K0300_CLK_DEV_GATE 19
+#define LS2K0300_CLK_PIX_DIV 20
+#define LS2K0300_CLK_PIX_PLL_GATE 21
+#define LS2K0300_CLK_PIX_SCALE 22
+#define LS2K0300_CLK_PIX_GATE 23
+#define LS2K0300_CLK_GMACBP_DIV 24
+#define LS2K0300_CLK_GMACBP_GATE 25
+#define LS2K0300_CLK_USB_SCALE 26
+#define LS2K0300_CLK_USB_GATE 27
+#define LS2K0300_CLK_APB_SCALE 28
+#define LS2K0300_CLK_APB_GATE 29
+#define LS2K0300_CLK_BOOT_SCALE 30
+#define LS2K0300_CLK_BOOT_GATE 31
+#define LS2K0300_CLK_SDIO_SCALE 32
+#define LS2K0300_CLK_SDIO_GATE 33
+#define LS2K0300_CLK_GMAC_IN 34
+
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1908.h b/include/dt-bindings/clock/marvell,pxa1908.h
new file mode 100644
index 000000000000..fb15b0d0cd4c
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa1908.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef __DTS_MARVELL_PXA1908_CLOCK_H
+#define __DTS_MARVELL_PXA1908_CLOCK_H
+
+/* plls */
+#define PXA1908_CLK_CLK32 1
+#define PXA1908_CLK_VCTCXO 2
+#define PXA1908_CLK_PLL1_624 3
+#define PXA1908_CLK_PLL1_416 4
+#define PXA1908_CLK_PLL1_499 5
+#define PXA1908_CLK_PLL1_832 6
+#define PXA1908_CLK_PLL1_1248 7
+#define PXA1908_CLK_PLL1_D2 8
+#define PXA1908_CLK_PLL1_D4 9
+#define PXA1908_CLK_PLL1_D8 10
+#define PXA1908_CLK_PLL1_D16 11
+#define PXA1908_CLK_PLL1_D6 12
+#define PXA1908_CLK_PLL1_D12 13
+#define PXA1908_CLK_PLL1_D24 14
+#define PXA1908_CLK_PLL1_D48 15
+#define PXA1908_CLK_PLL1_D96 16
+#define PXA1908_CLK_PLL1_D13 17
+#define PXA1908_CLK_PLL1_32 18
+#define PXA1908_CLK_PLL1_208 19
+#define PXA1908_CLK_PLL1_117 20
+#define PXA1908_CLK_PLL1_416_GATE 21
+#define PXA1908_CLK_PLL1_624_GATE 22
+#define PXA1908_CLK_PLL1_832_GATE 23
+#define PXA1908_CLK_PLL1_1248_GATE 24
+#define PXA1908_CLK_PLL1_D2_GATE 25
+#define PXA1908_CLK_PLL1_499_EN 26
+#define PXA1908_CLK_PLL2VCO 27
+#define PXA1908_CLK_PLL2 28
+#define PXA1908_CLK_PLL2P 29
+#define PXA1908_CLK_PLL2VCODIV3 30
+#define PXA1908_CLK_PLL3VCO 31
+#define PXA1908_CLK_PLL3 32
+#define PXA1908_CLK_PLL3P 33
+#define PXA1908_CLK_PLL3VCODIV3 34
+#define PXA1908_CLK_PLL4VCO 35
+#define PXA1908_CLK_PLL4 36
+#define PXA1908_CLK_PLL4P 37
+#define PXA1908_CLK_PLL4VCODIV3 38
+
+/* apb (apbc) peripherals */
+#define PXA1908_CLK_UART0 1
+#define PXA1908_CLK_UART1 2
+#define PXA1908_CLK_GPIO 3
+#define PXA1908_CLK_PWM0 4
+#define PXA1908_CLK_PWM1 5
+#define PXA1908_CLK_PWM2 6
+#define PXA1908_CLK_PWM3 7
+#define PXA1908_CLK_SSP0 8
+#define PXA1908_CLK_SSP1 9
+#define PXA1908_CLK_IPC_RST 10
+#define PXA1908_CLK_RTC 11
+#define PXA1908_CLK_TWSI0 12
+#define PXA1908_CLK_KPC 13
+#define PXA1908_CLK_SWJTAG 14
+#define PXA1908_CLK_SSP2 15
+#define PXA1908_CLK_TWSI1 16
+#define PXA1908_CLK_THERMAL 17
+#define PXA1908_CLK_TWSI3 18
+
+/* apb (apbcp) peripherals */
+#define PXA1908_CLK_UART2 1
+#define PXA1908_CLK_TWSI2 2
+#define PXA1908_CLK_AICER 3
+
+/* axi (apmu) peripherals */
+#define PXA1908_CLK_CCIC1 1
+#define PXA1908_CLK_ISP 2
+#define PXA1908_CLK_DSI1 3
+#define PXA1908_CLK_DISP1 4
+#define PXA1908_CLK_CCIC0 5
+#define PXA1908_CLK_SDH0 6
+#define PXA1908_CLK_SDH1 7
+#define PXA1908_CLK_USB 8
+#define PXA1908_CLK_NF 9
+#define PXA1908_CLK_CORE_DEBUG 10
+#define PXA1908_CLK_VPU 11
+#define PXA1908_CLK_GC 12
+#define PXA1908_CLK_SDH2 13
+#define PXA1908_CLK_GC2D 14
+#define PXA1908_CLK_TRACE 15
+#define PXA1908_CLK_DVC_DFC_DEBUG 16
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h
new file mode 100644
index 000000000000..b4705204409c
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H
+#define _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_APLL1 7
+#define CLK_APMIXED_APLL2 8
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-imgsys.h b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h
new file mode 100644
index 000000000000..f250c26c5eb4
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_IMGSYS_H
+#define _DT_BINDINGS_CLK_MT6735_IMGSYS_H
+
+#define CLK_IMG_SMI_LARB2 0
+#define CLK_IMG_CAM_SMI 1
+#define CLK_IMG_CAM_CAM 2
+#define CLK_IMG_SEN_TG 3
+#define CLK_IMG_SEN_CAM 4
+#define CLK_IMG_CAM_SV 5
+#define CLK_IMG_SUFOD 6
+#define CLK_IMG_FD 7
+
+#endif /* _DT_BINDINGS_CLK_MT6735_IMGSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-infracfg.h b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h
new file mode 100644
index 000000000000..d8dd51e15637
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_INFRACFG_H
+#define _DT_BINDINGS_CLK_MT6735_INFRACFG_H
+
+#define CLK_INFRA_DBG 0
+#define CLK_INFRA_GCE 1
+#define CLK_INFRA_TRBG 2
+#define CLK_INFRA_CPUM 3
+#define CLK_INFRA_DEVAPC 4
+#define CLK_INFRA_AUDIO 5
+#define CLK_INFRA_GCPU 6
+#define CLK_INFRA_L2C_SRAM 7
+#define CLK_INFRA_M4U 8
+#define CLK_INFRA_CLDMA 9
+#define CLK_INFRA_CONNMCU_BUS 10
+#define CLK_INFRA_KP 11
+#define CLK_INFRA_APXGPT 12
+#define CLK_INFRA_SEJ 13
+#define CLK_INFRA_CCIF0_AP 14
+#define CLK_INFRA_CCIF1_AP 15
+#define CLK_INFRA_PMIC_SPI 16
+#define CLK_INFRA_PMIC_WRAP 17
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h
new file mode 100644
index 000000000000..d2d99a48348a
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_MFGCFG_H
+#define _DT_BINDINGS_CLK_MT6735_MFGCFG_H
+
+#define CLK_MFG_BG3D 0
+
+#endif /* _DT_BINDINGS_CLK_MT6735_MFGCFG_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-pericfg.h b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h
new file mode 100644
index 000000000000..16bc21bbd95b
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_PERICFG_H
+#define _DT_BINDINGS_CLK_MT6735_PERICFG_H
+
+#define CLK_PERI_DISP_PWM 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_IRDA 11
+#define CLK_PERI_APDMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_UART0 17
+#define CLK_PERI_UART1 18
+#define CLK_PERI_UART2 19
+#define CLK_PERI_UART3 20
+#define CLK_PERI_UART4 21
+#define CLK_PERI_BTIF 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_AUXADC 27
+#define CLK_PERI_SPI0 28
+#define CLK_PERI_IRTX 29
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-topckgen.h b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h
new file mode 100644
index 000000000000..d4b1e113cc0a
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H
+#define _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H
+
+#define CLK_TOP_AD_SYS_26M_CK 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_DMPLL 2
+#define CLK_TOP_DPI_CK 3
+#define CLK_TOP_WHPLL_AUDIO_CK 4
+
+#define CLK_TOP_SYSPLL_D2 5
+#define CLK_TOP_SYSPLL_D3 6
+#define CLK_TOP_SYSPLL_D5 7
+#define CLK_TOP_SYSPLL1_D2 8
+#define CLK_TOP_SYSPLL1_D4 9
+#define CLK_TOP_SYSPLL1_D8 10
+#define CLK_TOP_SYSPLL1_D16 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL3_D2 14
+#define CLK_TOP_SYSPLL3_D4 15
+#define CLK_TOP_SYSPLL4_D2 16
+#define CLK_TOP_SYSPLL4_D4 17
+#define CLK_TOP_UNIVPLL_D2 18
+#define CLK_TOP_UNIVPLL_D3 19
+#define CLK_TOP_UNIVPLL_D5 20
+#define CLK_TOP_UNIVPLL_D26 21
+#define CLK_TOP_UNIVPLL1_D2 22
+#define CLK_TOP_UNIVPLL1_D4 23
+#define CLK_TOP_UNIVPLL1_D8 24
+#define CLK_TOP_UNIVPLL2_D2 25
+#define CLK_TOP_UNIVPLL2_D4 26
+#define CLK_TOP_UNIVPLL2_D8 27
+#define CLK_TOP_UNIVPLL3_D2 28
+#define CLK_TOP_UNIVPLL3_D4 29
+#define CLK_TOP_MSDCPLL_D2 30
+#define CLK_TOP_MSDCPLL_D4 31
+#define CLK_TOP_MSDCPLL_D8 32
+#define CLK_TOP_MSDCPLL_D16 33
+#define CLK_TOP_VENCPLL_D3 34
+#define CLK_TOP_TVDPLL_D2 35
+#define CLK_TOP_TVDPLL_D4 36
+#define CLK_TOP_DMPLL_D2 37
+#define CLK_TOP_DMPLL_D4 38
+#define CLK_TOP_DMPLL_D8 39
+#define CLK_TOP_AD_SYS_26M_D2 40
+
+#define CLK_TOP_AXI_SEL 41
+#define CLK_TOP_MEM_SEL 42
+#define CLK_TOP_DDRPHY_SEL 43
+#define CLK_TOP_MM_SEL 44
+#define CLK_TOP_PWM_SEL 45
+#define CLK_TOP_VDEC_SEL 46
+#define CLK_TOP_MFG_SEL 47
+#define CLK_TOP_CAMTG_SEL 48
+#define CLK_TOP_UART_SEL 49
+#define CLK_TOP_SPI_SEL 50
+#define CLK_TOP_USB20_SEL 51
+#define CLK_TOP_MSDC50_0_SEL 52
+#define CLK_TOP_MSDC30_0_SEL 53
+#define CLK_TOP_MSDC30_1_SEL 54
+#define CLK_TOP_MSDC30_2_SEL 55
+#define CLK_TOP_MSDC30_3_SEL 56
+#define CLK_TOP_AUDIO_SEL 57
+#define CLK_TOP_AUDINTBUS_SEL 58
+#define CLK_TOP_PMICSPI_SEL 59
+#define CLK_TOP_SCP_SEL 60
+#define CLK_TOP_ATB_SEL 61
+#define CLK_TOP_DPI0_SEL 62
+#define CLK_TOP_SCAM_SEL 63
+#define CLK_TOP_MFG13M_SEL 64
+#define CLK_TOP_AUD1_SEL 65
+#define CLK_TOP_AUD2_SEL 66
+#define CLK_TOP_IRDA_SEL 67
+#define CLK_TOP_IRTX_SEL 68
+#define CLK_TOP_DISPPWM_SEL 69
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h
new file mode 100644
index 000000000000..f94cec10c89f
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_VDECSYS_H
+#define _DT_BINDINGS_CLK_MT6735_VDECSYS_H
+
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_SMI_LARB1 1
+
+#endif /* _DT_BINDINGS_CLK_MT6735_VDECSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-vencsys.h b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h
new file mode 100644
index 000000000000..e5a9cb4f269f
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_VENCSYS_H
+#define _DT_BINDINGS_CLK_MT6735_VENCSYS_H
+
+#define CLK_VENC_SMI_LARB3 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+
+#endif /* _DT_BINDINGS_CLK_MT6735_VENCSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt8188-clk.h b/include/dt-bindings/clock/mediatek,mt8188-clk.h
index bd5cd100b796..0e87f61c90f4 100644
--- a/include/dt-bindings/clock/mediatek,mt8188-clk.h
+++ b/include/dt-bindings/clock/mediatek,mt8188-clk.h
@@ -721,6 +721,6 @@
#define CLK_VDO1_DPINTF 58
#define CLK_VDO1_DISP_MONITOR_DPINTF 59
#define CLK_VDO1_26M_SLOW 60
-#define CLK_VDO1_NR_CLK 61
+#define CLK_VDO1_DPI1_HDMI 61
#endif /* _DT_BINDINGS_CLK_MT8188_H */
diff --git a/include/dt-bindings/clock/mediatek,mt8196-clock.h b/include/dt-bindings/clock/mediatek,mt8196-clock.h
new file mode 100644
index 000000000000..ae0946ab7621
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8196-clock.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8196_H
+#define _DT_BINDINGS_CLK_MT8196_H
+
+/* CKSYS */
+#define CLK_TOP_AXI 0
+#define CLK_TOP_MEM_SUB 1
+#define CLK_TOP_IO_NOC 2
+#define CLK_TOP_P_AXI 3
+#define CLK_TOP_UFS_PEXTP0_AXI 4
+#define CLK_TOP_PEXTP1_USB_AXI 5
+#define CLK_TOP_P_FMEM_SUB 6
+#define CLK_TOP_PEXTP0_MEM_SUB 7
+#define CLK_TOP_PEXTP1_USB_MEM_SUB 8
+#define CLK_TOP_P_NOC 9
+#define CLK_TOP_EMI_N 10
+#define CLK_TOP_EMI_S 11
+#define CLK_TOP_AP2CONN_HOST 12
+#define CLK_TOP_ATB 13
+#define CLK_TOP_CIRQ 14
+#define CLK_TOP_PBUS_156M 15
+#define CLK_TOP_EFUSE 16
+#define CLK_TOP_MCL3GIC 17
+#define CLK_TOP_MCINFRA 18
+#define CLK_TOP_DSP 19
+#define CLK_TOP_MFG_REF 20
+#define CLK_TOP_MFG_EB 21
+#define CLK_TOP_UART 22
+#define CLK_TOP_SPI0_BCLK 23
+#define CLK_TOP_SPI1_BCLK 24
+#define CLK_TOP_SPI2_BCLK 25
+#define CLK_TOP_SPI3_BCLK 26
+#define CLK_TOP_SPI4_BCLK 27
+#define CLK_TOP_SPI5_BCLK 28
+#define CLK_TOP_SPI6_BCLK 29
+#define CLK_TOP_SPI7_BCLK 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_DISP_PWM 33
+#define CLK_TOP_USB_TOP_1P 34
+#define CLK_TOP_USB_XHCI_1P 35
+#define CLK_TOP_USB_FMCNT_P1 36
+#define CLK_TOP_I2C_P 37
+#define CLK_TOP_I2C_EAST 38
+#define CLK_TOP_I2C_WEST 39
+#define CLK_TOP_I2C_NORTH 40
+#define CLK_TOP_AES_UFSFDE 41
+#define CLK_TOP_UFS 42
+#define CLK_TOP_AUD_1 43
+#define CLK_TOP_AUD_2 44
+#define CLK_TOP_ADSP 45
+#define CLK_TOP_ADSP_UARTHUB_B 46
+#define CLK_TOP_DPMAIF_MAIN 47
+#define CLK_TOP_PWM 48
+#define CLK_TOP_MCUPM 49
+#define CLK_TOP_IPSEAST 50
+#define CLK_TOP_TL 51
+#define CLK_TOP_TL_P1 52
+#define CLK_TOP_TL_P2 53
+#define CLK_TOP_EMI_INTERFACE_546 54
+#define CLK_TOP_SDF 55
+#define CLK_TOP_UARTHUB_BCLK 56
+#define CLK_TOP_DPSW_CMP_26M 57
+#define CLK_TOP_SMAP 58
+#define CLK_TOP_SSR_PKA 59
+#define CLK_TOP_SSR_DMA 60
+#define CLK_TOP_SSR_KDF 61
+#define CLK_TOP_SSR_RNG 62
+#define CLK_TOP_SPU0 63
+#define CLK_TOP_SPU1 64
+#define CLK_TOP_DXCC 65
+#define CLK_TOP_APLL_I2SIN0 66
+#define CLK_TOP_APLL_I2SIN1 67
+#define CLK_TOP_APLL_I2SIN2 68
+#define CLK_TOP_APLL_I2SIN3 69
+#define CLK_TOP_APLL_I2SIN4 70
+#define CLK_TOP_APLL_I2SIN6 71
+#define CLK_TOP_APLL_I2SOUT0 72
+#define CLK_TOP_APLL_I2SOUT1 73
+#define CLK_TOP_APLL_I2SOUT2 74
+#define CLK_TOP_APLL_I2SOUT3 75
+#define CLK_TOP_APLL_I2SOUT4 76
+#define CLK_TOP_APLL_I2SOUT6 77
+#define CLK_TOP_APLL_FMI2S 78
+#define CLK_TOP_APLL_TDMOUT 79
+#define CLK_TOP_APLL12_DIV_TDMOUT_M 80
+#define CLK_TOP_APLL12_DIV_TDMOUT_B 81
+#define CLK_TOP_MAINPLL_D3 82
+#define CLK_TOP_MAINPLL_D4 83
+#define CLK_TOP_MAINPLL_D4_D2 84
+#define CLK_TOP_MAINPLL_D4_D4 85
+#define CLK_TOP_MAINPLL_D4_D8 86
+#define CLK_TOP_MAINPLL_D5 87
+#define CLK_TOP_MAINPLL_D5_D2 88
+#define CLK_TOP_MAINPLL_D5_D4 89
+#define CLK_TOP_MAINPLL_D5_D8 90
+#define CLK_TOP_MAINPLL_D6 91
+#define CLK_TOP_MAINPLL_D6_D2 92
+#define CLK_TOP_MAINPLL_D7 93
+#define CLK_TOP_MAINPLL_D7_D2 94
+#define CLK_TOP_MAINPLL_D7_D4 95
+#define CLK_TOP_MAINPLL_D7_D8 96
+#define CLK_TOP_MAINPLL_D9 97
+#define CLK_TOP_UNIVPLL_D4 98
+#define CLK_TOP_UNIVPLL_D4_D2 99
+#define CLK_TOP_UNIVPLL_D4_D4 100
+#define CLK_TOP_UNIVPLL_D4_D8 101
+#define CLK_TOP_UNIVPLL_D5 102
+#define CLK_TOP_UNIVPLL_D5_D2 103
+#define CLK_TOP_UNIVPLL_D5_D4 104
+#define CLK_TOP_UNIVPLL_D6 105
+#define CLK_TOP_UNIVPLL_D6_D2 106
+#define CLK_TOP_UNIVPLL_D6_D4 107
+#define CLK_TOP_UNIVPLL_D6_D8 108
+#define CLK_TOP_UNIVPLL_D6_D16 109
+#define CLK_TOP_UNIVPLL_192M 110
+#define CLK_TOP_UNIVPLL_192M_D4 111
+#define CLK_TOP_UNIVPLL_192M_D8 112
+#define CLK_TOP_UNIVPLL_192M_D16 113
+#define CLK_TOP_UNIVPLL_192M_D32 114
+#define CLK_TOP_UNIVPLL_192M_D10 115
+#define CLK_TOP_TVDPLL1_D2 116
+#define CLK_TOP_MSDCPLL_D2 117
+#define CLK_TOP_OSC_D2 118
+#define CLK_TOP_OSC_D3 119
+#define CLK_TOP_OSC_D4 120
+#define CLK_TOP_OSC_D5 121
+#define CLK_TOP_OSC_D7 122
+#define CLK_TOP_OSC_D8 123
+#define CLK_TOP_OSC_D10 124
+#define CLK_TOP_OSC_D14 125
+#define CLK_TOP_OSC_D20 126
+#define CLK_TOP_OSC_D32 127
+#define CLK_TOP_OSC_D40 128
+#define CLK_TOP_SFLASH 129
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_MSDCPLL 2
+#define CLK_APMIXED_ADSPPLL 3
+#define CLK_APMIXED_EMIPLL 4
+#define CLK_APMIXED_EMIPLL2 5
+#define CLK_APMIXED_NET1PLL 6
+#define CLK_APMIXED_SGMIIPLL 7
+
+/* CKSYS_GP2 */
+#define CLK_TOP2_SENINF0 0
+#define CLK_TOP2_SENINF1 1
+#define CLK_TOP2_SENINF2 2
+#define CLK_TOP2_SENINF3 3
+#define CLK_TOP2_SENINF4 4
+#define CLK_TOP2_SENINF5 5
+#define CLK_TOP2_IMG1 6
+#define CLK_TOP2_IPE 7
+#define CLK_TOP2_CAM 8
+#define CLK_TOP2_CAMTM 9
+#define CLK_TOP2_DPE 10
+#define CLK_TOP2_VDEC 11
+#define CLK_TOP2_CCUSYS 12
+#define CLK_TOP2_CCUTM 13
+#define CLK_TOP2_VENC 14
+#define CLK_TOP2_DP1 15
+#define CLK_TOP2_DP0 16
+#define CLK_TOP2_DISP 17
+#define CLK_TOP2_MDP 18
+#define CLK_TOP2_MMINFRA 19
+#define CLK_TOP2_MMINFRA_SNOC 20
+#define CLK_TOP2_MMUP 21
+#define CLK_TOP2_MMINFRA_AO 22
+#define CLK_TOP2_MAINPLL2_D2 23
+#define CLK_TOP2_MAINPLL2_D3 24
+#define CLK_TOP2_MAINPLL2_D4 25
+#define CLK_TOP2_MAINPLL2_D4_D2 26
+#define CLK_TOP2_MAINPLL2_D4_D4 27
+#define CLK_TOP2_MAINPLL2_D5 28
+#define CLK_TOP2_MAINPLL2_D5_D2 29
+#define CLK_TOP2_MAINPLL2_D6 30
+#define CLK_TOP2_MAINPLL2_D6_D2 31
+#define CLK_TOP2_MAINPLL2_D7 32
+#define CLK_TOP2_MAINPLL2_D7_D2 33
+#define CLK_TOP2_MAINPLL2_D9 34
+#define CLK_TOP2_UNIVPLL2_D3 35
+#define CLK_TOP2_UNIVPLL2_D4 36
+#define CLK_TOP2_UNIVPLL2_D4_D2 37
+#define CLK_TOP2_UNIVPLL2_D5 38
+#define CLK_TOP2_UNIVPLL2_D5_D2 39
+#define CLK_TOP2_UNIVPLL2_D6 40
+#define CLK_TOP2_UNIVPLL2_D6_D2 41
+#define CLK_TOP2_UNIVPLL2_D6_D4 42
+#define CLK_TOP2_UNIVPLL2_D7 43
+#define CLK_TOP2_IMGPLL_D2 44
+#define CLK_TOP2_IMGPLL_D4 45
+#define CLK_TOP2_IMGPLL_D5 46
+#define CLK_TOP2_IMGPLL_D5_D2 47
+#define CLK_TOP2_MMPLL2_D3 48
+#define CLK_TOP2_MMPLL2_D4 49
+#define CLK_TOP2_MMPLL2_D4_D2 50
+#define CLK_TOP2_MMPLL2_D5 51
+#define CLK_TOP2_MMPLL2_D5_D2 52
+#define CLK_TOP2_MMPLL2_D6 53
+#define CLK_TOP2_MMPLL2_D6_D2 54
+#define CLK_TOP2_MMPLL2_D7 55
+#define CLK_TOP2_MMPLL2_D9 56
+#define CLK_TOP2_TVDPLL1_D4 57
+#define CLK_TOP2_TVDPLL1_D8 58
+#define CLK_TOP2_TVDPLL1_D16 59
+#define CLK_TOP2_TVDPLL2_D2 60
+#define CLK_TOP2_TVDPLL2_D4 61
+#define CLK_TOP2_TVDPLL2_D8 62
+#define CLK_TOP2_TVDPLL2_D16 63
+#define CLK_TOP2_DVO 64
+#define CLK_TOP2_DVO_FAVT 65
+#define CLK_TOP2_TVDPLL3_D2 66
+#define CLK_TOP2_TVDPLL3_D4 67
+#define CLK_TOP2_TVDPLL3_D8 68
+#define CLK_TOP2_TVDPLL3_D16 69
+
+/* APMIXEDSYS_GP2 */
+#define CLK_APMIXED2_MAINPLL2 0
+#define CLK_APMIXED2_UNIVPLL2 1
+#define CLK_APMIXED2_MMPLL2 2
+#define CLK_APMIXED2_IMGPLL 3
+#define CLK_APMIXED2_TVDPLL1 4
+#define CLK_APMIXED2_TVDPLL2 5
+#define CLK_APMIXED2_TVDPLL3 6
+
+/* IMP_IIC_WRAP_E */
+#define CLK_IMPE_I2C5 0
+
+/* IMP_IIC_WRAP_W */
+#define CLK_IMPW_I2C0 0
+#define CLK_IMPW_I2C3 1
+#define CLK_IMPW_I2C6 2
+#define CLK_IMPW_I2C10 3
+
+/* IMP_IIC_WRAP_N */
+#define CLK_IMPN_I2C1 0
+#define CLK_IMPN_I2C2 1
+#define CLK_IMPN_I2C4 2
+#define CLK_IMPN_I2C7 3
+#define CLK_IMPN_I2C8 4
+#define CLK_IMPN_I2C9 5
+
+/* IMP_IIC_WRAP_C */
+#define CLK_IMPC_I2C11 0
+#define CLK_IMPC_I2C12 1
+#define CLK_IMPC_I2C13 2
+#define CLK_IMPC_I2C14 3
+
+/* PERICFG_AO */
+#define CLK_PERI_AO_UART0_BCLK 0
+#define CLK_PERI_AO_UART1_BCLK 1
+#define CLK_PERI_AO_UART2_BCLK 2
+#define CLK_PERI_AO_UART3_BCLK 3
+#define CLK_PERI_AO_UART4_BCLK 4
+#define CLK_PERI_AO_UART5_BCLK 5
+#define CLK_PERI_AO_PWM_X16W_HCLK 6
+#define CLK_PERI_AO_PWM_X16W_BCLK 7
+#define CLK_PERI_AO_PWM_PWM_BCLK0 8
+#define CLK_PERI_AO_PWM_PWM_BCLK1 9
+#define CLK_PERI_AO_PWM_PWM_BCLK2 10
+#define CLK_PERI_AO_PWM_PWM_BCLK3 11
+#define CLK_PERI_AO_SPI0_BCLK 12
+#define CLK_PERI_AO_SPI1_BCLK 13
+#define CLK_PERI_AO_SPI2_BCLK 14
+#define CLK_PERI_AO_SPI3_BCLK 15
+#define CLK_PERI_AO_SPI4_BCLK 16
+#define CLK_PERI_AO_SPI5_BCLK 17
+#define CLK_PERI_AO_SPI6_BCLK 18
+#define CLK_PERI_AO_SPI7_BCLK 19
+#define CLK_PERI_AO_AP_DMA_X32W_BCLK 20
+#define CLK_PERI_AO_MSDC1_MSDC_SRC 21
+#define CLK_PERI_AO_MSDC1_HCLK 22
+#define CLK_PERI_AO_MSDC1_AXI 23
+#define CLK_PERI_AO_MSDC1_HCLK_WRAP 24
+#define CLK_PERI_AO_MSDC2_MSDC_SRC 25
+#define CLK_PERI_AO_MSDC2_HCLK 26
+#define CLK_PERI_AO_MSDC2_AXI 27
+#define CLK_PERI_AO_MSDC2_HCLK_WRAP 28
+#define CLK_PERI_AO_FLASHIF_FLASH 29
+#define CLK_PERI_AO_FLASHIF_27M 30
+#define CLK_PERI_AO_FLASHIF_DRAM 31
+#define CLK_PERI_AO_FLASHIF_AXI 32
+#define CLK_PERI_AO_FLASHIF_BCLK 33
+
+/* UFSCFG_AO */
+#define CLK_UFSAO_UNIPRO_TX_SYM 0
+#define CLK_UFSAO_UNIPRO_RX_SYM0 1
+#define CLK_UFSAO_UNIPRO_RX_SYM1 2
+#define CLK_UFSAO_UNIPRO_SYS 3
+#define CLK_UFSAO_UNIPRO_SAP 4
+#define CLK_UFSAO_PHY_SAP 5
+#define CLK_UFSAO_UFSHCI_UFS 6
+#define CLK_UFSAO_UFSHCI_AES 7
+
+/* PEXTP0CFG_AO */
+#define CLK_PEXT_PEXTP_MAC_P0_TL 0
+#define CLK_PEXT_PEXTP_MAC_P0_REF 1
+#define CLK_PEXT_PEXTP_PHY_P0_MCU_BUS 2
+#define CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF 3
+#define CLK_PEXT_PEXTP_MAC_P0_AXI_250 4
+#define CLK_PEXT_PEXTP_MAC_P0_AHB_APB 5
+#define CLK_PEXT_PEXTP_MAC_P0_PL_P 6
+#define CLK_PEXT_PEXTP_VLP_AO_P0_LP 7
+
+/* PEXTP1CFG_AO */
+#define CLK_PEXT1_PEXTP_MAC_P1_TL 0
+#define CLK_PEXT1_PEXTP_MAC_P1_REF 1
+#define CLK_PEXT1_PEXTP_MAC_P2_TL 2
+#define CLK_PEXT1_PEXTP_MAC_P2_REF 3
+#define CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS 4
+#define CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF 5
+#define CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS 6
+#define CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF 7
+#define CLK_PEXT1_PEXTP_MAC_P1_AXI_250 8
+#define CLK_PEXT1_PEXTP_MAC_P1_AHB_APB 9
+#define CLK_PEXT1_PEXTP_MAC_P1_PL_P 10
+#define CLK_PEXT1_PEXTP_MAC_P2_AXI_250 11
+#define CLK_PEXT1_PEXTP_MAC_P2_AHB_APB 12
+#define CLK_PEXT1_PEXTP_MAC_P2_PL_P 13
+#define CLK_PEXT1_PEXTP_VLP_AO_P1_LP 14
+#define CLK_PEXT1_PEXTP_VLP_AO_P2_LP 15
+
+/* VLP_CKSYS */
+#define CLK_VLP_APLL1 0
+#define CLK_VLP_APLL2 1
+#define CLK_VLP_SCP 2
+#define CLK_VLP_SCP_SPI 3
+#define CLK_VLP_SCP_IIC 4
+#define CLK_VLP_SCP_IIC_HS 5
+#define CLK_VLP_PWRAP_ULPOSC 6
+#define CLK_VLP_SPMI_M_TIA_32K 7
+#define CLK_VLP_APXGPT_26M_B 8
+#define CLK_VLP_DPSW 9
+#define CLK_VLP_DPSW_CENTRAL 10
+#define CLK_VLP_SPMI_M_MST 11
+#define CLK_VLP_DVFSRC 12
+#define CLK_VLP_PWM_VLP 13
+#define CLK_VLP_AXI_VLP 14
+#define CLK_VLP_SYSTIMER_26M 15
+#define CLK_VLP_SSPM 16
+#define CLK_VLP_SRCK 17
+#define CLK_VLP_CAMTG0 18
+#define CLK_VLP_CAMTG1 19
+#define CLK_VLP_CAMTG2 20
+#define CLK_VLP_CAMTG3 21
+#define CLK_VLP_CAMTG4 22
+#define CLK_VLP_CAMTG5 23
+#define CLK_VLP_CAMTG6 24
+#define CLK_VLP_CAMTG7 25
+#define CLK_VLP_SSPM_26M 26
+#define CLK_VLP_ULPOSC_SSPM 27
+#define CLK_VLP_VLP_PBUS_26M 28
+#define CLK_VLP_DEBUG_ERR_FLAG 29
+#define CLK_VLP_DPMSRDMA 30
+#define CLK_VLP_VLP_PBUS_156M 31
+#define CLK_VLP_SPM 32
+#define CLK_VLP_MMINFRA 33
+#define CLK_VLP_USB_TOP 34
+#define CLK_VLP_USB_XHCI 35
+#define CLK_VLP_NOC_VLP 36
+#define CLK_VLP_AUDIO_H 37
+#define CLK_VLP_AUD_ENGEN1 38
+#define CLK_VLP_AUD_ENGEN2 39
+#define CLK_VLP_AUD_INTBUS 40
+#define CLK_VLP_SPVLP_26M 41
+#define CLK_VLP_SPU0_VLP 42
+#define CLK_VLP_SPU1_VLP 43
+#define CLK_VLP_CLK26M 44
+#define CLK_VLP_APLL1_D4 45
+#define CLK_VLP_APLL1_D8 46
+#define CLK_VLP_APLL2_D4 47
+#define CLK_VLP_APLL2_D8 48
+
+/* DISPSYS_CONFIG */
+#define CLK_MM_CONFIG 0
+#define CLK_MM_DISP_MUTEX0 1
+#define CLK_MM_DISP_AAL0 2
+#define CLK_MM_DISP_AAL1 3
+#define CLK_MM_DISP_C3D0 4
+#define CLK_MM_DISP_C3D1 5
+#define CLK_MM_DISP_C3D2 6
+#define CLK_MM_DISP_C3D3 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_CCORR1 9
+#define CLK_MM_DISP_CCORR2 10
+#define CLK_MM_DISP_CCORR3 11
+#define CLK_MM_DISP_CHIST0 12
+#define CLK_MM_DISP_CHIST1 13
+#define CLK_MM_DISP_COLOR0 14
+#define CLK_MM_DISP_COLOR1 15
+#define CLK_MM_DISP_DITHER0 16
+#define CLK_MM_DISP_DITHER1 17
+#define CLK_MM_DISP_DLI_ASYNC0 18
+#define CLK_MM_DISP_DLI_ASYNC1 19
+#define CLK_MM_DISP_DLI_ASYNC2 20
+#define CLK_MM_DISP_DLI_ASYNC3 21
+#define CLK_MM_DISP_DLI_ASYNC4 22
+#define CLK_MM_DISP_DLI_ASYNC5 23
+#define CLK_MM_DISP_DLI_ASYNC6 24
+#define CLK_MM_DISP_DLI_ASYNC7 25
+#define CLK_MM_DISP_DLI_ASYNC8 26
+#define CLK_MM_DISP_DLI_ASYNC9 27
+#define CLK_MM_DISP_DLI_ASYNC10 28
+#define CLK_MM_DISP_DLI_ASYNC11 29
+#define CLK_MM_DISP_DLI_ASYNC12 30
+#define CLK_MM_DISP_DLI_ASYNC13 31
+#define CLK_MM_DISP_DLI_ASYNC14 32
+#define CLK_MM_DISP_DLI_ASYNC15 33
+#define CLK_MM_DISP_DLO_ASYNC0 34
+#define CLK_MM_DISP_DLO_ASYNC1 35
+#define CLK_MM_DISP_DLO_ASYNC2 36
+#define CLK_MM_DISP_DLO_ASYNC3 37
+#define CLK_MM_DISP_DLO_ASYNC4 38
+#define CLK_MM_DISP_DLO_ASYNC5 39
+#define CLK_MM_DISP_DLO_ASYNC6 40
+#define CLK_MM_DISP_DLO_ASYNC7 41
+#define CLK_MM_DISP_DLO_ASYNC8 42
+#define CLK_MM_DISP_GAMMA0 43
+#define CLK_MM_DISP_GAMMA1 44
+#define CLK_MM_MDP_AAL0 45
+#define CLK_MM_MDP_AAL1 46
+#define CLK_MM_MDP_RDMA0 47
+#define CLK_MM_DISP_POSTMASK0 48
+#define CLK_MM_DISP_POSTMASK1 49
+#define CLK_MM_MDP_RSZ0 50
+#define CLK_MM_MDP_RSZ1 51
+#define CLK_MM_DISP_SPR0 52
+#define CLK_MM_DISP_TDSHP0 53
+#define CLK_MM_DISP_TDSHP1 54
+#define CLK_MM_DISP_WDMA0 55
+#define CLK_MM_DISP_Y2R0 56
+#define CLK_MM_SMI_SUB_COMM0 57
+#define CLK_MM_DISP_FAKE_ENG0 58
+
+/* DISPSYS1_CONFIG */
+#define CLK_MM1_DISPSYS1_CONFIG 0
+#define CLK_MM1_DISPSYS1_S_CONFIG 1
+#define CLK_MM1_DISP_MUTEX0 2
+#define CLK_MM1_DISP_DLI_ASYNC20 3
+#define CLK_MM1_DISP_DLI_ASYNC21 4
+#define CLK_MM1_DISP_DLI_ASYNC22 5
+#define CLK_MM1_DISP_DLI_ASYNC23 6
+#define CLK_MM1_DISP_DLI_ASYNC24 7
+#define CLK_MM1_DISP_DLI_ASYNC25 8
+#define CLK_MM1_DISP_DLI_ASYNC26 9
+#define CLK_MM1_DISP_DLI_ASYNC27 10
+#define CLK_MM1_DISP_DLI_ASYNC28 11
+#define CLK_MM1_DISP_RELAY0 12
+#define CLK_MM1_DISP_RELAY1 13
+#define CLK_MM1_DISP_RELAY2 14
+#define CLK_MM1_DISP_RELAY3 15
+#define CLK_MM1_DISP_DP_INTF0 16
+#define CLK_MM1_DISP_DP_INTF1 17
+#define CLK_MM1_DISP_DSC_WRAP0 18
+#define CLK_MM1_DISP_DSC_WRAP1 19
+#define CLK_MM1_DISP_DSC_WRAP2 20
+#define CLK_MM1_DISP_DSC_WRAP3 21
+#define CLK_MM1_DISP_DSI0 22
+#define CLK_MM1_DISP_DSI1 23
+#define CLK_MM1_DISP_DSI2 24
+#define CLK_MM1_DISP_DVO0 25
+#define CLK_MM1_DISP_GDMA0 26
+#define CLK_MM1_DISP_MERGE0 27
+#define CLK_MM1_DISP_MERGE1 28
+#define CLK_MM1_DISP_MERGE2 29
+#define CLK_MM1_DISP_ODDMR0 30
+#define CLK_MM1_DISP_POSTALIGN0 31
+#define CLK_MM1_DISP_DITHER2 32
+#define CLK_MM1_DISP_R2Y0 33
+#define CLK_MM1_DISP_SPLITTER0 34
+#define CLK_MM1_DISP_SPLITTER1 35
+#define CLK_MM1_DISP_SPLITTER2 36
+#define CLK_MM1_DISP_SPLITTER3 37
+#define CLK_MM1_DISP_VDCM0 38
+#define CLK_MM1_DISP_WDMA1 39
+#define CLK_MM1_DISP_WDMA2 40
+#define CLK_MM1_DISP_WDMA3 41
+#define CLK_MM1_DISP_WDMA4 42
+#define CLK_MM1_MDP_RDMA1 43
+#define CLK_MM1_SMI_LARB0 44
+#define CLK_MM1_MOD1 45
+#define CLK_MM1_MOD2 46
+#define CLK_MM1_MOD3 47
+#define CLK_MM1_MOD4 48
+#define CLK_MM1_MOD5 49
+#define CLK_MM1_MOD6 50
+#define CLK_MM1_CG0 51
+#define CLK_MM1_CG1 52
+#define CLK_MM1_CG2 53
+#define CLK_MM1_CG3 54
+#define CLK_MM1_CG4 55
+#define CLK_MM1_CG5 56
+#define CLK_MM1_CG6 57
+#define CLK_MM1_CG7 58
+#define CLK_MM1_F26M 59
+
+/* OVLSYS_CONFIG */
+#define CLK_OVLSYS_CONFIG 0
+#define CLK_OVL_FAKE_ENG0 1
+#define CLK_OVL_FAKE_ENG1 2
+#define CLK_OVL_MUTEX0 3
+#define CLK_OVL_EXDMA0 4
+#define CLK_OVL_EXDMA1 5
+#define CLK_OVL_EXDMA2 6
+#define CLK_OVL_EXDMA3 7
+#define CLK_OVL_EXDMA4 8
+#define CLK_OVL_EXDMA5 9
+#define CLK_OVL_EXDMA6 10
+#define CLK_OVL_EXDMA7 11
+#define CLK_OVL_EXDMA8 12
+#define CLK_OVL_EXDMA9 13
+#define CLK_OVL_BLENDER0 14
+#define CLK_OVL_BLENDER1 15
+#define CLK_OVL_BLENDER2 16
+#define CLK_OVL_BLENDER3 17
+#define CLK_OVL_BLENDER4 18
+#define CLK_OVL_BLENDER5 19
+#define CLK_OVL_BLENDER6 20
+#define CLK_OVL_BLENDER7 21
+#define CLK_OVL_BLENDER8 22
+#define CLK_OVL_BLENDER9 23
+#define CLK_OVL_OUTPROC0 24
+#define CLK_OVL_OUTPROC1 25
+#define CLK_OVL_OUTPROC2 26
+#define CLK_OVL_OUTPROC3 27
+#define CLK_OVL_OUTPROC4 28
+#define CLK_OVL_OUTPROC5 29
+#define CLK_OVL_MDP_RSZ0 30
+#define CLK_OVL_MDP_RSZ1 31
+#define CLK_OVL_DISP_WDMA0 32
+#define CLK_OVL_DISP_WDMA1 33
+#define CLK_OVL_UFBC_WDMA0 34
+#define CLK_OVL_MDP_RDMA0 35
+#define CLK_OVL_MDP_RDMA1 36
+#define CLK_OVL_BWM0 37
+#define CLK_OVL_DLI0 38
+#define CLK_OVL_DLI1 39
+#define CLK_OVL_DLI2 40
+#define CLK_OVL_DLI3 41
+#define CLK_OVL_DLI4 42
+#define CLK_OVL_DLI5 43
+#define CLK_OVL_DLI6 44
+#define CLK_OVL_DLI7 45
+#define CLK_OVL_DLI8 46
+#define CLK_OVL_DLO0 47
+#define CLK_OVL_DLO1 48
+#define CLK_OVL_DLO2 49
+#define CLK_OVL_DLO3 50
+#define CLK_OVL_DLO4 51
+#define CLK_OVL_DLO5 52
+#define CLK_OVL_DLO6 53
+#define CLK_OVL_DLO7 54
+#define CLK_OVL_DLO8 55
+#define CLK_OVL_DLO9 56
+#define CLK_OVL_DLO10 57
+#define CLK_OVL_DLO11 58
+#define CLK_OVL_DLO12 59
+#define CLK_OVLSYS_RELAY0 60
+#define CLK_OVL_INLINEROT0 61
+#define CLK_OVL_SMI 62
+#define CLK_OVL_SMI_SMI 63
+
+/* OVLSYS1_CONFIG */
+#define CLK_OVL1_OVLSYS_CONFIG 0
+#define CLK_OVL1_OVL_FAKE_ENG0 1
+#define CLK_OVL1_OVL_FAKE_ENG1 2
+#define CLK_OVL1_OVL_MUTEX0 3
+#define CLK_OVL1_OVL_EXDMA0 4
+#define CLK_OVL1_OVL_EXDMA1 5
+#define CLK_OVL1_OVL_EXDMA2 6
+#define CLK_OVL1_OVL_EXDMA3 7
+#define CLK_OVL1_OVL_EXDMA4 8
+#define CLK_OVL1_OVL_EXDMA5 9
+#define CLK_OVL1_OVL_EXDMA6 10
+#define CLK_OVL1_OVL_EXDMA7 11
+#define CLK_OVL1_OVL_EXDMA8 12
+#define CLK_OVL1_OVL_EXDMA9 13
+#define CLK_OVL1_OVL_BLENDER0 14
+#define CLK_OVL1_OVL_BLENDER1 15
+#define CLK_OVL1_OVL_BLENDER2 16
+#define CLK_OVL1_OVL_BLENDER3 17
+#define CLK_OVL1_OVL_BLENDER4 18
+#define CLK_OVL1_OVL_BLENDER5 19
+#define CLK_OVL1_OVL_BLENDER6 20
+#define CLK_OVL1_OVL_BLENDER7 21
+#define CLK_OVL1_OVL_BLENDER8 22
+#define CLK_OVL1_OVL_BLENDER9 23
+#define CLK_OVL1_OVL_OUTPROC0 24
+#define CLK_OVL1_OVL_OUTPROC1 25
+#define CLK_OVL1_OVL_OUTPROC2 26
+#define CLK_OVL1_OVL_OUTPROC3 27
+#define CLK_OVL1_OVL_OUTPROC4 28
+#define CLK_OVL1_OVL_OUTPROC5 29
+#define CLK_OVL1_OVL_MDP_RSZ0 30
+#define CLK_OVL1_OVL_MDP_RSZ1 31
+#define CLK_OVL1_OVL_DISP_WDMA0 32
+#define CLK_OVL1_OVL_DISP_WDMA1 33
+#define CLK_OVL1_OVL_UFBC_WDMA0 34
+#define CLK_OVL1_OVL_MDP_RDMA0 35
+#define CLK_OVL1_OVL_MDP_RDMA1 36
+#define CLK_OVL1_OVL_BWM0 37
+#define CLK_OVL1_DLI0 38
+#define CLK_OVL1_DLI1 39
+#define CLK_OVL1_DLI2 40
+#define CLK_OVL1_DLI3 41
+#define CLK_OVL1_DLI4 42
+#define CLK_OVL1_DLI5 43
+#define CLK_OVL1_DLI6 44
+#define CLK_OVL1_DLI7 45
+#define CLK_OVL1_DLI8 46
+#define CLK_OVL1_DLO0 47
+#define CLK_OVL1_DLO1 48
+#define CLK_OVL1_DLO2 49
+#define CLK_OVL1_DLO3 50
+#define CLK_OVL1_DLO4 51
+#define CLK_OVL1_DLO5 52
+#define CLK_OVL1_DLO6 53
+#define CLK_OVL1_DLO7 54
+#define CLK_OVL1_DLO8 55
+#define CLK_OVL1_DLO9 56
+#define CLK_OVL1_DLO10 57
+#define CLK_OVL1_DLO11 58
+#define CLK_OVL1_DLO12 59
+#define CLK_OVL1_OVLSYS_RELAY0 60
+#define CLK_OVL1_OVL_INLINEROT0 61
+#define CLK_OVL1_SMI 62
+
+/* VDEC_SOC_GCON_BASE */
+#define CLK_VDE1_LARB1_CKEN 0
+#define CLK_VDE1_LAT_CKEN 1
+#define CLK_VDE1_LAT_ACTIVE 2
+#define CLK_VDE1_LAT_CKEN_ENG 3
+#define CLK_VDE1_VDEC_CKEN 4
+#define CLK_VDE1_VDEC_ACTIVE 5
+#define CLK_VDE1_VDEC_CKEN_ENG 6
+#define CLK_VDE1_VDEC_SOC_APTV_EN 7
+#define CLK_VDE1_VDEC_SOC_APTV_TOP_EN 8
+#define CLK_VDE1_VDEC_SOC_IPS_EN 9
+
+/* VDEC_GCON_BASE */
+#define CLK_VDE2_LARB1_CKEN 0
+#define CLK_VDE2_LAT_CKEN 1
+#define CLK_VDE2_LAT_ACTIVE 2
+#define CLK_VDE2_LAT_CKEN_ENG 3
+#define CLK_VDE2_VDEC_CKEN 4
+#define CLK_VDE2_VDEC_ACTIVE 5
+#define CLK_VDE2_VDEC_CKEN_ENG 6
+
+/* VENC_GCON */
+#define CLK_VEN1_CKE0_LARB 0
+#define CLK_VEN1_CKE1_VENC 1
+#define CLK_VEN1_CKE2_JPGENC 2
+#define CLK_VEN1_CKE3_JPGDEC 3
+#define CLK_VEN1_CKE4_JPGDEC_C1 4
+#define CLK_VEN1_CKE5_GALS 5
+#define CLK_VEN1_CKE29_VENC_ADAB_CTRL 6
+#define CLK_VEN1_CKE29_VENC_XPC_CTRL 7
+#define CLK_VEN1_CKE6_GALS_SRAM 8
+#define CLK_VEN1_RES_FLAT 9
+
+/* VENC_GCON_CORE1 */
+#define CLK_VEN2_CKE0_LARB 0
+#define CLK_VEN2_CKE1_VENC 1
+#define CLK_VEN2_CKE2_JPGENC 2
+#define CLK_VEN2_CKE3_JPGDEC 3
+#define CLK_VEN2_CKE5_GALS 4
+#define CLK_VEN2_CKE29_VENC_XPC_CTRL 5
+#define CLK_VEN2_CKE6_GALS_SRAM 6
+#define CLK_VEN2_RES_FLAT 7
+
+/* VENC_GCON_CORE2 */
+#define CLK_VEN_C2_CKE0_LARB 0
+#define CLK_VEN_C2_CKE1_VENC 1
+#define CLK_VEN_C2_CKE5_GALS 2
+#define CLK_VEN_C2_CKE29_VENC_XPC_CTRL 3
+#define CLK_VEN_C2_CKE6_GALS_SRAM 4
+#define CLK_VEN_C2_RES_FLAT 5
+
+/* MDPSYS_CONFIG */
+#define CLK_MDP_MDP_MUTEX0 0
+#define CLK_MDP_SMI0 1
+#define CLK_MDP_SMI0_SMI 2
+#define CLK_MDP_APB_BUS 3
+#define CLK_MDP_MDP_RDMA0 4
+#define CLK_MDP_MDP_RDMA1 5
+#define CLK_MDP_MDP_RDMA2 6
+#define CLK_MDP_MDP_BIRSZ0 7
+#define CLK_MDP_MDP_HDR0 8
+#define CLK_MDP_MDP_AAL0 9
+#define CLK_MDP_MDP_RSZ0 10
+#define CLK_MDP_MDP_RSZ2 11
+#define CLK_MDP_MDP_TDSHP0 12
+#define CLK_MDP_MDP_COLOR0 13
+#define CLK_MDP_MDP_WROT0 14
+#define CLK_MDP_MDP_WROT1 15
+#define CLK_MDP_MDP_WROT2 16
+#define CLK_MDP_MDP_FAKE_ENG0 17
+#define CLK_MDP_APB_DB 18
+#define CLK_MDP_MDP_DLI_ASYNC0 19
+#define CLK_MDP_MDP_DLI_ASYNC1 20
+#define CLK_MDP_MDP_DLO_ASYNC0 21
+#define CLK_MDP_MDP_DLO_ASYNC1 22
+#define CLK_MDP_MDP_DLI_ASYNC2 23
+#define CLK_MDP_MDP_DLO_ASYNC2 24
+#define CLK_MDP_MDP_DLO_ASYNC3 25
+#define CLK_MDP_IMG_DL_ASYNC0 26
+#define CLK_MDP_MDP_RROT0 27
+#define CLK_MDP_MDP_MERGE0 28
+#define CLK_MDP_MDP_C3D0 29
+#define CLK_MDP_MDP_FG0 30
+#define CLK_MDP_MDP_CLA2 31
+#define CLK_MDP_MDP_DLO_ASYNC4 32
+#define CLK_MDP_VPP_RSZ0 33
+#define CLK_MDP_VPP_RSZ1 34
+#define CLK_MDP_MDP_DLO_ASYNC5 35
+#define CLK_MDP_IMG0 36
+#define CLK_MDP_F26M 37
+#define CLK_MDP_IMG_DL_RELAY0 38
+#define CLK_MDP_IMG_DL_RELAY1 39
+
+/* MDPSYS1_CONFIG */
+#define CLK_MDP1_MDP_MUTEX0 0
+#define CLK_MDP1_SMI0 1
+#define CLK_MDP1_SMI0_SMI 2
+#define CLK_MDP1_APB_BUS 3
+#define CLK_MDP1_MDP_RDMA0 4
+#define CLK_MDP1_MDP_RDMA1 5
+#define CLK_MDP1_MDP_RDMA2 6
+#define CLK_MDP1_MDP_BIRSZ0 7
+#define CLK_MDP1_MDP_HDR0 8
+#define CLK_MDP1_MDP_AAL0 9
+#define CLK_MDP1_MDP_RSZ0 10
+#define CLK_MDP1_MDP_RSZ2 11
+#define CLK_MDP1_MDP_TDSHP0 12
+#define CLK_MDP1_MDP_COLOR0 13
+#define CLK_MDP1_MDP_WROT0 14
+#define CLK_MDP1_MDP_WROT1 15
+#define CLK_MDP1_MDP_WROT2 16
+#define CLK_MDP1_MDP_FAKE_ENG0 17
+#define CLK_MDP1_APB_DB 18
+#define CLK_MDP1_MDP_DLI_ASYNC0 19
+#define CLK_MDP1_MDP_DLI_ASYNC1 20
+#define CLK_MDP1_MDP_DLO_ASYNC0 21
+#define CLK_MDP1_MDP_DLO_ASYNC1 22
+#define CLK_MDP1_MDP_DLI_ASYNC2 23
+#define CLK_MDP1_MDP_DLO_ASYNC2 24
+#define CLK_MDP1_MDP_DLO_ASYNC3 25
+#define CLK_MDP1_IMG_DL_ASYNC0 26
+#define CLK_MDP1_MDP_RROT0 27
+#define CLK_MDP1_MDP_MERGE0 28
+#define CLK_MDP1_MDP_C3D0 29
+#define CLK_MDP1_MDP_FG0 30
+#define CLK_MDP1_MDP_CLA2 31
+#define CLK_MDP1_MDP_DLO_ASYNC4 32
+#define CLK_MDP1_VPP_RSZ0 33
+#define CLK_MDP1_VPP_RSZ1 34
+#define CLK_MDP1_MDP_DLO_ASYNC5 35
+#define CLK_MDP1_IMG0 36
+#define CLK_MDP1_F26M 37
+#define CLK_MDP1_IMG_DL_RELAY0 38
+#define CLK_MDP1_IMG_DL_RELAY1 39
+
+/* DISP_VDISP_AO_CONFIG */
+#define CLK_MM_V_DISP_VDISP_AO_CONFIG 0
+#define CLK_MM_V_DISP_DPC 1
+#define CLK_MM_V_SMI_SUB_SOMM0 2
+
+/* MFGPLL_PLL_CTRL */
+#define CLK_MFG_AO_MFGPLL 0
+
+/* MFGPLL_SC0_PLL_CTRL */
+#define CLK_MFGSC0_AO_MFGPLL_SC0 0
+
+/* MFGPLL_SC1_PLL_CTRL */
+#define CLK_MFGSC1_AO_MFGPLL_SC1 0
+
+/* CCIPLL_PLL_CTRL */
+#define CLK_CCIPLL 0
+
+/* ARMPLL_LL_PLL_CTRL */
+#define CLK_CPLL_ARMPLL_LL 0
+
+/* ARMPLL_BL_PLL_CTRL */
+#define CLK_CPBL_ARMPLL_BL 0
+
+/* ARMPLL_B_PLL_CTRL */
+#define CLK_CPB_ARMPLL_B 0
+
+/* PTPPLL_PLL_CTRL */
+#define CLK_PTPPLL 0
+
+#endif /* _DT_BINDINGS_CLK_MT8196_H */
diff --git a/include/dt-bindings/clock/mediatek,mtmips-sysc.h b/include/dt-bindings/clock/mediatek,mtmips-sysc.h
new file mode 100644
index 000000000000..a03335b0e077
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mtmips-sysc.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MTMIPS_H
+#define _DT_BINDINGS_CLK_MTMIPS_H
+
+/* Ralink RT-2880 clocks */
+
+#define RT2880_CLK_XTAL 0
+#define RT2880_CLK_CPU 1
+#define RT2880_CLK_BUS 2
+#define RT2880_CLK_TIMER 3
+#define RT2880_CLK_WATCHDOG 4
+#define RT2880_CLK_UART 5
+#define RT2880_CLK_I2C 6
+#define RT2880_CLK_UARTLITE 7
+#define RT2880_CLK_ETHERNET 8
+#define RT2880_CLK_WMAC 9
+
+/* Ralink RT-305X clocks */
+
+#define RT305X_CLK_XTAL 0
+#define RT305X_CLK_CPU 1
+#define RT305X_CLK_BUS 2
+#define RT305X_CLK_TIMER 3
+#define RT305X_CLK_WATCHDOG 4
+#define RT305X_CLK_UART 5
+#define RT305X_CLK_I2C 6
+#define RT305X_CLK_I2S 7
+#define RT305X_CLK_SPI1 8
+#define RT305X_CLK_SPI2 9
+#define RT305X_CLK_UARTLITE 10
+#define RT305X_CLK_ETHERNET 11
+#define RT305X_CLK_WMAC 12
+
+/* Ralink RT-3352 clocks */
+
+#define RT3352_CLK_XTAL 0
+#define RT3352_CLK_CPU 1
+#define RT3352_CLK_PERIPH 2
+#define RT3352_CLK_BUS 3
+#define RT3352_CLK_TIMER 4
+#define RT3352_CLK_WATCHDOG 5
+#define RT3352_CLK_UART 6
+#define RT3352_CLK_I2C 7
+#define RT3352_CLK_I2S 8
+#define RT3352_CLK_SPI1 9
+#define RT3352_CLK_SPI2 10
+#define RT3352_CLK_UARTLITE 11
+#define RT3352_CLK_ETHERNET 12
+#define RT3352_CLK_WMAC 13
+
+/* Ralink RT-3883 clocks */
+
+#define RT3883_CLK_XTAL 0
+#define RT3883_CLK_CPU 1
+#define RT3883_CLK_BUS 2
+#define RT3883_CLK_PERIPH 3
+#define RT3883_CLK_TIMER 4
+#define RT3883_CLK_WATCHDOG 5
+#define RT3883_CLK_UART 6
+#define RT3883_CLK_I2C 7
+#define RT3883_CLK_I2S 8
+#define RT3883_CLK_SPI1 9
+#define RT3883_CLK_SPI2 10
+#define RT3883_CLK_UARTLITE 11
+#define RT3883_CLK_ETHERNET 12
+#define RT3883_CLK_WMAC 13
+
+/* Ralink RT-5350 clocks */
+
+#define RT5350_CLK_XTAL 0
+#define RT5350_CLK_CPU 1
+#define RT5350_CLK_BUS 2
+#define RT5350_CLK_PERIPH 3
+#define RT5350_CLK_TIMER 4
+#define RT5350_CLK_WATCHDOG 5
+#define RT5350_CLK_UART 6
+#define RT5350_CLK_I2C 7
+#define RT5350_CLK_I2S 8
+#define RT5350_CLK_SPI1 9
+#define RT5350_CLK_SPI2 10
+#define RT5350_CLK_UARTLITE 11
+#define RT5350_CLK_ETHERNET 12
+#define RT5350_CLK_WMAC 13
+
+/* Ralink MT-7620 clocks */
+
+#define MT7620_CLK_XTAL 0
+#define MT7620_CLK_PLL 1
+#define MT7620_CLK_CPU 2
+#define MT7620_CLK_PERIPH 3
+#define MT7620_CLK_BUS 4
+#define MT7620_CLK_BBPPLL 5
+#define MT7620_CLK_SDHC 6
+#define MT7620_CLK_TIMER 7
+#define MT7620_CLK_WATCHDOG 8
+#define MT7620_CLK_UART 9
+#define MT7620_CLK_I2C 10
+#define MT7620_CLK_I2S 11
+#define MT7620_CLK_SPI1 12
+#define MT7620_CLK_SPI2 13
+#define MT7620_CLK_UARTLITE 14
+#define MT7620_CLK_MMC 15
+#define MT7620_CLK_WMAC 16
+
+/* Ralink MT-76X8 clocks */
+
+#define MT76X8_CLK_XTAL 0
+#define MT76X8_CLK_CPU 1
+#define MT76X8_CLK_BBPPLL 2
+#define MT76X8_CLK_PCMI2S 3
+#define MT76X8_CLK_PERIPH 4
+#define MT76X8_CLK_BUS 5
+#define MT76X8_CLK_SDHC 6
+#define MT76X8_CLK_TIMER 7
+#define MT76X8_CLK_WATCHDOG 8
+#define MT76X8_CLK_I2C 9
+#define MT76X8_CLK_I2S 10
+#define MT76X8_CLK_SPI1 11
+#define MT76X8_CLK_SPI2 12
+#define MT76X8_CLK_UART0 13
+#define MT76X8_CLK_UART1 14
+#define MT76X8_CLK_UART2 15
+#define MT76X8_CLK_MMC 16
+#define MT76X8_CLK_WMAC 17
+
+#endif /* _DT_BINDINGS_CLK_MTMIPS_H */
diff --git a/include/dt-bindings/clock/mobileye,eyeq5-clk.h b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
index 26d8930335e4..f353c2988035 100644
--- a/include/dt-bindings/clock/mobileye,eyeq5-clk.h
+++ b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
@@ -6,17 +6,60 @@
#ifndef _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
#define _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
-#define EQ5C_PLL_CPU 0
-#define EQ5C_PLL_VMP 1
-#define EQ5C_PLL_PMA 2
-#define EQ5C_PLL_VDI 3
-#define EQ5C_PLL_DDR0 4
-#define EQ5C_PLL_PCI 5
-#define EQ5C_PLL_PER 6
-#define EQ5C_PLL_PMAC 7
-#define EQ5C_PLL_MPC 8
-#define EQ5C_PLL_DDR1 9
-
-#define EQ5C_DIV_OSPI 10
+#define EQ5C_PLL_CPU 0
+#define EQ5C_PLL_VMP 1
+#define EQ5C_PLL_PMA 2
+#define EQ5C_PLL_VDI 3
+#define EQ5C_PLL_DDR0 4
+#define EQ5C_PLL_PCI 5
+#define EQ5C_PLL_PER 6
+#define EQ5C_PLL_PMAC 7
+#define EQ5C_PLL_MPC 8
+#define EQ5C_PLL_DDR1 9
+
+#define EQ5C_DIV_OSPI 10
+
+/* EQ5C_PLL_CPU children */
+#define EQ5C_CPU_CORE0 11
+#define EQ5C_CPU_CORE1 12
+#define EQ5C_CPU_CORE2 13
+#define EQ5C_CPU_CORE3 14
+
+/* EQ5C_PLL_PER children */
+#define EQ5C_PER_OCC 15
+#define EQ5C_PER_UART 16
+#define EQ5C_PER_SPI 17
+#define EQ5C_PER_I2C 18
+#define EQ5C_PER_GPIO 19
+#define EQ5C_PER_EMMC 20
+#define EQ5C_PER_OCC_PCI 21
+
+#define EQ6LC_PLL_DDR 0
+#define EQ6LC_PLL_CPU 1
+#define EQ6LC_PLL_PER 2
+#define EQ6LC_PLL_VDI 3
+
+#define EQ6HC_CENTRAL_PLL_CPU 0
+#define EQ6HC_CENTRAL_CPU_OCC 1
+
+#define EQ6HC_WEST_PLL_PER 0
+#define EQ6HC_WEST_PER_OCC 1
+#define EQ6HC_WEST_PER_UART 2
+
+#define EQ6HC_SOUTH_PLL_VDI 0
+#define EQ6HC_SOUTH_PLL_PCIE 1
+#define EQ6HC_SOUTH_PLL_PER 2
+#define EQ6HC_SOUTH_PLL_ISP 3
+
+#define EQ6HC_SOUTH_DIV_EMMC 4
+#define EQ6HC_SOUTH_DIV_OSPI_REF 5
+#define EQ6HC_SOUTH_DIV_OSPI_SYS 6
+#define EQ6HC_SOUTH_DIV_TSU 7
+
+#define EQ6HC_ACC_PLL_XNN 0
+#define EQ6HC_ACC_PLL_VMP 1
+#define EQ6HC_ACC_PLL_PMA 2
+#define EQ6HC_ACC_PLL_MPC 3
+#define EQ6HC_ACC_PLL_NOC 4
#endif
diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h
index c12e7eab0788..a173eb132892 100644
--- a/include/dt-bindings/clock/mt7622-clk.h
+++ b/include/dt-bindings/clock/mt7622-clk.h
@@ -228,7 +228,7 @@
#define CLK_AUDIO_MEM_ASRC4 44
#define CLK_AUDIO_MEM_ASRC5 45
#define CLK_AUDIO_AFE_CONN 46
-#define CLK_AUDIO_NR_CLK 47
+#define CLK_AUDIO_AFE_MRGIF 47
/* SSUSBSYS */
diff --git a/include/dt-bindings/clock/nvidia,tegra264.h b/include/dt-bindings/clock/nvidia,tegra264.h
new file mode 100644
index 000000000000..0fc2ad5e6cef
--- /dev/null
+++ b/include/dt-bindings/clock/nvidia,tegra264.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H
+
+#define TEGRA264_CLK_OSC 1
+#define TEGRA264_CLK_CLK_S 2
+#define TEGRA264_CLK_JTAG_REG 3
+#define TEGRA264_CLK_SPLL 4
+#define TEGRA264_CLK_SPLL_OUT0 5
+#define TEGRA264_CLK_SPLL_OUT1 6
+#define TEGRA264_CLK_SPLL_OUT2 7
+#define TEGRA264_CLK_SPLL_OUT3 8
+#define TEGRA264_CLK_SPLL_OUT4 9
+#define TEGRA264_CLK_SPLL_OUT5 10
+#define TEGRA264_CLK_SPLL_OUT6 11
+#define TEGRA264_CLK_SPLL_OUT7 12
+#define TEGRA264_CLK_AON_I2C 13
+#define TEGRA264_CLK_HOST1X 14
+#define TEGRA264_CLK_ISP 15
+#define TEGRA264_CLK_ISP1 16
+#define TEGRA264_CLK_ISP_ROOT 17
+#define TEGRA264_CLK_NAFLL_PVA0_CORE 18
+#define TEGRA264_CLK_NAFLL_PVA0_VPS 19
+#define TEGRA264_CLK_NVCSI 20
+#define TEGRA264_CLK_NVCSILP 21
+#define TEGRA264_CLK_PLLP_OUT0 22
+#define TEGRA264_CLK_PVA0_CPU_AXI 23
+#define TEGRA264_CLK_PVA0_VPS 24
+#define TEGRA264_CLK_PWM10 25
+#define TEGRA264_CLK_PWM2 26
+#define TEGRA264_CLK_PWM3 27
+#define TEGRA264_CLK_PWM4 28
+#define TEGRA264_CLK_PWM5 29
+#define TEGRA264_CLK_PWM9 30
+#define TEGRA264_CLK_QSPI0 31
+#define TEGRA264_CLK_QSPI0_2X_PM 32
+#define TEGRA264_CLK_RCE1_CPU 33
+#define TEGRA264_CLK_RCE1_NIC 34
+#define TEGRA264_CLK_RCE_CPU 35
+#define TEGRA264_CLK_RCE_NIC 36
+#define TEGRA264_CLK_SE 37
+#define TEGRA264_CLK_SEU1 38
+#define TEGRA264_CLK_SEU2 39
+#define TEGRA264_CLK_SEU3 40
+#define TEGRA264_CLK_SE_ROOT 41
+#define TEGRA264_CLK_SPI1 42
+#define TEGRA264_CLK_SPI2 43
+#define TEGRA264_CLK_SPI3 44
+#define TEGRA264_CLK_SPI4 45
+#define TEGRA264_CLK_SPI5 46
+#define TEGRA264_CLK_TOP_I2C 47
+#define TEGRA264_CLK_TSEC 48
+#define TEGRA264_CLK_TSEC_PKA 49
+#define TEGRA264_CLK_UART0 50
+#define TEGRA264_CLK_UART10 51
+#define TEGRA264_CLK_UART11 52
+#define TEGRA264_CLK_UART4 53
+#define TEGRA264_CLK_UART5 54
+#define TEGRA264_CLK_UART8 55
+#define TEGRA264_CLK_UART9 56
+#define TEGRA264_CLK_VI 57
+#define TEGRA264_CLK_VI1 58
+#define TEGRA264_CLK_VIC 59
+#define TEGRA264_CLK_VI_ROOT 60
+#define TEGRA264_CLK_DISPPLL 61
+#define TEGRA264_CLK_SPPLL0 62
+#define TEGRA264_CLK_SPPLL0_CLKOUT1A 63
+#define TEGRA264_CLK_SPPLL0_CLKOUT2A 64
+#define TEGRA264_CLK_SPPLL1 65
+#define TEGRA264_CLK_VPLL0 66
+#define TEGRA264_CLK_VPLL1 67
+#define TEGRA264_CLK_VPLL2 68
+#define TEGRA264_CLK_VPLL3 69
+#define TEGRA264_CLK_VPLL4 70
+#define TEGRA264_CLK_VPLL5 71
+#define TEGRA264_CLK_VPLL6 72
+#define TEGRA264_CLK_VPLL7 73
+#define TEGRA264_CLK_RG0_DIV 74
+#define TEGRA264_CLK_RG1_DIV 75
+#define TEGRA264_CLK_RG2_DIV 76
+#define TEGRA264_CLK_RG3_DIV 77
+#define TEGRA264_CLK_RG4_DIV 78
+#define TEGRA264_CLK_RG5_DIV 79
+#define TEGRA264_CLK_RG6_DIV 80
+#define TEGRA264_CLK_RG7_DIV 81
+#define TEGRA264_CLK_RG0 82
+#define TEGRA264_CLK_RG1 83
+#define TEGRA264_CLK_RG2 84
+#define TEGRA264_CLK_RG3 85
+#define TEGRA264_CLK_RG4 86
+#define TEGRA264_CLK_RG5 87
+#define TEGRA264_CLK_RG6 88
+#define TEGRA264_CLK_RG7 89
+#define TEGRA264_CLK_DISP 90
+#define TEGRA264_CLK_DSC 91
+#define TEGRA264_CLK_DSC_ROOT 92
+#define TEGRA264_CLK_HUB 93
+#define TEGRA264_CLK_VPLLX_SOR0_MUXED 94
+#define TEGRA264_CLK_VPLLX_SOR1_MUXED 95
+#define TEGRA264_CLK_VPLLX_SOR2_MUXED 96
+#define TEGRA264_CLK_VPLLX_SOR3_MUXED 97
+#define TEGRA264_CLK_LINKA_SYM 98
+#define TEGRA264_CLK_LINKB_SYM 99
+#define TEGRA264_CLK_LINKC_SYM 100
+#define TEGRA264_CLK_LINKD_SYM 101
+#define TEGRA264_CLK_PRE_SOR0 102
+#define TEGRA264_CLK_PRE_SOR1 103
+#define TEGRA264_CLK_PRE_SOR2 104
+#define TEGRA264_CLK_PRE_SOR3 105
+#define TEGRA264_CLK_SOR0_PLL_REF 106
+#define TEGRA264_CLK_SOR1_PLL_REF 107
+#define TEGRA264_CLK_SOR2_PLL_REF 108
+#define TEGRA264_CLK_SOR3_PLL_REF 109
+#define TEGRA264_CLK_SOR0_PAD 110
+#define TEGRA264_CLK_SOR1_PAD 111
+#define TEGRA264_CLK_SOR2_PAD 112
+#define TEGRA264_CLK_SOR3_PAD 113
+#define TEGRA264_CLK_SOR0_REF 114
+#define TEGRA264_CLK_SOR1_REF 115
+#define TEGRA264_CLK_SOR2_REF 116
+#define TEGRA264_CLK_SOR3_REF 117
+#define TEGRA264_CLK_SOR0_DIV 118
+#define TEGRA264_CLK_SOR1_DIV 119
+#define TEGRA264_CLK_SOR2_DIV 120
+#define TEGRA264_CLK_SOR3_DIV 121
+#define TEGRA264_CLK_SOR0 122
+#define TEGRA264_CLK_SOR1 123
+#define TEGRA264_CLK_SOR2 124
+#define TEGRA264_CLK_SOR3 125
+#define TEGRA264_CLK_SF0_SOR 126
+#define TEGRA264_CLK_SF1_SOR 127
+#define TEGRA264_CLK_SF2_SOR 128
+#define TEGRA264_CLK_SF3_SOR 129
+#define TEGRA264_CLK_SF4_SOR 130
+#define TEGRA264_CLK_SF5_SOR 131
+#define TEGRA264_CLK_SF6_SOR 132
+#define TEGRA264_CLK_SF7_SOR 133
+#define TEGRA264_CLK_SF0 134
+#define TEGRA264_CLK_SF1 135
+#define TEGRA264_CLK_SF2 136
+#define TEGRA264_CLK_SF3 137
+#define TEGRA264_CLK_SF4 138
+#define TEGRA264_CLK_SF5 139
+#define TEGRA264_CLK_SF6 140
+#define TEGRA264_CLK_SF7 141
+#define TEGRA264_CLK_MAUD 142
+#define TEGRA264_CLK_AZA_2XBIT 143
+#define TEGRA264_CLK_DCE_CPU 144
+#define TEGRA264_CLK_DCE_NIC 145
+#define TEGRA264_CLK_PLLC4 146
+#define TEGRA264_CLK_PLLC4_OUT0 147
+#define TEGRA264_CLK_PLLC4_OUT1 148
+#define TEGRA264_CLK_PLLC4_MUXED 149
+#define TEGRA264_CLK_SDMMC1 150
+#define TEGRA264_CLK_SDMMC_LEGACY_TM 151
+#define TEGRA264_CLK_PLLC0 152
+#define TEGRA264_CLK_NAFLL_BPMP 153
+#define TEGRA264_CLK_PLLP_OUT_PDIV 154
+#define TEGRA264_CLK_DISP_ROOT 155
+#define TEGRA264_CLK_ADSP 156
+#define TEGRA264_CLK_PLLA 157
+#define TEGRA264_CLK_PLLA1 158
+#define TEGRA264_CLK_PLLA1_OUT1 159
+#define TEGRA264_CLK_PLLAON 160
+#define TEGRA264_CLK_PLLAON_APE 161
+#define TEGRA264_CLK_PLLA_OUT0 162
+#define TEGRA264_CLK_AHUB 163
+#define TEGRA264_CLK_APE 164
+#define TEGRA264_CLK_I2S1_SCLK_IN 165
+#define TEGRA264_CLK_I2S2_SCLK_IN 166
+#define TEGRA264_CLK_I2S3_SCLK_IN 167
+#define TEGRA264_CLK_I2S4_SCLK_IN 168
+#define TEGRA264_CLK_I2S5_SCLK_IN 169
+#define TEGRA264_CLK_I2S6_SCLK_IN 170
+#define TEGRA264_CLK_I2S7_SCLK_IN 171
+#define TEGRA264_CLK_I2S8_SCLK_IN 172
+#define TEGRA264_CLK_I2S9_SCLK_IN 173
+#define TEGRA264_CLK_I2S1_AUDIO_SYNC 174
+#define TEGRA264_CLK_I2S2_AUDIO_SYNC 175
+#define TEGRA264_CLK_I2S3_AUDIO_SYNC 176
+#define TEGRA264_CLK_I2S4_AUDIO_SYNC 177
+#define TEGRA264_CLK_I2S5_AUDIO_SYNC 178
+#define TEGRA264_CLK_I2S6_AUDIO_SYNC 179
+#define TEGRA264_CLK_I2S7_AUDIO_SYNC 180
+#define TEGRA264_CLK_I2S8_AUDIO_SYNC 181
+#define TEGRA264_CLK_DMIC1_AUDIO_SYNC 182
+#define TEGRA264_CLK_DSPK1_AUDIO_SYNC 183
+#define TEGRA264_CLK_I2S1 184
+#define TEGRA264_CLK_I2S2 185
+#define TEGRA264_CLK_I2S3 186
+#define TEGRA264_CLK_I2S4 187
+#define TEGRA264_CLK_I2S5 188
+#define TEGRA264_CLK_I2S6 189
+#define TEGRA264_CLK_I2S7 190
+#define TEGRA264_CLK_I2S8 191
+#define TEGRA264_CLK_I2S9 192
+#define TEGRA264_CLK_DMIC1 193
+#define TEGRA264_CLK_DMIC5 194
+#define TEGRA264_CLK_DSPK1 195
+#define TEGRA264_CLK_AON_CPU 196
+#define TEGRA264_CLK_AON_NIC 197
+#define TEGRA264_CLK_BPMP 198
+#define TEGRA264_CLK_AXI_CBB 199
+#define TEGRA264_CLK_FUSE 200
+#define TEGRA264_CLK_TSENSE 201
+#define TEGRA264_CLK_CSITE 202
+#define TEGRA264_CLK_HCSITE 203
+#define TEGRA264_CLK_DBGAPB 204
+#define TEGRA264_CLK_LA 205
+#define TEGRA264_CLK_PLLREFGP 206
+#define TEGRA264_CLK_PLLE0 207
+#define TEGRA264_CLK_UPHY0_PLL0_XDIG 208
+#define TEGRA264_CLK_EQOS_APP 209
+#define TEGRA264_CLK_EQOS_MAC 210
+#define TEGRA264_CLK_EQOS_MACSEC 211
+#define TEGRA264_CLK_EQOS_TX_PCS 212
+#define TEGRA264_CLK_MGBES_PTP_REF 213
+#define TEGRA264_CLK_MGBE0_UPHY1_PLL_XDIG 214
+#define TEGRA264_CLK_MGBE0_TX_PCS 215
+#define TEGRA264_CLK_MGBE0_MAC 216
+#define TEGRA264_CLK_MGBE0_MACSEC 217
+#define TEGRA264_CLK_MGBE0_APP 218
+#define TEGRA264_CLK_MGBE1_UPHY1_PLL_XDIG 219
+#define TEGRA264_CLK_MGBE1_TX_PCS 220
+#define TEGRA264_CLK_MGBE1_MAC 221
+#define TEGRA264_CLK_MGBE1_MACSEC 222
+#define TEGRA264_CLK_MGBE1_APP 223
+#define TEGRA264_CLK_MGBE2_UPHY1_PLL_XDIG 224
+#define TEGRA264_CLK_MGBE2_TX_PCS 225
+#define TEGRA264_CLK_MGBE2_MAC 226
+#define TEGRA264_CLK_MGBE2_MACSEC 227
+#define TEGRA264_CLK_MGBE2_APP 228
+#define TEGRA264_CLK_MGBE3_UPHY1_PLL_XDIG 229
+#define TEGRA264_CLK_MGBE3_TX_PCS 230
+#define TEGRA264_CLK_MGBE3_MAC 231
+#define TEGRA264_CLK_MGBE3_MACSEC 232
+#define TEGRA264_CLK_MGBE3_APP 233
+#define TEGRA264_CLK_PLLREFUFS 234
+#define TEGRA264_CLK_PLLREFUFS_CLKOUT624 235
+#define TEGRA264_CLK_PLLREFUFS_REFCLKOUT 236
+#define TEGRA264_CLK_PLLREFUFS_UFSDEV_REFCLKOUT 237
+#define TEGRA264_CLK_UFSHC_CG_SYS 238
+#define TEGRA264_CLK_MPHY_L0_RX_LS_BIT_DIV 239
+#define TEGRA264_CLK_MPHY_L0_RX_LS_BIT 240
+#define TEGRA264_CLK_MPHY_L0_RX_LS_SYMB_DIV 241
+#define TEGRA264_CLK_MPHY_L0_RX_HS_SYMB_DIV 242
+#define TEGRA264_CLK_MPHY_L0_RX_SYMB 243
+#define TEGRA264_CLK_MPHY_L0_UPHY_TX_FIFO 244
+#define TEGRA264_CLK_MPHY_L0_TX_LS_3XBIT_DIV 245
+#define TEGRA264_CLK_MPHY_L0_TX_LS_SYMB_DIV 246
+#define TEGRA264_CLK_UPHY0_PLL4_XDIG 247
+#define TEGRA264_CLK_MPHY_L0_TX_HS_SYMB_DIV 248
+#define TEGRA264_CLK_MPHY_L0_TX_SYMB 249
+#define TEGRA264_CLK_MPHY_L0_TX_LS_3XBIT 250
+#define TEGRA264_CLK_MPHY_L0_RX_ANA 251
+#define TEGRA264_CLK_MPHY_L1_RX_ANA 252
+#define TEGRA264_CLK_MPHY_TX_1MHZ_REF 253
+#define TEGRA264_CLK_MPHY_CORE_PLL_FIXED 254
+#define TEGRA264_CLK_MPHY_IOBIST 255
+#define TEGRA264_CLK_UFSHC_CG_SYS_DIV 256
+#define TEGRA264_CLK_XUSB1_CORE 257
+#define TEGRA264_CLK_XUSB1_FALCON 258
+#define TEGRA264_CLK_XUSB1_FS 259
+#define TEGRA264_CLK_XUSB1_SS 260
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_CORE 261
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_CORE 262
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_CORE 263
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_CORE 264
+#define TEGRA264_CLK_XUSB1_CLK480M_NVWRAP_CORE 265
+#define TEGRA264_CLK_XUSB1_CORE_HOST 266
+#define TEGRA264_CLK_XUSB1_CORE_DEV 267
+#define TEGRA264_CLK_XUSB1_CORE_SUPERSPEED 268
+#define TEGRA264_CLK_XUSB1_FALCON_HOST 269
+#define TEGRA264_CLK_XUSB1_FALCON_SUPERSPEED 270
+#define TEGRA264_CLK_XUSB1_FS_HOST 271
+#define TEGRA264_CLK_XUSB1_FS_DEV 272
+#define TEGRA264_CLK_XUSB1_HS_HSICP 273
+#define TEGRA264_CLK_XUSB1_SS_DEV 274
+#define TEGRA264_CLK_XUSB1_SS_SUPERSPEED 275
+#define TEGRA264_CLK_AON_TOUCH 276
+#define TEGRA264_CLK_AUD_MCLK 277
+#define TEGRA264_CLK_EXTPERIPH1 278
+#define TEGRA264_CLK_EXTPERIPH2 279
+#define TEGRA264_CLK_EXTPERIPH3 280
+#define TEGRA264_CLK_EXTPERIPH4 281
+#define TEGRA264_CLK_JTAG_REG_UNGATED 282
+#define TEGRA264_CLK_IST_BUS 283
+#define TEGRA264_CLK_IST_BUS_RIST_MCC 284
+#define TEGRA264_CLK_MATHS_SEC_RIST 285
+#define TEGRA264_CLK_NAFLL_IST 286
+#define TEGRA264_CLK_RIST_ROOT 287
+#define TEGRA264_CLK_IST_CONTROLLER_RIST 288
+#define TEGRA264_CLK_MSS_ENCRYPT 289
+#define TEGRA264_CLK_EMC 290
+#define TEGRA264_CLK_SPPLL0_CLKOUT100 291
+#define TEGRA264_CLK_SPPLL0_CLKOUT270 292
+#define TEGRA264_CLK_SPPLL1_CLKOUT100 293
+#define TEGRA264_CLK_SPPLL1_CLKOUT270 294
+#define TEGRA264_CLK_DP_LINKA_REF 295
+#define TEGRA264_CLK_DP_LINKB_REF 296
+#define TEGRA264_CLK_DP_LINKC_REF 297
+#define TEGRA264_CLK_DP_LINKD_REF 298
+#define TEGRA264_CLK_PLLNVCSI 299
+#define TEGRA264_CLK_PLLBPMPCAM 300
+#define TEGRA264_CLK_UTMI_PLL1 301
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT48 302
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT60 303
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT480 304
+#define TEGRA264_CLK_NAFLL_ISP 305
+#define TEGRA264_CLK_NAFLL_RCE 306
+#define TEGRA264_CLK_NAFLL_RCE1 307
+#define TEGRA264_CLK_NAFLL_SE 308
+#define TEGRA264_CLK_NAFLL_VI 309
+#define TEGRA264_CLK_NAFLL_VIC 310
+#define TEGRA264_CLK_NAFLL_DCE 311
+#define TEGRA264_CLK_NAFLL_TSEC 312
+#define TEGRA264_CLK_NAFLL_CPAIR0 313
+#define TEGRA264_CLK_NAFLL_CPAIR1 314
+#define TEGRA264_CLK_NAFLL_CPAIR2 315
+#define TEGRA264_CLK_NAFLL_CPAIR3 316
+#define TEGRA264_CLK_NAFLL_CPAIR4 317
+#define TEGRA264_CLK_NAFLL_CPAIR5 318
+#define TEGRA264_CLK_NAFLL_CPAIR6 319
+#define TEGRA264_CLK_NAFLL_GPU_SYS 320
+#define TEGRA264_CLK_NAFLL_GPU_NVD 321
+#define TEGRA264_CLK_NAFLL_GPU_UPROC 322
+#define TEGRA264_CLK_NAFLL_GPU_GPC0 323
+#define TEGRA264_CLK_NAFLL_GPU_GPC1 324
+#define TEGRA264_CLK_NAFLL_GPU_GPC2 325
+#define TEGRA264_CLK_SOR_LINKA_INPUT 326
+#define TEGRA264_CLK_SOR_LINKB_INPUT 327
+#define TEGRA264_CLK_SOR_LINKC_INPUT 328
+#define TEGRA264_CLK_SOR_LINKD_INPUT 329
+#define TEGRA264_CLK_SOR_LINKA_AFIFO 330
+#define TEGRA264_CLK_SOR_LINKB_AFIFO 331
+#define TEGRA264_CLK_SOR_LINKC_AFIFO 332
+#define TEGRA264_CLK_SOR_LINKD_AFIFO 333
+#define TEGRA264_CLK_I2S1_PAD_M 334
+#define TEGRA264_CLK_I2S2_PAD_M 335
+#define TEGRA264_CLK_I2S3_PAD_M 336
+#define TEGRA264_CLK_I2S4_PAD_M 337
+#define TEGRA264_CLK_I2S5_PAD_M 338
+#define TEGRA264_CLK_I2S6_PAD_M 339
+#define TEGRA264_CLK_I2S7_PAD_M 340
+#define TEGRA264_CLK_I2S8_PAD_M 341
+#define TEGRA264_CLK_I2S9_PAD_M 342
+#define TEGRA264_CLK_BPMP_NIC 343
+#define TEGRA264_CLK_CLK1M 344
+#define TEGRA264_CLK_RDET 345
+#define TEGRA264_CLK_ADC_SOC_REF 346
+#define TEGRA264_CLK_UPHY0_PLL0_TXREF 347
+#define TEGRA264_CLK_EQOS_TX 348
+#define TEGRA264_CLK_EQOS_TX_M 349
+#define TEGRA264_CLK_EQOS_RX_PCS_IN 350
+#define TEGRA264_CLK_EQOS_RX_PCS_M 351
+#define TEGRA264_CLK_EQOS_RX_IN 352
+#define TEGRA264_CLK_EQOS_RX 353
+#define TEGRA264_CLK_EQOS_RX_M 354
+#define TEGRA264_CLK_MGBE0_UPHY1_PLL_TXREF 355
+#define TEGRA264_CLK_MGBE0_TX 356
+#define TEGRA264_CLK_MGBE0_TX_M 357
+#define TEGRA264_CLK_MGBE0_RX_PCS_IN 358
+#define TEGRA264_CLK_MGBE0_RX_PCS_M 359
+#define TEGRA264_CLK_MGBE0_RX_IN 360
+#define TEGRA264_CLK_MGBE0_RX_M 361
+#define TEGRA264_CLK_MGBE1_UPHY1_PLL_TXREF 362
+#define TEGRA264_CLK_MGBE1_TX 363
+#define TEGRA264_CLK_MGBE1_TX_M 364
+#define TEGRA264_CLK_MGBE1_RX_PCS_IN 365
+#define TEGRA264_CLK_MGBE1_RX_PCS_M 366
+#define TEGRA264_CLK_MGBE1_RX_IN 367
+#define TEGRA264_CLK_MGBE1_RX_M 368
+#define TEGRA264_CLK_MGBE2_UPHY1_PLL_TXREF 369
+#define TEGRA264_CLK_MGBE2_TX 370
+#define TEGRA264_CLK_MGBE2_TX_M 371
+#define TEGRA264_CLK_MGBE2_RX_PCS_IN 372
+#define TEGRA264_CLK_MGBE2_RX_PCS_M 373
+#define TEGRA264_CLK_MGBE2_RX_IN 374
+#define TEGRA264_CLK_MGBE2_RX_M 375
+#define TEGRA264_CLK_MGBE3_UPHY1_PLL_TXREF 376
+#define TEGRA264_CLK_MGBE3_TX 377
+#define TEGRA264_CLK_MGBE3_TX_M 378
+#define TEGRA264_CLK_MGBE3_RX_PCS_IN 379
+#define TEGRA264_CLK_MGBE3_RX_PCS_M 380
+#define TEGRA264_CLK_MGBE3_RX_IN 381
+#define TEGRA264_CLK_MGBE3_RX_M 382
+#define TEGRA264_CLK_UPHY0_USB_P0_TX_CORE 383
+#define TEGRA264_CLK_UPHY0_USB_P1_TX_CORE 384
+#define TEGRA264_CLK_UPHY0_USB_P2_TX_CORE 385
+#define TEGRA264_CLK_UPHY0_USB_P3_TX_CORE 386
+#define TEGRA264_CLK_UPHY0_USB_P0_TX 387
+#define TEGRA264_CLK_UPHY0_USB_P1_TX 388
+#define TEGRA264_CLK_UPHY0_USB_P2_TX 389
+#define TEGRA264_CLK_UPHY0_USB_P3_TX 390
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_IN 391
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_IN 392
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_IN 393
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_IN 394
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_M 395
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_M 396
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_M 397
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_M 398
+#define TEGRA264_CLK_UPHY0_LANE0_TX_M 399
+#define TEGRA264_CLK_PCIE_C1_XCLK_NOBG_M 400
+#define TEGRA264_CLK_PCIE_C2_XCLK_NOBG_M 401
+#define TEGRA264_CLK_PCIE_C3_XCLK_NOBG_M 402
+#define TEGRA264_CLK_PCIE_C4_XCLK_NOBG_M 403
+#define TEGRA264_CLK_PCIE_C5_XCLK_NOBG_M 404
+#define TEGRA264_CLK_PCIE_C1_L0_RX_M 405
+#define TEGRA264_CLK_PCIE_C1_L1_RX_M 406
+#define TEGRA264_CLK_PCIE_C1_L2_RX_M 407
+#define TEGRA264_CLK_PCIE_C1_L3_RX_M 408
+#define TEGRA264_CLK_PCIE_C2_L0_RX_M 409
+#define TEGRA264_CLK_PCIE_C2_L1_RX_M 410
+#define TEGRA264_CLK_PCIE_C2_L2_RX_M 411
+#define TEGRA264_CLK_PCIE_C2_L3_RX_M 412
+#define TEGRA264_CLK_PCIE_C3_L0_RX_M 413
+#define TEGRA264_CLK_PCIE_C3_L1_RX_M 414
+#define TEGRA264_CLK_PCIE_C4_L0_RX_M 415
+#define TEGRA264_CLK_PCIE_C4_L1_RX_M 416
+#define TEGRA264_CLK_PCIE_C4_L2_RX_M 417
+#define TEGRA264_CLK_PCIE_C4_L3_RX_M 418
+#define TEGRA264_CLK_PCIE_C4_L4_RX_M 419
+#define TEGRA264_CLK_PCIE_C4_L5_RX_M 420
+#define TEGRA264_CLK_PCIE_C4_L6_RX_M 421
+#define TEGRA264_CLK_PCIE_C4_L7_RX_M 422
+#define TEGRA264_CLK_PCIE_C5_L0_RX_M 423
+#define TEGRA264_CLK_PCIE_C5_L1_RX_M 424
+#define TEGRA264_CLK_PCIE_C5_L2_RX_M 425
+#define TEGRA264_CLK_PCIE_C5_L3_RX_M 426
+#define TEGRA264_CLK_MPHY_L0_RX_PWM_BIT_M 427
+#define TEGRA264_CLK_MPHY_L1_RX_PWM_BIT_M 428
+#define TEGRA264_CLK_DBB_UPHY0 429
+#define TEGRA264_CLK_UPHY0_UXL_CORE 430
+#define TEGRA264_CLK_ISC_CPU_ROOT 431
+#define TEGRA264_CLK_ISC_NIC 432
+#define TEGRA264_CLK_CTC_TXCLK0_M 433
+#define TEGRA264_CLK_CTC_TXCLK1_M 434
+#define TEGRA264_CLK_CTC_RXCLK0_M 435
+#define TEGRA264_CLK_CTC_RXCLK1_M 436
+#define TEGRA264_CLK_PLLREFGP_OUT 437
+#define TEGRA264_CLK_PLLREFGP_OUT1 438
+#define TEGRA264_CLK_GPU_SYS 439
+#define TEGRA264_CLK_GPU_NVD 440
+#define TEGRA264_CLK_GPU_UPROC 441
+#define TEGRA264_CLK_GPU_GPC0 442
+#define TEGRA264_CLK_GPU_GPC1 443
+#define TEGRA264_CLK_GPU_GPC2 444
+#define TEGRA264_CLK_PLLX 445
+#define TEGRA264_CLK_APE_SOUNDWIRE_MSRC0 446
+#define TEGRA264_CLK_APE_SOUNDWIRE_DATA_EN_SHAPER 447
+#define TEGRA264_CLK_AO_SOUNDWIRE_MSRC0 448
+#define TEGRA264_CLK_AO_SOUNDWIRE_DATA_EN_SHAPER 449
+#define TEGRA264_CLK_MGBE0_TX_SER 459
+#define TEGRA264_CLK_MGBE1_TX_SER 460
+#define TEGRA264_CLK_MGBE2_TX_SER 461
+#define TEGRA264_CLK_MGBE3_TX_SER 462
+#define TEGRA264_CLK_MGBE0_RX_SER 463
+#define TEGRA264_CLK_MGBE1_RX_SER 464
+#define TEGRA264_CLK_MGBE2_RX_SER 465
+#define TEGRA264_CLK_MGBE3_RX_SER 466
+#define TEGRA264_CLK_DPAUX 467
+
+#endif /* DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H */
diff --git a/include/dt-bindings/clock/nxp,imx94-clock.h b/include/dt-bindings/clock/nxp,imx94-clock.h
new file mode 100644
index 000000000000..c4ba13352b99
--- /dev/null
+++ b/include/dt-bindings/clock/nxp,imx94-clock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX94_H
+#define __DT_BINDINGS_CLOCK_IMX94_H
+
+#define IMX94_CLK_DISPMIX_CLK_SEL 0
+
+#define IMX94_CLK_DISPMIX_LVDS_CLK_GATE 0
+
+#endif /* __DT_BINDINGS_CLOCK_IMX94_H */
diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h
index 782662c3e740..b7a713a9ac8c 100644
--- a/include/dt-bindings/clock/nxp,imx95-clock.h
+++ b/include/dt-bindings/clock/nxp,imx95-clock.h
@@ -25,4 +25,7 @@
#define IMX95_CLK_DISPMIX_ENG0_SEL 0
#define IMX95_CLK_DISPMIX_ENG1_SEL 1
+#define IMX95_CLK_NETCMIX_ENETC0_RMII 0
+#define IMX95_CLK_NETCMIX_ENETC1_RMII 1
+
#endif /* __DT_BINDINGS_CLOCK_IMX95_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 5b1416fcde6f..a2abf1995c34 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -175,8 +175,6 @@
#define PCLK_CIF 352
#define PCLK_OTP_PHY 353
-#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
-
/* pmu-clocks indices */
#define PLL_GPLL 1
@@ -195,8 +193,6 @@
#define PCLK_GPIO0_PMU 20
#define PCLK_UART0_PMU 21
-#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h
index 77b6e05492e2..0bb41e5efdef 100644
--- a/include/dt-bindings/clock/qcom,apss-ipq.h
+++ b/include/dt-bindings/clock/qcom,apss-ipq.h
@@ -8,5 +8,11 @@
#define APCS_ALIAS0_CLK_SRC 0
#define APCS_ALIAS0_CORE_CLK 1
+#define APSS_PLL_EARLY 2
+#define APSS_SILVER_CLK_SRC 3
+#define APSS_SILVER_CORE_CLK 4
+#define L3_PLL 5
+#define L3_CLK_SRC 6
+#define L3_CORE_CLK 7
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
index a4a692c20acf..9f113f346be8 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sc7280.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
@@ -52,4 +52,8 @@
/* DISP_CC power domains */
#define DISP_CC_MDSS_CORE_GDSC 0
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6350.h b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
index cb54aae2723e..61426a80e620 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sm6350.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
@@ -42,6 +42,10 @@
#define DISP_CC_SLEEP_CLK 31
#define DISP_CC_XO_CLK 32
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
/* GDSCs */
#define MDSS_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,dsi-phy-28nm.h b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
new file mode 100644
index 000000000000..ab94d58377a1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+#define _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
index 4b421e7414b5..4e3897b3669d 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8917.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -170,6 +170,23 @@
#define VFE1_CLK_SRC 163
#define VSYNC_CLK_SRC 164
#define GPLL0_SLEEP_CLK_SRC 165
+/* Additional MSM8937-specific clocks */
+#define MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC 166
+#define MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC 167
+#define MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC 168
+#define MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC 169
+#define MSM8937_BYTE1_CLK_SRC 170
+#define MSM8937_ESC1_CLK_SRC 171
+#define MSM8937_PCLK1_CLK_SRC 172
+#define MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK 173
+#define MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK 174
+#define MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK 175
+#define MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK 176
+#define MSM8937_GCC_MDSS_BYTE1_CLK 177
+#define MSM8937_GCC_MDSS_ESC1_CLK 178
+#define MSM8937_GCC_MDSS_PCLK1_CLK 179
+#define MSM8937_GCC_OXILI_AON_CLK 180
+#define MSM8937_GCC_OXILI_TIMER_CLK 181
/* GCC block resets */
#define GCC_CAMSS_MICRO_BCR 0
@@ -187,5 +204,7 @@
#define VENUS_GDSC 5
#define VFE0_GDSC 6
#define VFE1_GDSC 7
+/* Additional MSM8937-specific GDSCs */
+#define MSM8937_OXILI_CX_GDSC 8
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index b5456a64d421..5b0dde080900 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -193,10 +193,15 @@
#define GCC_MMSS_GPLL0_DIV_CLK 184
#define GCC_GPU_GPLL0_DIV_CLK 185
#define GCC_GPU_GPLL0_CLK 186
+#define HLOS1_VOTE_LPASS_CORE_SMMU_CLK 187
+#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 188
+#define GCC_MSS_Q6_BIMC_AXI_CLK 189
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define LPASS_ADSP_GDSC 3
+#define LPASS_CORE_GDSC 4
#define GCC_BLSP1_QUP1_BCR 0
#define GCC_BLSP1_QUP2_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
index 90c6e021a035..b9d8438a15ff 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -248,6 +248,17 @@
#define GCC_USB3_SEC_CLKREF_CLK 238
#define GCC_UFS_MEM_CLKREF_EN 239
#define GCC_UFS_CARD_CLKREF_EN 240
+#define GPLL9 241
+#define GCC_CAMERA_AHB_CLK 242
+#define GCC_CAMERA_XO_CLK 243
+#define GCC_CPUSS_DVM_BUS_CLK 244
+#define GCC_CPUSS_GNOC_CLK 245
+#define GCC_DISP_AHB_CLK 246
+#define GCC_DISP_XO_CLK 247
+#define GCC_GPU_CFG_AHB_CLK 248
+#define GCC_NPU_CFG_AHB_CLK 249
+#define GCC_VIDEO_AHB_CLK 250
+#define GCC_VIDEO_XO_CLK 251
#define GCC_EMAC_BCR 0
#define GCC_GPU_BCR 1
@@ -294,6 +305,10 @@
#define GCC_VIDEO_AXI0_CLK_BCR 42
#define GCC_VIDEO_AXI1_CLK_BCR 43
#define GCC_USB3_DP_PHY_SEC_BCR 44
+#define GCC_USB3_UNIPHY_MP0_BCR 45
+#define GCC_USB3_UNIPHY_MP1_BCR 46
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 47
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 48
/* GCC GDSCRs */
#define EMAC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
index df8a6f3d367e..f19018b742f5 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -138,10 +138,16 @@
#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128
#define GCC_RX0_USB2_CLKREF_CLK 129
#define GCC_RX1_USB2_CLKREF_CLK 130
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 131
+#define GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK 132
+#define GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK 133
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define HLOS1_VOTE_TURING_ADSP_GDSC 3
+#define HLOS2_VOTE_TURING_ADSP_GDSC 4
+#define HLOS1_VOTE_LPASS_ADSP_GDSC 5
#define GCC_QUSB2PHY_PRIM_BCR 0
#define GCC_QUSB2PHY_SEC_BCR 1
@@ -153,5 +159,7 @@
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
#define GCC_MSS_RESTART 9
+#define GCC_SDCC1_BCR 10
+#define GCC_SDCC2_BCR 11
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h
index 9679410843a0..7320e63c3a2f 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8450.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h
@@ -194,6 +194,9 @@
#define GCC_VIDEO_AXI0_CLK 182
#define GCC_VIDEO_AXI1_CLK 183
#define GCC_VIDEO_XO_CLK 184
+/* Additional SM8475-specific clocks */
+#define SM8475_GCC_GPLL2 185
+#define SM8475_GCC_GPLL3 186
/* GCC resets */
#define GCC_CAMERA_BCR 0
diff --git a/include/dt-bindings/clock/qcom,glymur-dispcc.h b/include/dt-bindings/clock/qcom,glymur-dispcc.h
new file mode 100644
index 000000000000..a845d76defe2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-dispcc.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_GLYMUR_H
+
+/* DISP_CC clocks */
+#define DISP_CC_ESYNC0_CLK 0
+#define DISP_CC_ESYNC0_CLK_SRC 1
+#define DISP_CC_ESYNC1_CLK 2
+#define DISP_CC_ESYNC1_CLK_SRC 3
+#define DISP_CC_MDSS_ACCU_SHIFT_CLK 4
+#define DISP_CC_MDSS_AHB1_CLK 5
+#define DISP_CC_MDSS_AHB_CLK 6
+#define DISP_CC_MDSS_AHB_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_CLK 8
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK 12
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 16
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 18
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK 21
+#define DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 24
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 26
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 28
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 29
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 31
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK 34
+#define DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 36
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 37
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 39
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 42
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 44
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK 47
+#define DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 49
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 50
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 52
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 53
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK 54
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 55
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 56
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 57
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 58
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 59
+#define DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK 60
+#define DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC 61
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 62
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 63
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 64
+#define DISP_CC_MDSS_ESC0_CLK 65
+#define DISP_CC_MDSS_ESC0_CLK_SRC 66
+#define DISP_CC_MDSS_ESC1_CLK 67
+#define DISP_CC_MDSS_ESC1_CLK_SRC 68
+#define DISP_CC_MDSS_MDP1_CLK 69
+#define DISP_CC_MDSS_MDP_CLK 70
+#define DISP_CC_MDSS_MDP_CLK_SRC 71
+#define DISP_CC_MDSS_MDP_LUT1_CLK 72
+#define DISP_CC_MDSS_MDP_LUT_CLK 73
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 74
+#define DISP_CC_MDSS_PCLK0_CLK 75
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 76
+#define DISP_CC_MDSS_PCLK1_CLK 77
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 78
+#define DISP_CC_MDSS_PCLK2_CLK 79
+#define DISP_CC_MDSS_PCLK2_CLK_SRC 80
+#define DISP_CC_MDSS_RSCC_AHB_CLK 81
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 82
+#define DISP_CC_MDSS_VSYNC1_CLK 83
+#define DISP_CC_MDSS_VSYNC_CLK 84
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 85
+#define DISP_CC_OSC_CLK 86
+#define DISP_CC_OSC_CLK_SRC 87
+#define DISP_CC_PLL0 88
+#define DISP_CC_PLL1 89
+#define DISP_CC_SLEEP_CLK 90
+#define DISP_CC_SLEEP_CLK_SRC 91
+#define DISP_CC_XO_CLK 92
+#define DISP_CC_XO_CLK_SRC 93
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,glymur-gcc.h b/include/dt-bindings/clock/qcom,glymur-gcc.h
new file mode 100644
index 000000000000..10c12b8c51c3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-gcc.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_GLYMUR_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL14 3
+#define GCC_GPLL14_OUT_EVEN 4
+#define GCC_GPLL4 5
+#define GCC_GPLL5 6
+#define GCC_GPLL7 7
+#define GCC_GPLL8 8
+#define GCC_GPLL9 9
+#define GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK 10
+#define GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK 11
+#define GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK 12
+#define GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK 13
+#define GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK 14
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 15
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 16
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 17
+#define GCC_AGGRE_USB3_MP_AXI_CLK 18
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 19
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 20
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 21
+#define GCC_AGGRE_USB4_0_AXI_CLK 22
+#define GCC_AGGRE_USB4_1_AXI_CLK 23
+#define GCC_AGGRE_USB4_2_AXI_CLK 24
+#define GCC_AV1E_AHB_CLK 25
+#define GCC_AV1E_AXI_CLK 26
+#define GCC_AV1E_XO_CLK 27
+#define GCC_BOOT_ROM_AHB_CLK 28
+#define GCC_CAMERA_AHB_CLK 29
+#define GCC_CAMERA_HF_AXI_CLK 30
+#define GCC_CAMERA_SF_AXI_CLK 31
+#define GCC_CAMERA_XO_CLK 32
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 33
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 34
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 35
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 36
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 37
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 38
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 39
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 40
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 41
+#define GCC_DISP_AHB_CLK 42
+#define GCC_DISP_HF_AXI_CLK 43
+#define GCC_EVA_AHB_CLK 44
+#define GCC_EVA_AXI0_CLK 45
+#define GCC_EVA_AXI0C_CLK 46
+#define GCC_EVA_XO_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP1_CLK_SRC 49
+#define GCC_GP2_CLK 50
+#define GCC_GP2_CLK_SRC 51
+#define GCC_GP3_CLK 52
+#define GCC_GP3_CLK_SRC 53
+#define GCC_GPU_CFG_AHB_CLK 54
+#define GCC_GPU_GEMNOC_GFX_CLK 55
+#define GCC_GPU_GPLL0_CLK_SRC 56
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 57
+#define GCC_PCIE_0_AUX_CLK 58
+#define GCC_PCIE_0_AUX_CLK_SRC 59
+#define GCC_PCIE_0_CFG_AHB_CLK 60
+#define GCC_PCIE_0_MSTR_AXI_CLK 61
+#define GCC_PCIE_0_PHY_RCHNG_CLK 62
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 63
+#define GCC_PCIE_0_PIPE_CLK 64
+#define GCC_PCIE_0_SLV_AXI_CLK 65
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 66
+#define GCC_PCIE_1_AUX_CLK 67
+#define GCC_PCIE_1_AUX_CLK_SRC 68
+#define GCC_PCIE_1_CFG_AHB_CLK 69
+#define GCC_PCIE_1_MSTR_AXI_CLK 70
+#define GCC_PCIE_1_PHY_RCHNG_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK 80
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 81
+#define GCC_PCIE_2_PIPE_CLK 82
+#define GCC_PCIE_2_SLV_AXI_CLK 83
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 84
+#define GCC_PCIE_3A_AUX_CLK 85
+#define GCC_PCIE_3A_AUX_CLK_SRC 86
+#define GCC_PCIE_3A_CFG_AHB_CLK 87
+#define GCC_PCIE_3A_MSTR_AXI_CLK 88
+#define GCC_PCIE_3A_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3A_PIPE_CLK 91
+#define GCC_PCIE_3A_PIPE_CLK_SRC 92
+#define GCC_PCIE_3A_SLV_AXI_CLK 93
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 94
+#define GCC_PCIE_3B_AUX_CLK 95
+#define GCC_PCIE_3B_AUX_CLK_SRC 96
+#define GCC_PCIE_3B_CFG_AHB_CLK 97
+#define GCC_PCIE_3B_MSTR_AXI_CLK 98
+#define GCC_PCIE_3B_PHY_RCHNG_CLK 99
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_3B_PIPE_CLK 101
+#define GCC_PCIE_3B_PIPE_CLK_SRC 102
+#define GCC_PCIE_3B_PIPE_DIV2_CLK 103
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 104
+#define GCC_PCIE_3B_SLV_AXI_CLK 105
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_4_AUX_CLK 107
+#define GCC_PCIE_4_AUX_CLK_SRC 108
+#define GCC_PCIE_4_CFG_AHB_CLK 109
+#define GCC_PCIE_4_MSTR_AXI_CLK 110
+#define GCC_PCIE_4_PHY_RCHNG_CLK 111
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_4_PIPE_CLK 113
+#define GCC_PCIE_4_PIPE_CLK_SRC 114
+#define GCC_PCIE_4_PIPE_DIV2_CLK 115
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 116
+#define GCC_PCIE_4_SLV_AXI_CLK 117
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 118
+#define GCC_PCIE_5_AUX_CLK 119
+#define GCC_PCIE_5_AUX_CLK_SRC 120
+#define GCC_PCIE_5_CFG_AHB_CLK 121
+#define GCC_PCIE_5_MSTR_AXI_CLK 122
+#define GCC_PCIE_5_PHY_RCHNG_CLK 123
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_5_PIPE_CLK 125
+#define GCC_PCIE_5_PIPE_CLK_SRC 126
+#define GCC_PCIE_5_PIPE_DIV2_CLK 127
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 128
+#define GCC_PCIE_5_SLV_AXI_CLK 129
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 130
+#define GCC_PCIE_6_AUX_CLK 131
+#define GCC_PCIE_6_AUX_CLK_SRC 132
+#define GCC_PCIE_6_CFG_AHB_CLK 133
+#define GCC_PCIE_6_MSTR_AXI_CLK 134
+#define GCC_PCIE_6_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6_PIPE_CLK 137
+#define GCC_PCIE_6_PIPE_CLK_SRC 138
+#define GCC_PCIE_6_PIPE_DIV2_CLK 139
+#define GCC_PCIE_6_PIPE_DIV_CLK_SRC 140
+#define GCC_PCIE_6_SLV_AXI_CLK 141
+#define GCC_PCIE_6_SLV_Q2A_AXI_CLK 142
+#define GCC_PCIE_NOC_PWRCTL_CLK 143
+#define GCC_PCIE_NOC_QOSGEN_EXTREF_CLK 144
+#define GCC_PCIE_NOC_SF_CENTER_CLK 145
+#define GCC_PCIE_NOC_SLAVE_SF_EAST_CLK 146
+#define GCC_PCIE_NOC_SLAVE_SF_WEST_CLK 147
+#define GCC_PCIE_NOC_TSCTR_CLK 148
+#define GCC_PCIE_PHY_3A_AUX_CLK 149
+#define GCC_PCIE_PHY_3A_AUX_CLK_SRC 150
+#define GCC_PCIE_PHY_3B_AUX_CLK 151
+#define GCC_PCIE_PHY_3B_AUX_CLK_SRC 152
+#define GCC_PCIE_PHY_4_AUX_CLK 153
+#define GCC_PCIE_PHY_4_AUX_CLK_SRC 154
+#define GCC_PCIE_PHY_5_AUX_CLK 155
+#define GCC_PCIE_PHY_5_AUX_CLK_SRC 156
+#define GCC_PCIE_PHY_6_AUX_CLK 157
+#define GCC_PCIE_PHY_6_AUX_CLK_SRC 158
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 159
+#define GCC_PCIE_RSCC_XO_CLK 160
+#define GCC_PDM2_CLK 161
+#define GCC_PDM2_CLK_SRC 162
+#define GCC_PDM_AHB_CLK 163
+#define GCC_PDM_XO4_CLK 164
+#define GCC_QMIP_AV1E_AHB_CLK 165
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 166
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 167
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 168
+#define GCC_QMIP_GPU_AHB_CLK 169
+#define GCC_QMIP_PCIE_3A_AHB_CLK 170
+#define GCC_QMIP_PCIE_3B_AHB_CLK 171
+#define GCC_QMIP_PCIE_4_AHB_CLK 172
+#define GCC_QMIP_PCIE_5_AHB_CLK 173
+#define GCC_QMIP_PCIE_6_AHB_CLK 174
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 177
+#define GCC_QMIP_VIDEO_VCODEC1_AHB_CLK 178
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 179
+#define GCC_QUPV3_OOB_CORE_2X_CLK 180
+#define GCC_QUPV3_OOB_CORE_CLK 181
+#define GCC_QUPV3_OOB_M_AHB_CLK 182
+#define GCC_QUPV3_OOB_QSPI_S0_CLK 183
+#define GCC_QUPV3_OOB_QSPI_S0_CLK_SRC 184
+#define GCC_QUPV3_OOB_QSPI_S1_CLK 185
+#define GCC_QUPV3_OOB_QSPI_S1_CLK_SRC 186
+#define GCC_QUPV3_OOB_S0_CLK 187
+#define GCC_QUPV3_OOB_S0_CLK_SRC 188
+#define GCC_QUPV3_OOB_S1_CLK 189
+#define GCC_QUPV3_OOB_S1_CLK_SRC 190
+#define GCC_QUPV3_OOB_S_AHB_CLK 191
+#define GCC_QUPV3_OOB_TCXO_CLK 192
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 193
+#define GCC_QUPV3_WRAP0_CORE_CLK 194
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 195
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC 196
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 197
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC 198
+#define GCC_QUPV3_WRAP0_QSPI_S6_CLK 199
+#define GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC 200
+#define GCC_QUPV3_WRAP0_S0_CLK 201
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP0_S1_CLK 203
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP0_S2_CLK 205
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP0_S3_CLK 207
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP0_S4_CLK 209
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP0_S5_CLK 211
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 212
+#define GCC_QUPV3_WRAP0_S6_CLK 213
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 214
+#define GCC_QUPV3_WRAP0_S7_CLK 215
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 216
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 217
+#define GCC_QUPV3_WRAP1_CORE_CLK 218
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 219
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC 220
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 221
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC 222
+#define GCC_QUPV3_WRAP1_QSPI_S6_CLK 223
+#define GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC 224
+#define GCC_QUPV3_WRAP1_S0_CLK 225
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 226
+#define GCC_QUPV3_WRAP1_S1_CLK 227
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 228
+#define GCC_QUPV3_WRAP1_S2_CLK 229
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 230
+#define GCC_QUPV3_WRAP1_S3_CLK 231
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 232
+#define GCC_QUPV3_WRAP1_S4_CLK 233
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 234
+#define GCC_QUPV3_WRAP1_S5_CLK 235
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 236
+#define GCC_QUPV3_WRAP1_S6_CLK 237
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 238
+#define GCC_QUPV3_WRAP1_S7_CLK 239
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 240
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 241
+#define GCC_QUPV3_WRAP2_CORE_CLK 242
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 243
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC 244
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 245
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC 246
+#define GCC_QUPV3_WRAP2_QSPI_S6_CLK 247
+#define GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC 248
+#define GCC_QUPV3_WRAP2_S0_CLK 249
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 250
+#define GCC_QUPV3_WRAP2_S1_CLK 251
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 252
+#define GCC_QUPV3_WRAP2_S2_CLK 253
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 254
+#define GCC_QUPV3_WRAP2_S3_CLK 255
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 256
+#define GCC_QUPV3_WRAP2_S4_CLK 257
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 258
+#define GCC_QUPV3_WRAP2_S5_CLK 259
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 260
+#define GCC_QUPV3_WRAP2_S6_CLK 261
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 262
+#define GCC_QUPV3_WRAP2_S7_CLK 263
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 264
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 265
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 266
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 267
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 268
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 269
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 270
+#define GCC_SDCC2_AHB_CLK 271
+#define GCC_SDCC2_APPS_CLK 272
+#define GCC_SDCC2_APPS_CLK_SRC 273
+#define GCC_SDCC4_AHB_CLK 274
+#define GCC_SDCC4_APPS_CLK 275
+#define GCC_SDCC4_APPS_CLK_SRC 276
+#define GCC_UFS_PHY_AHB_CLK 277
+#define GCC_UFS_PHY_AXI_CLK 278
+#define GCC_UFS_PHY_AXI_CLK_SRC 279
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 280
+#define GCC_UFS_PHY_ICE_CORE_CLK 281
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 282
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 283
+#define GCC_UFS_PHY_PHY_AUX_CLK 284
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 285
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 286
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 287
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 288
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 289
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 290
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 291
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 292
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 293
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 294
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 295
+#define GCC_USB20_MASTER_CLK 296
+#define GCC_USB20_MASTER_CLK_SRC 297
+#define GCC_USB20_MOCK_UTMI_CLK 298
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 299
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 300
+#define GCC_USB20_SLEEP_CLK 301
+#define GCC_USB30_MP_MASTER_CLK 302
+#define GCC_USB30_MP_MASTER_CLK_SRC 303
+#define GCC_USB30_MP_MOCK_UTMI_CLK 304
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 305
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 306
+#define GCC_USB30_MP_SLEEP_CLK 307
+#define GCC_USB30_PRIM_MASTER_CLK 308
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 309
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 310
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 311
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 312
+#define GCC_USB30_PRIM_SLEEP_CLK 313
+#define GCC_USB30_SEC_MASTER_CLK 314
+#define GCC_USB30_SEC_MASTER_CLK_SRC 315
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 316
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 317
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 318
+#define GCC_USB30_SEC_SLEEP_CLK 319
+#define GCC_USB30_TERT_MASTER_CLK 320
+#define GCC_USB30_TERT_MASTER_CLK_SRC 321
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 322
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 323
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 324
+#define GCC_USB30_TERT_SLEEP_CLK 325
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 326
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 327
+#define GCC_USB34_TERT_PHY_PIPE_CLK_SRC 328
+#define GCC_USB3_MP_PHY_AUX_CLK 329
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 330
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 331
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 332
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 333
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 334
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 335
+#define GCC_USB3_PRIM_PHY_AUX_CLK 336
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 337
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 338
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 339
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 340
+#define GCC_USB3_SEC_PHY_AUX_CLK 341
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 342
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 343
+#define GCC_USB3_SEC_PHY_PIPE_CLK 344
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 345
+#define GCC_USB3_TERT_PHY_AUX_CLK 346
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 347
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 348
+#define GCC_USB3_TERT_PHY_PIPE_CLK 349
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 350
+#define GCC_USB4_0_CFG_AHB_CLK 351
+#define GCC_USB4_0_DP0_CLK 352
+#define GCC_USB4_0_DP1_CLK 353
+#define GCC_USB4_0_MASTER_CLK 354
+#define GCC_USB4_0_MASTER_CLK_SRC 355
+#define GCC_USB4_0_PHY_DP0_CLK_SRC 356
+#define GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC 357
+#define GCC_USB4_0_PHY_DP1_CLK_SRC 358
+#define GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC 359
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 360
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC 361
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 362
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 363
+#define GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC 364
+#define GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC 365
+#define GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC 366
+#define GCC_USB4_0_PHY_RX0_CLK 367
+#define GCC_USB4_0_PHY_RX0_CLK_SRC 368
+#define GCC_USB4_0_PHY_RX1_CLK 369
+#define GCC_USB4_0_PHY_RX1_CLK_SRC 370
+#define GCC_USB4_0_PHY_SYS_CLK_SRC 371
+#define GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC 372
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 373
+#define GCC_USB4_0_SB_IF_CLK 374
+#define GCC_USB4_0_SB_IF_CLK_SRC 375
+#define GCC_USB4_0_SYS_CLK 376
+#define GCC_USB4_0_TMU_CLK 377
+#define GCC_USB4_0_TMU_CLK_SRC 378
+#define GCC_USB4_0_UC_HRR_CLK 379
+#define GCC_USB4_1_CFG_AHB_CLK 380
+#define GCC_USB4_1_DP0_CLK 381
+#define GCC_USB4_1_DP1_CLK 382
+#define GCC_USB4_1_MASTER_CLK 383
+#define GCC_USB4_1_MASTER_CLK_SRC 384
+#define GCC_USB4_1_PHY_DP0_CLK_SRC 385
+#define GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC 386
+#define GCC_USB4_1_PHY_DP1_CLK_SRC 387
+#define GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC 388
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 389
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 390
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 391
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 392
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 393
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 394
+#define GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC 395
+#define GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC 396
+#define GCC_USB4_1_PHY_RX0_CLK 397
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 398
+#define GCC_USB4_1_PHY_RX1_CLK 399
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 400
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 401
+#define GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC 402
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 403
+#define GCC_USB4_1_SB_IF_CLK 404
+#define GCC_USB4_1_SB_IF_CLK_SRC 405
+#define GCC_USB4_1_SYS_CLK 406
+#define GCC_USB4_1_TMU_CLK 407
+#define GCC_USB4_1_TMU_CLK_SRC 408
+#define GCC_USB4_1_UC_HRR_CLK 409
+#define GCC_USB4_2_CFG_AHB_CLK 410
+#define GCC_USB4_2_DP0_CLK 411
+#define GCC_USB4_2_DP1_CLK 412
+#define GCC_USB4_2_MASTER_CLK 413
+#define GCC_USB4_2_MASTER_CLK_SRC 414
+#define GCC_USB4_2_PHY_DP0_CLK_SRC 415
+#define GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC 416
+#define GCC_USB4_2_PHY_DP1_CLK_SRC 417
+#define GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC 418
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 419
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC 420
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 421
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 422
+#define GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC 423
+#define GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC 424
+#define GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC 425
+#define GCC_USB4_2_PHY_RX0_CLK 426
+#define GCC_USB4_2_PHY_RX0_CLK_SRC 427
+#define GCC_USB4_2_PHY_RX1_CLK 428
+#define GCC_USB4_2_PHY_RX1_CLK_SRC 429
+#define GCC_USB4_2_PHY_SYS_CLK_SRC 430
+#define GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC 431
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 432
+#define GCC_USB4_2_SB_IF_CLK 433
+#define GCC_USB4_2_SB_IF_CLK_SRC 434
+#define GCC_USB4_2_SYS_CLK 435
+#define GCC_USB4_2_TMU_CLK 436
+#define GCC_USB4_2_TMU_CLK_SRC 437
+#define GCC_USB4_2_UC_HRR_CLK 438
+#define GCC_VIDEO_AHB_CLK 439
+#define GCC_VIDEO_AXI0_CLK 440
+#define GCC_VIDEO_AXI0C_CLK 441
+#define GCC_VIDEO_AXI1_CLK 442
+#define GCC_VIDEO_XO_CLK 443
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3A_GDSC 3
+#define GCC_PCIE_3A_PHY_GDSC 4
+#define GCC_PCIE_3B_GDSC 5
+#define GCC_PCIE_3B_PHY_GDSC 6
+#define GCC_PCIE_4_GDSC 7
+#define GCC_PCIE_4_PHY_GDSC 8
+#define GCC_PCIE_5_GDSC 9
+#define GCC_PCIE_5_PHY_GDSC 10
+#define GCC_PCIE_6_GDSC 11
+#define GCC_PCIE_6_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_EVA_BCR 3
+#define GCC_GPU_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_0_TUNNEL_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_1_TUNNEL_BCR 14
+#define GCC_PCIE_2_LINK_DOWN_BCR 15
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2_PHY_BCR 17
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 18
+#define GCC_PCIE_2_TUNNEL_BCR 19
+#define GCC_PCIE_3A_BCR 20
+#define GCC_PCIE_3A_LINK_DOWN_BCR 21
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 22
+#define GCC_PCIE_3A_PHY_BCR 23
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3B_BCR 25
+#define GCC_PCIE_3B_LINK_DOWN_BCR 26
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 27
+#define GCC_PCIE_3B_PHY_BCR 28
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_4_BCR 30
+#define GCC_PCIE_4_LINK_DOWN_BCR 31
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 32
+#define GCC_PCIE_4_PHY_BCR 33
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_5_BCR 35
+#define GCC_PCIE_5_LINK_DOWN_BCR 36
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 37
+#define GCC_PCIE_5_PHY_BCR 38
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 39
+#define GCC_PCIE_6_BCR 40
+#define GCC_PCIE_6_LINK_DOWN_BCR 41
+#define GCC_PCIE_6_NOCSR_COM_PHY_BCR 42
+#define GCC_PCIE_6_PHY_BCR 43
+#define GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR 44
+#define GCC_PCIE_NOC_BCR 45
+#define GCC_PCIE_PHY_BCR 46
+#define GCC_PCIE_PHY_CFG_AHB_BCR 47
+#define GCC_PCIE_PHY_COM_BCR 48
+#define GCC_PCIE_RSCC_BCR 49
+#define GCC_PDM_BCR 50
+#define GCC_QUPV3_WRAPPER_0_BCR 51
+#define GCC_QUPV3_WRAPPER_1_BCR 52
+#define GCC_QUPV3_WRAPPER_2_BCR 53
+#define GCC_QUPV3_WRAPPER_OOB_BCR 54
+#define GCC_QUSB2PHY_HS0_MP_BCR 55
+#define GCC_QUSB2PHY_HS1_MP_BCR 56
+#define GCC_QUSB2PHY_PRIM_BCR 57
+#define GCC_QUSB2PHY_SEC_BCR 58
+#define GCC_QUSB2PHY_TERT_BCR 59
+#define GCC_QUSB2PHY_USB20_HS_BCR 60
+#define GCC_SDCC2_BCR 61
+#define GCC_SDCC4_BCR 62
+#define GCC_TCSR_PCIE_BCR 63
+#define GCC_UFS_PHY_BCR 64
+#define GCC_USB20_PRIM_BCR 65
+#define GCC_USB30_MP_BCR 66
+#define GCC_USB30_PRIM_BCR 67
+#define GCC_USB30_SEC_BCR 68
+#define GCC_USB30_TERT_BCR 69
+#define GCC_USB3_MP_SS0_PHY_BCR 70
+#define GCC_USB3_MP_SS1_PHY_BCR 71
+#define GCC_USB3_PHY_PRIM_BCR 72
+#define GCC_USB3_PHY_SEC_BCR 73
+#define GCC_USB3_PHY_TERT_BCR 74
+#define GCC_USB3_UNIPHY_MP0_BCR 75
+#define GCC_USB3_UNIPHY_MP1_BCR 76
+#define GCC_USB3PHY_PHY_PRIM_BCR 77
+#define GCC_USB3PHY_PHY_SEC_BCR 78
+#define GCC_USB3PHY_PHY_TERT_BCR 79
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 80
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 81
+#define GCC_USB4_0_BCR 82
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 83
+#define GCC_USB4_1_BCR 84
+#define GCC_USB4_2_BCR 85
+#define GCC_USB_0_PHY_BCR 86
+#define GCC_USB_1_PHY_BCR 87
+#define GCC_USB_2_PHY_BCR 88
+#define GCC_VIDEO_AXI0_CLK_ARES 89
+#define GCC_VIDEO_AXI1_CLK_ARES 90
+#define GCC_VIDEO_BCR 91
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,glymur-tcsr.h b/include/dt-bindings/clock/qcom,glymur-tcsr.h
new file mode 100644
index 000000000000..72614226b113
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-tcsr.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_GLYMUR_H
+
+/* TCSR_CC clocks */
+#define TCSR_EDP_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_PCIE_2_CLKREF_EN 2
+#define TCSR_PCIE_3_CLKREF_EN 3
+#define TCSR_PCIE_4_CLKREF_EN 4
+#define TCSR_USB2_1_CLKREF_EN 5
+#define TCSR_USB2_2_CLKREF_EN 6
+#define TCSR_USB2_3_CLKREF_EN 7
+#define TCSR_USB2_4_CLKREF_EN 8
+#define TCSR_USB3_0_CLKREF_EN 9
+#define TCSR_USB3_1_CLKREF_EN 10
+#define TCSR_USB4_1_CLKREF_EN 11
+#define TCSR_USB4_2_CLKREF_EN 12
+
+#endif
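The TCSR_CC indices above are consumed from device tree source through the TCSR clock controller's phandle. A minimal DTS sketch, assuming hypothetical tcsrcc and pcie1_phy labels (only the macro name comes from the header above):

#include <dt-bindings/clock/qcom,glymur-tcsr.h>

/* Hypothetical consumer: node labels and clock-names are illustrative. */
&pcie1_phy {
	clocks = <&tcsrcc TCSR_PCIE_1_CLKREF_EN>;
	clock-names = "ref";
};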
diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
new file mode 100644
index 000000000000..936e92b3b62c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ9574. */
+#define XO_24MHZ_CLK 1
+#define SLEEP_32KHZ_CLK 2
+#define PCS_31P25MHZ_CLK 3
+#define NSS_1200MHZ_CLK 4
+#define PPE_353MHZ_CLK 5
+#define ETH0_50MHZ_CLK 6
+#define ETH1_50MHZ_CLK 7
+#define ETH2_50MHZ_CLK 8
+#define ETH_25MHZ_CLK 9
+#endif
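The CMN PLL binding exposes a single provider with several fixed-rate outputs; a consumer selects one output by its index. A sketch under assumed labels (cmn_pll and ethernet0 are illustrative; ETH0_50MHZ_CLK is defined above):

#include <dt-bindings/clock/qcom,ipq-cmn-pll.h>

/* Hypothetical consumer picking the 50 MHz Ethernet reference output. */
&ethernet0 {
	clocks = <&cmn_pll ETH0_50MHZ_CLK>;
	clock-names = "eth-ref";
};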
diff --git a/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
new file mode 100644
index 000000000000..586d1c9b33b3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5018_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5018. */
+#define IPQ5018_XO_24MHZ_CLK 1
+#define IPQ5018_SLEEP_32KHZ_CLK 2
+#define IPQ5018_ETH_50MHZ_CLK 3
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5332-gcc.h b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
index 8a405a0a96d0..da9b507c30bf 100644
--- a/include/dt-bindings/clock/qcom,ipq5332-gcc.h
+++ b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
@@ -96,15 +96,7 @@
#define GCC_PCNOC_BFDCD_CLK_SRC 87
#define GCC_PCNOC_LPASS_CLK 88
#define GCC_PRNG_AHB_CLK 89
-#define GCC_Q6_AHB_CLK 90
-#define GCC_Q6_AHB_S_CLK 91
-#define GCC_Q6_AXIM_CLK 92
#define GCC_Q6_AXIM_CLK_SRC 93
-#define GCC_Q6_AXIS_CLK 94
-#define GCC_Q6_TSCTR_1TO2_CLK 95
-#define GCC_Q6SS_ATBM_CLK 96
-#define GCC_Q6SS_PCLKDBG_CLK 97
-#define GCC_Q6SS_TRIG_CLK 98
#define GCC_QDSS_AT_CLK 99
#define GCC_QDSS_AT_CLK_SRC 100
#define GCC_QDSS_CFG_AHB_CLK 101
@@ -134,7 +126,6 @@
#define GCC_SNOC_PCIE3_2LANE_S_CLK 125
#define GCC_SNOC_USB_CLK 126
#define GCC_SYS_NOC_AT_CLK 127
-#define GCC_SYS_NOC_WCSS_AHB_CLK 128
#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 129
#define GCC_UNIPHY0_AHB_CLK 130
#define GCC_UNIPHY0_SYS_CLK 131
@@ -155,17 +146,6 @@
#define GCC_USB0_PIPE_CLK 146
#define GCC_USB0_SLEEP_CLK 147
#define GCC_WCSS_AHB_CLK_SRC 148
-#define GCC_WCSS_AXIM_CLK 149
-#define GCC_WCSS_AXIS_CLK 150
-#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 151
-#define GCC_WCSS_DBG_IFC_APB_CLK 152
-#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 153
-#define GCC_WCSS_DBG_IFC_ATB_CLK 154
-#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 155
-#define GCC_WCSS_DBG_IFC_NTS_CLK 156
-#define GCC_WCSS_ECAHB_CLK 157
-#define GCC_WCSS_MST_ASYNC_BDG_CLK 158
-#define GCC_WCSS_SLV_ASYNC_BDG_CLK 159
#define GCC_XO_CLK 160
#define GCC_XO_CLK_SRC 161
#define GCC_XO_DIV4_CLK 162
diff --git a/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
new file mode 100644
index 000000000000..f643c2668c04
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5424_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5424. */
+#define IPQ5424_XO_24MHZ_CLK 1
+#define IPQ5424_SLEEP_32KHZ_CLK 2
+#define IPQ5424_PCS_31P25MHZ_CLK 3
+#define IPQ5424_NSS_300MHZ_CLK 4
+#define IPQ5424_PPE_375MHZ_CLK 5
+#define IPQ5424_ETH0_50MHZ_CLK 6
+#define IPQ5424_ETH1_50MHZ_CLK 7
+#define IPQ5424_ETH2_50MHZ_CLK 8
+#define IPQ5424_ETH_25MHZ_CLK 9
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5424-gcc.h b/include/dt-bindings/clock/qcom,ipq5424-gcc.h
new file mode 100644
index 000000000000..3ae33a0fa002
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-gcc.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H
+
+#define GPLL0 0
+#define GPLL4 1
+#define GPLL2 2
+#define GPLL2_OUT_MAIN 3
+#define GCC_SLEEP_CLK_SRC 4
+#define GCC_USB0_EUD_AT_CLK 6
+#define GCC_PCIE0_AXI_M_CLK_SRC 7
+#define GCC_PCIE0_AXI_M_CLK 8
+#define GCC_PCIE1_AXI_M_CLK_SRC 9
+#define GCC_PCIE1_AXI_M_CLK 10
+#define GCC_PCIE2_AXI_M_CLK_SRC 11
+#define GCC_PCIE2_AXI_M_CLK 12
+#define GCC_PCIE3_AXI_M_CLK_SRC 13
+#define GCC_PCIE3_AXI_M_CLK 14
+#define GCC_PCIE0_AXI_S_CLK_SRC 15
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 16
+#define GCC_PCIE0_AXI_S_CLK 17
+#define GCC_PCIE1_AXI_S_CLK_SRC 18
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 19
+#define GCC_PCIE1_AXI_S_CLK 20
+#define GCC_PCIE2_AXI_S_CLK_SRC 21
+#define GCC_PCIE2_AXI_S_BRIDGE_CLK 22
+#define GCC_PCIE2_AXI_S_CLK 23
+#define GCC_PCIE3_AXI_S_CLK_SRC 24
+#define GCC_PCIE3_AXI_S_BRIDGE_CLK 25
+#define GCC_PCIE3_AXI_S_CLK 26
+#define GCC_PCIE0_PIPE_CLK_SRC 27
+#define GCC_PCIE0_PIPE_CLK 28
+#define GCC_PCIE1_PIPE_CLK_SRC 29
+#define GCC_PCIE1_PIPE_CLK 30
+#define GCC_PCIE2_PIPE_CLK_SRC 31
+#define GCC_PCIE2_PIPE_CLK 32
+#define GCC_PCIE3_PIPE_CLK_SRC 33
+#define GCC_PCIE3_PIPE_CLK 34
+#define GCC_PCIE_AUX_CLK_SRC 35
+#define GCC_PCIE0_AUX_CLK 36
+#define GCC_PCIE1_AUX_CLK 37
+#define GCC_PCIE2_AUX_CLK 38
+#define GCC_PCIE3_AUX_CLK 39
+#define GCC_PCIE0_AHB_CLK 40
+#define GCC_PCIE1_AHB_CLK 41
+#define GCC_PCIE2_AHB_CLK 42
+#define GCC_PCIE3_AHB_CLK 43
+#define GCC_USB0_AUX_CLK_SRC 44
+#define GCC_USB0_AUX_CLK 45
+#define GCC_USB0_MASTER_CLK 46
+#define GCC_USB0_MOCK_UTMI_CLK_SRC 47
+#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 48
+#define GCC_USB0_MOCK_UTMI_CLK 49
+#define GCC_USB0_PIPE_CLK_SRC 50
+#define GCC_USB0_PIPE_CLK 51
+#define GCC_USB0_PHY_CFG_AHB_CLK 52
+#define GCC_USB0_SLEEP_CLK 53
+#define GCC_SDCC1_APPS_CLK_SRC 54
+#define GCC_SDCC1_APPS_CLK 55
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 56
+#define GCC_SDCC1_ICE_CORE_CLK 57
+#define GCC_SDCC1_AHB_CLK 58
+#define GCC_PCNOC_BFDCD_CLK_SRC 59
+#define GCC_NSSCFG_CLK 60
+#define GCC_NSSNOC_NSSCC_CLK 61
+#define GCC_NSSCC_CLK 62
+#define GCC_NSSNOC_PCNOC_1_CLK 63
+#define GCC_QPIC_AHB_CLK 64
+#define GCC_QPIC_CLK 65
+#define GCC_MDIO_AHB_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_UNIPHY0_AHB_CLK 68
+#define GCC_UNIPHY1_AHB_CLK 69
+#define GCC_UNIPHY2_AHB_CLK 70
+#define GCC_CMN_12GPLL_AHB_CLK 71
+#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 72
+#define GCC_NSSNOC_SNOC_CLK 73
+#define GCC_NSSNOC_SNOC_1_CLK 74
+#define GCC_WCSS_AHB_CLK_SRC 75
+#define GCC_QDSS_AT_CLK_SRC 76
+#define GCC_NSSNOC_ATB_CLK 77
+#define GCC_QDSS_AT_CLK 78
+#define GCC_QDSS_TSCTR_CLK_SRC 79
+#define GCC_NSS_TS_CLK 80
+#define GCC_QPIC_IO_MACRO_CLK_SRC 81
+#define GCC_QPIC_IO_MACRO_CLK 82
+#define GCC_LPASS_AXIM_CLK_SRC 83
+#define GCC_LPASS_CORE_AXIM_CLK 84
+#define GCC_LPASS_SWAY_CLK_SRC 85
+#define GCC_LPASS_SWAY_CLK 86
+#define GCC_CNOC_LPASS_CFG_CLK 87
+#define GCC_SNOC_LPASS_CLK 88
+#define GCC_ADSS_PWM_CLK_SRC 89
+#define GCC_ADSS_PWM_CLK 90
+#define GCC_XO_CLK_SRC 91
+#define GCC_NSSNOC_XO_DCD_CLK 92
+#define GCC_NSSNOC_QOSGEN_REF_CLK 93
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 94
+#define GCC_UNIPHY0_SYS_CLK 95
+#define GCC_UNIPHY1_SYS_CLK 96
+#define GCC_UNIPHY2_SYS_CLK 97
+#define GCC_CMN_12GPLL_SYS_CLK 98
+#define GCC_UNIPHY_SYS_CLK_SRC 99
+#define GCC_NSS_TS_CLK_SRC 100
+#define GCC_ANOC_PCIE0_1LANE_M_CLK 101
+#define GCC_ANOC_PCIE1_1LANE_M_CLK 102
+#define GCC_ANOC_PCIE2_2LANE_M_CLK 103
+#define GCC_ANOC_PCIE3_2LANE_M_CLK 104
+#define GCC_CNOC_PCIE0_1LANE_S_CLK 105
+#define GCC_CNOC_PCIE1_1LANE_S_CLK 106
+#define GCC_CNOC_PCIE2_2LANE_S_CLK 107
+#define GCC_CNOC_PCIE3_2LANE_S_CLK 108
+#define GCC_CNOC_USB_CLK 109
+#define GCC_CNOC_WCSS_AHB_CLK 110
+#define GCC_QUPV3_AHB_MST_CLK 111
+#define GCC_QUPV3_AHB_SLV_CLK 112
+#define GCC_QUPV3_I2C0_CLK 113
+#define GCC_QUPV3_I2C1_CLK 114
+#define GCC_QUPV3_SPI0_CLK 115
+#define GCC_QUPV3_SPI1_CLK 116
+#define GCC_QUPV3_UART0_CLK 117
+#define GCC_QUPV3_UART1_CLK 118
+#define GCC_QPIC_CLK_SRC 119
+#define GCC_QUPV3_I2C0_CLK_SRC 120
+#define GCC_QUPV3_I2C1_CLK_SRC 121
+#define GCC_QUPV3_I2C0_DIV_CLK_SRC 122
+#define GCC_QUPV3_I2C1_DIV_CLK_SRC 123
+#define GCC_QUPV3_SPI0_CLK_SRC 124
+#define GCC_QUPV3_SPI1_CLK_SRC 125
+#define GCC_QUPV3_UART0_CLK_SRC 126
+#define GCC_QUPV3_UART1_CLK_SRC 127
+#define GCC_USB1_MASTER_CLK 128
+#define GCC_USB1_MOCK_UTMI_CLK_SRC 129
+#define GCC_USB1_MOCK_UTMI_DIV_CLK_SRC 130
+#define GCC_USB1_MOCK_UTMI_CLK 131
+#define GCC_USB1_SLEEP_CLK 132
+#define GCC_USB1_PHY_CFG_AHB_CLK 133
+#define GCC_USB0_MASTER_CLK_SRC 134
+#define GCC_QDSS_DAP_CLK 135
+#define GCC_PCIE0_RCHNG_CLK_SRC 136
+#define GCC_PCIE0_RCHNG_CLK 137
+#define GCC_PCIE1_RCHNG_CLK_SRC 138
+#define GCC_PCIE1_RCHNG_CLK 139
+#define GCC_PCIE2_RCHNG_CLK_SRC 140
+#define GCC_PCIE2_RCHNG_CLK 141
+#define GCC_PCIE3_RCHNG_CLK_SRC 142
+#define GCC_PCIE3_RCHNG_CLK 143
+#define GCC_IM_SLEEP_CLK 144
+#define GCC_XO_CLK 145
+#define GPLL0_OUT_AUX 146
+
+#endif
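As with the other GCC headers, each define is a cell index into the clock controller node. A hedged sketch of a QUP UART consumer (the gcc and uart0 labels and the clock-names value are assumptions):

#include <dt-bindings/clock/qcom,ipq5424-gcc.h>

/* Hypothetical serial consumer; only the macro is taken from the header. */
&uart0 {
	clocks = <&gcc GCC_QUPV3_UART0_CLK>;
	clock-names = "core";
};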
diff --git a/include/dt-bindings/clock/qcom,ipq5424-nsscc.h b/include/dt-bindings/clock/qcom,ipq5424-nsscc.h
new file mode 100644
index 000000000000..eeae0dc38042
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-nsscc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_QCOM_IPQ5424_NSSCC_H
+#define _DT_BINDINGS_CLOCK_QCOM_IPQ5424_NSSCC_H
+
+/* NSS_CC clocks */
+#define NSS_CC_CE_APB_CLK 0
+#define NSS_CC_CE_AXI_CLK 1
+#define NSS_CC_CE_CLK_SRC 2
+#define NSS_CC_CFG_CLK_SRC 3
+#define NSS_CC_DEBUG_CLK 4
+#define NSS_CC_EIP_BFDCD_CLK_SRC 5
+#define NSS_CC_EIP_CLK 6
+#define NSS_CC_NSS_CSR_CLK 7
+#define NSS_CC_NSSNOC_CE_APB_CLK 8
+#define NSS_CC_NSSNOC_CE_AXI_CLK 9
+#define NSS_CC_NSSNOC_EIP_CLK 10
+#define NSS_CC_NSSNOC_NSS_CSR_CLK 11
+#define NSS_CC_NSSNOC_PPE_CFG_CLK 12
+#define NSS_CC_NSSNOC_PPE_CLK 13
+#define NSS_CC_PORT1_MAC_CLK 14
+#define NSS_CC_PORT1_RX_CLK 15
+#define NSS_CC_PORT1_RX_CLK_SRC 16
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC 17
+#define NSS_CC_PORT1_TX_CLK 18
+#define NSS_CC_PORT1_TX_CLK_SRC 19
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC 20
+#define NSS_CC_PORT2_MAC_CLK 21
+#define NSS_CC_PORT2_RX_CLK 22
+#define NSS_CC_PORT2_RX_CLK_SRC 23
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC 24
+#define NSS_CC_PORT2_TX_CLK 25
+#define NSS_CC_PORT2_TX_CLK_SRC 26
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC 27
+#define NSS_CC_PORT3_MAC_CLK 28
+#define NSS_CC_PORT3_RX_CLK 29
+#define NSS_CC_PORT3_RX_CLK_SRC 30
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC 31
+#define NSS_CC_PORT3_TX_CLK 32
+#define NSS_CC_PORT3_TX_CLK_SRC 33
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC 34
+#define NSS_CC_PPE_CLK_SRC 35
+#define NSS_CC_PPE_EDMA_CFG_CLK 36
+#define NSS_CC_PPE_EDMA_CLK 37
+#define NSS_CC_PPE_SWITCH_BTQ_CLK 38
+#define NSS_CC_PPE_SWITCH_CFG_CLK 39
+#define NSS_CC_PPE_SWITCH_CLK 40
+#define NSS_CC_PPE_SWITCH_IPE_CLK 41
+#define NSS_CC_UNIPHY_PORT1_RX_CLK 42
+#define NSS_CC_UNIPHY_PORT1_TX_CLK 43
+#define NSS_CC_UNIPHY_PORT2_RX_CLK 44
+#define NSS_CC_UNIPHY_PORT2_TX_CLK 45
+#define NSS_CC_UNIPHY_PORT3_RX_CLK 46
+#define NSS_CC_UNIPHY_PORT3_TX_CLK 47
+#define NSS_CC_XGMAC0_PTP_REF_CLK 48
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC 49
+#define NSS_CC_XGMAC1_PTP_REF_CLK 50
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC 51
+#define NSS_CC_XGMAC2_PTP_REF_CLK 52
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC 53
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
index 08fd3a37acaa..0e7c319897f3 100644
--- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
@@ -132,16 +132,8 @@
#define GCC_NSSNOC_SNOC_1_CLK 123
#define GCC_QDSS_ETR_USB_CLK 124
#define WCSS_AHB_CLK_SRC 125
-#define GCC_Q6_AHB_CLK 126
-#define GCC_Q6_AHB_S_CLK 127
-#define GCC_WCSS_ECAHB_CLK 128
-#define GCC_WCSS_ACMT_CLK 129
-#define GCC_SYS_NOC_WCSS_AHB_CLK 130
#define WCSS_AXI_M_CLK_SRC 131
-#define GCC_ANOC_WCSS_AXI_M_CLK 132
#define QDSS_AT_CLK_SRC 133
-#define GCC_Q6SS_ATBM_CLK 134
-#define GCC_WCSS_DBG_IFC_ATB_CLK 135
#define GCC_NSSNOC_ATB_CLK 136
#define GCC_QDSS_AT_CLK 137
#define GCC_SYS_NOC_AT_CLK 138
@@ -154,27 +146,18 @@
#define QDSS_TRACECLKIN_CLK_SRC 145
#define GCC_QDSS_TRACECLKIN_CLK 146
#define QDSS_TSCTR_CLK_SRC 147
-#define GCC_Q6_TSCTR_1TO2_CLK 148
-#define GCC_WCSS_DBG_IFC_NTS_CLK 149
#define GCC_QDSS_TSCTR_DIV2_CLK 150
#define GCC_QDSS_TS_CLK 151
#define GCC_QDSS_TSCTR_DIV4_CLK 152
#define GCC_NSS_TS_CLK 153
#define GCC_QDSS_TSCTR_DIV8_CLK 154
#define GCC_QDSS_TSCTR_DIV16_CLK 155
-#define GCC_Q6SS_PCLKDBG_CLK 156
-#define GCC_Q6SS_TRIG_CLK 157
-#define GCC_WCSS_DBG_IFC_APB_CLK 158
-#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 159
#define GCC_QDSS_DAP_CLK 160
#define GCC_QDSS_APB2JTAG_CLK 161
#define GCC_QDSS_TSCTR_DIV3_CLK 162
#define QPIC_IO_MACRO_CLK_SRC 163
#define GCC_QPIC_IO_MACRO_CLK 164
#define Q6_AXI_CLK_SRC 165
-#define GCC_Q6_AXIM_CLK 166
-#define GCC_WCSS_Q6_TBU_CLK 167
-#define GCC_MEM_NOC_Q6_AXI_CLK 168
#define Q6_AXIM2_CLK_SRC 169
#define NSSNOC_MEMNOC_BFDCD_CLK_SRC 170
#define GCC_NSSNOC_MEMNOC_CLK 171
@@ -199,7 +182,6 @@
#define GCC_UNIPHY2_SYS_CLK 190
#define GCC_CMN_12GPLL_SYS_CLK 191
#define GCC_NSSNOC_XO_DCD_CLK 192
-#define GCC_Q6SS_BOOT_CLK 193
#define UNIPHY_SYS_CLK_SRC 194
#define NSS_TS_CLK_SRC 195
#define GCC_ANOC_PCIE0_1LANE_M_CLK 196
@@ -216,4 +198,9 @@
#define GCC_CRYPTO_AHB_CLK 207
#define GCC_USB0_PIPE_CLK 208
#define GCC_USB0_SLEEP_CLK 209
+#define GCC_PCIE0_PIPE_CLK 210
+#define GCC_PCIE1_PIPE_CLK 211
+#define GCC_PCIE2_PIPE_CLK 212
+#define GCC_PCIE3_PIPE_CLK 213
+#define GPLL0_OUT_AUX 214
#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-nsscc.h b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
new file mode 100644
index 000000000000..21a16dc0e64c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, 2025 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+
+#define NSS_CC_CE_APB_CLK 0
+#define NSS_CC_CE_AXI_CLK 1
+#define NSS_CC_CE_CLK_SRC 2
+#define NSS_CC_CFG_CLK_SRC 3
+#define NSS_CC_CLC_AXI_CLK 4
+#define NSS_CC_CLC_CLK_SRC 5
+#define NSS_CC_CRYPTO_CLK 6
+#define NSS_CC_CRYPTO_CLK_SRC 7
+#define NSS_CC_CRYPTO_PPE_CLK 8
+#define NSS_CC_HAQ_AHB_CLK 9
+#define NSS_CC_HAQ_AXI_CLK 10
+#define NSS_CC_HAQ_CLK_SRC 11
+#define NSS_CC_IMEM_AHB_CLK 12
+#define NSS_CC_IMEM_CLK_SRC 13
+#define NSS_CC_IMEM_QSB_CLK 14
+#define NSS_CC_INT_CFG_CLK_SRC 15
+#define NSS_CC_NSS_CSR_CLK 16
+#define NSS_CC_NSSNOC_CE_APB_CLK 17
+#define NSS_CC_NSSNOC_CE_AXI_CLK 18
+#define NSS_CC_NSSNOC_CLC_AXI_CLK 19
+#define NSS_CC_NSSNOC_CRYPTO_CLK 20
+#define NSS_CC_NSSNOC_HAQ_AHB_CLK 21
+#define NSS_CC_NSSNOC_HAQ_AXI_CLK 22
+#define NSS_CC_NSSNOC_IMEM_AHB_CLK 23
+#define NSS_CC_NSSNOC_IMEM_QSB_CLK 24
+#define NSS_CC_NSSNOC_NSS_CSR_CLK 25
+#define NSS_CC_NSSNOC_PPE_CFG_CLK 26
+#define NSS_CC_NSSNOC_PPE_CLK 27
+#define NSS_CC_NSSNOC_UBI32_AHB0_CLK 28
+#define NSS_CC_NSSNOC_UBI32_AXI0_CLK 29
+#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK 30
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK 31
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK 32
+#define NSS_CC_PORT1_MAC_CLK 33
+#define NSS_CC_PORT1_RX_CLK 34
+#define NSS_CC_PORT1_RX_CLK_SRC 35
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC 36
+#define NSS_CC_PORT1_TX_CLK 37
+#define NSS_CC_PORT1_TX_CLK_SRC 38
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC 39
+#define NSS_CC_PORT2_MAC_CLK 40
+#define NSS_CC_PORT2_RX_CLK 41
+#define NSS_CC_PORT2_RX_CLK_SRC 42
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC 43
+#define NSS_CC_PORT2_TX_CLK 44
+#define NSS_CC_PORT2_TX_CLK_SRC 45
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC 46
+#define NSS_CC_PORT3_MAC_CLK 47
+#define NSS_CC_PORT3_RX_CLK 48
+#define NSS_CC_PORT3_RX_CLK_SRC 49
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC 50
+#define NSS_CC_PORT3_TX_CLK 51
+#define NSS_CC_PORT3_TX_CLK_SRC 52
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC 53
+#define NSS_CC_PORT4_MAC_CLK 54
+#define NSS_CC_PORT4_RX_CLK 55
+#define NSS_CC_PORT4_RX_CLK_SRC 56
+#define NSS_CC_PORT4_RX_DIV_CLK_SRC 57
+#define NSS_CC_PORT4_TX_CLK 58
+#define NSS_CC_PORT4_TX_CLK_SRC 59
+#define NSS_CC_PORT4_TX_DIV_CLK_SRC 60
+#define NSS_CC_PORT5_MAC_CLK 61
+#define NSS_CC_PORT5_RX_CLK 62
+#define NSS_CC_PORT5_RX_CLK_SRC 63
+#define NSS_CC_PORT5_RX_DIV_CLK_SRC 64
+#define NSS_CC_PORT5_TX_CLK 65
+#define NSS_CC_PORT5_TX_CLK_SRC 66
+#define NSS_CC_PORT5_TX_DIV_CLK_SRC 67
+#define NSS_CC_PORT6_MAC_CLK 68
+#define NSS_CC_PORT6_RX_CLK 69
+#define NSS_CC_PORT6_RX_CLK_SRC 70
+#define NSS_CC_PORT6_RX_DIV_CLK_SRC 71
+#define NSS_CC_PORT6_TX_CLK 72
+#define NSS_CC_PORT6_TX_CLK_SRC 73
+#define NSS_CC_PORT6_TX_DIV_CLK_SRC 74
+#define NSS_CC_PPE_CLK_SRC 75
+#define NSS_CC_PPE_EDMA_CFG_CLK 76
+#define NSS_CC_PPE_EDMA_CLK 77
+#define NSS_CC_PPE_SWITCH_BTQ_CLK 78
+#define NSS_CC_PPE_SWITCH_CFG_CLK 79
+#define NSS_CC_PPE_SWITCH_CLK 80
+#define NSS_CC_PPE_SWITCH_IPE_CLK 81
+#define NSS_CC_UBI0_CLK_SRC 82
+#define NSS_CC_UBI0_DIV_CLK_SRC 83
+#define NSS_CC_UBI1_CLK_SRC 84
+#define NSS_CC_UBI1_DIV_CLK_SRC 85
+#define NSS_CC_UBI2_CLK_SRC 86
+#define NSS_CC_UBI2_DIV_CLK_SRC 87
+#define NSS_CC_UBI32_AHB0_CLK 88
+#define NSS_CC_UBI32_AHB1_CLK 89
+#define NSS_CC_UBI32_AHB2_CLK 90
+#define NSS_CC_UBI32_AHB3_CLK 91
+#define NSS_CC_UBI32_AXI0_CLK 92
+#define NSS_CC_UBI32_AXI1_CLK 93
+#define NSS_CC_UBI32_AXI2_CLK 94
+#define NSS_CC_UBI32_AXI3_CLK 95
+#define NSS_CC_UBI32_CORE0_CLK 96
+#define NSS_CC_UBI32_CORE1_CLK 97
+#define NSS_CC_UBI32_CORE2_CLK 98
+#define NSS_CC_UBI32_CORE3_CLK 99
+#define NSS_CC_UBI32_INTR0_AHB_CLK 100
+#define NSS_CC_UBI32_INTR1_AHB_CLK 101
+#define NSS_CC_UBI32_INTR2_AHB_CLK 102
+#define NSS_CC_UBI32_INTR3_AHB_CLK 103
+#define NSS_CC_UBI32_NC_AXI0_CLK 104
+#define NSS_CC_UBI32_NC_AXI1_CLK 105
+#define NSS_CC_UBI32_NC_AXI2_CLK 106
+#define NSS_CC_UBI32_NC_AXI3_CLK 107
+#define NSS_CC_UBI32_UTCM0_CLK 108
+#define NSS_CC_UBI32_UTCM1_CLK 109
+#define NSS_CC_UBI32_UTCM2_CLK 110
+#define NSS_CC_UBI32_UTCM3_CLK 111
+#define NSS_CC_UBI3_CLK_SRC 112
+#define NSS_CC_UBI3_DIV_CLK_SRC 113
+#define NSS_CC_UBI_AXI_CLK_SRC 114
+#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC 115
+#define NSS_CC_UNIPHY_PORT1_RX_CLK 116
+#define NSS_CC_UNIPHY_PORT1_TX_CLK 117
+#define NSS_CC_UNIPHY_PORT2_RX_CLK 118
+#define NSS_CC_UNIPHY_PORT2_TX_CLK 119
+#define NSS_CC_UNIPHY_PORT3_RX_CLK 120
+#define NSS_CC_UNIPHY_PORT3_TX_CLK 121
+#define NSS_CC_UNIPHY_PORT4_RX_CLK 122
+#define NSS_CC_UNIPHY_PORT4_TX_CLK 123
+#define NSS_CC_UNIPHY_PORT5_RX_CLK 124
+#define NSS_CC_UNIPHY_PORT5_TX_CLK 125
+#define NSS_CC_UNIPHY_PORT6_RX_CLK 126
+#define NSS_CC_UNIPHY_PORT6_TX_CLK 127
+#define NSS_CC_XGMAC0_PTP_REF_CLK 128
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC 129
+#define NSS_CC_XGMAC1_PTP_REF_CLK 130
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC 131
+#define NSS_CC_XGMAC2_PTP_REF_CLK 132
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC 133
+#define NSS_CC_XGMAC3_PTP_REF_CLK 134
+#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC 135
+#define NSS_CC_XGMAC4_PTP_REF_CLK 136
+#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC 137
+#define NSS_CC_XGMAC5_PTP_REF_CLK 138
+#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC 139
+#define UBI32_PLL 140
+#define UBI32_PLL_MAIN 141
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,kaanapali-gcc.h b/include/dt-bindings/clock/qcom,kaanapali-gcc.h
new file mode 100644
index 000000000000..890e48709f09
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,kaanapali-gcc.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_KAANAPALI_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_KAANAPALI_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 2
+#define GCC_BOOT_ROM_AHB_CLK 3
+#define GCC_CAM_BIST_MCLK_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 12
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 13
+#define GCC_DISP_HF_AXI_CLK 14
+#define GCC_DISP_SF_AXI_CLK 15
+#define GCC_EVA_AHB_CLK 16
+#define GCC_EVA_AXI0_CLK 17
+#define GCC_EVA_AXI0C_CLK 18
+#define GCC_EVA_XO_CLK 19
+#define GCC_GP1_CLK 20
+#define GCC_GP1_CLK_SRC 21
+#define GCC_GP2_CLK 22
+#define GCC_GP2_CLK_SRC 23
+#define GCC_GP3_CLK 24
+#define GCC_GP3_CLK_SRC 25
+#define GCC_GPLL0 26
+#define GCC_GPLL0_OUT_EVEN 27
+#define GCC_GPLL1 28
+#define GCC_GPLL4 29
+#define GCC_GPLL7 30
+#define GCC_GPLL9 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GEMNOC_GFX_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 36
+#define GCC_QMIP_GPU_AHB_CLK 37
+#define GCC_PCIE_0_AUX_CLK 38
+#define GCC_PCIE_0_AUX_CLK_SRC 39
+#define GCC_PCIE_0_CFG_AHB_CLK 40
+#define GCC_PCIE_0_MSTR_AXI_CLK 41
+#define GCC_PCIE_0_PHY_AUX_CLK 42
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 43
+#define GCC_PCIE_0_PHY_RCHNG_CLK 44
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_PIPE_CLK_SRC 47
+#define GCC_PCIE_0_SLV_AXI_CLK 48
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 49
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 50
+#define GCC_PCIE_RSCC_XO_CLK 51
+#define GCC_PDM2_CLK 52
+#define GCC_PDM2_CLK_SRC 53
+#define GCC_PDM_AHB_CLK 54
+#define GCC_PDM_XO4_CLK 55
+#define GCC_QUPV3_I2C_CORE_CLK 56
+#define GCC_QUPV3_I2C_S0_CLK 57
+#define GCC_QUPV3_I2C_S0_CLK_SRC 58
+#define GCC_QUPV3_I2C_S1_CLK 59
+#define GCC_QUPV3_I2C_S1_CLK_SRC 60
+#define GCC_QUPV3_I2C_S2_CLK 61
+#define GCC_QUPV3_I2C_S2_CLK_SRC 62
+#define GCC_QUPV3_I2C_S3_CLK 63
+#define GCC_QUPV3_I2C_S3_CLK_SRC 64
+#define GCC_QUPV3_I2C_S4_CLK 65
+#define GCC_QUPV3_I2C_S4_CLK_SRC 66
+#define GCC_QUPV3_I2C_S_AHB_CLK 67
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP1_CORE_CLK 69
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 70
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 71
+#define GCC_QUPV3_WRAP1_S0_CLK 72
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 73
+#define GCC_QUPV3_WRAP1_S1_CLK 74
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S2_CLK 76
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S3_CLK 78
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S4_CLK 80
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S5_CLK 82
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S6_CLK 84
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S7_CLK 86
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 87
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 88
+#define GCC_QUPV3_WRAP2_CORE_CLK 89
+#define GCC_QUPV3_WRAP2_S0_CLK 90
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 91
+#define GCC_QUPV3_WRAP2_S1_CLK 92
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 93
+#define GCC_QUPV3_WRAP2_S2_CLK 94
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 95
+#define GCC_QUPV3_WRAP2_S3_CLK 96
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 97
+#define GCC_QUPV3_WRAP2_S4_CLK 98
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 99
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 100
+#define GCC_QUPV3_WRAP3_CORE_CLK 101
+#define GCC_QUPV3_WRAP3_IBI_CTRL_0_CLK_SRC 102
+#define GCC_QUPV3_WRAP3_IBI_CTRL_1_CLK 103
+#define GCC_QUPV3_WRAP3_IBI_CTRL_2_CLK 104
+#define GCC_QUPV3_WRAP3_S0_CLK 105
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 106
+#define GCC_QUPV3_WRAP3_S1_CLK 107
+#define GCC_QUPV3_WRAP3_S1_CLK_SRC 108
+#define GCC_QUPV3_WRAP3_S2_CLK 109
+#define GCC_QUPV3_WRAP3_S2_CLK_SRC 110
+#define GCC_QUPV3_WRAP3_S3_CLK 111
+#define GCC_QUPV3_WRAP3_S3_CLK_SRC 112
+#define GCC_QUPV3_WRAP3_S4_CLK 113
+#define GCC_QUPV3_WRAP3_S4_CLK_SRC 114
+#define GCC_QUPV3_WRAP3_S5_CLK 115
+#define GCC_QUPV3_WRAP3_S5_CLK_SRC 116
+#define GCC_QUPV3_WRAP4_CORE_2X_CLK 117
+#define GCC_QUPV3_WRAP4_CORE_CLK 118
+#define GCC_QUPV3_WRAP4_S0_CLK 119
+#define GCC_QUPV3_WRAP4_S0_CLK_SRC 120
+#define GCC_QUPV3_WRAP4_S1_CLK 121
+#define GCC_QUPV3_WRAP4_S1_CLK_SRC 122
+#define GCC_QUPV3_WRAP4_S2_CLK 123
+#define GCC_QUPV3_WRAP4_S2_CLK_SRC 124
+#define GCC_QUPV3_WRAP4_S3_CLK 125
+#define GCC_QUPV3_WRAP4_S3_CLK_SRC 126
+#define GCC_QUPV3_WRAP4_S4_CLK 127
+#define GCC_QUPV3_WRAP4_S4_CLK_SRC 128
+#define GCC_QUPV3_WRAP_1_M_AXI_CLK 129
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 130
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 131
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 132
+#define GCC_QUPV3_WRAP_3_IBI_1_AHB_CLK 133
+#define GCC_QUPV3_WRAP_3_IBI_2_AHB_CLK 134
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 136
+#define GCC_QUPV3_WRAP_4_M_AHB_CLK 137
+#define GCC_QUPV3_WRAP_4_S_AHB_CLK 138
+#define GCC_SDCC2_AHB_CLK 139
+#define GCC_SDCC2_APPS_CLK 140
+#define GCC_SDCC2_APPS_CLK_SRC 141
+#define GCC_SDCC4_AHB_CLK 142
+#define GCC_SDCC4_APPS_CLK 143
+#define GCC_SDCC4_APPS_CLK_SRC 144
+#define GCC_UFS_PHY_AHB_CLK 145
+#define GCC_UFS_PHY_AXI_CLK 146
+#define GCC_UFS_PHY_AXI_CLK_SRC 147
+#define GCC_UFS_PHY_ICE_CORE_CLK 148
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 149
+#define GCC_UFS_PHY_PHY_AUX_CLK 150
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 151
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 152
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 153
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 154
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 155
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 156
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 157
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 158
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 159
+#define GCC_USB30_PRIM_MASTER_CLK 160
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 161
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 162
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 163
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 164
+#define GCC_USB30_PRIM_SLEEP_CLK 165
+#define GCC_USB3_PRIM_PHY_AUX_CLK 166
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 167
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 168
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 169
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 170
+#define GCC_VIDEO_AHB_CLK 171
+#define GCC_VIDEO_AXI0_CLK 172
+#define GCC_VIDEO_AXI1_CLK 173
+#define GCC_VIDEO_XO_CLK 174
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 175
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 176
+#define GCC_QMIP_DISP_DCP_SF_AHB_CLK 177
+#define GCC_QMIP_PCIE_AHB_CLK 178
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 179
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 180
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 181
+#define GCC_DISP_AHB_CLK 182
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_0_PHY_GDSC 1
+#define GCC_UFS_MEM_PHY_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB30_PRIM_GDSC 4
+#define GCC_USB3_PHY_GDSC 5
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_EVA_AXI0_CLK_ARES 2
+#define GCC_EVA_AXI0C_CLK_ARES 3
+#define GCC_EVA_BCR 4
+#define GCC_GPU_BCR 5
+#define GCC_PCIE_0_BCR 6
+#define GCC_PCIE_0_LINK_DOWN_BCR 7
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_0_PHY_BCR 9
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_PHY_BCR 11
+#define GCC_PCIE_PHY_CFG_AHB_BCR 12
+#define GCC_PCIE_PHY_COM_BCR 13
+#define GCC_PCIE_RSCC_BCR 14
+#define GCC_PDM_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUPV3_WRAPPER_2_BCR 17
+#define GCC_QUPV3_WRAPPER_3_BCR 18
+#define GCC_QUPV3_WRAPPER_4_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+#define GCC_VIDEO_XO_CLK_ARES 36
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-camcc.h b/include/dt-bindings/clock/qcom,milos-camcc.h
new file mode 100644
index 000000000000..21925dca9a20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-camcc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_EVEN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_PLL5 11
+#define CAM_CC_PLL5_OUT_EVEN 12
+#define CAM_CC_PLL6 13
+#define CAM_CC_PLL6_OUT_EVEN 14
+#define CAM_CC_BPS_AHB_CLK 15
+#define CAM_CC_BPS_AREG_CLK 16
+#define CAM_CC_BPS_CLK 17
+#define CAM_CC_BPS_CLK_SRC 18
+#define CAM_CC_CAMNOC_ATB_CLK 19
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 20
+#define CAM_CC_CAMNOC_AXI_HF_CLK 21
+#define CAM_CC_CAMNOC_AXI_SF_CLK 22
+#define CAM_CC_CAMNOC_NRT_AXI_CLK 23
+#define CAM_CC_CAMNOC_RT_AXI_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CRE_AHB_CLK 32
+#define CAM_CC_CRE_CLK 33
+#define CAM_CC_CRE_CLK_SRC 34
+#define CAM_CC_CSI0PHYTIMER_CLK 35
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI1PHYTIMER_CLK 37
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI2PHYTIMER_CLK 39
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI3PHYTIMER_CLK 41
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSIPHY0_CLK 43
+#define CAM_CC_CSIPHY1_CLK 44
+#define CAM_CC_CSIPHY2_CLK 45
+#define CAM_CC_CSIPHY3_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_ATB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_ICP_CTI_CLK 52
+#define CAM_CC_ICP_TS_CLK 53
+#define CAM_CC_MCLK0_CLK 54
+#define CAM_CC_MCLK0_CLK_SRC 55
+#define CAM_CC_MCLK1_CLK 56
+#define CAM_CC_MCLK1_CLK_SRC 57
+#define CAM_CC_MCLK2_CLK 58
+#define CAM_CC_MCLK2_CLK_SRC 59
+#define CAM_CC_MCLK3_CLK 60
+#define CAM_CC_MCLK3_CLK_SRC 61
+#define CAM_CC_MCLK4_CLK 62
+#define CAM_CC_MCLK4_CLK_SRC 63
+#define CAM_CC_OPE_0_AHB_CLK 64
+#define CAM_CC_OPE_0_AREG_CLK 65
+#define CAM_CC_OPE_0_CLK 66
+#define CAM_CC_OPE_0_CLK_SRC 67
+#define CAM_CC_SLEEP_CLK 68
+#define CAM_CC_SLEEP_CLK_SRC 69
+#define CAM_CC_SLOW_AHB_CLK_SRC 70
+#define CAM_CC_SOC_AHB_CLK 71
+#define CAM_CC_SYS_TMR_CLK 72
+#define CAM_CC_TFE_0_AHB_CLK 73
+#define CAM_CC_TFE_0_CLK 74
+#define CAM_CC_TFE_0_CLK_SRC 75
+#define CAM_CC_TFE_0_CPHY_RX_CLK 76
+#define CAM_CC_TFE_0_CSID_CLK 77
+#define CAM_CC_TFE_0_CSID_CLK_SRC 78
+#define CAM_CC_TFE_1_AHB_CLK 79
+#define CAM_CC_TFE_1_CLK 80
+#define CAM_CC_TFE_1_CLK_SRC 81
+#define CAM_CC_TFE_1_CPHY_RX_CLK 82
+#define CAM_CC_TFE_1_CSID_CLK 83
+#define CAM_CC_TFE_1_CSID_CLK_SRC 84
+#define CAM_CC_TFE_2_AHB_CLK 85
+#define CAM_CC_TFE_2_CLK 86
+#define CAM_CC_TFE_2_CLK_SRC 87
+#define CAM_CC_TFE_2_CPHY_RX_CLK 88
+#define CAM_CC_TFE_2_CSID_CLK 89
+#define CAM_CC_TFE_2_CSID_CLK_SRC 90
+#define CAM_CC_TOP_SHIFT_CLK 91
+#define CAM_CC_XO_CLK_SRC 92
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_CSI3PHY_BCR 10
+#define CAM_CC_ICP_BCR 11
+#define CAM_CC_MCLK0_BCR 12
+#define CAM_CC_MCLK1_BCR 13
+#define CAM_CC_MCLK2_BCR 14
+#define CAM_CC_MCLK3_BCR 15
+#define CAM_CC_MCLK4_BCR 16
+#define CAM_CC_OPE_0_BCR 17
+#define CAM_CC_TFE_0_BCR 18
+#define CAM_CC_TFE_1_BCR 19
+#define CAM_CC_TFE_2_BCR 20
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-dispcc.h b/include/dt-bindings/clock/qcom,milos-dispcc.h
new file mode 100644
index 000000000000..c70f23f32f0a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-dispcc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_ACCU_CLK 1
+#define DISP_CC_MDSS_AHB1_CLK 2
+#define DISP_CC_MDSS_AHB_CLK 3
+#define DISP_CC_MDSS_AHB_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_CLK 5
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 8
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 9
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 10
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 11
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 12
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 16
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_ESC0_CLK 21
+#define DISP_CC_MDSS_ESC0_CLK_SRC 22
+#define DISP_CC_MDSS_MDP1_CLK 23
+#define DISP_CC_MDSS_MDP_CLK 24
+#define DISP_CC_MDSS_MDP_CLK_SRC 25
+#define DISP_CC_MDSS_MDP_LUT1_CLK 26
+#define DISP_CC_MDSS_MDP_LUT_CLK 27
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 28
+#define DISP_CC_MDSS_PCLK0_CLK 29
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 30
+#define DISP_CC_MDSS_RSCC_AHB_CLK 31
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 32
+#define DISP_CC_MDSS_VSYNC1_CLK 33
+#define DISP_CC_MDSS_VSYNC_CLK 34
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 35
+#define DISP_CC_SLEEP_CLK 36
+#define DISP_CC_SLEEP_CLK_SRC 37
+#define DISP_CC_XO_CLK 38
+#define DISP_CC_XO_CLK_SRC 39
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gcc.h b/include/dt-bindings/clock/qcom,milos-gcc.h
new file mode 100644
index 000000000000..a530ca39e1ef
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gcc.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL6 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_BOOT_ROM_AHB_CLK 11
+#define GCC_CAMERA_AHB_CLK 12
+#define GCC_CAMERA_HF_AXI_CLK 13
+#define GCC_CAMERA_HF_XO_CLK 14
+#define GCC_CAMERA_SF_AXI_CLK 15
+#define GCC_CAMERA_SF_XO_CLK 16
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18
+#define GCC_CNOC_PCIE_SF_AXI_CLK 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_PIPE_DIV2_CLK 45
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_MSTR_AXI_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_PIPE_DIV2_CLK 57
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 58
+#define GCC_PCIE_1_SLV_AXI_CLK 59
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 60
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 61
+#define GCC_PCIE_RSCC_XO_CLK 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 67
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_GPU_AHB_CLK 70
+#define GCC_QMIP_PCIE_AHB_CLK 71
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 74
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 76
+#define GCC_QUPV3_WRAP0_CORE_CLK 77
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK 78
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S0_CLK 80
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S1_CLK 82
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S2_CLK 84
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP0_S3_CLK 86
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S4_CLK 88
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S5_CLK 90
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S6_CLK 92
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 94
+#define GCC_QUPV3_WRAP1_CORE_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 96
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S0_CLK 98
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S1_CLK 100
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S2_CLK 102
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 103
+#define GCC_QUPV3_WRAP1_S3_CLK 104
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S4_CLK 106
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S5_CLK 108
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S6_CLK 110
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 111
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 112
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 113
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 114
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 115
+#define GCC_SDCC1_AHB_CLK 116
+#define GCC_SDCC1_APPS_CLK 117
+#define GCC_SDCC1_APPS_CLK_SRC 118
+#define GCC_SDCC1_ICE_CORE_CLK 119
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 120
+#define GCC_SDCC2_AHB_CLK 121
+#define GCC_SDCC2_APPS_CLK 122
+#define GCC_SDCC2_APPS_CLK_SRC 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK 131
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 132
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 134
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 141
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 142
+#define GCC_USB30_PRIM_ATB_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 148
+#define GCC_USB30_PRIM_SLEEP_CLK 149
+#define GCC_USB3_PRIM_PHY_AUX_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 151
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 154
+#define GCC_VIDEO_AHB_CLK 155
+#define GCC_VIDEO_AXI0_CLK 156
+#define GCC_VIDEO_XO_CLK 157
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_RSCC_BCR 13
+#define GCC_PDM_BCR 14
+#define GCC_QUPV3_WRAPPER_0_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC1_BCR 19
+#define GCC_SDCC2_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_PRIM_BCR 25
+#define GCC_VIDEO_AXI0_CLK_ARES 26
+#define GCC_VIDEO_BCR 27
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
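This header carries three ID spaces (clocks, resets, and GDSC power domains), all resolved against the same GCC node. A sketch wiring all three for a USB controller (node and labels are illustrative assumptions):

#include <dt-bindings/clock/qcom,milos-gcc.h>

/* Hypothetical USB consumer: clock, reset and power-domain cells from one provider. */
&usb_dwc3 {
	clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>;
	clock-names = "core";
	resets = <&gcc GCC_USB30_PRIM_BCR>;
	power-domains = <&gcc USB30_PRIM_GDSC>;
};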
diff --git a/include/dt-bindings/clock/qcom,milos-gpucc.h b/include/dt-bindings/clock/qcom,milos-gpucc.h
new file mode 100644
index 000000000000..6ff1925d409f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gpucc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_EVEN 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CX_ACCU_SHIFT_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_DEMET_CLK 9
+#define GPU_CC_DEMET_DIV_CLK_SRC 10
+#define GPU_CC_DPM_CLK 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_ACCU_SHIFT_CLK 15
+#define GPU_CC_GX_ACD_AHB_FF_CLK 16
+#define GPU_CC_GX_AHB_FF_CLK 17
+#define GPU_CC_GX_GMU_CLK 18
+#define GPU_CC_GX_RCG_AHB_FF_CLK 19
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 20
+#define GPU_CC_HUB_AON_CLK 21
+#define GPU_CC_HUB_CLK_SRC 22
+#define GPU_CC_HUB_CX_INT_CLK 23
+#define GPU_CC_HUB_DIV_CLK_SRC 24
+#define GPU_CC_MEMNOC_GFX_CLK 25
+#define GPU_CC_RSCC_HUB_AON_CLK 26
+#define GPU_CC_RSCC_XO_AON_CLK 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_XO_CLK_SRC 29
+#define GPU_CC_XO_DIV_CLK_SRC 30
+
+/* GPU_CC resets */
+#define GPU_CC_CB_BCR 0
+#define GPU_CC_CX_BCR 1
+#define GPU_CC_FAST_HUB_BCR 2
+#define GPU_CC_FF_BCR 3
+#define GPU_CC_GMU_BCR 4
+#define GPU_CC_GX_BCR 5
+#define GPU_CC_RBCPR_BCR 6
+#define GPU_CC_XO_BCR 7
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-videocc.h b/include/dt-bindings/clock/qcom,milos-videocc.h
new file mode 100644
index 000000000000..3544db81ffae
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-videocc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_PLL0 0
+#define VIDEO_CC_AHB_CLK 1
+#define VIDEO_CC_AHB_CLK_SRC 2
+#define VIDEO_CC_MVS0_CLK 3
+#define VIDEO_CC_MVS0_CLK_SRC 4
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_SHIFT_CLK 9
+#define VIDEO_CC_SLEEP_CLK 10
+#define VIDEO_CC_SLEEP_CLK_SRC 11
+#define VIDEO_CC_XO_CLK 12
+#define VIDEO_CC_XO_CLK_SRC 13
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
index 81714fc859c5..717431d735c1 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
@@ -133,5 +133,7 @@
#define VCAP_CLK 124
#define VCAP_NPL_CLK 125
#define PLL15 126
+#define DSI2_PIXEL_LVDS_SRC 127
+#define LVDS_CLK 128

#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
index f9dbc21cb5c7..ee2a89dae72d 100644
--- a/include/dt-bindings/clock/qcom,mmcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
@@ -157,6 +157,7 @@
#define BIMC_SMMU_GDSC 7

#define CAMSS_MICRO_BCR 0
+#define MDSS_BCR 1

#endif
diff --git a/include/dt-bindings/clock/qcom,qca8k-nsscc.h b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
new file mode 100644
index 000000000000..0ac3e4c69a1a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
+#define _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
+
+#define NSS_CC_SWITCH_CORE_CLK_SRC 0
+#define NSS_CC_SWITCH_CORE_CLK 1
+#define NSS_CC_APB_BRIDGE_CLK 2
+#define NSS_CC_MAC0_TX_CLK_SRC 3
+#define NSS_CC_MAC0_TX_DIV_CLK_SRC 4
+#define NSS_CC_MAC0_TX_CLK 5
+#define NSS_CC_MAC0_TX_SRDS1_CLK 6
+#define NSS_CC_MAC0_RX_CLK_SRC 7
+#define NSS_CC_MAC0_RX_DIV_CLK_SRC 8
+#define NSS_CC_MAC0_RX_CLK 9
+#define NSS_CC_MAC0_RX_SRDS1_CLK 10
+#define NSS_CC_MAC1_TX_CLK_SRC 11
+#define NSS_CC_MAC1_TX_DIV_CLK_SRC 12
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC 13
+#define NSS_CC_MAC1_SRDS1_CH0_RX_CLK 14
+#define NSS_CC_MAC1_TX_CLK 15
+#define NSS_CC_MAC1_GEPHY0_TX_CLK 16
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK 17
+#define NSS_CC_MAC1_RX_CLK_SRC 18
+#define NSS_CC_MAC1_RX_DIV_CLK_SRC 19
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC 20
+#define NSS_CC_MAC1_SRDS1_CH0_TX_CLK 21
+#define NSS_CC_MAC1_RX_CLK 22
+#define NSS_CC_MAC1_GEPHY0_RX_CLK 23
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK 24
+#define NSS_CC_MAC2_TX_CLK_SRC 25
+#define NSS_CC_MAC2_TX_DIV_CLK_SRC 26
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC 27
+#define NSS_CC_MAC2_SRDS1_CH1_RX_CLK 28
+#define NSS_CC_MAC2_TX_CLK 29
+#define NSS_CC_MAC2_GEPHY1_TX_CLK 30
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK 31
+#define NSS_CC_MAC2_RX_CLK_SRC 32
+#define NSS_CC_MAC2_RX_DIV_CLK_SRC 33
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC 34
+#define NSS_CC_MAC2_SRDS1_CH1_TX_CLK 35
+#define NSS_CC_MAC2_RX_CLK 36
+#define NSS_CC_MAC2_GEPHY1_RX_CLK 37
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK 38
+#define NSS_CC_MAC3_TX_CLK_SRC 39
+#define NSS_CC_MAC3_TX_DIV_CLK_SRC 40
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC 41
+#define NSS_CC_MAC3_SRDS1_CH2_RX_CLK 42
+#define NSS_CC_MAC3_TX_CLK 43
+#define NSS_CC_MAC3_GEPHY2_TX_CLK 44
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK 45
+#define NSS_CC_MAC3_RX_CLK_SRC 46
+#define NSS_CC_MAC3_RX_DIV_CLK_SRC 47
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC 48
+#define NSS_CC_MAC3_SRDS1_CH2_TX_CLK 49
+#define NSS_CC_MAC3_RX_CLK 50
+#define NSS_CC_MAC3_GEPHY2_RX_CLK 51
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK 52
+#define NSS_CC_MAC4_TX_CLK_SRC 53
+#define NSS_CC_MAC4_TX_DIV_CLK_SRC 54
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC 55
+#define NSS_CC_MAC4_SRDS1_CH3_RX_CLK 56
+#define NSS_CC_MAC4_TX_CLK 57
+#define NSS_CC_MAC4_GEPHY3_TX_CLK 58
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK 59
+#define NSS_CC_MAC4_RX_CLK_SRC 60
+#define NSS_CC_MAC4_RX_DIV_CLK_SRC 61
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC 62
+#define NSS_CC_MAC4_SRDS1_CH3_TX_CLK 63
+#define NSS_CC_MAC4_RX_CLK 64
+#define NSS_CC_MAC4_GEPHY3_RX_CLK 65
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK 66
+#define NSS_CC_MAC5_TX_CLK_SRC 67
+#define NSS_CC_MAC5_TX_DIV_CLK_SRC 68
+#define NSS_CC_MAC5_TX_SRDS0_CLK 69
+#define NSS_CC_MAC5_TX_CLK 70
+#define NSS_CC_MAC5_RX_CLK_SRC 71
+#define NSS_CC_MAC5_RX_DIV_CLK_SRC 72
+#define NSS_CC_MAC5_RX_SRDS0_CLK 73
+#define NSS_CC_MAC5_RX_CLK 74
+#define NSS_CC_MAC5_TX_SRDS0_CLK_SRC 75
+#define NSS_CC_MAC5_RX_SRDS0_CLK_SRC 76
+#define NSS_CC_AHB_CLK_SRC 77
+#define NSS_CC_AHB_CLK 78
+#define NSS_CC_SEC_CTRL_AHB_CLK 79
+#define NSS_CC_TLMM_CLK 80
+#define NSS_CC_TLMM_AHB_CLK 81
+#define NSS_CC_CNOC_AHB_CLK 82
+#define NSS_CC_MDIO_AHB_CLK 83
+#define NSS_CC_MDIO_MASTER_AHB_CLK 84
+#define NSS_CC_SYS_CLK_SRC 85
+#define NSS_CC_SRDS0_SYS_CLK 86
+#define NSS_CC_SRDS1_SYS_CLK 87
+#define NSS_CC_GEPHY0_SYS_CLK 88
+#define NSS_CC_GEPHY1_SYS_CLK 89
+#define NSS_CC_GEPHY2_SYS_CLK 90
+#define NSS_CC_GEPHY3_SYS_CLK 91
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcm2290-gpucc.h b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
new file mode 100644
index 000000000000..7c76dd05278f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_GFX3D_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GFX3D_CLK 8
+#define GPU_CC_GX_GFX3D_CLK_SRC 9
+#define GPU_CC_PLL0 10
+#define GPU_CC_SLEEP_CLK 11
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 12
+
+/* Resets */
+#define GPU_GX_BCR 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-camcc.h b/include/dt-bindings/clock/qcom,qcs615-camcc.h
new file mode 100644
index 000000000000..aec57dddc067
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-camcc.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CORE_AHB_CLK 9
+#define CAM_CC_CPAS_AHB_CLK 10
+#define CAM_CC_CPHY_RX_CLK_SRC 11
+#define CAM_CC_CSI0PHYTIMER_CLK 12
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 13
+#define CAM_CC_CSI1PHYTIMER_CLK 14
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI2PHYTIMER_CLK 16
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSIPHY0_CLK 18
+#define CAM_CC_CSIPHY1_CLK 19
+#define CAM_CC_CSIPHY2_CLK 20
+#define CAM_CC_FAST_AHB_CLK_SRC 21
+#define CAM_CC_ICP_ATB_CLK 22
+#define CAM_CC_ICP_CLK 23
+#define CAM_CC_ICP_CLK_SRC 24
+#define CAM_CC_ICP_CTI_CLK 25
+#define CAM_CC_ICP_TS_CLK 26
+#define CAM_CC_IFE_0_AXI_CLK 27
+#define CAM_CC_IFE_0_CLK 28
+#define CAM_CC_IFE_0_CLK_SRC 29
+#define CAM_CC_IFE_0_CPHY_RX_CLK 30
+#define CAM_CC_IFE_0_CSID_CLK 31
+#define CAM_CC_IFE_0_CSID_CLK_SRC 32
+#define CAM_CC_IFE_0_DSP_CLK 33
+#define CAM_CC_IFE_1_AXI_CLK 34
+#define CAM_CC_IFE_1_CLK 35
+#define CAM_CC_IFE_1_CLK_SRC 36
+#define CAM_CC_IFE_1_CPHY_RX_CLK 37
+#define CAM_CC_IFE_1_CSID_CLK 38
+#define CAM_CC_IFE_1_CSID_CLK_SRC 39
+#define CAM_CC_IFE_1_DSP_CLK 40
+#define CAM_CC_IFE_LITE_CLK 41
+#define CAM_CC_IFE_LITE_CLK_SRC 42
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 43
+#define CAM_CC_IFE_LITE_CSID_CLK 44
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 45
+#define CAM_CC_IPE_0_AHB_CLK 46
+#define CAM_CC_IPE_0_AREG_CLK 47
+#define CAM_CC_IPE_0_AXI_CLK 48
+#define CAM_CC_IPE_0_CLK 49
+#define CAM_CC_IPE_0_CLK_SRC 50
+#define CAM_CC_JPEG_CLK 51
+#define CAM_CC_JPEG_CLK_SRC 52
+#define CAM_CC_LRME_CLK 53
+#define CAM_CC_LRME_CLK_SRC 54
+#define CAM_CC_MCLK0_CLK 55
+#define CAM_CC_MCLK0_CLK_SRC 56
+#define CAM_CC_MCLK1_CLK 57
+#define CAM_CC_MCLK1_CLK_SRC 58
+#define CAM_CC_MCLK2_CLK 59
+#define CAM_CC_MCLK2_CLK_SRC 60
+#define CAM_CC_MCLK3_CLK 61
+#define CAM_CC_MCLK3_CLK_SRC 62
+#define CAM_CC_PLL0 63
+#define CAM_CC_PLL1 64
+#define CAM_CC_PLL2 65
+#define CAM_CC_PLL2_OUT_AUX2 66
+#define CAM_CC_PLL3 67
+#define CAM_CC_SLOW_AHB_CLK_SRC 68
+#define CAM_CC_SOC_AHB_CLK 69
+#define CAM_CC_SYS_TMR_CLK 70
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_ICP_BCR 7
+#define CAM_CC_IFE_0_BCR 8
+#define CAM_CC_IFE_1_BCR 9
+#define CAM_CC_IFE_LITE_BCR 10
+#define CAM_CC_IPE_0_BCR 11
+#define CAM_CC_JPEG_BCR 12
+#define CAM_CC_LRME_BCR 13
+#define CAM_CC_MCLK0_BCR 14
+#define CAM_CC_MCLK1_BCR 15
+#define CAM_CC_MCLK2_BCR 16
+#define CAM_CC_MCLK3_BCR 17
+#define CAM_CC_TITAN_TOP_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-dispcc.h b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
new file mode 100644
index 000000000000..9a29945c5762
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17
+#define DISP_CC_MDSS_ESC0_CLK 18
+#define DISP_CC_MDSS_ESC0_CLK_SRC 19
+#define DISP_CC_MDSS_MDP_CLK 20
+#define DISP_CC_MDSS_MDP_CLK_SRC 21
+#define DISP_CC_MDSS_MDP_LUT_CLK 22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 25
+#define DISP_CC_MDSS_ROT_CLK 26
+#define DISP_CC_MDSS_ROT_CLK_SRC 27
+#define DISP_CC_MDSS_RSCC_AHB_CLK 28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 31
+#define DISP_CC_PLL0 32
+#define DISP_CC_XO_CLK 33
+
+/* DISP_CC power domains */
+#define MDSS_CORE_GDSC 0
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-gcc.h b/include/dt-bindings/clock/qcom,qcs615-gcc.h
new file mode 100644
index 000000000000..9704091636b8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gcc.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H
+
+/* GCC clocks */
+#define GPLL0_OUT_AUX2_DIV 0
+#define GPLL3_OUT_AUX2_DIV 1
+#define GPLL0 2
+#define GPLL3 3
+#define GPLL4 4
+#define GPLL6 5
+#define GPLL6_OUT_MAIN 6
+#define GPLL7 7
+#define GPLL8 8
+#define GPLL8_OUT_MAIN 9
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 10
+#define GCC_AGGRE_USB2_SEC_AXI_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_AHB2PHY_EAST_CLK 13
+#define GCC_AHB2PHY_WEST_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CE1_AHB_CLK 19
+#define GCC_CE1_AXI_CLK 20
+#define GCC_CE1_CLK 21
+#define GCC_CFG_NOC_USB2_SEC_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CPUSS_AHB_CLK 24
+#define GCC_CPUSS_AHB_CLK_SRC 25
+#define GCC_CPUSS_GNOC_CLK 26
+#define GCC_DDRSS_GPU_AXI_CLK 27
+#define GCC_DISP_AHB_CLK 28
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 29
+#define GCC_DISP_HF_AXI_CLK 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_EMAC_AXI_CLK 32
+#define GCC_EMAC_PTP_CLK 33
+#define GCC_EMAC_PTP_CLK_SRC 34
+#define GCC_EMAC_RGMII_CLK 35
+#define GCC_EMAC_RGMII_CLK_SRC 36
+#define GCC_EMAC_SLV_AHB_CLK 37
+#define GCC_GP1_CLK 38
+#define GCC_GP1_CLK_SRC 39
+#define GCC_GP2_CLK 40
+#define GCC_GP2_CLK_SRC 41
+#define GCC_GP3_CLK 42
+#define GCC_GP3_CLK_SRC 43
+#define GCC_GPU_CFG_AHB_CLK 44
+#define GCC_GPU_GPLL0_CLK_SRC 45
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 46
+#define GCC_GPU_IREF_CLK 47
+#define GCC_GPU_MEMNOC_GFX_CLK 48
+#define GCC_GPU_SNOC_DVM_GFX_CLK 49
+#define GCC_PCIE0_PHY_REFGEN_CLK 50
+#define GCC_PCIE_0_AUX_CLK 51
+#define GCC_PCIE_0_AUX_CLK_SRC 52
+#define GCC_PCIE_0_CFG_AHB_CLK 53
+#define GCC_PCIE_0_CLKREF_CLK 54
+#define GCC_PCIE_0_MSTR_AXI_CLK 55
+#define GCC_PCIE_0_PIPE_CLK 56
+#define GCC_PCIE_0_SLV_AXI_CLK 57
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_PHY_AUX_CLK 59
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 60
+#define GCC_PDM2_CLK 61
+#define GCC_PDM2_CLK_SRC 62
+#define GCC_PDM_AHB_CLK 63
+#define GCC_PDM_XO4_CLK 64
+#define GCC_PRNG_AHB_CLK 65
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 66
+#define GCC_QMIP_DISP_AHB_CLK 67
+#define GCC_QMIP_PCIE_AHB_CLK 68
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 69
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 70
+#define GCC_QSPI_CORE_CLK 71
+#define GCC_QSPI_CORE_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 73
+#define GCC_QUPV3_WRAP0_CORE_CLK 74
+#define GCC_QUPV3_WRAP0_S0_CLK 75
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S1_CLK 77
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S2_CLK 79
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S3_CLK 81
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S4_CLK 83
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S5_CLK 85
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 87
+#define GCC_QUPV3_WRAP1_CORE_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK 89
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S1_CLK 91
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S2_CLK 93
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S3_CLK 95
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S4_CLK 97
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S5_CLK 99
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 100
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 101
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 104
+#define GCC_RX1_USB2_CLKREF_CLK 105
+#define GCC_RX3_USB2_CLKREF_CLK 106
+#define GCC_SDCC1_AHB_CLK 107
+#define GCC_SDCC1_APPS_CLK 108
+#define GCC_SDCC1_APPS_CLK_SRC 109
+#define GCC_SDCC1_ICE_CORE_CLK 110
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 111
+#define GCC_SDCC2_AHB_CLK 112
+#define GCC_SDCC2_APPS_CLK 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_SDR_CORE_CLK 115
+#define GCC_SDR_CSR_HCLK 116
+#define GCC_SDR_PRI_MI2S_CLK 117
+#define GCC_SDR_SEC_MI2S_CLK 118
+#define GCC_SDR_WR0_MEM_CLK 119
+#define GCC_SDR_WR1_MEM_CLK 120
+#define GCC_SDR_WR2_MEM_CLK 121
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 122
+#define GCC_UFS_CARD_CLKREF_CLK 123
+#define GCC_UFS_MEM_CLKREF_CLK 124
+#define GCC_UFS_PHY_AHB_CLK 125
+#define GCC_UFS_PHY_AXI_CLK 126
+#define GCC_UFS_PHY_AXI_CLK_SRC 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_PHY_AUX_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 131
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 132
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 133
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 134
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 135
+#define GCC_USB20_SEC_MASTER_CLK 136
+#define GCC_USB20_SEC_MASTER_CLK_SRC 137
+#define GCC_USB20_SEC_MOCK_UTMI_CLK 138
+#define GCC_USB20_SEC_MOCK_UTMI_CLK_SRC 139
+#define GCC_USB20_SEC_SLEEP_CLK 140
+#define GCC_USB2_PRIM_CLKREF_CLK 141
+#define GCC_USB2_SEC_CLKREF_CLK 142
+#define GCC_USB2_SEC_PHY_AUX_CLK 143
+#define GCC_USB2_SEC_PHY_AUX_CLK_SRC 144
+#define GCC_USB2_SEC_PHY_COM_AUX_CLK 145
+#define GCC_USB2_SEC_PHY_PIPE_CLK 146
+#define GCC_USB30_PRIM_MASTER_CLK 147
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 148
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 149
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 150
+#define GCC_USB30_PRIM_SLEEP_CLK 151
+#define GCC_USB3_PRIM_CLKREF_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK 153
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 154
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 156
+#define GCC_USB3_SEC_CLKREF_CLK 157
+#define GCC_VIDEO_AHB_CLK 158
+#define GCC_VIDEO_AXI0_CLK 159
+#define GCC_VIDEO_XO_CLK 160
+#define GCC_VSENSOR_CLK_SRC 161
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 162
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 163
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 164
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 166
+
+/* GCC Resets */
+#define GCC_EMAC_BCR 0
+#define GCC_QUSB2PHY_PRIM_BCR 1
+#define GCC_QUSB2PHY_SEC_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB2_PHY_SEC_BCR 4
+#define GCC_USB3_DP_PHY_SEC_BCR 5
+#define GCC_USB3PHY_PHY_SEC_BCR 6
+#define GCC_PCIE_0_BCR 7
+#define GCC_PCIE_0_PHY_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_UFS_PHY_BCR 11
+#define GCC_USB20_SEC_BCR 12
+#define GCC_USB3_PHY_PRIM_SP0_BCR 13
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 14
+#define GCC_SDCC1_BCR 15
+#define GCC_SDCC2_BCR 16
+
+/* GCC power domains */
+#define EMAC_GDSC 0
+#define PCIE_0_GDSC 1
+#define UFS_PHY_GDSC 2
+#define USB20_SEC_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+
+#endif
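The GCC_* values above are not register offsets; they are stable indices into the clock driver's lookup tables, referenced from device tree source through this header. A minimal sketch of a QCS615 consumer node follows — the node name, unit address, reg contents, compatible string, and the "iface"/"core" clock-names are placeholders assumed for illustration; only the GCC_* cells come from the header above:

    #include <dt-bindings/clock/qcom,qcs615-gcc.h>

    /* Hypothetical SDHC consumer; everything but the GCC_* cells is a placeholder. */
    sdhc_1: mmc@7c4000 {
        compatible = "qcom,qcs615-sdhci";    /* assumed name */
        reg = <0x007c4000 0x1000>;           /* placeholder address */
        clocks = <&gcc GCC_SDCC1_AHB_CLK>,
                 <&gcc GCC_SDCC1_APPS_CLK>;
        clock-names = "iface", "core";
        resets = <&gcc GCC_SDCC1_BCR>;
    };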
diff --git a/include/dt-bindings/clock/qcom,qcs615-gpucc.h b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
new file mode 100644
index 000000000000..6d8394b90d59
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+
+/* GPU_CC clocks */
+#define CRC_DIV_PLL0 0
+#define CRC_DIV_PLL1 1
+#define GPU_CC_PLL0 2
+#define GPU_CC_PLL1 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GFX3D_CLK 5
+#define GPU_CC_CX_GFX3D_SLV_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GFX3D_CLK 12
+#define GPU_CC_GX_GFX3D_CLK_SRC 13
+#define GPU_CC_GX_GMU_CLK 14
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GPU_CC power domains */
+#define CX_GDSC 0
+#define GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_CX_BCR 0
+#define GPU_CC_GFX3D_AON_BCR 1
+#define GPU_CC_GMU_BCR 2
+#define GPU_CC_GX_BCR 3
+#define GPU_CC_XO_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-videocc.h b/include/dt-bindings/clock/qcom,qcs615-videocc.h
new file mode 100644
index 000000000000..0ca3efb21103
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-videocc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_SLEEP_CLK 0
+#define VIDEO_CC_SLEEP_CLK_SRC 1
+#define VIDEO_CC_VCODEC0_AXI_CLK 2
+#define VIDEO_CC_VCODEC0_CORE_CLK 3
+#define VIDEO_CC_VENUS_AHB_CLK 4
+#define VIDEO_CC_VENUS_CLK_SRC 5
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 6
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 7
+#define VIDEO_CC_XO_CLK 8
+#define VIDEO_PLL0 9
+
+/* VIDEO_CC power domains */
+#define VCODEC0_GDSC 0
+#define VENUS_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VENUS_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs8300-camcc.h b/include/dt-bindings/clock/qcom,qcs8300-camcc.h
new file mode 100644
index 000000000000..fc535c847859
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-camcc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_QCS8300_CAM_CC_H
+#define _DT_BINDINGS_CLK_QCOM_QCS8300_CAM_CC_H
+
+#include "qcom,sa8775p-camcc.h"
+
+/* QCS8300 introduces the following new clocks on top of SA8775P */
+
+/* CAM_CC clocks */
+#define CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK 86
+
+#endif
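Note the structure of this header: rather than restating the whole table, it includes the SA8775P binding and defines only the identifier that is new on QCS8300, continuing the parent's numbering (the SA8775P CAM_CC clock list ends at 85, so the new clock takes 86). The QCS8300 GPU_CC header further down in this series uses the same pattern. A device tree fragment can then mix inherited and new indices through the single include — the &camcc label below is illustrative:

    #include <dt-bindings/clock/qcom,qcs8300-camcc.h>

    /* First cell resolves via the included SA8775P header, second via this one. */
    clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
             <&camcc CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK>;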
diff --git a/include/dt-bindings/clock/qcom,qcs8300-gcc.h b/include/dt-bindings/clock/qcom,qcs8300-gcc.h
new file mode 100644
index 000000000000..a0083b1d2126
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL9 5
+#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 6
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 7
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 8
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 9
+#define GCC_AHB2PHY0_CLK 10
+#define GCC_AHB2PHY2_CLK 11
+#define GCC_AHB2PHY3_CLK 12
+#define GCC_BOOT_ROM_AHB_CLK 13
+#define GCC_CAMERA_AHB_CLK 14
+#define GCC_CAMERA_HF_AXI_CLK 15
+#define GCC_CAMERA_SF_AXI_CLK 16
+#define GCC_CAMERA_THROTTLE_XO_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 19
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_HF_AXI_CLK 23
+#define GCC_DISP_XO_CLK 24
+#define GCC_EDP_REF_CLKREF_EN 25
+#define GCC_EMAC0_AXI_CLK 26
+#define GCC_EMAC0_PHY_AUX_CLK 27
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 28
+#define GCC_EMAC0_PTP_CLK 29
+#define GCC_EMAC0_PTP_CLK_SRC 30
+#define GCC_EMAC0_RGMII_CLK 31
+#define GCC_EMAC0_RGMII_CLK_SRC 32
+#define GCC_EMAC0_SLV_AHB_CLK 33
+#define GCC_GP1_CLK 34
+#define GCC_GP1_CLK_SRC 35
+#define GCC_GP2_CLK 36
+#define GCC_GP2_CLK_SRC 37
+#define GCC_GP3_CLK 38
+#define GCC_GP3_CLK_SRC 39
+#define GCC_GP4_CLK 40
+#define GCC_GP4_CLK_SRC 41
+#define GCC_GP5_CLK 42
+#define GCC_GP5_CLK_SRC 43
+#define GCC_GPU_CFG_AHB_CLK 44
+#define GCC_GPU_GPLL0_CLK_SRC 45
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 46
+#define GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK 47
+#define GCC_GPU_MEMNOC_GFX_CLK 48
+#define GCC_GPU_SNOC_DVM_GFX_CLK 49
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 50
+#define GCC_GPU_TCU_THROTTLE_CLK 51
+#define GCC_PCIE_0_AUX_CLK 52
+#define GCC_PCIE_0_AUX_CLK_SRC 53
+#define GCC_PCIE_0_CFG_AHB_CLK 54
+#define GCC_PCIE_0_MSTR_AXI_CLK 55
+#define GCC_PCIE_0_PHY_AUX_CLK 56
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 57
+#define GCC_PCIE_0_PHY_RCHNG_CLK 58
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 59
+#define GCC_PCIE_0_PIPE_CLK 60
+#define GCC_PCIE_0_PIPE_CLK_SRC 61
+#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 62
+#define GCC_PCIE_0_PIPEDIV2_CLK 63
+#define GCC_PCIE_0_SLV_AXI_CLK 64
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 65
+#define GCC_PCIE_1_AUX_CLK 66
+#define GCC_PCIE_1_AUX_CLK_SRC 67
+#define GCC_PCIE_1_CFG_AHB_CLK 68
+#define GCC_PCIE_1_MSTR_AXI_CLK 69
+#define GCC_PCIE_1_PHY_AUX_CLK 70
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK 72
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 73
+#define GCC_PCIE_1_PIPE_CLK 74
+#define GCC_PCIE_1_PIPE_CLK_SRC 75
+#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 76
+#define GCC_PCIE_1_PIPEDIV2_CLK 77
+#define GCC_PCIE_1_SLV_AXI_CLK 78
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 79
+#define GCC_PCIE_CLKREF_EN 80
+#define GCC_PCIE_THROTTLE_CFG_CLK 81
+#define GCC_PDM2_CLK 82
+#define GCC_PDM2_CLK_SRC 83
+#define GCC_PDM_AHB_CLK 84
+#define GCC_PDM_XO4_CLK 85
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 86
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 87
+#define GCC_QMIP_DISP_AHB_CLK 88
+#define GCC_QMIP_DISP_ROT_AHB_CLK 89
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 90
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 91
+#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 92
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP0_CORE_CLK 94
+#define GCC_QUPV3_WRAP0_S0_CLK 95
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S1_CLK 97
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S2_CLK 99
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S3_CLK 101
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S4_CLK 103
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S5_CLK 105
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 106
+#define GCC_QUPV3_WRAP0_S6_CLK 107
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 108
+#define GCC_QUPV3_WRAP0_S7_CLK 109
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 111
+#define GCC_QUPV3_WRAP1_CORE_CLK 112
+#define GCC_QUPV3_WRAP1_S0_CLK 113
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 114
+#define GCC_QUPV3_WRAP1_S1_CLK 115
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 116
+#define GCC_QUPV3_WRAP1_S2_CLK 117
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 118
+#define GCC_QUPV3_WRAP1_S3_CLK 119
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 120
+#define GCC_QUPV3_WRAP1_S4_CLK 121
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 122
+#define GCC_QUPV3_WRAP1_S5_CLK 123
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 124
+#define GCC_QUPV3_WRAP1_S6_CLK 125
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 126
+#define GCC_QUPV3_WRAP1_S7_CLK 127
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 128
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 129
+#define GCC_QUPV3_WRAP3_CORE_CLK 130
+#define GCC_QUPV3_WRAP3_QSPI_CLK 131
+#define GCC_QUPV3_WRAP3_S0_CLK 132
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 134
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 137
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 138
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 139
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 140
+#define GCC_SDCC1_AHB_CLK 141
+#define GCC_SDCC1_APPS_CLK 142
+#define GCC_SDCC1_APPS_CLK_SRC 143
+#define GCC_SDCC1_ICE_CORE_CLK 144
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 145
+#define GCC_SGMI_CLKREF_EN 146
+#define GCC_UFS_PHY_AHB_CLK 147
+#define GCC_UFS_PHY_AXI_CLK 148
+#define GCC_UFS_PHY_AXI_CLK_SRC 149
+#define GCC_UFS_PHY_ICE_CORE_CLK 150
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 151
+#define GCC_UFS_PHY_PHY_AUX_CLK 152
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 153
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 154
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 155
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 156
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 157
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 158
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 159
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 160
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 161
+#define GCC_USB20_MASTER_CLK 162
+#define GCC_USB20_MASTER_CLK_SRC 163
+#define GCC_USB20_MOCK_UTMI_CLK 164
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 165
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 166
+#define GCC_USB20_SLEEP_CLK 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_PHY_AUX_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 178
+#define GCC_USB_CLKREF_EN 179
+#define GCC_VIDEO_AHB_CLK 180
+#define GCC_VIDEO_AXI0_CLK 181
+#define GCC_VIDEO_AXI1_CLK 182
+#define GCC_VIDEO_XO_CLK 183
+
+/* GCC power domains */
+#define GCC_EMAC0_GDSC 0
+#define GCC_PCIE_0_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB20_PRIM_GDSC 4
+#define GCC_USB30_PRIM_GDSC 5
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_1_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_UFS_PHY_BCR 12
+#define GCC_USB20_PRIM_BCR 13
+#define GCC_USB2_PHY_PRIM_BCR 14
+#define GCC_USB2_PHY_SEC_BCR 15
+#define GCC_USB30_PRIM_BCR 16
+#define GCC_USB3_DP_PHY_PRIM_BCR 17
+#define GCC_USB3_PHY_PRIM_BCR 18
+#define GCC_USB3_PHY_TERT_BCR 19
+#define GCC_USB3_UNIPHY_MP0_BCR 20
+#define GCC_USB3_UNIPHY_MP1_BCR 21
+#define GCC_USB3PHY_PHY_PRIM_BCR 22
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 23
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 24
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 25
+#define GCC_VIDEO_BCR 26
+#define GCC_VIDEO_AXI0_CLK_ARES 27
+#define GCC_VIDEO_AXI1_CLK_ARES 28
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs8300-gpucc.h b/include/dt-bindings/clock/qcom,qcs8300-gpucc.h
new file mode 100644
index 000000000000..afa187467b4c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-gpucc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPUCC_QCS8300_H
+#define _DT_BINDINGS_CLK_QCOM_GPUCC_QCS8300_H
+
+#include "qcom,sa8775p-gpucc.h"
+
+/* QCS8300 introduces the following new clocks on top of SA8775P */
+
+/* GPU_CC clocks */
+#define GPU_CC_CX_ACCU_SHIFT_CLK 23
+#define GPU_CC_GX_ACCU_SHIFT_CLK 24
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 46309c9953b2..1477a75e7f6d 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -170,5 +170,9 @@
#define RPM_SMD_BIMC_FREQ_LOG 124
#define RPM_SMD_LN_BB_CLK_PIN 125
#define RPM_SMD_LN_BB_A_CLK_PIN 126
+#define RPM_SMD_BB_CLK3 127
+#define RPM_SMD_BB_CLK3_A 128
+#define RPM_SMD_BB_CLK3_PIN 129
+#define RPM_SMD_BB_CLK3_A_PIN 130
#endif
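Unlike the new per-SoC headers elsewhere in this series, this hunk amends an existing binding, so the four BB_CLK3 identifiers are appended after the current highest value (126) rather than slotted into the list alphabetically. These numbers are ABI — device tree blobs are compiled against them — and renumbering existing entries would silently break deployed DTBs. Consumption is unchanged; a sketch with an illustrative consumer-side name:

    clocks = <&rpmcc RPM_SMD_BB_CLK3>;
    clock-names = "bb_clk3";    /* consumer-chosen string, not defined by the header */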
diff --git a/include/dt-bindings/clock/qcom,sa8775p-camcc.h b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
new file mode 100644
index 000000000000..38531acd699f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+
+/* CAM_CC clocks */
+#define CAM_CC_CAMNOC_AXI_CLK 0
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 1
+#define CAM_CC_CAMNOC_DCD_XO_CLK 2
+#define CAM_CC_CAMNOC_XO_CLK 3
+#define CAM_CC_CCI_0_CLK 4
+#define CAM_CC_CCI_0_CLK_SRC 5
+#define CAM_CC_CCI_1_CLK 6
+#define CAM_CC_CCI_1_CLK_SRC 7
+#define CAM_CC_CCI_2_CLK 8
+#define CAM_CC_CCI_2_CLK_SRC 9
+#define CAM_CC_CCI_3_CLK 10
+#define CAM_CC_CCI_3_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_LITE_CLK 17
+#define CAM_CC_CPAS_IPE_CLK 18
+#define CAM_CC_CPAS_SFE_LITE_0_CLK 19
+#define CAM_CC_CPAS_SFE_LITE_1_CLK 20
+#define CAM_CC_CPHY_RX_CLK_SRC 21
+#define CAM_CC_CSI0PHYTIMER_CLK 22
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 23
+#define CAM_CC_CSI1PHYTIMER_CLK 24
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI2PHYTIMER_CLK 26
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI3PHYTIMER_CLK 28
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSID_CLK 30
+#define CAM_CC_CSID_CLK_SRC 31
+#define CAM_CC_CSID_CSIPHY_RX_CLK 32
+#define CAM_CC_CSIPHY0_CLK 33
+#define CAM_CC_CSIPHY1_CLK 34
+#define CAM_CC_CSIPHY2_CLK 35
+#define CAM_CC_CSIPHY3_CLK 36
+#define CAM_CC_FAST_AHB_CLK_SRC 37
+#define CAM_CC_GDSC_CLK 38
+#define CAM_CC_ICP_AHB_CLK 39
+#define CAM_CC_ICP_CLK 40
+#define CAM_CC_ICP_CLK_SRC 41
+#define CAM_CC_IFE_0_CLK 42
+#define CAM_CC_IFE_0_CLK_SRC 43
+#define CAM_CC_IFE_0_FAST_AHB_CLK 44
+#define CAM_CC_IFE_1_CLK 45
+#define CAM_CC_IFE_1_CLK_SRC 46
+#define CAM_CC_IFE_1_FAST_AHB_CLK 47
+#define CAM_CC_IFE_LITE_AHB_CLK 48
+#define CAM_CC_IFE_LITE_CLK 49
+#define CAM_CC_IFE_LITE_CLK_SRC 50
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51
+#define CAM_CC_IFE_LITE_CSID_CLK 52
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53
+#define CAM_CC_IPE_AHB_CLK 54
+#define CAM_CC_IPE_CLK 55
+#define CAM_CC_IPE_CLK_SRC 56
+#define CAM_CC_IPE_FAST_AHB_CLK 57
+#define CAM_CC_MCLK0_CLK 58
+#define CAM_CC_MCLK0_CLK_SRC 59
+#define CAM_CC_MCLK1_CLK 60
+#define CAM_CC_MCLK1_CLK_SRC 61
+#define CAM_CC_MCLK2_CLK 62
+#define CAM_CC_MCLK2_CLK_SRC 63
+#define CAM_CC_MCLK3_CLK 64
+#define CAM_CC_MCLK3_CLK_SRC 65
+#define CAM_CC_PLL0 66
+#define CAM_CC_PLL0_OUT_EVEN 67
+#define CAM_CC_PLL0_OUT_ODD 68
+#define CAM_CC_PLL2 69
+#define CAM_CC_PLL3 70
+#define CAM_CC_PLL3_OUT_EVEN 71
+#define CAM_CC_PLL4 72
+#define CAM_CC_PLL4_OUT_EVEN 73
+#define CAM_CC_PLL5 74
+#define CAM_CC_PLL5_OUT_EVEN 75
+#define CAM_CC_SFE_LITE_0_CLK 76
+#define CAM_CC_SFE_LITE_0_FAST_AHB_CLK 77
+#define CAM_CC_SFE_LITE_1_CLK 78
+#define CAM_CC_SFE_LITE_1_FAST_AHB_CLK 79
+#define CAM_CC_SLEEP_CLK 80
+#define CAM_CC_SLEEP_CLK_SRC 81
+#define CAM_CC_SLOW_AHB_CLK_SRC 82
+#define CAM_CC_SM_OBS_CLK 83
+#define CAM_CC_XO_CLK_SRC 84
+#define CAM_CC_QDSS_DEBUG_XO_CLK 85
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_ICP_BCR 0
+#define CAM_CC_IFE_0_BCR 1
+#define CAM_CC_IFE_1_BCR 2
+#define CAM_CC_IPE_0_BCR 3
+#define CAM_CC_SFE_LITE_0_BCR 4
+#define CAM_CC_SFE_LITE_1_BCR 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-dispcc.h b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
new file mode 100644
index 000000000000..e2049e510658
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+
+/* DISP_CC_0/1 clocks */
+#define MDSS_DISP_CC_MDSS_AHB1_CLK 0
+#define MDSS_DISP_CC_MDSS_AHB_CLK 1
+#define MDSS_DISP_CC_MDSS_AHB_CLK_SRC 2
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK 3
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK 7
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC 14
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK 23
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC 24
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK 25
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC 26
+#define MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK 28
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC 31
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK 32
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 33
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 34
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 35
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK 36
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 37
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK 38
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 39
+#define MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 40
+#define MDSS_DISP_CC_MDSS_ESC0_CLK 41
+#define MDSS_DISP_CC_MDSS_ESC0_CLK_SRC 42
+#define MDSS_DISP_CC_MDSS_ESC1_CLK 43
+#define MDSS_DISP_CC_MDSS_ESC1_CLK_SRC 44
+#define MDSS_DISP_CC_MDSS_MDP1_CLK 45
+#define MDSS_DISP_CC_MDSS_MDP_CLK 46
+#define MDSS_DISP_CC_MDSS_MDP_CLK_SRC 47
+#define MDSS_DISP_CC_MDSS_MDP_LUT1_CLK 48
+#define MDSS_DISP_CC_MDSS_MDP_LUT_CLK 49
+#define MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK 50
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK 51
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC 52
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK 53
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC 54
+#define MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK 55
+#define MDSS_DISP_CC_MDSS_RSCC_AHB_CLK 56
+#define MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK 57
+#define MDSS_DISP_CC_MDSS_VSYNC1_CLK 58
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK 59
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC 60
+#define MDSS_DISP_CC_PLL0 61
+#define MDSS_DISP_CC_PLL1 62
+#define MDSS_DISP_CC_SLEEP_CLK 63
+#define MDSS_DISP_CC_SLEEP_CLK_SRC 64
+#define MDSS_DISP_CC_SM_OBS_CLK 65
+#define MDSS_DISP_CC_XO_CLK 66
+#define MDSS_DISP_CC_XO_CLK_SRC 67
+
+/* DISP_CC_0/1 power domains */
+#define MDSS_DISP_CC_MDSS_CORE_GDSC 0
+#define MDSS_DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC_0/1 resets */
+#define MDSS_DISP_CC_MDSS_CORE_BCR 0
+#define MDSS_DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-videocc.h b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
new file mode 100644
index 000000000000..e6325f68c317
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_AHB_CLK 0
+#define VIDEO_CC_AHB_CLK_SRC 1
+#define VIDEO_CC_MVS0_CLK 2
+#define VIDEO_CC_MVS0_CLK_SRC 3
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS0C_CLK 5
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 6
+#define VIDEO_CC_MVS1_CLK 7
+#define VIDEO_CC_MVS1_CLK_SRC 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_PLL_LOCK_MONITOR_CLK 12
+#define VIDEO_CC_SLEEP_CLK 13
+#define VIDEO_CC_SLEEP_CLK_SRC 14
+#define VIDEO_CC_SM_DIV_CLK_SRC 15
+#define VIDEO_CC_SM_OBS_CLK 16
+#define VIDEO_CC_XO_CLK 17
+#define VIDEO_CC_XO_CLK_SRC 18
+#define VIDEO_PLL0 19
+#define VIDEO_PLL1 20
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+#define VIDEO_CC_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_BCR 6
+
+#endif
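This header, like most in the series, carries three independent index spaces: clock IDs for the clocks property, GDSC IDs for power-domains, and BCR/ARES IDs for resets. Each space starts again at 0 (VIDEO_CC_MVS0_GDSC is 1 while VIDEO_CC_MVS0_CLK is 2), so a cell is only meaningful against the property it was defined for. A hedged sketch of a consumer touching all three — the node itself and its placement are hypothetical:

    #include <dt-bindings/clock/qcom,sa8775p-videocc.h>

    /* Placeholder codec node; only the VIDEO_CC_* cells are real. */
    video-codec {
        clocks = <&videocc VIDEO_CC_MVS0_CLK>;
        power-domains = <&videocc VIDEO_CC_MVS0_GDSC>;
        resets = <&videocc VIDEO_CC_MVS0_BCR>;
    };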
diff --git a/include/dt-bindings/clock/qcom,sar2130p-gcc.h b/include/dt-bindings/clock/qcom,sar2130p-gcc.h
new file mode 100644
index 000000000000..69d2dd2538a6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sar2130p-gcc.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL9 3
+#define GCC_GPLL9_OUT_EVEN 4
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 5
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_CAMERA_AHB_CLK 8
+#define GCC_CAMERA_HF_AXI_CLK 9
+#define GCC_CAMERA_SF_AXI_CLK 10
+#define GCC_CAMERA_XO_CLK 11
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_DDRSS_GPU_AXI_CLK 14
+#define GCC_DDRSS_PCIE_SF_CLK 15
+#define GCC_DISP_AHB_CLK 16
+#define GCC_DISP_HF_AXI_CLK 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPU_CFG_AHB_CLK 24
+#define GCC_GPU_GPLL0_CLK_SRC 25
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 26
+#define GCC_GPU_MEMNOC_GFX_CLK 27
+#define GCC_GPU_SNOC_DVM_GFX_CLK 28
+#define GCC_IRIS_SS_HF_AXI1_CLK 29
+#define GCC_IRIS_SS_SPD_AXI1_CLK 30
+#define GCC_PCIE_0_AUX_CLK 31
+#define GCC_PCIE_0_AUX_CLK_SRC 32
+#define GCC_PCIE_0_CFG_AHB_CLK 33
+#define GCC_PCIE_0_MSTR_AXI_CLK 34
+#define GCC_PCIE_0_PHY_RCHNG_CLK 35
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 36
+#define GCC_PCIE_0_PIPE_CLK 37
+#define GCC_PCIE_0_PIPE_CLK_SRC 38
+#define GCC_PCIE_0_SLV_AXI_CLK 39
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 40
+#define GCC_PCIE_1_AUX_CLK 41
+#define GCC_PCIE_1_AUX_CLK_SRC 42
+#define GCC_PCIE_1_CFG_AHB_CLK 43
+#define GCC_PCIE_1_MSTR_AXI_CLK 44
+#define GCC_PCIE_1_PHY_RCHNG_CLK 45
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 46
+#define GCC_PCIE_1_PIPE_CLK 47
+#define GCC_PCIE_1_PIPE_CLK_SRC 48
+#define GCC_PCIE_1_SLV_AXI_CLK 49
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 50
+#define GCC_PDM2_CLK 51
+#define GCC_PDM2_CLK_SRC 52
+#define GCC_PDM_AHB_CLK 53
+#define GCC_PDM_XO4_CLK 54
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 55
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 56
+#define GCC_QMIP_GPU_AHB_CLK 57
+#define GCC_QMIP_PCIE_AHB_CLK 58
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 59
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 60
+#define GCC_QMIP_VIDEO_LSR_AHB_CLK 61
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 62
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 63
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 64
+#define GCC_QUPV3_WRAP0_CORE_CLK 65
+#define GCC_QUPV3_WRAP0_S0_CLK 66
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S1_CLK 68
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S2_CLK 70
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S3_CLK 72
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S4_CLK 74
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S5_CLK 76
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 78
+#define GCC_QUPV3_WRAP1_CORE_CLK 79
+#define GCC_QUPV3_WRAP1_S0_CLK 80
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S1_CLK 82
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S2_CLK 84
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S3_CLK 86
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S4_CLK 88
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S5_CLK 90
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_USB30_PRIM_MASTER_CLK 101
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 102
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 103
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 104
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 105
+#define GCC_USB30_PRIM_SLEEP_CLK 106
+#define GCC_USB3_PRIM_PHY_AUX_CLK 107
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 108
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 109
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 110
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 111
+#define GCC_VIDEO_AHB_CLK 112
+#define GCC_VIDEO_AXI0_CLK 113
+#define GCC_VIDEO_AXI1_CLK 114
+#define GCC_VIDEO_XO_CLK 115
+#define GCC_GPLL4 116
+#define GCC_GPLL5 117
+#define GCC_GPLL7 118
+#define GCC_DDRSS_SPAD_CLK 119
+#define GCC_DDRSS_SPAD_CLK_SRC 120
+#define GCC_VIDEO_AXI0_SREG 121
+#define GCC_VIDEO_AXI1_SREG 122
+#define GCC_IRIS_SS_HF_AXI1_SREG 123
+#define GCC_IRIS_SS_SPD_AXI1_SREG 124
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUSB2PHY_PRIM_BCR 19
+#define GCC_QUSB2PHY_SEC_BCR 20
+#define GCC_SDCC1_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_DP_PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_BCR 25
+#define GCC_USB3_PHY_SEC_BCR 26
+#define GCC_USB3PHY_PHY_PRIM_BCR 27
+#define GCC_USB3PHY_PHY_SEC_BCR 28
+#define GCC_VIDEO_AXI0_CLK_ARES 29
+#define GCC_VIDEO_AXI1_CLK_ARES 30
+#define GCC_VIDEO_BCR 31
+#define GCC_IRIS_SS_HF_AXI_CLK_ARES 32
+#define GCC_IRIS_SS_SPD_AXI_CLK_ARES 33
+#define GCC_DDRSS_SPAD_CLK_ARES 34
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB3_PHY_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sar2130p-gpucc.h b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h
new file mode 100644
index 000000000000..a2204369110a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_FF_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CXO_AON_CLK 4
+#define GPU_CC_CXO_CLK 5
+#define GPU_CC_FF_CLK_SRC 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 9
+#define GPU_CC_HUB_AON_CLK 10
+#define GPU_CC_HUB_CLK_SRC 11
+#define GPU_CC_HUB_CX_INT_CLK 12
+#define GPU_CC_MEMNOC_GFX_CLK 13
+#define GPU_CC_PLL0 14
+#define GPU_CC_PLL1 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sc8180x-camcc.h b/include/dt-bindings/clock/qcom,sc8180x-camcc.h
new file mode 100644
index 000000000000..3e57b80f65e8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8180x-camcc.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC8180X_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC8180X_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CCI_3_CLK 14
+#define CAM_CC_CCI_3_CLK_SRC 15
+#define CAM_CC_CORE_AHB_CLK 16
+#define CAM_CC_CPAS_AHB_CLK 17
+#define CAM_CC_CPHY_RX_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI3PHYTIMER_CLK 25
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSIPHY0_CLK 27
+#define CAM_CC_CSIPHY1_CLK 28
+#define CAM_CC_CSIPHY2_CLK 29
+#define CAM_CC_CSIPHY3_CLK 30
+#define CAM_CC_FAST_AHB_CLK_SRC 31
+#define CAM_CC_FD_CORE_CLK 32
+#define CAM_CC_FD_CORE_CLK_SRC 33
+#define CAM_CC_FD_CORE_UAR_CLK 34
+#define CAM_CC_ICP_AHB_CLK 35
+#define CAM_CC_ICP_CLK 36
+#define CAM_CC_ICP_CLK_SRC 37
+#define CAM_CC_IFE_0_AXI_CLK 38
+#define CAM_CC_IFE_0_CLK 39
+#define CAM_CC_IFE_0_CLK_SRC 40
+#define CAM_CC_IFE_0_CPHY_RX_CLK 41
+#define CAM_CC_IFE_0_CSID_CLK 42
+#define CAM_CC_IFE_0_CSID_CLK_SRC 43
+#define CAM_CC_IFE_0_DSP_CLK 44
+#define CAM_CC_IFE_1_AXI_CLK 45
+#define CAM_CC_IFE_1_CLK 46
+#define CAM_CC_IFE_1_CLK_SRC 47
+#define CAM_CC_IFE_1_CPHY_RX_CLK 48
+#define CAM_CC_IFE_1_CSID_CLK 49
+#define CAM_CC_IFE_1_CSID_CLK_SRC 50
+#define CAM_CC_IFE_1_DSP_CLK 51
+#define CAM_CC_IFE_2_AXI_CLK 52
+#define CAM_CC_IFE_2_CLK 53
+#define CAM_CC_IFE_2_CLK_SRC 54
+#define CAM_CC_IFE_2_CPHY_RX_CLK 55
+#define CAM_CC_IFE_2_CSID_CLK 56
+#define CAM_CC_IFE_2_CSID_CLK_SRC 57
+#define CAM_CC_IFE_2_DSP_CLK 58
+#define CAM_CC_IFE_3_AXI_CLK 59
+#define CAM_CC_IFE_3_CLK 60
+#define CAM_CC_IFE_3_CLK_SRC 61
+#define CAM_CC_IFE_3_CPHY_RX_CLK 62
+#define CAM_CC_IFE_3_CSID_CLK 63
+#define CAM_CC_IFE_3_CSID_CLK_SRC 64
+#define CAM_CC_IFE_3_DSP_CLK 65
+#define CAM_CC_IFE_LITE_0_CLK 66
+#define CAM_CC_IFE_LITE_0_CLK_SRC 67
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 68
+#define CAM_CC_IFE_LITE_0_CSID_CLK 69
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 70
+#define CAM_CC_IFE_LITE_1_CLK 71
+#define CAM_CC_IFE_LITE_1_CLK_SRC 72
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 73
+#define CAM_CC_IFE_LITE_1_CSID_CLK 74
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 75
+#define CAM_CC_IFE_LITE_2_CLK 76
+#define CAM_CC_IFE_LITE_2_CLK_SRC 77
+#define CAM_CC_IFE_LITE_2_CPHY_RX_CLK 78
+#define CAM_CC_IFE_LITE_2_CSID_CLK 79
+#define CAM_CC_IFE_LITE_2_CSID_CLK_SRC 80
+#define CAM_CC_IFE_LITE_3_CLK 81
+#define CAM_CC_IFE_LITE_3_CLK_SRC 82
+#define CAM_CC_IFE_LITE_3_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_3_CSID_CLK 84
+#define CAM_CC_IFE_LITE_3_CSID_CLK_SRC 85
+#define CAM_CC_IPE_0_AHB_CLK 86
+#define CAM_CC_IPE_0_AREG_CLK 87
+#define CAM_CC_IPE_0_AXI_CLK 88
+#define CAM_CC_IPE_0_CLK 89
+#define CAM_CC_IPE_0_CLK_SRC 90
+#define CAM_CC_IPE_1_AHB_CLK 91
+#define CAM_CC_IPE_1_AREG_CLK 92
+#define CAM_CC_IPE_1_AXI_CLK 93
+#define CAM_CC_IPE_1_CLK 94
+#define CAM_CC_JPEG_CLK 95
+#define CAM_CC_JPEG_CLK_SRC 96
+#define CAM_CC_LRME_CLK 97
+#define CAM_CC_LRME_CLK_SRC 98
+#define CAM_CC_MCLK0_CLK 99
+#define CAM_CC_MCLK0_CLK_SRC 100
+#define CAM_CC_MCLK1_CLK 101
+#define CAM_CC_MCLK1_CLK_SRC 102
+#define CAM_CC_MCLK2_CLK 103
+#define CAM_CC_MCLK2_CLK_SRC 104
+#define CAM_CC_MCLK3_CLK 105
+#define CAM_CC_MCLK3_CLK_SRC 106
+#define CAM_CC_MCLK4_CLK 107
+#define CAM_CC_MCLK4_CLK_SRC 108
+#define CAM_CC_MCLK5_CLK 109
+#define CAM_CC_MCLK5_CLK_SRC 110
+#define CAM_CC_MCLK6_CLK 111
+#define CAM_CC_MCLK6_CLK_SRC 112
+#define CAM_CC_MCLK7_CLK 113
+#define CAM_CC_MCLK7_CLK_SRC 114
+#define CAM_CC_PLL0 115
+#define CAM_CC_PLL0_OUT_EVEN 116
+#define CAM_CC_PLL0_OUT_ODD 117
+#define CAM_CC_PLL1 118
+#define CAM_CC_PLL2 119
+#define CAM_CC_PLL2_OUT_MAIN 120
+#define CAM_CC_PLL3 121
+#define CAM_CC_PLL4 122
+#define CAM_CC_PLL5 123
+#define CAM_CC_PLL6 124
+#define CAM_CC_SLOW_AHB_CLK_SRC 125
+#define CAM_CC_XO_CLK_SRC 126
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_2_BCR 12
+#define CAM_CC_IFE_3_BCR 13
+#define CAM_CC_IFE_LITE_0_BCR 14
+#define CAM_CC_IFE_LITE_1_BCR 15
+#define CAM_CC_IFE_LITE_2_BCR 16
+#define CAM_CC_IFE_LITE_3_BCR 17
+#define CAM_CC_IPE_0_BCR 18
+#define CAM_CC_IPE_1_BCR 19
+#define CAM_CC_JPEG_BCR 20
+#define CAM_CC_LRME_BCR 21
+#define CAM_CC_MCLK0_BCR 22
+#define CAM_CC_MCLK1_BCR 23
+#define CAM_CC_MCLK2_BCR 24
+#define CAM_CC_MCLK3_BCR 25
+#define CAM_CC_MCLK4_BCR 26
+#define CAM_CC_MCLK5_BCR 27
+#define CAM_CC_MCLK6_BCR 28
+#define CAM_CC_MCLK7_BCR 29
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-camcc.h b/include/dt-bindings/clock/qcom,sm4450-camcc.h
new file mode 100644
index 000000000000..bf077951bf1c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-camcc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_CLK 2
+#define CAM_CC_BPS_CLK_SRC 3
+#define CAM_CC_CAMNOC_ATB_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_AXI_HF_CLK 7
+#define CAM_CC_CAMNOC_AXI_SF_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPHY_RX_CLK_SRC 15
+#define CAM_CC_CRE_AHB_CLK 16
+#define CAM_CC_CRE_CLK 17
+#define CAM_CC_CRE_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSIPHY0_CLK 25
+#define CAM_CC_CSIPHY1_CLK 26
+#define CAM_CC_CSIPHY2_CLK 27
+#define CAM_CC_FAST_AHB_CLK_SRC 28
+#define CAM_CC_ICP_ATB_CLK 29
+#define CAM_CC_ICP_CLK 30
+#define CAM_CC_ICP_CLK_SRC 31
+#define CAM_CC_ICP_CTI_CLK 32
+#define CAM_CC_ICP_TS_CLK 33
+#define CAM_CC_MCLK0_CLK 34
+#define CAM_CC_MCLK0_CLK_SRC 35
+#define CAM_CC_MCLK1_CLK 36
+#define CAM_CC_MCLK1_CLK_SRC 37
+#define CAM_CC_MCLK2_CLK 38
+#define CAM_CC_MCLK2_CLK_SRC 39
+#define CAM_CC_MCLK3_CLK 40
+#define CAM_CC_MCLK3_CLK_SRC 41
+#define CAM_CC_OPE_0_AHB_CLK 42
+#define CAM_CC_OPE_0_AREG_CLK 43
+#define CAM_CC_OPE_0_CLK 44
+#define CAM_CC_OPE_0_CLK_SRC 45
+#define CAM_CC_PLL0 46
+#define CAM_CC_PLL0_OUT_EVEN 47
+#define CAM_CC_PLL0_OUT_ODD 48
+#define CAM_CC_PLL1 49
+#define CAM_CC_PLL1_OUT_EVEN 50
+#define CAM_CC_PLL2 51
+#define CAM_CC_PLL2_OUT_EVEN 52
+#define CAM_CC_PLL3 53
+#define CAM_CC_PLL3_OUT_EVEN 54
+#define CAM_CC_PLL4 55
+#define CAM_CC_PLL4_OUT_EVEN 56
+#define CAM_CC_SLOW_AHB_CLK_SRC 57
+#define CAM_CC_SOC_AHB_CLK 58
+#define CAM_CC_SYS_TMR_CLK 59
+#define CAM_CC_TFE_0_AHB_CLK 60
+#define CAM_CC_TFE_0_CLK 61
+#define CAM_CC_TFE_0_CLK_SRC 62
+#define CAM_CC_TFE_0_CPHY_RX_CLK 63
+#define CAM_CC_TFE_0_CSID_CLK 64
+#define CAM_CC_TFE_0_CSID_CLK_SRC 65
+#define CAM_CC_TFE_1_AHB_CLK 66
+#define CAM_CC_TFE_1_CLK 67
+#define CAM_CC_TFE_1_CLK_SRC 68
+#define CAM_CC_TFE_1_CPHY_RX_CLK 69
+#define CAM_CC_TFE_1_CSID_CLK 70
+#define CAM_CC_TFE_1_CSID_CLK_SRC 71
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_ICP_BCR 10
+#define CAM_CC_MCLK0_BCR 11
+#define CAM_CC_MCLK1_BCR 12
+#define CAM_CC_MCLK2_BCR 13
+#define CAM_CC_MCLK3_BCR 14
+#define CAM_CC_OPE_0_BCR 15
+#define CAM_CC_TFE_0_BCR 16
+#define CAM_CC_TFE_1_BCR 17
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-dispcc.h b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
new file mode 100644
index 000000000000..ca6f2ef90157
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP1_CLK 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT1_CLK 12
+#define DISP_CC_MDSS_MDP_LUT_CLK 13
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_ROT1_CLK 17
+#define DISP_CC_MDSS_ROT_CLK 18
+#define DISP_CC_MDSS_ROT_CLK_SRC 19
+#define DISP_CC_MDSS_RSCC_AHB_CLK 20
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 21
+#define DISP_CC_MDSS_VSYNC1_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_PLL1 26
+#define DISP_CC_SLEEP_CLK 27
+#define DISP_CC_SLEEP_CLK_SRC 28
+#define DISP_CC_XO_CLK 29
+#define DISP_CC_XO_CLK_SRC 30
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gpucc.h b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
new file mode 100644
index 000000000000..304f83e5f645
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GFX3D_SLV_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_CXO_CLK 15
+#define GPU_CC_GX_FF_CLK 16
+#define GPU_CC_GX_GFX3D_CLK 17
+#define GPU_CC_GX_GFX3D_CLK_SRC 18
+#define GPU_CC_GX_GFX3D_RDVM_CLK 19
+#define GPU_CC_GX_GMU_CLK 20
+#define GPU_CC_GX_VSENSE_CLK 21
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 22
+#define GPU_CC_HUB_AON_CLK 23
+#define GPU_CC_HUB_CLK_SRC 24
+#define GPU_CC_HUB_CX_INT_CLK 25
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 26
+#define GPU_CC_MEMNOC_GFX_CLK 27
+#define GPU_CC_MND1X_0_GFX3D_CLK 28
+#define GPU_CC_PLL0 29
+#define GPU_CC_PLL1 30
+#define GPU_CC_SLEEP_CLK 31
+#define GPU_CC_XO_CLK_SRC 32
+#define GPU_CC_XO_DIV_CLK_SRC 33
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+#define GPU_CC_GX_ACD_IROOT_BCR 9
+#define GPU_CC_RBCPR_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-lpasscc.h b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h
new file mode 100644
index 000000000000..799274517c9a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H
+
+/* LPASS CC */
+#define LPASS_SWR_TX_CONFIG_CGCR 0
+
+/* LPASS_AUDIO CC */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+
+#endif
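This file differs from the rest of the series in that it serves two separate providers — the LPASS CC and the LPASS_AUDIO CC — so both lists begin at 0 and the same numeric value means different things depending on which phandle precedes it. On related SoCs these CGCR cells are typically consumed through the resets property; a sketch under that assumption, with illustrative &lpasscc/&lpass_audiocc labels:

    #include <dt-bindings/clock/qcom,sm6115-lpasscc.h>

    /* Two different providers; both constants happen to be index 0. */
    resets = <&lpasscc LPASS_SWR_TX_CONFIG_CGCR>;          /* in one consumer node */
    resets = <&lpass_audiocc LPASS_AUDIO_SWR_RX_CGCR>;     /* in another */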
diff --git a/include/dt-bindings/clock/qcom,sm6350-videocc.h b/include/dt-bindings/clock/qcom,sm6350-videocc.h
new file mode 100644
index 000000000000..2af7f91fa023
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6350-videocc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6350_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_PLL0_OUT_EVEN 1
+#define VIDEO_CC_IRIS_AHB_CLK 2
+#define VIDEO_CC_IRIS_CLK_SRC 3
+#define VIDEO_CC_MVS0_AXI_CLK 4
+#define VIDEO_CC_MVS0_CORE_CLK 5
+#define VIDEO_CC_MVSC_CORE_CLK 6
+#define VIDEO_CC_MVSC_CTL_AXI_CLK 7
+#define VIDEO_CC_SLEEP_CLK 8
+#define VIDEO_CC_SLEEP_CLK_SRC 9
+#define VIDEO_CC_VENUS_AHB_CLK 10
+
+/* GDSCs */
+#define MVSC_GDSC 0
+#define MVS0_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-camcc.h b/include/dt-bindings/clock/qcom,sm7150-camcc.h
new file mode 100644
index 000000000000..ce73ef0fe95d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-camcc.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAMCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_CAMCC_SM7150_H
+
+/* Hardware clocks */
+#define CAMCC_PLL0_OUT_EVEN 0
+#define CAMCC_PLL0_OUT_ODD 1
+#define CAMCC_PLL1_OUT_EVEN 2
+#define CAMCC_PLL2_OUT_EARLY 3
+#define CAMCC_PLL3_OUT_EVEN 4
+#define CAMCC_PLL4_OUT_EVEN 5
+
+/* CAMCC clock registers */
+#define CAMCC_PLL0 6
+#define CAMCC_PLL1 7
+#define CAMCC_PLL2 8
+#define CAMCC_PLL2_OUT_AUX 9
+#define CAMCC_PLL2_OUT_MAIN 10
+#define CAMCC_PLL3 11
+#define CAMCC_PLL4 12
+#define CAMCC_BPS_AHB_CLK 13
+#define CAMCC_BPS_AREG_CLK 14
+#define CAMCC_BPS_AXI_CLK 15
+#define CAMCC_BPS_CLK 16
+#define CAMCC_BPS_CLK_SRC 17
+#define CAMCC_CAMNOC_AXI_CLK 18
+#define CAMCC_CAMNOC_AXI_CLK_SRC 19
+#define CAMCC_CAMNOC_DCD_XO_CLK 20
+#define CAMCC_CCI_0_CLK 21
+#define CAMCC_CCI_0_CLK_SRC 22
+#define CAMCC_CCI_1_CLK 23
+#define CAMCC_CCI_1_CLK_SRC 24
+#define CAMCC_CORE_AHB_CLK 25
+#define CAMCC_CPAS_AHB_CLK 26
+#define CAMCC_CPHY_RX_CLK_SRC 27
+#define CAMCC_CSI0PHYTIMER_CLK 28
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 29
+#define CAMCC_CSI1PHYTIMER_CLK 30
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 31
+#define CAMCC_CSI2PHYTIMER_CLK 32
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 33
+#define CAMCC_CSI3PHYTIMER_CLK 34
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 35
+#define CAMCC_CSIPHY0_CLK 36
+#define CAMCC_CSIPHY1_CLK 37
+#define CAMCC_CSIPHY2_CLK 38
+#define CAMCC_CSIPHY3_CLK 39
+#define CAMCC_FAST_AHB_CLK_SRC 40
+#define CAMCC_FD_CORE_CLK 41
+#define CAMCC_FD_CORE_CLK_SRC 42
+#define CAMCC_FD_CORE_UAR_CLK 43
+#define CAMCC_ICP_AHB_CLK 44
+#define CAMCC_ICP_CLK 45
+#define CAMCC_ICP_CLK_SRC 46
+#define CAMCC_IFE_0_AXI_CLK 47
+#define CAMCC_IFE_0_CLK 48
+#define CAMCC_IFE_0_CLK_SRC 49
+#define CAMCC_IFE_0_CPHY_RX_CLK 50
+#define CAMCC_IFE_0_CSID_CLK 51
+#define CAMCC_IFE_0_CSID_CLK_SRC 52
+#define CAMCC_IFE_0_DSP_CLK 53
+#define CAMCC_IFE_1_AXI_CLK 54
+#define CAMCC_IFE_1_CLK 55
+#define CAMCC_IFE_1_CLK_SRC 56
+#define CAMCC_IFE_1_CPHY_RX_CLK 57
+#define CAMCC_IFE_1_CSID_CLK 58
+#define CAMCC_IFE_1_CSID_CLK_SRC 59
+#define CAMCC_IFE_1_DSP_CLK 60
+#define CAMCC_IFE_LITE_CLK 61
+#define CAMCC_IFE_LITE_CLK_SRC 62
+#define CAMCC_IFE_LITE_CPHY_RX_CLK 63
+#define CAMCC_IFE_LITE_CSID_CLK 64
+#define CAMCC_IFE_LITE_CSID_CLK_SRC 65
+#define CAMCC_IPE_0_AHB_CLK 66
+#define CAMCC_IPE_0_AREG_CLK 67
+#define CAMCC_IPE_0_AXI_CLK 68
+#define CAMCC_IPE_0_CLK 69
+#define CAMCC_IPE_0_CLK_SRC 70
+#define CAMCC_IPE_1_AHB_CLK 71
+#define CAMCC_IPE_1_AREG_CLK 72
+#define CAMCC_IPE_1_AXI_CLK 73
+#define CAMCC_IPE_1_CLK 74
+#define CAMCC_JPEG_CLK 75
+#define CAMCC_JPEG_CLK_SRC 76
+#define CAMCC_LRME_CLK 77
+#define CAMCC_LRME_CLK_SRC 78
+#define CAMCC_MCLK0_CLK 79
+#define CAMCC_MCLK0_CLK_SRC 80
+#define CAMCC_MCLK1_CLK 81
+#define CAMCC_MCLK1_CLK_SRC 82
+#define CAMCC_MCLK2_CLK 83
+#define CAMCC_MCLK2_CLK_SRC 84
+#define CAMCC_MCLK3_CLK 85
+#define CAMCC_MCLK3_CLK_SRC 86
+#define CAMCC_SLEEP_CLK 87
+#define CAMCC_SLEEP_CLK_SRC 88
+#define CAMCC_SLOW_AHB_CLK_SRC 89
+#define CAMCC_XO_CLK_SRC 90
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define IPE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-dispcc.h b/include/dt-bindings/clock/qcom,sm7150-dispcc.h
new file mode 100644
index 000000000000..1e4e6432d506
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-dispcc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ * Copyright (c) 2024, David Wronek <david@mainlining.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISPCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_DISPCC_SM7150_H
+
+/* DISPCC clock registers */
+#define DISPCC_PLL0 0
+#define DISPCC_MDSS_AHB_CLK 1
+#define DISPCC_MDSS_AHB_CLK_SRC 2
+#define DISPCC_MDSS_BYTE0_CLK 3
+#define DISPCC_MDSS_BYTE0_CLK_SRC 4
+#define DISPCC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISPCC_MDSS_BYTE0_INTF_CLK 6
+#define DISPCC_MDSS_BYTE1_CLK 7
+#define DISPCC_MDSS_BYTE1_CLK_SRC 8
+#define DISPCC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISPCC_MDSS_BYTE1_INTF_CLK 10
+#define DISPCC_MDSS_DP_AUX_CLK 11
+#define DISPCC_MDSS_DP_AUX_CLK_SRC 12
+#define DISPCC_MDSS_DP_CRYPTO_CLK 13
+#define DISPCC_MDSS_DP_CRYPTO_CLK_SRC 14
+#define DISPCC_MDSS_DP_LINK_CLK 15
+#define DISPCC_MDSS_DP_LINK_CLK_SRC 16
+#define DISPCC_MDSS_DP_LINK_INTF_CLK 17
+#define DISPCC_MDSS_DP_PIXEL1_CLK 18
+#define DISPCC_MDSS_DP_PIXEL1_CLK_SRC 19
+#define DISPCC_MDSS_DP_PIXEL_CLK 20
+#define DISPCC_MDSS_DP_PIXEL_CLK_SRC 21
+#define DISPCC_MDSS_ESC0_CLK 22
+#define DISPCC_MDSS_ESC0_CLK_SRC 23
+#define DISPCC_MDSS_ESC1_CLK 24
+#define DISPCC_MDSS_ESC1_CLK_SRC 25
+#define DISPCC_MDSS_MDP_CLK 26
+#define DISPCC_MDSS_MDP_CLK_SRC 27
+#define DISPCC_MDSS_MDP_LUT_CLK 28
+#define DISPCC_MDSS_NON_GDSC_AHB_CLK 29
+#define DISPCC_MDSS_PCLK0_CLK 30
+#define DISPCC_MDSS_PCLK0_CLK_SRC 31
+#define DISPCC_MDSS_PCLK1_CLK 32
+#define DISPCC_MDSS_PCLK1_CLK_SRC 33
+#define DISPCC_MDSS_ROT_CLK 34
+#define DISPCC_MDSS_ROT_CLK_SRC 35
+#define DISPCC_MDSS_RSCC_AHB_CLK 36
+#define DISPCC_MDSS_RSCC_VSYNC_CLK 37
+#define DISPCC_MDSS_VSYNC_CLK 38
+#define DISPCC_MDSS_VSYNC_CLK_SRC 39
+#define DISPCC_XO_CLK_SRC 40
+#define DISPCC_SLEEP_CLK 41
+#define DISPCC_SLEEP_CLK_SRC 42
+
+/* DISPCC resets */
+#define DISPCC_MDSS_CORE_BCR 0
+
+/* DISPCC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-videocc.h b/include/dt-bindings/clock/qcom,sm7150-videocc.h
new file mode 100644
index 000000000000..d86e0fbb159a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-videocc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEOCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEOCC_SM7150_H
+
+#define VIDEOCC_PLL0 0
+#define VIDEOCC_IRIS_AHB_CLK 1
+#define VIDEOCC_IRIS_CLK_SRC 2
+#define VIDEOCC_MVS0_AXI_CLK 3
+#define VIDEOCC_MVS0_CORE_CLK 4
+#define VIDEOCC_MVS1_AXI_CLK 5
+#define VIDEOCC_MVS1_CORE_CLK 6
+#define VIDEOCC_MVSC_CORE_CLK 7
+#define VIDEOCC_MVSC_CTL_AXI_CLK 8
+#define VIDEOCC_VENUS_AHB_CLK 9
+#define VIDEOCC_XO_CLK 10
+#define VIDEOCC_XO_CLK_SRC 11
+
+/* VIDEOCC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8150-camcc.h b/include/dt-bindings/clock/qcom,sm8150-camcc.h
new file mode 100644
index 000000000000..5444035efa93
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8150-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_MAIN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_BPS_AHB_CLK 11
+#define CAM_CC_BPS_AREG_CLK 12
+#define CAM_CC_BPS_AXI_CLK 13
+#define CAM_CC_BPS_CLK 14
+#define CAM_CC_BPS_CLK_SRC 15
+#define CAM_CC_CAMNOC_AXI_CLK 16
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 17
+#define CAM_CC_CAMNOC_DCD_XO_CLK 18
+#define CAM_CC_CCI_0_CLK 19
+#define CAM_CC_CCI_0_CLK_SRC 20
+#define CAM_CC_CCI_1_CLK 21
+#define CAM_CC_CCI_1_CLK_SRC 22
+#define CAM_CC_CORE_AHB_CLK 23
+#define CAM_CC_CPAS_AHB_CLK 24
+#define CAM_CC_CPHY_RX_CLK_SRC 25
+#define CAM_CC_CSI0PHYTIMER_CLK 26
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI1PHYTIMER_CLK 28
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI2PHYTIMER_CLK 30
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI3PHYTIMER_CLK 32
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSIPHY0_CLK 34
+#define CAM_CC_CSIPHY1_CLK 35
+#define CAM_CC_CSIPHY2_CLK 36
+#define CAM_CC_CSIPHY3_CLK 37
+#define CAM_CC_FAST_AHB_CLK_SRC 38
+#define CAM_CC_FD_CORE_CLK 39
+#define CAM_CC_FD_CORE_CLK_SRC 40
+#define CAM_CC_FD_CORE_UAR_CLK 41
+#define CAM_CC_GDSC_CLK 42
+#define CAM_CC_ICP_AHB_CLK 43
+#define CAM_CC_ICP_CLK 44
+#define CAM_CC_ICP_CLK_SRC 45
+#define CAM_CC_IFE_0_AXI_CLK 46
+#define CAM_CC_IFE_0_CLK 47
+#define CAM_CC_IFE_0_CLK_SRC 48
+#define CAM_CC_IFE_0_CPHY_RX_CLK 49
+#define CAM_CC_IFE_0_CSID_CLK 50
+#define CAM_CC_IFE_0_CSID_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_1_AXI_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_CPHY_RX_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK 57
+#define CAM_CC_IFE_1_CSID_CLK_SRC 58
+#define CAM_CC_IFE_1_DSP_CLK 59
+#define CAM_CC_IFE_LITE_0_CLK 60
+#define CAM_CC_IFE_LITE_0_CLK_SRC 61
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 62
+#define CAM_CC_IFE_LITE_0_CSID_CLK 63
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 64
+#define CAM_CC_IFE_LITE_1_CLK 65
+#define CAM_CC_IFE_LITE_1_CLK_SRC 66
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 67
+#define CAM_CC_IFE_LITE_1_CSID_CLK 68
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 69
+#define CAM_CC_IPE_0_AHB_CLK 70
+#define CAM_CC_IPE_0_AREG_CLK 71
+#define CAM_CC_IPE_0_AXI_CLK 72
+#define CAM_CC_IPE_0_CLK 73
+#define CAM_CC_IPE_0_CLK_SRC 74
+#define CAM_CC_IPE_1_AHB_CLK 75
+#define CAM_CC_IPE_1_AREG_CLK 76
+#define CAM_CC_IPE_1_AXI_CLK 77
+#define CAM_CC_IPE_1_CLK 78
+#define CAM_CC_JPEG_CLK 79
+#define CAM_CC_JPEG_CLK_SRC 80
+#define CAM_CC_LRME_CLK 81
+#define CAM_CC_LRME_CLK_SRC 82
+#define CAM_CC_MCLK0_CLK 83
+#define CAM_CC_MCLK0_CLK_SRC 84
+#define CAM_CC_MCLK1_CLK 85
+#define CAM_CC_MCLK1_CLK_SRC 86
+#define CAM_CC_MCLK2_CLK 87
+#define CAM_CC_MCLK2_CLK_SRC 88
+#define CAM_CC_MCLK3_CLK 89
+#define CAM_CC_MCLK3_CLK_SRC 90
+#define CAM_CC_SLOW_AHB_CLK_SRC 91
+
+/* CAM_CC power domains */
+#define TITAN_TOP_GDSC 0
+#define BPS_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IPE_0_GDSC 4
+#define IPE_1_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_0_BCR 12
+#define CAM_CC_IFE_LITE_1_BCR 13
+#define CAM_CC_IPE_0_BCR 14
+#define CAM_CC_IPE_1_BCR 15
+#define CAM_CC_JPEG_BCR 16
+#define CAM_CC_LRME_BCR 17
+#define CAM_CC_MCLK0_BCR 18
+#define CAM_CC_MCLK1_BCR 19
+#define CAM_CC_MCLK2_BCR 20
+#define CAM_CC_MCLK3_BCR 21
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-camcc.h b/include/dt-bindings/clock/qcom,sm8650-camcc.h
new file mode 100644
index 000000000000..df73bf35f4bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-camcc.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_BPS_SHIFT_CLK 4
+#define CAM_CC_CAMNOC_AXI_NRT_CLK 5
+#define CAM_CC_CAMNOC_AXI_RT_CLK 6
+#define CAM_CC_CAMNOC_AXI_RT_CLK_SRC 7
+#define CAM_CC_CAMNOC_DCD_XO_CLK 8
+#define CAM_CC_CAMNOC_XO_CLK 9
+#define CAM_CC_CCI_0_CLK 10
+#define CAM_CC_CCI_0_CLK_SRC 11
+#define CAM_CC_CCI_1_CLK 12
+#define CAM_CC_CCI_1_CLK_SRC 13
+#define CAM_CC_CCI_2_CLK 14
+#define CAM_CC_CCI_2_CLK_SRC 15
+#define CAM_CC_CORE_AHB_CLK 16
+#define CAM_CC_CPAS_AHB_CLK 17
+#define CAM_CC_CPAS_BPS_CLK 18
+#define CAM_CC_CPAS_CRE_CLK 19
+#define CAM_CC_CPAS_FAST_AHB_CLK 20
+#define CAM_CC_CPAS_IFE_0_CLK 21
+#define CAM_CC_CPAS_IFE_1_CLK 22
+#define CAM_CC_CPAS_IFE_2_CLK 23
+#define CAM_CC_CPAS_IFE_LITE_CLK 24
+#define CAM_CC_CPAS_IPE_NPS_CLK 25
+#define CAM_CC_CPAS_SBI_CLK 26
+#define CAM_CC_CPAS_SFE_0_CLK 27
+#define CAM_CC_CPAS_SFE_1_CLK 28
+#define CAM_CC_CPAS_SFE_2_CLK 29
+#define CAM_CC_CPHY_RX_CLK_SRC 30
+#define CAM_CC_CRE_AHB_CLK 31
+#define CAM_CC_CRE_CLK 32
+#define CAM_CC_CRE_CLK_SRC 33
+#define CAM_CC_CSI0PHYTIMER_CLK 34
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI1PHYTIMER_CLK 36
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI2PHYTIMER_CLK 38
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI3PHYTIMER_CLK 40
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSI4PHYTIMER_CLK 42
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 43
+#define CAM_CC_CSI5PHYTIMER_CLK 44
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 45
+#define CAM_CC_CSI6PHYTIMER_CLK 46
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 47
+#define CAM_CC_CSI7PHYTIMER_CLK 48
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 49
+#define CAM_CC_CSID_CLK 50
+#define CAM_CC_CSID_CLK_SRC 51
+#define CAM_CC_CSID_CSIPHY_RX_CLK 52
+#define CAM_CC_CSIPHY0_CLK 53
+#define CAM_CC_CSIPHY1_CLK 54
+#define CAM_CC_CSIPHY2_CLK 55
+#define CAM_CC_CSIPHY3_CLK 56
+#define CAM_CC_CSIPHY4_CLK 57
+#define CAM_CC_CSIPHY5_CLK 58
+#define CAM_CC_CSIPHY6_CLK 59
+#define CAM_CC_CSIPHY7_CLK 60
+#define CAM_CC_DRV_AHB_CLK 61
+#define CAM_CC_DRV_XO_CLK 62
+#define CAM_CC_FAST_AHB_CLK_SRC 63
+#define CAM_CC_GDSC_CLK 64
+#define CAM_CC_ICP_AHB_CLK 65
+#define CAM_CC_ICP_CLK 66
+#define CAM_CC_ICP_CLK_SRC 67
+#define CAM_CC_IFE_0_CLK 68
+#define CAM_CC_IFE_0_CLK_SRC 69
+#define CAM_CC_IFE_0_FAST_AHB_CLK 70
+#define CAM_CC_IFE_0_SHIFT_CLK 71
+#define CAM_CC_IFE_1_CLK 72
+#define CAM_CC_IFE_1_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_1_SHIFT_CLK 75
+#define CAM_CC_IFE_2_CLK 76
+#define CAM_CC_IFE_2_CLK_SRC 77
+#define CAM_CC_IFE_2_FAST_AHB_CLK 78
+#define CAM_CC_IFE_2_SHIFT_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_IPE_SHIFT_CLK 92
+#define CAM_CC_JPEG_1_CLK 93
+#define CAM_CC_JPEG_CLK 94
+#define CAM_CC_JPEG_CLK_SRC 95
+#define CAM_CC_MCLK0_CLK 96
+#define CAM_CC_MCLK0_CLK_SRC 97
+#define CAM_CC_MCLK1_CLK 98
+#define CAM_CC_MCLK1_CLK_SRC 99
+#define CAM_CC_MCLK2_CLK 100
+#define CAM_CC_MCLK2_CLK_SRC 101
+#define CAM_CC_MCLK3_CLK 102
+#define CAM_CC_MCLK3_CLK_SRC 103
+#define CAM_CC_MCLK4_CLK 104
+#define CAM_CC_MCLK4_CLK_SRC 105
+#define CAM_CC_MCLK5_CLK 106
+#define CAM_CC_MCLK5_CLK_SRC 107
+#define CAM_CC_MCLK6_CLK 108
+#define CAM_CC_MCLK6_CLK_SRC 109
+#define CAM_CC_MCLK7_CLK 110
+#define CAM_CC_MCLK7_CLK_SRC 111
+#define CAM_CC_PLL0 112
+#define CAM_CC_PLL0_OUT_EVEN 113
+#define CAM_CC_PLL0_OUT_ODD 114
+#define CAM_CC_PLL1 115
+#define CAM_CC_PLL1_OUT_EVEN 116
+#define CAM_CC_PLL2 117
+#define CAM_CC_PLL3 118
+#define CAM_CC_PLL3_OUT_EVEN 119
+#define CAM_CC_PLL4 120
+#define CAM_CC_PLL4_OUT_EVEN 121
+#define CAM_CC_PLL5 122
+#define CAM_CC_PLL5_OUT_EVEN 123
+#define CAM_CC_PLL6 124
+#define CAM_CC_PLL6_OUT_EVEN 125
+#define CAM_CC_PLL7 126
+#define CAM_CC_PLL7_OUT_EVEN 127
+#define CAM_CC_PLL8 128
+#define CAM_CC_PLL8_OUT_EVEN 129
+#define CAM_CC_PLL9 130
+#define CAM_CC_PLL9_OUT_EVEN 131
+#define CAM_CC_PLL9_OUT_ODD 132
+#define CAM_CC_PLL10 133
+#define CAM_CC_PLL10_OUT_EVEN 134
+#define CAM_CC_QDSS_DEBUG_CLK 135
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 136
+#define CAM_CC_QDSS_DEBUG_XO_CLK 137
+#define CAM_CC_SBI_CLK 138
+#define CAM_CC_SBI_FAST_AHB_CLK 139
+#define CAM_CC_SBI_SHIFT_CLK 140
+#define CAM_CC_SFE_0_CLK 141
+#define CAM_CC_SFE_0_CLK_SRC 142
+#define CAM_CC_SFE_0_FAST_AHB_CLK 143
+#define CAM_CC_SFE_0_SHIFT_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SFE_1_SHIFT_CLK 148
+#define CAM_CC_SFE_2_CLK 149
+#define CAM_CC_SFE_2_CLK_SRC 150
+#define CAM_CC_SFE_2_FAST_AHB_CLK 151
+#define CAM_CC_SFE_2_SHIFT_CLK 152
+#define CAM_CC_SLEEP_CLK 153
+#define CAM_CC_SLEEP_CLK_SRC 154
+#define CAM_CC_SLOW_AHB_CLK_SRC 155
+#define CAM_CC_TITAN_TOP_SHIFT_CLK 156
+#define CAM_CC_XO_CLK_SRC 157
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+#define CAM_CC_BPS_GDSC 1
+#define CAM_CC_IFE_0_GDSC 2
+#define CAM_CC_IFE_1_GDSC 3
+#define CAM_CC_IFE_2_GDSC 4
+#define CAM_CC_IPE_0_GDSC 5
+#define CAM_CC_SBI_GDSC 6
+#define CAM_CC_SFE_0_GDSC 7
+#define CAM_CC_SFE_1_GDSC 8
+#define CAM_CC_SFE_2_GDSC 9
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+#define CAM_CC_SFE_2_BCR 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
index b0a668b395a5..c0a291188f28 100644..120000
--- a/include/dt-bindings/clock/qcom,sm8650-dispcc.h
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -1,102 +1 @@
-/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
- * Copyright (c) 2023, Linaro Ltd.
- */
-
-#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
-
-/* DISP_CC clocks */
-#define DISP_CC_MDSS_ACCU_CLK 0
-#define DISP_CC_MDSS_AHB1_CLK 1
-#define DISP_CC_MDSS_AHB_CLK 2
-#define DISP_CC_MDSS_AHB_CLK_SRC 3
-#define DISP_CC_MDSS_BYTE0_CLK 4
-#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
-#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
-#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
-#define DISP_CC_MDSS_BYTE1_CLK 8
-#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
-#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
-#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
-#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
-#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
-#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
-#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
-#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
-#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
-#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
-#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
-#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
-#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
-#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
-#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
-#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
-#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
-#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
-#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
-#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
-#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
-#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
-#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
-#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
-#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
-#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
-#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
-#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
-#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
-#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
-#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
-#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
-#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
-#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
-#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
-#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
-#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
-#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
-#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
-#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
-#define DISP_CC_MDSS_ESC0_CLK 56
-#define DISP_CC_MDSS_ESC0_CLK_SRC 57
-#define DISP_CC_MDSS_ESC1_CLK 58
-#define DISP_CC_MDSS_ESC1_CLK_SRC 59
-#define DISP_CC_MDSS_MDP1_CLK 60
-#define DISP_CC_MDSS_MDP_CLK 61
-#define DISP_CC_MDSS_MDP_CLK_SRC 62
-#define DISP_CC_MDSS_MDP_LUT1_CLK 63
-#define DISP_CC_MDSS_MDP_LUT_CLK 64
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
-#define DISP_CC_MDSS_PCLK0_CLK 66
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
-#define DISP_CC_MDSS_PCLK1_CLK 68
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
-#define DISP_CC_MDSS_RSCC_AHB_CLK 70
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
-#define DISP_CC_MDSS_VSYNC1_CLK 72
-#define DISP_CC_MDSS_VSYNC_CLK 73
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
-#define DISP_CC_PLL0 75
-#define DISP_CC_PLL1 76
-#define DISP_CC_SLEEP_CLK 77
-#define DISP_CC_SLEEP_CLK_SRC 78
-#define DISP_CC_XO_CLK 79
-#define DISP_CC_XO_CLK_SRC 80
-
-/* DISP_CC resets */
-#define DISP_CC_MDSS_CORE_BCR 0
-#define DISP_CC_MDSS_CORE_INT2_BCR 1
-#define DISP_CC_MDSS_RSCC_BCR 2
-
-/* DISP_CC GDSCR */
-#define MDSS_GDSC 0
-#define MDSS_INT2_GDSC 1
-
-#endif
+qcom,sm8550-dispcc.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/qcom,sm8650-videocc.h b/include/dt-bindings/clock/qcom,sm8650-videocc.h
new file mode 100644
index 000000000000..4e3c2d87280f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-videocc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+
+#include "qcom,sm8450-videocc.h"
+
+/* SM8650 introduces the following new clocks and resets compared to SM8450 */
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_SHIFT_CLK 12
+#define VIDEO_CC_MVS0C_SHIFT_CLK 13
+#define VIDEO_CC_MVS1_SHIFT_CLK 14
+#define VIDEO_CC_MVS1C_SHIFT_CLK 15
+#define VIDEO_CC_XO_CLK_SRC 16
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_XO_CLK_ARES 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-dispcc.h b/include/dt-bindings/clock/qcom,sm8750-dispcc.h
new file mode 100644
index 000000000000..dafb5069c96a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-dispcc.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_ESYNC0_CLK 0
+#define DISP_CC_ESYNC0_CLK_SRC 1
+#define DISP_CC_ESYNC1_CLK 2
+#define DISP_CC_ESYNC1_CLK_SRC 3
+#define DISP_CC_MDSS_ACCU_SHIFT_CLK 4
+#define DISP_CC_MDSS_AHB1_CLK 5
+#define DISP_CC_MDSS_AHB_CLK 6
+#define DISP_CC_MDSS_AHB_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_CLK 8
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK 12
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 16
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 18
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 19
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 25
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 26
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 28
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 31
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 35
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 37
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 39
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 40
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 42
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 43
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 46
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 47
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 49
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 51
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 53
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 54
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 55
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 56
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 57
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 58
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 59
+#define DISP_CC_MDSS_ESC0_CLK 60
+#define DISP_CC_MDSS_ESC0_CLK_SRC 61
+#define DISP_CC_MDSS_ESC1_CLK 62
+#define DISP_CC_MDSS_ESC1_CLK_SRC 63
+#define DISP_CC_MDSS_MDP1_CLK 64
+#define DISP_CC_MDSS_MDP_CLK 65
+#define DISP_CC_MDSS_MDP_CLK_SRC 66
+#define DISP_CC_MDSS_MDP_LUT1_CLK 67
+#define DISP_CC_MDSS_MDP_LUT_CLK 68
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 69
+#define DISP_CC_MDSS_PCLK0_CLK 70
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 71
+#define DISP_CC_MDSS_PCLK1_CLK 72
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 73
+#define DISP_CC_MDSS_PCLK2_CLK 74
+#define DISP_CC_MDSS_PCLK2_CLK_SRC 75
+#define DISP_CC_MDSS_RSCC_AHB_CLK 76
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 77
+#define DISP_CC_MDSS_VSYNC1_CLK 78
+#define DISP_CC_MDSS_VSYNC_CLK 79
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 80
+#define DISP_CC_OSC_CLK 81
+#define DISP_CC_OSC_CLK_SRC 82
+#define DISP_CC_PLL0 83
+#define DISP_CC_PLL1 84
+#define DISP_CC_PLL2 85
+#define DISP_CC_SLEEP_CLK 86
+#define DISP_CC_SLEEP_CLK_SRC 87
+#define DISP_CC_XO_CLK 88
+#define DISP_CC_XO_CLK_SRC 89
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-gcc.h b/include/dt-bindings/clock/qcom,sm8750-gcc.h
new file mode 100644
index 000000000000..e234595d7f42
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-gcc.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAM_BIST_MCLK_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_HF_AXI_CLK 7
+#define GCC_CAMERA_SF_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_CNOC_PCIE_SF_AXI_CLK 12
+#define GCC_DDRSS_GPU_AXI_CLK 13
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 14
+#define GCC_DISP_AHB_CLK 15
+#define GCC_DISP_HF_AXI_CLK 16
+#define GCC_EVA_AHB_CLK 17
+#define GCC_EVA_AXI0_CLK 18
+#define GCC_EVA_AXI0C_CLK 19
+#define GCC_EVA_XO_CLK 20
+#define GCC_GP1_CLK 21
+#define GCC_GP1_CLK_SRC 22
+#define GCC_GP2_CLK 23
+#define GCC_GP2_CLK_SRC 24
+#define GCC_GP3_CLK 25
+#define GCC_GP3_CLK_SRC 26
+#define GCC_GPLL0 27
+#define GCC_GPLL0_OUT_EVEN 28
+#define GCC_GPLL1 29
+#define GCC_GPLL4 30
+#define GCC_GPLL7 31
+#define GCC_GPLL9 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GEMNOC_GFX_CLK 34
+#define GCC_GPU_GPLL0_CLK_SRC 35
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_SLV_AXI_CLK 45
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 46
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 47
+#define GCC_PCIE_RSCC_XO_CLK 48
+#define GCC_PDM2_CLK 49
+#define GCC_PDM2_CLK_SRC 50
+#define GCC_PDM_AHB_CLK 51
+#define GCC_PDM_XO4_CLK 52
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 53
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 54
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 55
+#define GCC_QMIP_GPU_AHB_CLK 56
+#define GCC_QMIP_PCIE_AHB_CLK 57
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 58
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 59
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 60
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 61
+#define GCC_QUPV3_I2C_CORE_CLK 62
+#define GCC_QUPV3_I2C_S0_CLK 63
+#define GCC_QUPV3_I2C_S0_CLK_SRC 64
+#define GCC_QUPV3_I2C_S1_CLK 65
+#define GCC_QUPV3_I2C_S1_CLK_SRC 66
+#define GCC_QUPV3_I2C_S2_CLK 67
+#define GCC_QUPV3_I2C_S2_CLK_SRC 68
+#define GCC_QUPV3_I2C_S3_CLK 69
+#define GCC_QUPV3_I2C_S3_CLK_SRC 70
+#define GCC_QUPV3_I2C_S4_CLK 71
+#define GCC_QUPV3_I2C_S4_CLK_SRC 72
+#define GCC_QUPV3_I2C_S5_CLK 73
+#define GCC_QUPV3_I2C_S5_CLK_SRC 74
+#define GCC_QUPV3_I2C_S6_CLK 75
+#define GCC_QUPV3_I2C_S6_CLK_SRC 76
+#define GCC_QUPV3_I2C_S7_CLK 77
+#define GCC_QUPV3_I2C_S7_CLK_SRC 78
+#define GCC_QUPV3_I2C_S8_CLK 79
+#define GCC_QUPV3_I2C_S8_CLK_SRC 80
+#define GCC_QUPV3_I2C_S9_CLK 81
+#define GCC_QUPV3_I2C_S9_CLK_SRC 82
+#define GCC_QUPV3_I2C_S_AHB_CLK 83
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 84
+#define GCC_QUPV3_WRAP1_CORE_CLK 85
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 86
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S0_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S1_CLK 90
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S2_CLK 92
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S3_CLK 94
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S4_CLK 96
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S5_CLK 98
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S6_CLK 100
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S7_CLK 102
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 104
+#define GCC_QUPV3_WRAP2_CORE_CLK 105
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 106
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 107
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 108
+#define GCC_QUPV3_WRAP2_S0_CLK 109
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP2_S1_CLK 111
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S2_CLK 113
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S3_CLK 115
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S4_CLK 117
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S5_CLK 119
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S6_CLK 121
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S7_CLK 123
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 124
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 125
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 126
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 127
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 128
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 129
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 130
+#define GCC_SDCC2_AHB_CLK 131
+#define GCC_SDCC2_APPS_CLK 132
+#define GCC_SDCC2_APPS_CLK_SRC 133
+#define GCC_SDCC4_AHB_CLK 134
+#define GCC_SDCC4_APPS_CLK 135
+#define GCC_SDCC4_APPS_CLK_SRC 136
+#define GCC_UFS_PHY_AHB_CLK 137
+#define GCC_UFS_PHY_AXI_CLK 138
+#define GCC_UFS_PHY_AXI_CLK_SRC 139
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_ICE_CORE_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 142
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 143
+#define GCC_UFS_PHY_PHY_AUX_CLK 144
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 145
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 148
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 149
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 150
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 151
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 152
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 153
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 154
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 155
+#define GCC_USB30_PRIM_MASTER_CLK 156
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 157
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 158
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 159
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 160
+#define GCC_USB30_PRIM_SLEEP_CLK 161
+#define GCC_USB3_PRIM_PHY_AUX_CLK 162
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 163
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 164
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 166
+#define GCC_VIDEO_AHB_CLK 167
+#define GCC_VIDEO_AXI0_CLK 168
+#define GCC_VIDEO_AXI1_CLK 169
+#define GCC_VIDEO_XO_CLK 170
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_0_PHY_GDSC 1
+#define GCC_UFS_MEM_PHY_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB30_PRIM_GDSC 4
+#define GCC_USB3_PHY_GDSC 5
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_EVA_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PCIE_PHY_CFG_AHB_BCR 10
+#define GCC_PCIE_PHY_COM_BCR 11
+#define GCC_PCIE_RSCC_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_QUPV3_WRAPPER_1_BCR 14
+#define GCC_QUPV3_WRAPPER_2_BCR 15
+#define GCC_QUPV3_WRAPPER_I2C_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC2_BCR 19
+#define GCC_SDCC4_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_DP_PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_BCR 25
+#define GCC_USB3_PHY_SEC_BCR 26
+#define GCC_USB3PHY_PHY_PRIM_BCR 27
+#define GCC_USB3PHY_PHY_SEC_BCR 28
+#define GCC_VIDEO_AXI0_CLK_ARES 29
+#define GCC_VIDEO_AXI1_CLK_ARES 30
+#define GCC_VIDEO_BCR 31
+#define GCC_EVA_AXI0_CLK_ARES 32
+#define GCC_EVA_AXI0C_CLK_ARES 33
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-tcsr.h b/include/dt-bindings/clock/qcom,sm8750-tcsr.h
new file mode 100644
index 000000000000..1c502ac7c7f4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-tcsr.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H
+
+/* TCSR_CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_UFS_CLKREF_EN 1
+#define TCSR_USB2_CLKREF_EN 2
+#define TCSR_USB3_CLKREF_EN 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-videocc.h b/include/dt-bindings/clock/qcom,sm8750-videocc.h
new file mode 100644
index 000000000000..f3bfa2ba5160
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-videocc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8750_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_AHB_CLK 0
+#define VIDEO_CC_AHB_CLK_SRC 1
+#define VIDEO_CC_MVS0_CLK 2
+#define VIDEO_CC_MVS0_CLK_SRC 3
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS0_FREERUN_CLK 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_FREERUN_CLK 9
+#define VIDEO_CC_MVS0C_SHIFT_CLK 10
+#define VIDEO_CC_PLL0 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK 14
+#define VIDEO_CC_XO_CLK_SRC 15
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+#define VIDEO_CC_MVS0_FREERUN_CLK_ARES 4
+#define VIDEO_CC_MVS0C_FREERUN_CLK_ARES 5
+#define VIDEO_CC_XO_CLK_ARES 6
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-dispcc.h b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
index d4a83e4fd0d1..49b3a9e5ce4a 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
@@ -90,6 +90,9 @@
#define DISP_CC_MDSS_CORE_BCR 0
#define DISP_CC_MDSS_CORE_INT2_BCR 1
#define DISP_CC_MDSS_RSCC_BCR 2
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK_ARES 3
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK_ARES 4
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK_ARES 5

/* DISP_CC GDSCR */
#define MDSS_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
index 24ba9e2a5cf6..62aa12425592 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -363,6 +363,30 @@
#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353
#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354
#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 356
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 357
+#define GCC_USB34_TERT_PHY_PIPE_CLK_SRC 358
+#define GCC_USB4_0_PHY_DP0_CLK_SRC 359
+#define GCC_USB4_0_PHY_DP1_CLK_SRC 360
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC 361
+#define GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC 362
+#define GCC_USB4_0_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_0_PHY_RX1_CLK_SRC 364
+#define GCC_USB4_0_PHY_SYS_CLK_SRC 365
+#define GCC_USB4_1_PHY_DP0_CLK_SRC 366
+#define GCC_USB4_1_PHY_DP1_CLK_SRC 367
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 368
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 369
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 370
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 371
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 372
+#define GCC_USB4_2_PHY_DP0_CLK_SRC 373
+#define GCC_USB4_2_PHY_DP1_CLK_SRC 374
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC 375
+#define GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC 376
+#define GCC_USB4_2_PHY_RX0_CLK_SRC 377
+#define GCC_USB4_2_PHY_RX1_CLK_SRC 378
+#define GCC_USB4_2_PHY_SYS_CLK_SRC 379

/* GCC power domains */
#define GCC_PCIE_0_TUNNEL_GDSC 0
@@ -482,4 +506,43 @@
#define GCC_USB_1_PHY_BCR 85
#define GCC_USB_2_PHY_BCR 86
#define GCC_VIDEO_BCR 87
+#define GCC_VIDEO_AXI0_CLK_ARES 88
+#define GCC_VIDEO_AXI1_CLK_ARES 89
+#define GCC_USB4_0_MISC_USB4_SYS_BCR 90
+#define GCC_USB4_0_MISC_RX_CLK_0_BCR 91
+#define GCC_USB4_0_MISC_RX_CLK_1_BCR 92
+#define GCC_USB4_0_MISC_USB_PIPE_BCR 93
+#define GCC_USB4_0_MISC_PCIE_PIPE_BCR 94
+#define GCC_USB4_0_MISC_TMU_BCR 95
+#define GCC_USB4_0_MISC_SB_IF_BCR 96
+#define GCC_USB4_0_MISC_HIA_MSTR_BCR 97
+#define GCC_USB4_0_MISC_AHB_BCR 98
+#define GCC_USB4_0_MISC_DP0_MAX_PCLK_BCR 99
+#define GCC_USB4_0_MISC_DP1_MAX_PCLK_BCR 100
+#define GCC_USB4_1_MISC_USB4_SYS_BCR 101
+#define GCC_USB4_1_MISC_RX_CLK_0_BCR 102
+#define GCC_USB4_1_MISC_RX_CLK_1_BCR 103
+#define GCC_USB4_1_MISC_USB_PIPE_BCR 104
+#define GCC_USB4_1_MISC_PCIE_PIPE_BCR 105
+#define GCC_USB4_1_MISC_TMU_BCR 106
+#define GCC_USB4_1_MISC_SB_IF_BCR 107
+#define GCC_USB4_1_MISC_HIA_MSTR_BCR 108
+#define GCC_USB4_1_MISC_AHB_BCR 109
+#define GCC_USB4_1_MISC_DP0_MAX_PCLK_BCR 110
+#define GCC_USB4_1_MISC_DP1_MAX_PCLK_BCR 111
+#define GCC_USB4_2_MISC_USB4_SYS_BCR 112
+#define GCC_USB4_2_MISC_RX_CLK_0_BCR 113
+#define GCC_USB4_2_MISC_RX_CLK_1_BCR 114
+#define GCC_USB4_2_MISC_USB_PIPE_BCR 115
+#define GCC_USB4_2_MISC_PCIE_PIPE_BCR 116
+#define GCC_USB4_2_MISC_TMU_BCR 117
+#define GCC_USB4_2_MISC_SB_IF_BCR 118
+#define GCC_USB4_2_MISC_HIA_MSTR_BCR 119
+#define GCC_USB4_2_MISC_AHB_BCR 120
+#define GCC_USB4_2_MISC_DP0_MAX_PCLK_BCR 121
+#define GCC_USB4_2_MISC_DP1_MAX_PCLK_BCR 122
+#define GCC_USB4PHY_PHY_PRIM_BCR 123
+#define GCC_USB4PHY_PHY_SEC_BCR 124
+#define GCC_USB4PHY_PHY_TERT_BCR 125
+
#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
index 61a3a8f3ac43..27b8f50541fd 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
@@ -33,9 +33,22 @@
#define GPU_CC_SLEEP_CLK 23
#define GPU_CC_XO_CLK_SRC 24
#define GPU_CC_XO_DIV_CLK_SRC 25
+#define GPU_CC_CX_ACCU_SHIFT_CLK 26
+#define GPU_CC_GX_ACCU_SHIFT_CLK 27

/* GDSCs */
#define GPU_CX_GDSC 0
#define GPU_GX_GDSC 1

+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+
#endif
diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h
index 342a60b11934..e39acdc6499c 100644
--- a/include/dt-bindings/clock/r8a7779-clock.h
+++ b/include/dt-bindings/clock/r8a7779-clock.h
@@ -57,5 +57,4 @@
#define R8A7779_CLK_MMC1 30
#define R8A7779_CLK_MMC0 31
-
#endif /* __DT_BINDINGS_CLOCK_R8A7779_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
deleted file mode 100644
index c92ff1e60223..000000000000
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7790_H__
-#define __DT_BINDINGS_CLOCK_R8A7790_H__
-
-/* CPG */
-#define R8A7790_CLK_MAIN 0
-#define R8A7790_CLK_PLL0 1
-#define R8A7790_CLK_PLL1 2
-#define R8A7790_CLK_PLL3 3
-#define R8A7790_CLK_LB 4
-#define R8A7790_CLK_QSPI 5
-#define R8A7790_CLK_SDH 6
-#define R8A7790_CLK_SD0 7
-#define R8A7790_CLK_SD1 8
-#define R8A7790_CLK_Z 9
-#define R8A7790_CLK_RCAN 10
-#define R8A7790_CLK_ADSP 11
-
-/* MSTP0 */
-#define R8A7790_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7790_CLK_VCP1 0
-#define R8A7790_CLK_VCP0 1
-#define R8A7790_CLK_VPC1 2
-#define R8A7790_CLK_VPC0 3
-#define R8A7790_CLK_JPU 6
-#define R8A7790_CLK_SSP1 9
-#define R8A7790_CLK_TMU1 11
-#define R8A7790_CLK_3DG 12
-#define R8A7790_CLK_2DDMAC 15
-#define R8A7790_CLK_FDP1_2 17
-#define R8A7790_CLK_FDP1_1 18
-#define R8A7790_CLK_FDP1_0 19
-#define R8A7790_CLK_TMU3 21
-#define R8A7790_CLK_TMU2 22
-#define R8A7790_CLK_CMT0 24
-#define R8A7790_CLK_TMU0 25
-#define R8A7790_CLK_VSP1_DU1 27
-#define R8A7790_CLK_VSP1_DU0 28
-#define R8A7790_CLK_VSP1_R 30
-#define R8A7790_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7790_CLK_SCIFA2 2
-#define R8A7790_CLK_SCIFA1 3
-#define R8A7790_CLK_SCIFA0 4
-#define R8A7790_CLK_MSIOF2 5
-#define R8A7790_CLK_SCIFB0 6
-#define R8A7790_CLK_SCIFB1 7
-#define R8A7790_CLK_MSIOF1 8
-#define R8A7790_CLK_MSIOF3 15
-#define R8A7790_CLK_SCIFB2 16
-#define R8A7790_CLK_SYS_DMAC1 18
-#define R8A7790_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7790_CLK_IIC2 0
-#define R8A7790_CLK_TPU0 4
-#define R8A7790_CLK_MMCIF1 5
-#define R8A7790_CLK_SCIF2 10
-#define R8A7790_CLK_SDHI3 11
-#define R8A7790_CLK_SDHI2 12
-#define R8A7790_CLK_SDHI1 13
-#define R8A7790_CLK_SDHI0 14
-#define R8A7790_CLK_MMCIF0 15
-#define R8A7790_CLK_IIC0 18
-#define R8A7790_CLK_PCIEC 19
-#define R8A7790_CLK_IIC1 23
-#define R8A7790_CLK_SSUSB 28
-#define R8A7790_CLK_CMT1 29
-#define R8A7790_CLK_USBDMAC0 30
-#define R8A7790_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7790_CLK_IRQC 7
-#define R8A7790_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7790_CLK_AUDIO_DMAC1 1
-#define R8A7790_CLK_AUDIO_DMAC0 2
-#define R8A7790_CLK_ADSP_MOD 6
-#define R8A7790_CLK_THERMAL 22
-#define R8A7790_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7790_CLK_EHCI 3
-#define R8A7790_CLK_HSUSB 4
-#define R8A7790_CLK_HSCIF1 16
-#define R8A7790_CLK_HSCIF0 17
-#define R8A7790_CLK_SCIF1 20
-#define R8A7790_CLK_SCIF0 21
-#define R8A7790_CLK_DU2 22
-#define R8A7790_CLK_DU1 23
-#define R8A7790_CLK_DU0 24
-#define R8A7790_CLK_LVDS1 25
-#define R8A7790_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7790_CLK_MLB 2
-#define R8A7790_CLK_VIN3 8
-#define R8A7790_CLK_VIN2 9
-#define R8A7790_CLK_VIN1 10
-#define R8A7790_CLK_VIN0 11
-#define R8A7790_CLK_ETHERAVB 12
-#define R8A7790_CLK_ETHER 13
-#define R8A7790_CLK_SATA1 14
-#define R8A7790_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7790_CLK_GPIO5 7
-#define R8A7790_CLK_GPIO4 8
-#define R8A7790_CLK_GPIO3 9
-#define R8A7790_CLK_GPIO2 10
-#define R8A7790_CLK_GPIO1 11
-#define R8A7790_CLK_GPIO0 12
-#define R8A7790_CLK_RCAN1 15
-#define R8A7790_CLK_RCAN0 16
-#define R8A7790_CLK_QSPI_MOD 17
-#define R8A7790_CLK_IICDVFS 26
-#define R8A7790_CLK_I2C3 28
-#define R8A7790_CLK_I2C2 29
-#define R8A7790_CLK_I2C1 30
-#define R8A7790_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7790_CLK_SSI_ALL 5
-#define R8A7790_CLK_SSI9 6
-#define R8A7790_CLK_SSI8 7
-#define R8A7790_CLK_SSI7 8
-#define R8A7790_CLK_SSI6 9
-#define R8A7790_CLK_SSI5 10
-#define R8A7790_CLK_SSI4 11
-#define R8A7790_CLK_SSI3 12
-#define R8A7790_CLK_SSI2 13
-#define R8A7790_CLK_SSI1 14
-#define R8A7790_CLK_SSI0 15
-#define R8A7790_CLK_SCU_ALL 17
-#define R8A7790_CLK_SCU_DVC1 18
-#define R8A7790_CLK_SCU_DVC0 19
-#define R8A7790_CLK_SCU_CTU1_MIX1 20
-#define R8A7790_CLK_SCU_CTU0_MIX0 21
-#define R8A7790_CLK_SCU_SRC9 22
-#define R8A7790_CLK_SCU_SRC8 23
-#define R8A7790_CLK_SCU_SRC7 24
-#define R8A7790_CLK_SCU_SRC6 25
-#define R8A7790_CLK_SCU_SRC5 26
-#define R8A7790_CLK_SCU_SRC4 27
-#define R8A7790_CLK_SCU_SRC3 28
-#define R8A7790_CLK_SCU_SRC2 29
-#define R8A7790_CLK_SCU_SRC1 30
-#define R8A7790_CLK_SCU_SRC0 31
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7790_H__ */
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
deleted file mode 100644
index bb4f18b1b3d5..000000000000
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7791_H__
-#define __DT_BINDINGS_CLOCK_R8A7791_H__
-
-/* CPG */
-#define R8A7791_CLK_MAIN 0
-#define R8A7791_CLK_PLL0 1
-#define R8A7791_CLK_PLL1 2
-#define R8A7791_CLK_PLL3 3
-#define R8A7791_CLK_LB 4
-#define R8A7791_CLK_QSPI 5
-#define R8A7791_CLK_SDH 6
-#define R8A7791_CLK_SD0 7
-#define R8A7791_CLK_Z 8
-#define R8A7791_CLK_RCAN 9
-#define R8A7791_CLK_ADSP 10
-
-/* MSTP0 */
-#define R8A7791_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7791_CLK_VCP0 1
-#define R8A7791_CLK_VPC0 3
-#define R8A7791_CLK_JPU 6
-#define R8A7791_CLK_SSP1 9
-#define R8A7791_CLK_TMU1 11
-#define R8A7791_CLK_3DG 12
-#define R8A7791_CLK_2DDMAC 15
-#define R8A7791_CLK_FDP1_1 18
-#define R8A7791_CLK_FDP1_0 19
-#define R8A7791_CLK_TMU3 21
-#define R8A7791_CLK_TMU2 22
-#define R8A7791_CLK_CMT0 24
-#define R8A7791_CLK_TMU0 25
-#define R8A7791_CLK_VSP1_DU1 27
-#define R8A7791_CLK_VSP1_DU0 28
-#define R8A7791_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7791_CLK_SCIFA2 2
-#define R8A7791_CLK_SCIFA1 3
-#define R8A7791_CLK_SCIFA0 4
-#define R8A7791_CLK_MSIOF2 5
-#define R8A7791_CLK_SCIFB0 6
-#define R8A7791_CLK_SCIFB1 7
-#define R8A7791_CLK_MSIOF1 8
-#define R8A7791_CLK_SCIFB2 16
-#define R8A7791_CLK_SYS_DMAC1 18
-#define R8A7791_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7791_CLK_TPU0 4
-#define R8A7791_CLK_SDHI2 11
-#define R8A7791_CLK_SDHI1 12
-#define R8A7791_CLK_SDHI0 14
-#define R8A7791_CLK_MMCIF0 15
-#define R8A7791_CLK_IIC0 18
-#define R8A7791_CLK_PCIEC 19
-#define R8A7791_CLK_IIC1 23
-#define R8A7791_CLK_SSUSB 28
-#define R8A7791_CLK_CMT1 29
-#define R8A7791_CLK_USBDMAC0 30
-#define R8A7791_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7791_CLK_IRQC 7
-#define R8A7791_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7791_CLK_AUDIO_DMAC1 1
-#define R8A7791_CLK_AUDIO_DMAC0 2
-#define R8A7791_CLK_ADSP_MOD 6
-#define R8A7791_CLK_THERMAL 22
-#define R8A7791_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7791_CLK_EHCI 3
-#define R8A7791_CLK_HSUSB 4
-#define R8A7791_CLK_HSCIF2 13
-#define R8A7791_CLK_SCIF5 14
-#define R8A7791_CLK_SCIF4 15
-#define R8A7791_CLK_HSCIF1 16
-#define R8A7791_CLK_HSCIF0 17
-#define R8A7791_CLK_SCIF3 18
-#define R8A7791_CLK_SCIF2 19
-#define R8A7791_CLK_SCIF1 20
-#define R8A7791_CLK_SCIF0 21
-#define R8A7791_CLK_DU1 23
-#define R8A7791_CLK_DU0 24
-#define R8A7791_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7791_CLK_IPMMU_SGX 0
-#define R8A7791_CLK_MLB 2
-#define R8A7791_CLK_VIN2 9
-#define R8A7791_CLK_VIN1 10
-#define R8A7791_CLK_VIN0 11
-#define R8A7791_CLK_ETHERAVB 12
-#define R8A7791_CLK_ETHER 13
-#define R8A7791_CLK_SATA1 14
-#define R8A7791_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7791_CLK_GYROADC 1
-#define R8A7791_CLK_GPIO7 4
-#define R8A7791_CLK_GPIO6 5
-#define R8A7791_CLK_GPIO5 7
-#define R8A7791_CLK_GPIO4 8
-#define R8A7791_CLK_GPIO3 9
-#define R8A7791_CLK_GPIO2 10
-#define R8A7791_CLK_GPIO1 11
-#define R8A7791_CLK_GPIO0 12
-#define R8A7791_CLK_RCAN1 15
-#define R8A7791_CLK_RCAN0 16
-#define R8A7791_CLK_QSPI_MOD 17
-#define R8A7791_CLK_I2C5 25
-#define R8A7791_CLK_IICDVFS 26
-#define R8A7791_CLK_I2C4 27
-#define R8A7791_CLK_I2C3 28
-#define R8A7791_CLK_I2C2 29
-#define R8A7791_CLK_I2C1 30
-#define R8A7791_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7791_CLK_SSI_ALL 5
-#define R8A7791_CLK_SSI9 6
-#define R8A7791_CLK_SSI8 7
-#define R8A7791_CLK_SSI7 8
-#define R8A7791_CLK_SSI6 9
-#define R8A7791_CLK_SSI5 10
-#define R8A7791_CLK_SSI4 11
-#define R8A7791_CLK_SSI3 12
-#define R8A7791_CLK_SSI2 13
-#define R8A7791_CLK_SSI1 14
-#define R8A7791_CLK_SSI0 15
-#define R8A7791_CLK_SCU_ALL 17
-#define R8A7791_CLK_SCU_DVC1 18
-#define R8A7791_CLK_SCU_DVC0 19
-#define R8A7791_CLK_SCU_CTU1_MIX1 20
-#define R8A7791_CLK_SCU_CTU0_MIX0 21
-#define R8A7791_CLK_SCU_SRC9 22
-#define R8A7791_CLK_SCU_SRC8 23
-#define R8A7791_CLK_SCU_SRC7 24
-#define R8A7791_CLK_SCU_SRC6 25
-#define R8A7791_CLK_SCU_SRC5 26
-#define R8A7791_CLK_SCU_SRC4 27
-#define R8A7791_CLK_SCU_SRC3 28
-#define R8A7791_CLK_SCU_SRC2 29
-#define R8A7791_CLK_SCU_SRC1 30
-#define R8A7791_CLK_SCU_SRC0 31
-
-/* MSTP11 */
-#define R8A7791_CLK_SCIFA3 6
-#define R8A7791_CLK_SCIFA4 7
-#define R8A7791_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7791_H__ */
diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h
deleted file mode 100644
index 2948d9ce3a14..000000000000
--- a/include/dt-bindings/clock/r8a7792-clock.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Cogent Embedded, Inc.
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7792_H__
-#define __DT_BINDINGS_CLOCK_R8A7792_H__
-
-/* CPG */
-#define R8A7792_CLK_MAIN 0
-#define R8A7792_CLK_PLL0 1
-#define R8A7792_CLK_PLL1 2
-#define R8A7792_CLK_PLL3 3
-#define R8A7792_CLK_LB 4
-#define R8A7792_CLK_QSPI 5
-
-/* MSTP0 */
-#define R8A7792_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7792_CLK_JPU 6
-#define R8A7792_CLK_TMU1 11
-#define R8A7792_CLK_TMU3 21
-#define R8A7792_CLK_TMU2 22
-#define R8A7792_CLK_CMT0 24
-#define R8A7792_CLK_TMU0 25
-#define R8A7792_CLK_VSP1DU1 27
-#define R8A7792_CLK_VSP1DU0 28
-#define R8A7792_CLK_VSP1_SY 31
-
-/* MSTP2 */
-#define R8A7792_CLK_MSIOF1 8
-#define R8A7792_CLK_SYS_DMAC1 18
-#define R8A7792_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7792_CLK_TPU0 4
-#define R8A7792_CLK_SDHI0 14
-#define R8A7792_CLK_CMT1 29
-
-/* MSTP4 */
-#define R8A7792_CLK_IRQC 7
-#define R8A7792_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7792_CLK_AUDIO_DMAC0 2
-#define R8A7792_CLK_THERMAL 22
-#define R8A7792_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7792_CLK_HSCIF1 16
-#define R8A7792_CLK_HSCIF0 17
-#define R8A7792_CLK_SCIF3 18
-#define R8A7792_CLK_SCIF2 19
-#define R8A7792_CLK_SCIF1 20
-#define R8A7792_CLK_SCIF0 21
-#define R8A7792_CLK_DU1 23
-#define R8A7792_CLK_DU0 24
-
-/* MSTP8 */
-#define R8A7792_CLK_VIN5 4
-#define R8A7792_CLK_VIN4 5
-#define R8A7792_CLK_VIN3 8
-#define R8A7792_CLK_VIN2 9
-#define R8A7792_CLK_VIN1 10
-#define R8A7792_CLK_VIN0 11
-#define R8A7792_CLK_ETHERAVB 12
-
-/* MSTP9 */
-#define R8A7792_CLK_GPIO7 4
-#define R8A7792_CLK_GPIO6 5
-#define R8A7792_CLK_GPIO5 7
-#define R8A7792_CLK_GPIO4 8
-#define R8A7792_CLK_GPIO3 9
-#define R8A7792_CLK_GPIO2 10
-#define R8A7792_CLK_GPIO1 11
-#define R8A7792_CLK_GPIO0 12
-#define R8A7792_CLK_GPIO11 13
-#define R8A7792_CLK_GPIO10 14
-#define R8A7792_CLK_CAN1 15
-#define R8A7792_CLK_CAN0 16
-#define R8A7792_CLK_QSPI_MOD 17
-#define R8A7792_CLK_GPIO9 19
-#define R8A7792_CLK_GPIO8 21
-#define R8A7792_CLK_I2C5 25
-#define R8A7792_CLK_IICDVFS 26
-#define R8A7792_CLK_I2C4 27
-#define R8A7792_CLK_I2C3 28
-#define R8A7792_CLK_I2C2 29
-#define R8A7792_CLK_I2C1 30
-#define R8A7792_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7792_CLK_SSI_ALL 5
-#define R8A7792_CLK_SSI4 11
-#define R8A7792_CLK_SSI3 12
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7792_H__ */
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
deleted file mode 100644
index 49c66d8ed178..000000000000
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * r8a7793 clock definition
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
-#define __DT_BINDINGS_CLOCK_R8A7793_H__
-
-/* CPG */
-#define R8A7793_CLK_MAIN 0
-#define R8A7793_CLK_PLL0 1
-#define R8A7793_CLK_PLL1 2
-#define R8A7793_CLK_PLL3 3
-#define R8A7793_CLK_LB 4
-#define R8A7793_CLK_QSPI 5
-#define R8A7793_CLK_SDH 6
-#define R8A7793_CLK_SD0 7
-#define R8A7793_CLK_Z 8
-#define R8A7793_CLK_RCAN 9
-#define R8A7793_CLK_ADSP 10
-
-/* MSTP0 */
-#define R8A7793_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7793_CLK_VCP0 1
-#define R8A7793_CLK_VPC0 3
-#define R8A7793_CLK_SSP1 9
-#define R8A7793_CLK_TMU1 11
-#define R8A7793_CLK_3DG 12
-#define R8A7793_CLK_2DDMAC 15
-#define R8A7793_CLK_FDP1_1 18
-#define R8A7793_CLK_FDP1_0 19
-#define R8A7793_CLK_TMU3 21
-#define R8A7793_CLK_TMU2 22
-#define R8A7793_CLK_CMT0 24
-#define R8A7793_CLK_TMU0 25
-#define R8A7793_CLK_VSP1_DU1 27
-#define R8A7793_CLK_VSP1_DU0 28
-#define R8A7793_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7793_CLK_SCIFA2 2
-#define R8A7793_CLK_SCIFA1 3
-#define R8A7793_CLK_SCIFA0 4
-#define R8A7793_CLK_MSIOF2 5
-#define R8A7793_CLK_SCIFB0 6
-#define R8A7793_CLK_SCIFB1 7
-#define R8A7793_CLK_MSIOF1 8
-#define R8A7793_CLK_SCIFB2 16
-#define R8A7793_CLK_SYS_DMAC1 18
-#define R8A7793_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7793_CLK_TPU0 4
-#define R8A7793_CLK_SDHI2 11
-#define R8A7793_CLK_SDHI1 12
-#define R8A7793_CLK_SDHI0 14
-#define R8A7793_CLK_MMCIF0 15
-#define R8A7793_CLK_IIC0 18
-#define R8A7793_CLK_PCIEC 19
-#define R8A7793_CLK_IIC1 23
-#define R8A7793_CLK_SSUSB 28
-#define R8A7793_CLK_CMT1 29
-#define R8A7793_CLK_USBDMAC0 30
-#define R8A7793_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7793_CLK_IRQC 7
-#define R8A7793_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7793_CLK_AUDIO_DMAC1 1
-#define R8A7793_CLK_AUDIO_DMAC0 2
-#define R8A7793_CLK_ADSP_MOD 6
-#define R8A7793_CLK_THERMAL 22
-#define R8A7793_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7793_CLK_EHCI 3
-#define R8A7793_CLK_HSUSB 4
-#define R8A7793_CLK_HSCIF2 13
-#define R8A7793_CLK_SCIF5 14
-#define R8A7793_CLK_SCIF4 15
-#define R8A7793_CLK_HSCIF1 16
-#define R8A7793_CLK_HSCIF0 17
-#define R8A7793_CLK_SCIF3 18
-#define R8A7793_CLK_SCIF2 19
-#define R8A7793_CLK_SCIF1 20
-#define R8A7793_CLK_SCIF0 21
-#define R8A7793_CLK_DU1 23
-#define R8A7793_CLK_DU0 24
-#define R8A7793_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7793_CLK_IPMMU_SGX 0
-#define R8A7793_CLK_VIN2 9
-#define R8A7793_CLK_VIN1 10
-#define R8A7793_CLK_VIN0 11
-#define R8A7793_CLK_ETHER 13
-#define R8A7793_CLK_SATA1 14
-#define R8A7793_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7793_CLK_GPIO7 4
-#define R8A7793_CLK_GPIO6 5
-#define R8A7793_CLK_GPIO5 7
-#define R8A7793_CLK_GPIO4 8
-#define R8A7793_CLK_GPIO3 9
-#define R8A7793_CLK_GPIO2 10
-#define R8A7793_CLK_GPIO1 11
-#define R8A7793_CLK_GPIO0 12
-#define R8A7793_CLK_RCAN1 15
-#define R8A7793_CLK_RCAN0 16
-#define R8A7793_CLK_QSPI_MOD 17
-#define R8A7793_CLK_I2C5 25
-#define R8A7793_CLK_IICDVFS 26
-#define R8A7793_CLK_I2C4 27
-#define R8A7793_CLK_I2C3 28
-#define R8A7793_CLK_I2C2 29
-#define R8A7793_CLK_I2C1 30
-#define R8A7793_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7793_CLK_SSI_ALL 5
-#define R8A7793_CLK_SSI9 6
-#define R8A7793_CLK_SSI8 7
-#define R8A7793_CLK_SSI7 8
-#define R8A7793_CLK_SSI6 9
-#define R8A7793_CLK_SSI5 10
-#define R8A7793_CLK_SSI4 11
-#define R8A7793_CLK_SSI3 12
-#define R8A7793_CLK_SSI2 13
-#define R8A7793_CLK_SSI1 14
-#define R8A7793_CLK_SSI0 15
-#define R8A7793_CLK_SCU_ALL 17
-#define R8A7793_CLK_SCU_DVC1 18
-#define R8A7793_CLK_SCU_DVC0 19
-#define R8A7793_CLK_SCU_CTU1_MIX1 20
-#define R8A7793_CLK_SCU_CTU0_MIX0 21
-#define R8A7793_CLK_SCU_SRC9 22
-#define R8A7793_CLK_SCU_SRC8 23
-#define R8A7793_CLK_SCU_SRC7 24
-#define R8A7793_CLK_SCU_SRC6 25
-#define R8A7793_CLK_SCU_SRC5 26
-#define R8A7793_CLK_SCU_SRC4 27
-#define R8A7793_CLK_SCU_SRC3 28
-#define R8A7793_CLK_SCU_SRC2 29
-#define R8A7793_CLK_SCU_SRC1 30
-#define R8A7793_CLK_SCU_SRC0 31
-
-/* MSTP11 */
-#define R8A7793_CLK_SCIFA3 6
-#define R8A7793_CLK_SCIFA4 7
-#define R8A7793_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
deleted file mode 100644
index 649f005782d0..000000000000
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
-#define __DT_BINDINGS_CLOCK_R8A7794_H__
-
-/* CPG */
-#define R8A7794_CLK_MAIN 0
-#define R8A7794_CLK_PLL0 1
-#define R8A7794_CLK_PLL1 2
-#define R8A7794_CLK_PLL3 3
-#define R8A7794_CLK_LB 4
-#define R8A7794_CLK_QSPI 5
-#define R8A7794_CLK_SDH 6
-#define R8A7794_CLK_SD0 7
-#define R8A7794_CLK_RCAN 8
-
-/* MSTP0 */
-#define R8A7794_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7794_CLK_VCP0 1
-#define R8A7794_CLK_VPC0 3
-#define R8A7794_CLK_TMU1 11
-#define R8A7794_CLK_3DG 12
-#define R8A7794_CLK_2DDMAC 15
-#define R8A7794_CLK_FDP1_0 19
-#define R8A7794_CLK_TMU3 21
-#define R8A7794_CLK_TMU2 22
-#define R8A7794_CLK_CMT0 24
-#define R8A7794_CLK_TMU0 25
-#define R8A7794_CLK_VSP1_DU0 28
-#define R8A7794_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7794_CLK_SCIFA2 2
-#define R8A7794_CLK_SCIFA1 3
-#define R8A7794_CLK_SCIFA0 4
-#define R8A7794_CLK_MSIOF2 5
-#define R8A7794_CLK_SCIFB0 6
-#define R8A7794_CLK_SCIFB1 7
-#define R8A7794_CLK_MSIOF1 8
-#define R8A7794_CLK_SCIFB2 16
-#define R8A7794_CLK_SYS_DMAC1 18
-#define R8A7794_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7794_CLK_SDHI2 11
-#define R8A7794_CLK_SDHI1 12
-#define R8A7794_CLK_SDHI0 14
-#define R8A7794_CLK_MMCIF0 15
-#define R8A7794_CLK_IIC0 18
-#define R8A7794_CLK_IIC1 23
-#define R8A7794_CLK_CMT1 29
-#define R8A7794_CLK_USBDMAC0 30
-#define R8A7794_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7794_CLK_IRQC 7
-#define R8A7794_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7794_CLK_AUDIO_DMAC0 2
-#define R8A7794_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7794_CLK_EHCI 3
-#define R8A7794_CLK_HSUSB 4
-#define R8A7794_CLK_HSCIF2 13
-#define R8A7794_CLK_SCIF5 14
-#define R8A7794_CLK_SCIF4 15
-#define R8A7794_CLK_HSCIF1 16
-#define R8A7794_CLK_HSCIF0 17
-#define R8A7794_CLK_SCIF3 18
-#define R8A7794_CLK_SCIF2 19
-#define R8A7794_CLK_SCIF1 20
-#define R8A7794_CLK_SCIF0 21
-#define R8A7794_CLK_DU1 23
-#define R8A7794_CLK_DU0 24
-
-/* MSTP8 */
-#define R8A7794_CLK_VIN1 10
-#define R8A7794_CLK_VIN0 11
-#define R8A7794_CLK_ETHERAVB 12
-#define R8A7794_CLK_ETHER 13
-
-/* MSTP9 */
-#define R8A7794_CLK_GPIO6 5
-#define R8A7794_CLK_GPIO5 7
-#define R8A7794_CLK_GPIO4 8
-#define R8A7794_CLK_GPIO3 9
-#define R8A7794_CLK_GPIO2 10
-#define R8A7794_CLK_GPIO1 11
-#define R8A7794_CLK_GPIO0 12
-#define R8A7794_CLK_RCAN1 15
-#define R8A7794_CLK_RCAN0 16
-#define R8A7794_CLK_QSPI_MOD 17
-#define R8A7794_CLK_I2C5 25
-#define R8A7794_CLK_I2C4 27
-#define R8A7794_CLK_I2C3 28
-#define R8A7794_CLK_I2C2 29
-#define R8A7794_CLK_I2C1 30
-#define R8A7794_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7794_CLK_SSI_ALL 5
-#define R8A7794_CLK_SSI9 6
-#define R8A7794_CLK_SSI8 7
-#define R8A7794_CLK_SSI7 8
-#define R8A7794_CLK_SSI6 9
-#define R8A7794_CLK_SSI5 10
-#define R8A7794_CLK_SSI4 11
-#define R8A7794_CLK_SSI3 12
-#define R8A7794_CLK_SSI2 13
-#define R8A7794_CLK_SSI1 14
-#define R8A7794_CLK_SSI0 15
-#define R8A7794_CLK_SCU_ALL 17
-#define R8A7794_CLK_SCU_DVC1 18
-#define R8A7794_CLK_SCU_DVC0 19
-#define R8A7794_CLK_SCU_CTU1_MIX1 20
-#define R8A7794_CLK_SCU_CTU0_MIX0 21
-#define R8A7794_CLK_SCU_SRC6 25
-#define R8A7794_CLK_SCU_SRC5 26
-#define R8A7794_CLK_SCU_SRC4 27
-#define R8A7794_CLK_SCU_SRC3 28
-#define R8A7794_CLK_SCU_SRC2 29
-#define R8A7794_CLK_SCU_SRC1 30
-
-/* MSTP11 */
-#define R8A7794_CLK_SCIFA3 6
-#define R8A7794_CLK_SCIFA4 7
-#define R8A7794_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */
diff --git a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
index f1d737ca7ca1..124a6b8856df 100644
--- a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
@@ -51,5 +51,6 @@
#define R8A779A0_CLK_CBFUSA 40
#define R8A779A0_CLK_R 41
#define R8A779A0_CLK_OSC 42
+#define R8A779A0_CLK_ZG 43
#endif /* __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
index 131993343777..e1f65f1928cf 100644
--- a/include/dt-bindings/clock/r9a07g043-cpg.h
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -200,57 +200,4 @@
#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
-/* Power domain IDs. */
-#define R9A07G043_PD_ALWAYS_ON 0
-#define R9A07G043_PD_GIC 1 /* RZ/G2UL Only */
-#define R9A07G043_PD_IA55 2 /* RZ/G2UL Only */
-#define R9A07G043_PD_MHU 3 /* RZ/G2UL Only */
-#define R9A07G043_PD_CORESIGHT 4 /* RZ/G2UL Only */
-#define R9A07G043_PD_SYC 5 /* RZ/G2UL Only */
-#define R9A07G043_PD_DMAC 6
-#define R9A07G043_PD_GTM0 7
-#define R9A07G043_PD_GTM1 8
-#define R9A07G043_PD_GTM2 9
-#define R9A07G043_PD_MTU 10
-#define R9A07G043_PD_POE3 11
-#define R9A07G043_PD_WDT0 12
-#define R9A07G043_PD_SPI 13
-#define R9A07G043_PD_SDHI0 14
-#define R9A07G043_PD_SDHI1 15
-#define R9A07G043_PD_ISU 16 /* RZ/G2UL Only */
-#define R9A07G043_PD_CRU 17 /* RZ/G2UL Only */
-#define R9A07G043_PD_LCDC 18 /* RZ/G2UL Only */
-#define R9A07G043_PD_SSI0 19
-#define R9A07G043_PD_SSI1 20
-#define R9A07G043_PD_SSI2 21
-#define R9A07G043_PD_SSI3 22
-#define R9A07G043_PD_SRC 23
-#define R9A07G043_PD_USB0 24
-#define R9A07G043_PD_USB1 25
-#define R9A07G043_PD_USB_PHY 26
-#define R9A07G043_PD_ETHER0 27
-#define R9A07G043_PD_ETHER1 28
-#define R9A07G043_PD_I2C0 29
-#define R9A07G043_PD_I2C1 30
-#define R9A07G043_PD_I2C2 31
-#define R9A07G043_PD_I2C3 32
-#define R9A07G043_PD_SCIF0 33
-#define R9A07G043_PD_SCIF1 34
-#define R9A07G043_PD_SCIF2 35
-#define R9A07G043_PD_SCIF3 36
-#define R9A07G043_PD_SCIF4 37
-#define R9A07G043_PD_SCI0 38
-#define R9A07G043_PD_SCI1 39
-#define R9A07G043_PD_IRDA 40
-#define R9A07G043_PD_RSPI0 41
-#define R9A07G043_PD_RSPI1 42
-#define R9A07G043_PD_RSPI2 43
-#define R9A07G043_PD_CANFD 44
-#define R9A07G043_PD_ADC 45
-#define R9A07G043_PD_TSU 46
-#define R9A07G043_PD_PLIC 47 /* RZ/Five Only */
-#define R9A07G043_PD_IAX45 48 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLDM 49 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLMT 50 /* RZ/Five Only */
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
index e209f96f92b7..0bb17ff1a01a 100644
--- a/include/dt-bindings/clock/r9a07g044-cpg.h
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -217,62 +217,4 @@
#define R9A07G044_ADC_ADRST_N 82
#define R9A07G044_TSU_PRESETN 83
-/* Power domain IDs. */
-#define R9A07G044_PD_ALWAYS_ON 0
-#define R9A07G044_PD_GIC 1
-#define R9A07G044_PD_IA55 2
-#define R9A07G044_PD_MHU 3
-#define R9A07G044_PD_CORESIGHT 4
-#define R9A07G044_PD_SYC 5
-#define R9A07G044_PD_DMAC 6
-#define R9A07G044_PD_GTM0 7
-#define R9A07G044_PD_GTM1 8
-#define R9A07G044_PD_GTM2 9
-#define R9A07G044_PD_MTU 10
-#define R9A07G044_PD_POE3 11
-#define R9A07G044_PD_GPT 12
-#define R9A07G044_PD_POEGA 13
-#define R9A07G044_PD_POEGB 14
-#define R9A07G044_PD_POEGC 15
-#define R9A07G044_PD_POEGD 16
-#define R9A07G044_PD_WDT0 17
-#define R9A07G044_PD_WDT1 18
-#define R9A07G044_PD_SPI 19
-#define R9A07G044_PD_SDHI0 20
-#define R9A07G044_PD_SDHI1 21
-#define R9A07G044_PD_3DGE 22
-#define R9A07G044_PD_ISU 23
-#define R9A07G044_PD_VCPL4 24
-#define R9A07G044_PD_CRU 25
-#define R9A07G044_PD_MIPI_DSI 26
-#define R9A07G044_PD_LCDC 27
-#define R9A07G044_PD_SSI0 28
-#define R9A07G044_PD_SSI1 29
-#define R9A07G044_PD_SSI2 30
-#define R9A07G044_PD_SSI3 31
-#define R9A07G044_PD_SRC 32
-#define R9A07G044_PD_USB0 33
-#define R9A07G044_PD_USB1 34
-#define R9A07G044_PD_USB_PHY 35
-#define R9A07G044_PD_ETHER0 36
-#define R9A07G044_PD_ETHER1 37
-#define R9A07G044_PD_I2C0 38
-#define R9A07G044_PD_I2C1 39
-#define R9A07G044_PD_I2C2 40
-#define R9A07G044_PD_I2C3 41
-#define R9A07G044_PD_SCIF0 42
-#define R9A07G044_PD_SCIF1 43
-#define R9A07G044_PD_SCIF2 44
-#define R9A07G044_PD_SCIF3 45
-#define R9A07G044_PD_SCIF4 46
-#define R9A07G044_PD_SCI0 47
-#define R9A07G044_PD_SCI1 48
-#define R9A07G044_PD_IRDA 49
-#define R9A07G044_PD_RSPI0 50
-#define R9A07G044_PD_RSPI1 51
-#define R9A07G044_PD_RSPI2 52
-#define R9A07G044_PD_CANFD 53
-#define R9A07G044_PD_ADC 54
-#define R9A07G044_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
index 2c99f89397c4..43f4dbda872c 100644
--- a/include/dt-bindings/clock/r9a07g054-cpg.h
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -226,62 +226,4 @@
#define R9A07G054_TSU_PRESETN 83
#define R9A07G054_STPAI_ARESETN 84
-/* Power domain IDs. */
-#define R9A07G054_PD_ALWAYS_ON 0
-#define R9A07G054_PD_GIC 1
-#define R9A07G054_PD_IA55 2
-#define R9A07G054_PD_MHU 3
-#define R9A07G054_PD_CORESIGHT 4
-#define R9A07G054_PD_SYC 5
-#define R9A07G054_PD_DMAC 6
-#define R9A07G054_PD_GTM0 7
-#define R9A07G054_PD_GTM1 8
-#define R9A07G054_PD_GTM2 9
-#define R9A07G054_PD_MTU 10
-#define R9A07G054_PD_POE3 11
-#define R9A07G054_PD_GPT 12
-#define R9A07G054_PD_POEGA 13
-#define R9A07G054_PD_POEGB 14
-#define R9A07G054_PD_POEGC 15
-#define R9A07G054_PD_POEGD 16
-#define R9A07G054_PD_WDT0 17
-#define R9A07G054_PD_WDT1 18
-#define R9A07G054_PD_SPI 19
-#define R9A07G054_PD_SDHI0 20
-#define R9A07G054_PD_SDHI1 21
-#define R9A07G054_PD_3DGE 22
-#define R9A07G054_PD_ISU 23
-#define R9A07G054_PD_VCPL4 24
-#define R9A07G054_PD_CRU 25
-#define R9A07G054_PD_MIPI_DSI 26
-#define R9A07G054_PD_LCDC 27
-#define R9A07G054_PD_SSI0 28
-#define R9A07G054_PD_SSI1 29
-#define R9A07G054_PD_SSI2 30
-#define R9A07G054_PD_SSI3 31
-#define R9A07G054_PD_SRC 32
-#define R9A07G054_PD_USB0 33
-#define R9A07G054_PD_USB1 34
-#define R9A07G054_PD_USB_PHY 35
-#define R9A07G054_PD_ETHER0 36
-#define R9A07G054_PD_ETHER1 37
-#define R9A07G054_PD_I2C0 38
-#define R9A07G054_PD_I2C1 39
-#define R9A07G054_PD_I2C2 40
-#define R9A07G054_PD_I2C3 41
-#define R9A07G054_PD_SCIF0 42
-#define R9A07G054_PD_SCIF1 43
-#define R9A07G054_PD_SCIF2 44
-#define R9A07G054_PD_SCIF3 45
-#define R9A07G054_PD_SCIF4 46
-#define R9A07G054_PD_SCI0 47
-#define R9A07G054_PD_SCI1 48
-#define R9A07G054_PD_IRDA 49
-#define R9A07G054_PD_RSPI0 50
-#define R9A07G054_PD_RSPI1 51
-#define R9A07G054_PD_RSPI2 52
-#define R9A07G054_PD_CANFD 53
-#define R9A07G054_PD_ADC 54
-#define R9A07G054_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
index 8281e9caf3a9..410725b778a8 100644
--- a/include/dt-bindings/clock/r9a08g045-cpg.h
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -239,74 +239,4 @@
#define R9A08G045_I3C_PRESETN 92
#define R9A08G045_VBAT_BRESETN 93
-/* Power domain IDs. */
-#define R9A08G045_PD_ALWAYS_ON 0
-#define R9A08G045_PD_GIC 1
-#define R9A08G045_PD_IA55 2
-#define R9A08G045_PD_MHU 3
-#define R9A08G045_PD_CORESIGHT 4
-#define R9A08G045_PD_SYC 5
-#define R9A08G045_PD_DMAC 6
-#define R9A08G045_PD_GTM0 7
-#define R9A08G045_PD_GTM1 8
-#define R9A08G045_PD_GTM2 9
-#define R9A08G045_PD_GTM3 10
-#define R9A08G045_PD_GTM4 11
-#define R9A08G045_PD_GTM5 12
-#define R9A08G045_PD_GTM6 13
-#define R9A08G045_PD_GTM7 14
-#define R9A08G045_PD_MTU 15
-#define R9A08G045_PD_POE3 16
-#define R9A08G045_PD_GPT 17
-#define R9A08G045_PD_POEGA 18
-#define R9A08G045_PD_POEGB 19
-#define R9A08G045_PD_POEGC 20
-#define R9A08G045_PD_POEGD 21
-#define R9A08G045_PD_WDT0 22
-#define R9A08G045_PD_XSPI 23
-#define R9A08G045_PD_SDHI0 24
-#define R9A08G045_PD_SDHI1 25
-#define R9A08G045_PD_SDHI2 26
-#define R9A08G045_PD_SSI0 27
-#define R9A08G045_PD_SSI1 28
-#define R9A08G045_PD_SSI2 29
-#define R9A08G045_PD_SSI3 30
-#define R9A08G045_PD_SRC 31
-#define R9A08G045_PD_USB0 32
-#define R9A08G045_PD_USB1 33
-#define R9A08G045_PD_USB_PHY 34
-#define R9A08G045_PD_ETHER0 35
-#define R9A08G045_PD_ETHER1 36
-#define R9A08G045_PD_I2C0 37
-#define R9A08G045_PD_I2C1 38
-#define R9A08G045_PD_I2C2 39
-#define R9A08G045_PD_I2C3 40
-#define R9A08G045_PD_SCIF0 41
-#define R9A08G045_PD_SCIF1 42
-#define R9A08G045_PD_SCIF2 43
-#define R9A08G045_PD_SCIF3 44
-#define R9A08G045_PD_SCIF4 45
-#define R9A08G045_PD_SCIF5 46
-#define R9A08G045_PD_SCI0 47
-#define R9A08G045_PD_SCI1 48
-#define R9A08G045_PD_IRDA 49
-#define R9A08G045_PD_RSPI0 50
-#define R9A08G045_PD_RSPI1 51
-#define R9A08G045_PD_RSPI2 52
-#define R9A08G045_PD_RSPI3 53
-#define R9A08G045_PD_RSPI4 54
-#define R9A08G045_PD_CANFD 55
-#define R9A08G045_PD_ADC 56
-#define R9A08G045_PD_TSU 57
-#define R9A08G045_PD_OCTA 58
-#define R9A08G045_PD_PDM 59
-#define R9A08G045_PD_PCI 60
-#define R9A08G045_PD_SPDIF 61
-#define R9A08G045_PD_I3C 62
-#define R9A08G045_PD_VBAT 63
-
-#define R9A08G045_PD_DDR 64
-#define R9A08G045_PD_TZCDDR 65
-#define R9A08G045_PD_OTFDE_DDR 66
-
#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/raspberrypi,rp1-clocks.h b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
new file mode 100644
index 000000000000..7915fb8197bf
--- /dev/null
+++ b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Raspberry Pi Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RASPBERRYPI_RP1
+#define __DT_BINDINGS_CLOCK_RASPBERRYPI_RP1
+
+#define RP1_PLL_SYS_CORE 0
+#define RP1_PLL_AUDIO_CORE 1
+#define RP1_PLL_VIDEO_CORE 2
+
+#define RP1_PLL_SYS 3
+#define RP1_PLL_AUDIO 4
+#define RP1_PLL_VIDEO 5
+
+#define RP1_PLL_SYS_PRI_PH 6
+#define RP1_PLL_SYS_SEC_PH 7
+#define RP1_PLL_AUDIO_PRI_PH 8
+
+#define RP1_PLL_SYS_SEC 9
+#define RP1_PLL_AUDIO_SEC 10
+#define RP1_PLL_VIDEO_SEC 11
+
+#define RP1_CLK_SYS 12
+#define RP1_CLK_SLOW_SYS 13
+#define RP1_CLK_DMA 14
+#define RP1_CLK_UART 15
+#define RP1_CLK_ETH 16
+#define RP1_CLK_PWM0 17
+#define RP1_CLK_PWM1 18
+#define RP1_CLK_AUDIO_IN 19
+#define RP1_CLK_AUDIO_OUT 20
+#define RP1_CLK_I2S 21
+#define RP1_CLK_MIPI0_CFG 22
+#define RP1_CLK_MIPI1_CFG 23
+#define RP1_CLK_PCIE_AUX 24
+#define RP1_CLK_USBH0_MICROFRAME 25
+#define RP1_CLK_USBH1_MICROFRAME 26
+#define RP1_CLK_USBH0_SUSPEND 27
+#define RP1_CLK_USBH1_SUSPEND 28
+#define RP1_CLK_ETH_TSU 29
+#define RP1_CLK_ADC 30
+#define RP1_CLK_SDIO_TIMER 31
+#define RP1_CLK_SDIO_ALT_SRC 32
+#define RP1_CLK_GP0 33
+#define RP1_CLK_GP1 34
+#define RP1_CLK_GP2 35
+#define RP1_CLK_GP3 36
+#define RP1_CLK_GP4 37
+#define RP1_CLK_GP5 38
+#define RP1_CLK_VEC 39
+#define RP1_CLK_DPI 40
+#define RP1_CLK_MIPI0_DPI 41
+#define RP1_CLK_MIPI1_DPI 42
+
+/* Extra PLL output channels - RP1B0 only */
+#define RP1_PLL_VIDEO_PRI_PH 43
+#define RP1_PLL_AUDIO_TERN 44
+
+/* MIPI clocks managed by the DSI driver */
+#define RP1_CLK_MIPI0_DSI_BYTECLOCK 45
+#define RP1_CLK_MIPI1_DSI_BYTECLOCK 46
+
+#endif
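These RP1 indices are consumed from a device tree with the usual one-cell clock specifier. A minimal, illustrative sketch of a consumer node follows, assuming a clock provider labeled rp1_clocks with #clock-cells = <1>; the node name, unit address, and clock-names (which follow the common PL011 convention) are hypothetical, not taken from the patch:

    #include <dt-bindings/clock/raspberrypi,rp1-clocks.h>

    uart0: serial@30000 {
            /* baud clock first, then the bus clock */
            clocks = <&rp1_clocks RP1_CLK_UART>,
                     <&rp1_clocks RP1_CLK_SYS>;
            clock-names = "uartclk", "apb_pclk";
    };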
diff --git a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
new file mode 100644
index 000000000000..4cc8fc34b23c
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__
+
+#define VBATTB_XC 0
+#define VBATTB_XBYP 1
+#define VBATTB_MUX 2
+#define VBATTB_VBATTCLK 3
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g047-cpg.h b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h
new file mode 100644
index 000000000000..dab24740de3c
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G047_SYS_0_PCLK 0
+#define R9A09G047_CA55_0_CORECLK0 1
+#define R9A09G047_CA55_0_CORECLK1 2
+#define R9A09G047_CA55_0_CORECLK2 3
+#define R9A09G047_CA55_0_CORECLK3 4
+#define R9A09G047_CA55_0_PERIPHCLK 5
+#define R9A09G047_CM33_CLK0 6
+#define R9A09G047_CST_0_SWCLKTCK 7
+#define R9A09G047_IOTOP_0_SHCLK 8
+#define R9A09G047_SPI_CLK_SPI 9
+#define R9A09G047_GBETH_0_CLK_PTP_REF_I 10
+#define R9A09G047_GBETH_1_CLK_PTP_REF_I 11
+#define R9A09G047_USB3_0_REF_ALT_CLK_P 12
+#define R9A09G047_USB3_0_CLKCORE 13
+#define R9A09G047_USB2_0_CLK_CORE0 14
+#define R9A09G047_USB2_0_CLK_CORE1 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__ */
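Because this header pulls in renesas-cpg-mssr.h (see the #include above), consumers select between core and module clocks with a two-cell specifier whose first cell is CPG_CORE or CPG_MOD. A hedged sketch, assuming a CPG node labeled cpg; the serial node and its unit address are hypothetical:

    #include <dt-bindings/clock/renesas,r9a09g047-cpg.h>

    serial@11c01400 {
            /* first cell: CPG_CORE or CPG_MOD, second cell: the index */
            clocks = <&cpg CPG_CORE R9A09G047_SYS_0_PCLK>;
    };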
diff --git a/include/dt-bindings/clock/renesas,r9a09g056-cpg.h b/include/dt-bindings/clock/renesas,r9a09g056-cpg.h
new file mode 100644
index 000000000000..234dcf4f0f91
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g056-cpg.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G056_SYS_0_PCLK 0
+#define R9A09G056_CA55_0_CORE_CLK0 1
+#define R9A09G056_CA55_0_CORE_CLK1 2
+#define R9A09G056_CA55_0_CORE_CLK2 3
+#define R9A09G056_CA55_0_CORE_CLK3 4
+#define R9A09G056_CA55_0_PERIPHCLK 5
+#define R9A09G056_CM33_CLK0 6
+#define R9A09G056_CST_0_SWCLKTCK 7
+#define R9A09G056_IOTOP_0_SHCLK 8
+#define R9A09G056_USB2_0_CLK_CORE0 9
+#define R9A09G056_GBETH_0_CLK_PTP_REF_I 10
+#define R9A09G056_GBETH_1_CLK_PTP_REF_I 11
+#define R9A09G056_SPI_CLK_SPI 12
+#define R9A09G056_USB3_0_REF_ALT_CLK_P 13
+#define R9A09G056_USB3_0_CLKCORE 14
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g057-cpg.h b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
new file mode 100644
index 000000000000..f91d7f72922a
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G057_SYS_0_PCLK 0
+#define R9A09G057_CA55_0_CORE_CLK0 1
+#define R9A09G057_CA55_0_CORE_CLK1 2
+#define R9A09G057_CA55_0_CORE_CLK2 3
+#define R9A09G057_CA55_0_CORE_CLK3 4
+#define R9A09G057_CA55_0_PERIPHCLK 5
+#define R9A09G057_CM33_CLK0 6
+#define R9A09G057_CST_0_SWCLKTCK 7
+#define R9A09G057_IOTOP_0_SHCLK 8
+#define R9A09G057_USB2_0_CLK_CORE0 9
+#define R9A09G057_USB2_0_CLK_CORE1 10
+#define R9A09G057_GBETH_0_CLK_PTP_REF_I 11
+#define R9A09G057_GBETH_1_CLK_PTP_REF_I 12
+#define R9A09G057_SPI_CLK_SPI 13
+#define R9A09G057_USB3_0_REF_ALT_CLK_P 14
+#define R9A09G057_USB3_0_CLKCORE 15
+#define R9A09G057_USB3_1_REF_ALT_CLK_P 16
+#define R9A09G057_USB3_1_CLKCORE 17
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
new file mode 100644
index 000000000000..2a805e06487b
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G077 CPG Core Clocks */
+#define R9A09G077_CLK_CA55C0 0
+#define R9A09G077_CLK_CA55C1 1
+#define R9A09G077_CLK_CA55C2 2
+#define R9A09G077_CLK_CA55C3 3
+#define R9A09G077_CLK_CA55S 4
+#define R9A09G077_CLK_CR52_CPU0 5
+#define R9A09G077_CLK_CR52_CPU1 6
+#define R9A09G077_CLK_CKIO 7
+#define R9A09G077_CLK_PCLKAH 8
+#define R9A09G077_CLK_PCLKAM 9
+#define R9A09G077_CLK_PCLKAL 10
+#define R9A09G077_CLK_PCLKGPTL 11
+#define R9A09G077_CLK_PCLKH 12
+#define R9A09G077_CLK_PCLKM 13
+#define R9A09G077_CLK_PCLKL 14
+#define R9A09G077_SDHI_CLKHS 15
+#define R9A09G077_USB_CLK 16
+#define R9A09G077_ETCLKA 17
+#define R9A09G077_ETCLKB 18
+#define R9A09G077_ETCLKC 19
+#define R9A09G077_ETCLKD 20
+#define R9A09G077_ETCLKE 21
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
new file mode 100644
index 000000000000..09da0ad33be6
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G087 CPG Core Clocks */
+#define R9A09G087_CLK_CA55C0 0
+#define R9A09G087_CLK_CA55C1 1
+#define R9A09G087_CLK_CA55C2 2
+#define R9A09G087_CLK_CA55C3 3
+#define R9A09G087_CLK_CA55S 4
+#define R9A09G087_CLK_CR52_CPU0 5
+#define R9A09G087_CLK_CR52_CPU1 6
+#define R9A09G087_CLK_CKIO 7
+#define R9A09G087_CLK_PCLKAH 8
+#define R9A09G087_CLK_PCLKAM 9
+#define R9A09G087_CLK_PCLKAL 10
+#define R9A09G087_CLK_PCLKGPTL 11
+#define R9A09G087_CLK_PCLKH 12
+#define R9A09G087_CLK_PCLKM 13
+#define R9A09G087_CLK_PCLKL 14
+#define R9A09G087_SDHI_CLKHS 15
+#define R9A09G087_USB_CLK 16
+#define R9A09G087_ETCLKA 17
+#define R9A09G087_ETCLKB 18
+#define R9A09G087_ETCLKC 19
+#define R9A09G087_ETCLKD 20
+#define R9A09G087_ETCLKE 21
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index a96a9870ad59..5cbc0e2b08ff 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -47,6 +47,7 @@
#define SCLK_MACREF 152
#define SCLK_MACPLL 153
#define SCLK_SFC 160
+#define SCLK_USB480M 161
/* aclk gates */
#define ACLK_DMAC2 194
@@ -94,8 +95,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h
index 6a47825dac5d..b609fcf96508 100644
--- a/include/dt-bindings/clock/rk3128-cru.h
+++ b/include/dt-bindings/clock/rk3128-cru.h
@@ -116,6 +116,7 @@
#define PCLK_GMAC 367
#define PCLK_PMU_PRE 368
#define PCLK_SIM_CARD 369
+#define PCLK_MIPIPHY 370
/* hclk gates */
#define HCLK_SPDIF 440
@@ -143,8 +144,7 @@
#define HCLK_TSP 475
#define HCLK_CRYPTO 476
#define HCLK_PERI 478
-
-#define CLK_NR_CLKS (HCLK_PERI + 1)
+#define HCLK_SFC 479
/* soft-reset indices */
#define SRST_CORE0_PO 0
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index afad90680fce..dd988cc9d582 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -103,6 +103,8 @@
#define PCLK_PERI 351
#define PCLK_DDRUPCTL 352
#define PCLK_PUBL 353
+#define PCLK_CIF0 354
+#define PCLK_CIF1 355
/* hclk gates */
#define HCLK_SDMMC 448
@@ -132,8 +134,6 @@
#define HCLK_VDPU 472
#define HCLK_HDMI 473
-#define CLK_NR_CLKS (HCLK_HDMI + 1)
-
/* soft-reset indices */
#define SRST_MCORE 2
#define SRST_CORE0 3
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index de550ea56eeb..138b6ce514dd 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -146,8 +146,6 @@
#define HCLK_S_CRYPTO 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 33819acbfc56..c6034b01b050 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -195,8 +195,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
index d97840f9ee2e..ce4cd72b9d3d 100644
--- a/include/dt-bindings/clock/rk3308-cru.h
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -212,8 +212,6 @@
#define PCLK_CAN 233
#define PCLK_OWIRE 234
-#define CLK_NR_CLKS (PCLK_OWIRE + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index 555b4ff660ae..8885a2e98c65 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -201,8 +201,6 @@
#define HCLK_RGA 340
#define HCLK_HDCP 341
-#define CLK_NR_CLKS (HCLK_HDCP + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 83c72a163fd3..b951e2906948 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -72,6 +72,7 @@
#define SCLK_SFC 126
#define SCLK_MAC 127
#define SCLK_MACREF_OUT 128
+#define SCLK_MIPIDSI_24M 129
#define SCLK_TIMER10 133
#define SCLK_TIMER11 134
#define SCLK_TIMER12 135
@@ -182,8 +183,6 @@
#define HCLK_BUS 477
#define HCLK_PERI 478
-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE_B0 0
#define SRST_CORE_B1 1
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 39169d94a44e..4c90c7703a83 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -335,8 +335,6 @@
#define HCLK_SDIO_NOC 495
#define HCLK_SDIOAUDIO_NOC 496
-#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
-
/* pmu-clocks indices */
#define PLL_PPLL 1
@@ -378,8 +376,6 @@
#define PCLK_INTR_ARB_PMU 49
#define HCLK_NOC_PMU 50
-#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
-
/* soft-reset indices */
/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h
index 5263085c5b23..1e0aef8a645d 100644
--- a/include/dt-bindings/clock/rk3568-cru.h
+++ b/include/dt-bindings/clock/rk3568-cru.h
@@ -483,7 +483,11 @@
#define PCLK_CORE_PVTM 450
-#define CLK_NR_CLKS (PCLK_CORE_PVTM + 1)
+/* scmi-clocks indices */
+
+#define SCMI_CLK_CPU 0
+#define SCMI_CLK_GPU 1
+#define SCMI_CLK_NPU 2
/* pmu soft-reset indices */
/* pmucru_softrst_con0 */
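The rk3568 hunk above replaces the CLK_NR_CLKS bookkeeping define with SCMI clock indices, i.e. clocks served by firmware rather than by the CRU node itself. A minimal consumer sketch, assuming an SCMI clock-protocol node labeled scmi_clk with #clock-cells = <1>; the labels are hypothetical:

    #include <dt-bindings/clock/rk3568-cru.h>

    cpu0: cpu@0 {
            /* CPU rate is requested from firmware via SCMI */
            clocks = <&scmi_clk SCMI_CLK_CPU>;
    };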
diff --git a/include/dt-bindings/clock/rockchip,rk3506-cru.h b/include/dt-bindings/clock/rockchip,rk3506-cru.h
new file mode 100644
index 000000000000..71d7dda23cc9
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3506-cru.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3506_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3506_H
+
+/* cru plls */
+#define PLL_GPLL 0
+#define PLL_V0PLL 1
+#define PLL_V1PLL 2
+
+/* cru-clocks indices */
+#define ARMCLK 3
+#define CLK_DDR 4
+#define XIN24M_GATE 5
+#define CLK_GPLL_GATE 6
+#define CLK_V0PLL_GATE 7
+#define CLK_V1PLL_GATE 8
+#define CLK_GPLL_DIV 9
+#define CLK_GPLL_DIV_100M 10
+#define CLK_V0PLL_DIV 11
+#define CLK_V1PLL_DIV 12
+#define CLK_INT_VOICE_MATRIX0 13
+#define CLK_INT_VOICE_MATRIX1 14
+#define CLK_INT_VOICE_MATRIX2 15
+#define CLK_FRAC_UART_MATRIX0_MUX 16
+#define CLK_FRAC_UART_MATRIX1_MUX 17
+#define CLK_FRAC_VOICE_MATRIX0_MUX 18
+#define CLK_FRAC_VOICE_MATRIX1_MUX 19
+#define CLK_FRAC_COMMON_MATRIX0_MUX 20
+#define CLK_FRAC_COMMON_MATRIX1_MUX 21
+#define CLK_FRAC_COMMON_MATRIX2_MUX 22
+#define CLK_FRAC_UART_MATRIX0 23
+#define CLK_FRAC_UART_MATRIX1 24
+#define CLK_FRAC_VOICE_MATRIX0 25
+#define CLK_FRAC_VOICE_MATRIX1 26
+#define CLK_FRAC_COMMON_MATRIX0 27
+#define CLK_FRAC_COMMON_MATRIX1 28
+#define CLK_FRAC_COMMON_MATRIX2 29
+#define CLK_REF_USBPHY_TOP 30
+#define CLK_REF_DPHY_TOP 31
+#define ACLK_CORE_ROOT 32
+#define PCLK_CORE_ROOT 33
+#define PCLK_DBG 34
+#define PCLK_CORE_GRF 35
+#define PCLK_CORE_CRU 36
+#define CLK_CORE_EMA_DETECT 37
+#define CLK_REF_PVTPLL_CORE 38
+#define PCLK_GPIO1 39
+#define DBCLK_GPIO1 40
+#define ACLK_CORE_PERI_ROOT 41
+#define HCLK_CORE_PERI_ROOT 42
+#define PCLK_CORE_PERI_ROOT 43
+#define CLK_DSMC 44
+#define ACLK_DSMC 45
+#define PCLK_DSMC 46
+#define CLK_FLEXBUS_TX 47
+#define CLK_FLEXBUS_RX 48
+#define ACLK_FLEXBUS 49
+#define HCLK_FLEXBUS 50
+#define ACLK_DSMC_SLV 51
+#define HCLK_DSMC_SLV 52
+#define ACLK_BUS_ROOT 53
+#define HCLK_BUS_ROOT 54
+#define PCLK_BUS_ROOT 55
+#define ACLK_SYSRAM 56
+#define HCLK_SYSRAM 57
+#define ACLK_DMAC0 58
+#define ACLK_DMAC1 59
+#define HCLK_M0 60
+#define PCLK_BUS_GRF 61
+#define PCLK_TIMER 62
+#define CLK_TIMER0_CH0 63
+#define CLK_TIMER0_CH1 64
+#define CLK_TIMER0_CH2 65
+#define CLK_TIMER0_CH3 66
+#define CLK_TIMER0_CH4 67
+#define CLK_TIMER0_CH5 68
+#define PCLK_WDT0 69
+#define TCLK_WDT0 70
+#define PCLK_WDT1 71
+#define TCLK_WDT1 72
+#define PCLK_MAILBOX 73
+#define PCLK_INTMUX 74
+#define PCLK_SPINLOCK 75
+#define PCLK_DDRC 76
+#define HCLK_DDRPHY 77
+#define PCLK_DDRMON 78
+#define CLK_DDRMON_OSC 79
+#define PCLK_STDBY 80
+#define HCLK_USBOTG0 81
+#define HCLK_USBOTG0_PMU 82
+#define CLK_USBOTG0_ADP 83
+#define HCLK_USBOTG1 84
+#define HCLK_USBOTG1_PMU 85
+#define CLK_USBOTG1_ADP 86
+#define PCLK_USBPHY 87
+#define ACLK_DMA2DDR 88
+#define PCLK_DMA2DDR 89
+#define STCLK_M0 90
+#define CLK_DDRPHY 91
+#define CLK_DDRC_SRC 92
+#define ACLK_DDRC_0 93
+#define ACLK_DDRC_1 94
+#define CLK_DDRC 95
+#define CLK_DDRMON 96
+#define HCLK_LSPERI_ROOT 97
+#define PCLK_LSPERI_ROOT 98
+#define PCLK_UART0 99
+#define PCLK_UART1 100
+#define PCLK_UART2 101
+#define PCLK_UART3 102
+#define PCLK_UART4 103
+#define SCLK_UART0 104
+#define SCLK_UART1 105
+#define SCLK_UART2 106
+#define SCLK_UART3 107
+#define SCLK_UART4 108
+#define PCLK_I2C0 109
+#define CLK_I2C0 110
+#define PCLK_I2C1 111
+#define CLK_I2C1 112
+#define PCLK_I2C2 113
+#define CLK_I2C2 114
+#define PCLK_PWM1 115
+#define CLK_PWM1 116
+#define CLK_OSC_PWM1 117
+#define CLK_RC_PWM1 118
+#define CLK_FREQ_PWM1 119
+#define CLK_COUNTER_PWM1 120
+#define PCLK_SPI0 121
+#define CLK_SPI0 122
+#define PCLK_SPI1 123
+#define CLK_SPI1 124
+#define PCLK_GPIO2 125
+#define DBCLK_GPIO2 126
+#define PCLK_GPIO3 127
+#define DBCLK_GPIO3 128
+#define PCLK_GPIO4 129
+#define DBCLK_GPIO4 130
+#define HCLK_CAN0 131
+#define CLK_CAN0 132
+#define HCLK_CAN1 133
+#define CLK_CAN1 134
+#define HCLK_PDM 135
+#define MCLK_PDM 136
+#define CLKOUT_PDM 137
+#define MCLK_SPDIFTX 138
+#define HCLK_SPDIFTX 139
+#define HCLK_SPDIFRX 140
+#define MCLK_SPDIFRX 141
+#define MCLK_SAI0 142
+#define HCLK_SAI0 143
+#define MCLK_OUT_SAI0 144
+#define MCLK_SAI1 145
+#define HCLK_SAI1 146
+#define MCLK_OUT_SAI1 147
+#define HCLK_ASRC0 148
+#define CLK_ASRC0 149
+#define HCLK_ASRC1 150
+#define CLK_ASRC1 151
+#define PCLK_CRU 152
+#define PCLK_PMU_ROOT 153
+#define MCLK_ASRC0 154
+#define MCLK_ASRC1 155
+#define MCLK_ASRC2 156
+#define MCLK_ASRC3 157
+#define LRCK_ASRC0_SRC 158
+#define LRCK_ASRC0_DST 159
+#define LRCK_ASRC1_SRC 160
+#define LRCK_ASRC1_DST 161
+#define ACLK_HSPERI_ROOT 162
+#define HCLK_HSPERI_ROOT 163
+#define PCLK_HSPERI_ROOT 164
+#define CCLK_SRC_SDMMC 165
+#define HCLK_SDMMC 166
+#define HCLK_FSPI 167
+#define SCLK_FSPI 168
+#define PCLK_SPI2 169
+#define ACLK_MAC0 170
+#define ACLK_MAC1 171
+#define PCLK_MAC0 172
+#define PCLK_MAC1 173
+#define CLK_MAC_ROOT 174
+#define CLK_MAC0 175
+#define CLK_MAC1 176
+#define MCLK_SAI2 177
+#define HCLK_SAI2 178
+#define MCLK_OUT_SAI2 179
+#define MCLK_SAI3_SRC 180
+#define HCLK_SAI3 181
+#define MCLK_SAI3 182
+#define MCLK_OUT_SAI3 183
+#define MCLK_SAI4_SRC 184
+#define HCLK_SAI4 185
+#define MCLK_SAI4 186
+#define HCLK_DSM 187
+#define MCLK_DSM 188
+#define PCLK_AUDIO_ADC 189
+#define MCLK_AUDIO_ADC 190
+#define MCLK_AUDIO_ADC_DIV4 191
+#define PCLK_SARADC 192
+#define CLK_SARADC 193
+#define PCLK_OTPC_NS 194
+#define CLK_SBPI_OTPC_NS 195
+#define CLK_USER_OTPC_NS 196
+#define PCLK_UART5 197
+#define SCLK_UART5 198
+#define PCLK_GPIO234_IOC 199
+#define CLK_MAC_PTP_ROOT 200
+#define CLK_MAC0_PTP 201
+#define CLK_MAC1_PTP 202
+#define CLK_SPI2 203
+#define ACLK_VIO_ROOT 204
+#define HCLK_VIO_ROOT 205
+#define PCLK_VIO_ROOT 206
+#define HCLK_RGA 207
+#define ACLK_RGA 208
+#define CLK_CORE_RGA 209
+#define ACLK_VOP 210
+#define HCLK_VOP 211
+#define DCLK_VOP 212
+#define PCLK_DPHY 213
+#define PCLK_DSI_HOST 214
+#define PCLK_TSADC 215
+#define CLK_TSADC 216
+#define CLK_TSADC_TSEN 217
+#define PCLK_GPIO1_IOC 218
+#define PCLK_OTPC_S 219
+#define CLK_SBPI_OTPC_S 220
+#define CLK_USER_OTPC_S 221
+#define PCLK_OTP_MASK 222
+#define PCLK_KEYREADER 223
+#define HCLK_BOOTROM 224
+#define PCLK_DDR_SERVICE 225
+#define HCLK_CRYPTO_S 226
+#define HCLK_KEYLAD 227
+#define CLK_CORE_CRYPTO 228
+#define CLK_PKA_CRYPTO 229
+#define CLK_CORE_CRYPTO_S 230
+#define CLK_PKA_CRYPTO_S 231
+#define ACLK_CRYPTO_S 232
+#define HCLK_RNG_S 233
+#define CLK_CORE_CRYPTO_NS 234
+#define CLK_PKA_CRYPTO_NS 235
+#define ACLK_CRYPTO_NS 236
+#define HCLK_CRYPTO_NS 237
+#define HCLK_RNG 238
+#define CLK_PMU 239
+#define PCLK_PMU 240
+#define CLK_PMU_32K 241
+#define PCLK_PMU_CRU 242
+#define PCLK_PMU_GRF 243
+#define PCLK_GPIO0_IOC 244
+#define PCLK_GPIO0 245
+#define DBCLK_GPIO0 246
+#define PCLK_GPIO1_SHADOW 247
+#define DBCLK_GPIO1_SHADOW 248
+#define PCLK_PMU_HP_TIMER 249
+#define CLK_PMU_HP_TIMER 250
+#define CLK_PMU_HP_TIMER_32K 251
+#define PCLK_PWM0 252
+#define CLK_PWM0 253
+#define CLK_OSC_PWM0 254
+#define CLK_RC_PWM0 255
+#define CLK_MAC_OUT 256
+#define CLK_REF_OUT0 257
+#define CLK_REF_OUT1 258
+#define CLK_32K_FRAC 259
+#define CLK_32K_RC 260
+#define CLK_32K 261
+#define CLK_32K_PMU 262
+#define PCLK_TOUCH_KEY 263
+#define CLK_TOUCH_KEY 264
+#define CLK_REF_PHY_PLL 265
+#define CLK_REF_PHY_PMU_MUX 266
+#define CLK_WIFI_OUT 267
+#define CLK_V0PLL_REF 268
+#define CLK_V1PLL_REF 269
+#define CLK_32K_FRAC_MUX 270
+
+#endif
diff --git a/include/dt-bindings/clock/rockchip,rk3528-cru.h b/include/dt-bindings/clock/rockchip,rk3528-cru.h
new file mode 100644
index 000000000000..0245a53fc334
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3528-cru.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
+
+/* cru-clocks indices */
+#define PLL_APLL 0
+#define PLL_CPLL 1
+#define PLL_GPLL 2
+#define PLL_PPLL 3
+#define PLL_DPLL 4
+#define ARMCLK 5
+#define XIN_OSC0_HALF 6
+#define CLK_MATRIX_50M_SRC 7
+#define CLK_MATRIX_100M_SRC 8
+#define CLK_MATRIX_150M_SRC 9
+#define CLK_MATRIX_200M_SRC 10
+#define CLK_MATRIX_250M_SRC 11
+#define CLK_MATRIX_300M_SRC 12
+#define CLK_MATRIX_339M_SRC 13
+#define CLK_MATRIX_400M_SRC 14
+#define CLK_MATRIX_500M_SRC 15
+#define CLK_MATRIX_600M_SRC 16
+#define CLK_UART0_SRC 17
+#define CLK_UART0_FRAC 18
+#define SCLK_UART0 19
+#define CLK_UART1_SRC 20
+#define CLK_UART1_FRAC 21
+#define SCLK_UART1 22
+#define CLK_UART2_SRC 23
+#define CLK_UART2_FRAC 24
+#define SCLK_UART2 25
+#define CLK_UART3_SRC 26
+#define CLK_UART3_FRAC 27
+#define SCLK_UART3 28
+#define CLK_UART4_SRC 29
+#define CLK_UART4_FRAC 30
+#define SCLK_UART4 31
+#define CLK_UART5_SRC 32
+#define CLK_UART5_FRAC 33
+#define SCLK_UART5 34
+#define CLK_UART6_SRC 35
+#define CLK_UART6_FRAC 36
+#define SCLK_UART6 37
+#define CLK_UART7_SRC 38
+#define CLK_UART7_FRAC 39
+#define SCLK_UART7 40
+#define CLK_I2S0_2CH_SRC 41
+#define CLK_I2S0_2CH_FRAC 42
+#define MCLK_I2S0_2CH_SAI_SRC 43
+#define CLK_I2S3_8CH_SRC 44
+#define CLK_I2S3_8CH_FRAC 45
+#define MCLK_I2S3_8CH_SAI_SRC 46
+#define CLK_I2S1_8CH_SRC 47
+#define CLK_I2S1_8CH_FRAC 48
+#define MCLK_I2S1_8CH_SAI_SRC 49
+#define CLK_I2S2_2CH_SRC 50
+#define CLK_I2S2_2CH_FRAC 51
+#define MCLK_I2S2_2CH_SAI_SRC 52
+#define CLK_SPDIF_SRC 53
+#define CLK_SPDIF_FRAC 54
+#define MCLK_SPDIF_SRC 55
+#define DCLK_VOP_SRC0 56
+#define DCLK_VOP_SRC1 57
+#define CLK_HSM 58
+#define CLK_CORE_SRC_ACS 59
+#define CLK_CORE_SRC_PVTMUX 60
+#define CLK_CORE_SRC 61
+#define CLK_CORE 62
+#define ACLK_M_CORE_BIU 63
+#define CLK_CORE_PVTPLL_SRC 64
+#define PCLK_DBG 65
+#define SWCLKTCK 66
+#define CLK_SCANHS_CORE 67
+#define CLK_SCANHS_ACLKM_CORE 68
+#define CLK_SCANHS_PCLK_DBG 69
+#define CLK_SCANHS_PCLK_CPU_BIU 70
+#define PCLK_CPU_ROOT 71
+#define PCLK_CORE_GRF 72
+#define PCLK_DAPLITE_BIU 73
+#define PCLK_CPU_BIU 74
+#define CLK_REF_PVTPLL_CORE 75
+#define ACLK_BUS_VOPGL_ROOT 76
+#define ACLK_BUS_VOPGL_BIU 77
+#define ACLK_BUS_H_ROOT 78
+#define ACLK_BUS_H_BIU 79
+#define ACLK_BUS_ROOT 80
+#define HCLK_BUS_ROOT 81
+#define PCLK_BUS_ROOT 82
+#define ACLK_BUS_M_ROOT 83
+#define ACLK_SYSMEM_BIU 84
+#define CLK_TIMER_ROOT 85
+#define ACLK_BUS_BIU 86
+#define HCLK_BUS_BIU 87
+#define PCLK_BUS_BIU 88
+#define PCLK_DFT2APB 89
+#define PCLK_BUS_GRF 90
+#define ACLK_BUS_M_BIU 91
+#define ACLK_GIC 92
+#define ACLK_SPINLOCK 93
+#define ACLK_DMAC 94
+#define PCLK_TIMER 95
+#define CLK_TIMER0 96
+#define CLK_TIMER1 97
+#define CLK_TIMER2 98
+#define CLK_TIMER3 99
+#define CLK_TIMER4 100
+#define CLK_TIMER5 101
+#define PCLK_JDBCK_DAP 102
+#define CLK_JDBCK_DAP 103
+#define PCLK_WDT_NS 104
+#define TCLK_WDT_NS 105
+#define HCLK_TRNG_NS 106
+#define PCLK_UART0 107
+#define PCLK_DMA2DDR 108
+#define ACLK_DMA2DDR 109
+#define PCLK_PWM0 110
+#define CLK_PWM0 111
+#define CLK_CAPTURE_PWM0 112
+#define PCLK_PWM1 113
+#define CLK_PWM1 114
+#define CLK_CAPTURE_PWM1 115
+#define PCLK_SCR 116
+#define ACLK_DCF 117
+#define PCLK_INTMUX 118
+#define CLK_PPLL_I 119
+#define CLK_PPLL_MUX 120
+#define CLK_PPLL_100M_MATRIX 121
+#define CLK_PPLL_50M_MATRIX 122
+#define CLK_REF_PCIE_INNER_PHY 123
+#define CLK_REF_PCIE_100M_PHY 124
+#define ACLK_VPU_L_ROOT 125
+#define CLK_GMAC1_VPU_25M 126
+#define CLK_PPLL_125M_MATRIX 127
+#define ACLK_VPU_ROOT 128
+#define HCLK_VPU_ROOT 129
+#define PCLK_VPU_ROOT 130
+#define ACLK_VPU_BIU 131
+#define HCLK_VPU_BIU 132
+#define PCLK_VPU_BIU 133
+#define ACLK_VPU 134
+#define HCLK_VPU 135
+#define PCLK_CRU_PCIE 136
+#define PCLK_VPU_GRF 137
+#define HCLK_SFC 138
+#define SCLK_SFC 139
+#define CCLK_SRC_EMMC 140
+#define HCLK_EMMC 141
+#define ACLK_EMMC 142
+#define BCLK_EMMC 143
+#define TCLK_EMMC 144
+#define PCLK_GPIO1 145
+#define DBCLK_GPIO1 146
+#define ACLK_VPU_L_BIU 147
+#define PCLK_VPU_IOC 148
+#define HCLK_SAI_I2S0 149
+#define MCLK_SAI_I2S0 150
+#define HCLK_SAI_I2S2 151
+#define MCLK_SAI_I2S2 152
+#define PCLK_ACODEC 153
+#define MCLK_ACODEC_TX 154
+#define PCLK_GPIO3 155
+#define DBCLK_GPIO3 156
+#define PCLK_SPI1 157
+#define CLK_SPI1 158
+#define SCLK_IN_SPI1 159
+#define PCLK_UART2 160
+#define PCLK_UART5 161
+#define PCLK_UART6 162
+#define PCLK_UART7 163
+#define PCLK_I2C3 164
+#define CLK_I2C3 165
+#define PCLK_I2C5 166
+#define CLK_I2C5 167
+#define PCLK_I2C6 168
+#define CLK_I2C6 169
+#define ACLK_MAC_VPU 170
+#define PCLK_MAC_VPU 171
+#define CLK_GMAC1_RMII_VPU 172
+#define CLK_GMAC1_SRC_VPU 173
+#define PCLK_PCIE 174
+#define CLK_PCIE_AUX 175
+#define ACLK_PCIE 176
+#define HCLK_PCIE_SLV 177
+#define HCLK_PCIE_DBI 178
+#define PCLK_PCIE_PHY 179
+#define PCLK_PIPE_GRF 180
+#define CLK_PIPE_USB3OTG_COMBO 181
+#define CLK_UTMI_USB3OTG 182
+#define CLK_PCIE_PIPE_PHY 183
+#define CCLK_SRC_SDIO0 184
+#define HCLK_SDIO0 185
+#define CCLK_SRC_SDIO1 186
+#define HCLK_SDIO1 187
+#define CLK_TS_0 188
+#define CLK_TS_1 189
+#define PCLK_CAN2 190
+#define CLK_CAN2 191
+#define PCLK_CAN3 192
+#define CLK_CAN3 193
+#define PCLK_SARADC 194
+#define CLK_SARADC 195
+#define PCLK_TSADC 196
+#define CLK_TSADC 197
+#define CLK_TSADC_TSEN 198
+#define ACLK_USB3OTG 199
+#define CLK_REF_USB3OTG 200
+#define CLK_SUSPEND_USB3OTG 201
+#define ACLK_GPU_ROOT 202
+#define PCLK_GPU_ROOT 203
+#define ACLK_GPU_BIU 204
+#define PCLK_GPU_BIU 205
+#define ACLK_GPU 206
+#define CLK_GPU_PVTPLL_SRC 207
+#define ACLK_GPU_MALI 208
+#define HCLK_RKVENC_ROOT 209
+#define ACLK_RKVENC_ROOT 210
+#define PCLK_RKVENC_ROOT 211
+#define HCLK_RKVENC_BIU 212
+#define ACLK_RKVENC_BIU 213
+#define PCLK_RKVENC_BIU 214
+#define HCLK_RKVENC 215
+#define ACLK_RKVENC 216
+#define CLK_CORE_RKVENC 217
+#define HCLK_SAI_I2S1 218
+#define MCLK_SAI_I2S1 219
+#define PCLK_I2C1 220
+#define CLK_I2C1 221
+#define PCLK_I2C0 222
+#define CLK_I2C0 223
+#define CLK_UART_JTAG 224
+#define PCLK_SPI0 225
+#define CLK_SPI0 226
+#define SCLK_IN_SPI0 227
+#define PCLK_GPIO4 228
+#define DBCLK_GPIO4 229
+#define PCLK_RKVENC_IOC 230
+#define HCLK_SPDIF 231
+#define MCLK_SPDIF 232
+#define HCLK_PDM 233
+#define MCLK_PDM 234
+#define PCLK_UART1 235
+#define PCLK_UART3 236
+#define PCLK_RKVENC_GRF 237
+#define PCLK_CAN0 238
+#define CLK_CAN0 239
+#define PCLK_CAN1 240
+#define CLK_CAN1 241
+#define ACLK_VO_ROOT 242
+#define HCLK_VO_ROOT 243
+#define PCLK_VO_ROOT 244
+#define ACLK_VO_BIU 245
+#define HCLK_VO_BIU 246
+#define PCLK_VO_BIU 247
+#define HCLK_RGA2E 248
+#define ACLK_RGA2E 249
+#define CLK_CORE_RGA2E 250
+#define HCLK_VDPP 251
+#define ACLK_VDPP 252
+#define CLK_CORE_VDPP 253
+#define PCLK_VO_GRF 254
+#define PCLK_CRU 255
+#define ACLK_VOP_ROOT 256
+#define ACLK_VOP_BIU 257
+#define HCLK_VOP 258
+#define DCLK_VOP0 259
+#define DCLK_VOP1 260
+#define ACLK_VOP 261
+#define PCLK_HDMI 262
+#define CLK_SFR_HDMI 263
+#define CLK_CEC_HDMI 264
+#define CLK_SPDIF_HDMI 265
+#define CLK_HDMIPHY_TMDSSRC 266
+#define CLK_HDMIPHY_PREP 267
+#define PCLK_HDMIPHY 268
+#define HCLK_HDCP_KEY 269
+#define ACLK_HDCP 270
+#define HCLK_HDCP 271
+#define PCLK_HDCP 272
+#define HCLK_CVBS 273
+#define DCLK_CVBS 274
+#define DCLK_4X_CVBS 275
+#define ACLK_JPEG_DECODER 276
+#define HCLK_JPEG_DECODER 277
+#define ACLK_VO_L_ROOT 278
+#define ACLK_VO_L_BIU 279
+#define ACLK_MAC_VO 280
+#define PCLK_MAC_VO 281
+#define CLK_GMAC0_SRC 282
+#define CLK_GMAC0_RMII_50M 283
+#define CLK_GMAC0_TX 284
+#define CLK_GMAC0_RX 285
+#define ACLK_JPEG_ROOT 286
+#define ACLK_JPEG_BIU 287
+#define HCLK_SAI_I2S3 288
+#define MCLK_SAI_I2S3 289
+#define CLK_MACPHY 290
+#define PCLK_VCDCPHY 291
+#define PCLK_GPIO2 292
+#define DBCLK_GPIO2 293
+#define PCLK_VO_IOC 294
+#define CCLK_SRC_SDMMC0 295
+#define HCLK_SDMMC0 296
+#define PCLK_OTPC_NS 297
+#define CLK_SBPI_OTPC_NS 298
+#define CLK_USER_OTPC_NS 299
+#define CLK_HDMIHDP0 300
+#define HCLK_USBHOST 301
+#define HCLK_USBHOST_ARB 302
+#define CLK_USBHOST_OHCI 303
+#define CLK_USBHOST_UTMI 304
+#define PCLK_UART4 305
+#define PCLK_I2C4 306
+#define CLK_I2C4 307
+#define PCLK_I2C7 308
+#define CLK_I2C7 309
+#define PCLK_USBPHY 310
+#define CLK_REF_USBPHY 311
+#define HCLK_RKVDEC_ROOT 312
+#define ACLK_RKVDEC_ROOT_NDFT 313
+#define PCLK_DDRPHY_CRU 314
+#define HCLK_RKVDEC_BIU 315
+#define ACLK_RKVDEC_BIU 316
+#define ACLK_RKVDEC 317
+#define HCLK_RKVDEC 318
+#define CLK_HEVC_CA_RKVDEC 319
+#define ACLK_RKVDEC_PVTMUX_ROOT 320
+#define CLK_RKVDEC_PVTPLL_SRC 321
+#define PCLK_DDR_ROOT 322
+#define PCLK_DDR_BIU 323
+#define PCLK_DDRC 324
+#define PCLK_DDRMON 325
+#define CLK_TIMER_DDRMON 326
+#define PCLK_MSCH_BIU 327
+#define PCLK_DDR_GRF 328
+#define PCLK_DDR_HWLP 329
+#define PCLK_DDRPHY 330
+#define CLK_MSCH_BIU 331
+#define ACLK_DDR_UPCTL 332
+#define CLK_DDR_UPCTL 333
+#define CLK_DDRMON 334
+#define ACLK_DDR_SCRAMBLE 335
+#define ACLK_SPLIT 336
+#define CLK_DDRC_SRC 337
+#define CLK_DDR_PHY 338
+#define PCLK_OTPC_S 339
+#define CLK_SBPI_OTPC_S 340
+#define CLK_USER_OTPC_S 341
+#define PCLK_KEYREADER 342
+#define PCLK_BUS_SGRF 343
+#define PCLK_STIMER 344
+#define CLK_STIMER0 345
+#define CLK_STIMER1 346
+#define PCLK_WDT_S 347
+#define TCLK_WDT_S 348
+#define HCLK_TRNG_S 349
+#define HCLK_BOOTROM 350
+#define PCLK_DCF 351
+#define ACLK_SYSMEM 352
+#define HCLK_TSP 353
+#define ACLK_TSP 354
+#define CLK_CORE_TSP 355
+#define CLK_OTPC_ARB 356
+#define PCLK_OTP_MASK 357
+#define CLK_PMC_OTP 358
+#define PCLK_PMU_ROOT 359
+#define HCLK_PMU_ROOT 360
+#define PCLK_I2C2 361
+#define CLK_I2C2 362
+#define HCLK_PMU_BIU 363
+#define PCLK_PMU_BIU 364
+#define FCLK_MCU 365
+#define RTC_CLK_MCU 366
+#define PCLK_OSCCHK 367
+#define CLK_PMU_MCU_JTAG 368
+#define PCLK_PMU 369
+#define PCLK_GPIO0 370
+#define DBCLK_GPIO0 371
+#define XIN_OSC0_DIV 372
+#define CLK_DEEPSLOW 373
+#define CLK_DDR_FAIL_SAFE 374
+#define PCLK_PMU_HP_TIMER 375
+#define CLK_PMU_HP_TIMER 376
+#define CLK_PMU_32K_HP_TIMER 377
+#define PCLK_PMU_IOC 378
+#define PCLK_PMU_CRU 379
+#define PCLK_PMU_GRF 380
+#define PCLK_PMU_WDT 381
+#define TCLK_PMU_WDT 382
+#define PCLK_PMU_MAILBOX 383
+#define PCLK_SCRKEYGEN 384
+#define CLK_SCRKEYGEN 385
+#define CLK_PVTM_OSCCHK 386
+#define CLK_REFOUT 387
+#define CLK_PVTM_PMU 388
+#define PCLK_PVTM_PMU 389
+#define PCLK_PMU_SGRF 390
+#define HCLK_PMU_SRAM 391
+#define CLK_UART0 392
+#define CLK_UART1 393
+#define CLK_UART2 394
+#define CLK_UART3 395
+#define CLK_UART4 396
+#define CLK_UART5 397
+#define CLK_UART6 398
+#define CLK_UART7 399
+#define MCLK_I2S0_2CH_SAI_SRC_PRE 400
+#define MCLK_I2S1_8CH_SAI_SRC_PRE 401
+#define MCLK_I2S2_2CH_SAI_SRC_PRE 402
+#define MCLK_I2S3_8CH_SAI_SRC_PRE 403
+#define MCLK_SDPDIF_SRC_PRE 404
+#define SCLK_SDMMC_DRV 405
+#define SCLK_SDMMC_SAMPLE 406
+#define SCLK_SDIO0_DRV 407
+#define SCLK_SDIO0_SAMPLE 408
+#define SCLK_SDIO1_DRV 409
+#define SCLK_SDIO1_SAMPLE 410
+
+/* scmi-clocks indices */
+#define SCMI_PCLK_KEYREADER 0
+#define SCMI_HCLK_KLAD 1
+#define SCMI_PCLK_KLAD 2
+#define SCMI_HCLK_TRNG_S 3
+#define SCMI_HCLK_CRYPTO_S 4
+#define SCMI_PCLK_WDT_S 5
+#define SCMI_TCLK_WDT_S 6
+#define SCMI_PCLK_STIMER 7
+#define SCMI_CLK_STIMER0 8
+#define SCMI_CLK_STIMER1 9
+#define SCMI_PCLK_OTP_MASK 10
+#define SCMI_PCLK_OTPC_S 11
+#define SCMI_CLK_SBPI_OTPC_S 12
+#define SCMI_CLK_USER_OTPC_S 13
+#define SCMI_CLK_PMC_OTP 14
+#define SCMI_CLK_OTPC_ARB 15
+#define SCMI_CLK_CORE_TSP 16
+#define SCMI_ACLK_TSP 17
+#define SCMI_HCLK_TSP 18
+#define SCMI_PCLK_DCF 19
+#define SCMI_CLK_DDR 20
+#define SCMI_CLK_CPU 21
+#define SCMI_CLK_GPU 22
+#define SCMI_CORE_CRYPTO 23
+#define SCMI_ACLK_CRYPTO 24
+#define SCMI_PKA_CRYPTO 25
+#define SCMI_HCLK_CRYPTO 26
+#define SCMI_CORE_CRYPTO_S 27
+#define SCMI_ACLK_CRYPTO_S 28
+#define SCMI_PKA_CRYPTO_S 29
+#define SCMI_CORE_KLAD 30
+#define SCMI_ACLK_KLAD 31
+#define SCMI_HCLK_TRNG 32
+
+#endif // _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
diff --git a/include/dt-bindings/clock/rockchip,rk3562-cru.h b/include/dt-bindings/clock/rockchip,rk3562-cru.h
new file mode 100644
index 000000000000..a5b0b153209c
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3562-cru.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3562_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3562_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_DMPLL0 0
+#define PLL_APLL 1
+#define PLL_GPLL 2
+#define PLL_VPLL 3
+#define PLL_HPLL 4
+#define PLL_CPLL 5
+#define PLL_DPLL 6
+#define PLL_DMPLL1 7
+
+/* cru clocks */
+#define ARMCLK 8
+#define CLK_GPU 9
+#define ACLK_RKNN 10
+#define CLK_DDR 11
+#define CLK_MATRIX_50M_SRC 12
+#define CLK_MATRIX_100M_SRC 13
+#define CLK_MATRIX_125M_SRC 14
+#define CLK_MATRIX_200M_SRC 15
+#define CLK_MATRIX_300M_SRC 16
+#define ACLK_TOP 17
+#define ACLK_TOP_VIO 18
+#define CLK_CAM0_OUT2IO 19
+#define CLK_CAM1_OUT2IO 20
+#define CLK_CAM2_OUT2IO 21
+#define CLK_CAM3_OUT2IO 22
+#define ACLK_BUS 23
+#define HCLK_BUS 24
+#define PCLK_BUS 25
+#define PCLK_I2C1 26
+#define PCLK_I2C2 27
+#define PCLK_I2C3 28
+#define PCLK_I2C4 29
+#define PCLK_I2C5 30
+#define CLK_I2C 31
+#define CLK_I2C1 32
+#define CLK_I2C2 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define DCLK_BUS_GPIO 37
+#define DCLK_BUS_GPIO3 38
+#define DCLK_BUS_GPIO4 39
+#define PCLK_TIMER 40
+#define CLK_TIMER0 41
+#define CLK_TIMER1 42
+#define CLK_TIMER2 43
+#define CLK_TIMER3 44
+#define CLK_TIMER4 45
+#define CLK_TIMER5 46
+#define PCLK_STIMER 47
+#define CLK_STIMER0 48
+#define CLK_STIMER1 49
+#define PCLK_WDTNS 50
+#define CLK_WDTNS 51
+#define PCLK_GRF 52
+#define PCLK_SGRF 53
+#define PCLK_MAILBOX 54
+#define PCLK_INTC 55
+#define ACLK_BUS_GIC400 56
+#define ACLK_BUS_SPINLOCK 57
+#define ACLK_DCF 58
+#define PCLK_DCF 59
+#define FCLK_BUS_CM0_CORE 60
+#define CLK_BUS_CM0_RTC 61
+#define HCLK_ICACHE 62
+#define HCLK_DCACHE 63
+#define PCLK_TSADC 64
+#define CLK_TSADC 65
+#define CLK_TSADC_TSEN 66
+#define PCLK_DFT2APB 67
+#define CLK_SARADC_VCCIO156 68
+#define PCLK_GMAC 69
+#define ACLK_GMAC 70
+#define CLK_GMAC_125M_CRU_I 71
+#define CLK_GMAC_50M_CRU_I 72
+#define CLK_GMAC_50M_O 73
+#define CLK_GMAC_ETH_OUT2IO 74
+#define PCLK_APB2ASB_VCCIO156 75
+#define PCLK_TO_VCCIO156 76
+#define PCLK_DSIPHY 77
+#define PCLK_DSITX 78
+#define PCLK_CPU_EMA_DET 79
+#define PCLK_HASH 80
+#define PCLK_TOPCRU 81
+#define PCLK_ASB2APB_VCCIO156 82
+#define PCLK_IOC_VCCIO156 83
+#define PCLK_GPIO3_VCCIO156 84
+#define PCLK_GPIO4_VCCIO156 85
+#define PCLK_SARADC_VCCIO156 86
+#define PCLK_MAC100 87
+#define ACLK_MAC100 89
+#define CLK_MAC100_50M_MATRIX 90
+#define HCLK_CORE 91
+#define PCLK_DDR 92
+#define CLK_MSCH_BRG_BIU 93
+#define PCLK_DDR_HWLP 94
+#define PCLK_DDR_UPCTL 95
+#define PCLK_DDR_PHY 96
+#define PCLK_DDR_DFICTL 97
+#define PCLK_DDR_DMA2DDR 98
+#define PCLK_DDR_MON 99
+#define TMCLK_DDR_MON 100
+#define PCLK_DDR_GRF 101
+#define PCLK_DDR_CRU 102
+#define PCLK_SUBDDR_CRU 103
+#define CLK_GPU_PRE 104
+#define ACLK_GPU_PRE 105
+#define CLK_GPU_BRG 107
+#define CLK_NPU_PRE 108
+#define HCLK_NPU_PRE 109
+#define HCLK_RKNN 111
+#define ACLK_PERI 112
+#define HCLK_PERI 113
+#define PCLK_PERI 114
+#define PCLK_PERICRU 115
+#define HCLK_SAI0 116
+#define CLK_SAI0_SRC 117
+#define CLK_SAI0_FRAC 118
+#define CLK_SAI0 119
+#define MCLK_SAI0 120
+#define MCLK_SAI0_OUT2IO 121
+#define HCLK_SAI1 122
+#define CLK_SAI1_SRC 123
+#define CLK_SAI1_FRAC 124
+#define CLK_SAI1 125
+#define MCLK_SAI1 126
+#define MCLK_SAI1_OUT2IO 127
+#define HCLK_SAI2 128
+#define CLK_SAI2_SRC 129
+#define CLK_SAI2_FRAC 130
+#define CLK_SAI2 131
+#define MCLK_SAI2 132
+#define MCLK_SAI2_OUT2IO 133
+#define HCLK_DSM 134
+#define CLK_DSM 135
+#define HCLK_PDM 136
+#define MCLK_PDM 137
+#define HCLK_SPDIF 138
+#define CLK_SPDIF_SRC 139
+#define CLK_SPDIF_FRAC 140
+#define CLK_SPDIF 141
+#define MCLK_SPDIF 142
+#define HCLK_SDMMC0 143
+#define CCLK_SDMMC0 144
+#define HCLK_SDMMC1 145
+#define CCLK_SDMMC1 146
+#define SCLK_SDMMC0_DRV 147
+#define SCLK_SDMMC0_SAMPLE 148
+#define SCLK_SDMMC1_DRV 149
+#define SCLK_SDMMC1_SAMPLE 150
+#define HCLK_EMMC 151
+#define ACLK_EMMC 152
+#define CCLK_EMMC 153
+#define BCLK_EMMC 154
+#define TMCLK_EMMC 155
+#define SCLK_SFC 156
+#define HCLK_SFC 157
+#define HCLK_USB2HOST 158
+#define HCLK_USB2HOST_ARB 159
+#define PCLK_SPI1 160
+#define CLK_SPI1 161
+#define SCLK_IN_SPI1 162
+#define PCLK_SPI2 163
+#define CLK_SPI2 164
+#define SCLK_IN_SPI2 165
+#define PCLK_UART1 166
+#define PCLK_UART2 167
+#define PCLK_UART3 168
+#define PCLK_UART4 169
+#define PCLK_UART5 170
+#define PCLK_UART6 171
+#define PCLK_UART7 172
+#define PCLK_UART8 173
+#define PCLK_UART9 174
+#define CLK_UART1_SRC 175
+#define CLK_UART1_FRAC 176
+#define CLK_UART1 177
+#define SCLK_UART1 178
+#define CLK_UART2_SRC 179
+#define CLK_UART2_FRAC 180
+#define CLK_UART2 181
+#define SCLK_UART2 182
+#define CLK_UART3_SRC 183
+#define CLK_UART3_FRAC 184
+#define CLK_UART3 185
+#define SCLK_UART3 186
+#define CLK_UART4_SRC 187
+#define CLK_UART4_FRAC 188
+#define CLK_UART4 189
+#define SCLK_UART4 190
+#define CLK_UART5_SRC 191
+#define CLK_UART5_FRAC 192
+#define CLK_UART5 193
+#define SCLK_UART5 194
+#define CLK_UART6_SRC 195
+#define CLK_UART6_FRAC 196
+#define CLK_UART6 197
+#define SCLK_UART6 198
+#define CLK_UART7_SRC 199
+#define CLK_UART7_FRAC 200
+#define CLK_UART7 201
+#define SCLK_UART7 202
+#define CLK_UART8_SRC 203
+#define CLK_UART8_FRAC 204
+#define CLK_UART8 205
+#define SCLK_UART8 206
+#define CLK_UART9_SRC 207
+#define CLK_UART9_FRAC 208
+#define CLK_UART9 209
+#define SCLK_UART9 210
+#define PCLK_PWM1_PERI 211
+#define CLK_PWM1_PERI 212
+#define CLK_CAPTURE_PWM1_PERI 213
+#define PCLK_PWM2_PERI 214
+#define CLK_PWM2_PERI 215
+#define CLK_CAPTURE_PWM2_PERI 216
+#define PCLK_PWM3_PERI 217
+#define CLK_PWM3_PERI 218
+#define CLK_CAPTURE_PWM3_PERI 219
+#define PCLK_CAN0 220
+#define CLK_CAN0 221
+#define PCLK_CAN1 222
+#define CLK_CAN1 223
+#define ACLK_CRYPTO 224
+#define HCLK_CRYPTO 225
+#define PCLK_CRYPTO 226
+#define CLK_CORE_CRYPTO 227
+#define CLK_PKA_CRYPTO 228
+#define HCLK_KLAD 229
+#define PCLK_KEY_READER 230
+#define HCLK_RK_RNG_NS 231
+#define HCLK_RK_RNG_S 232
+#define HCLK_TRNG_NS 233
+#define HCLK_TRNG_S 234
+#define HCLK_CRYPTO_S 235
+#define PCLK_PERI_WDT 236
+#define TCLK_PERI_WDT 237
+#define ACLK_SYSMEM 238
+#define HCLK_BOOTROM 239
+#define PCLK_PERI_GRF 240
+#define ACLK_DMAC 241
+#define ACLK_RKDMAC 242
+#define PCLK_OTPC_NS 243
+#define CLK_SBPI_OTPC_NS 244
+#define CLK_USER_OTPC_NS 245
+#define PCLK_OTPC_S 246
+#define CLK_SBPI_OTPC_S 247
+#define CLK_USER_OTPC_S 248
+#define CLK_OTPC_ARB 249
+#define PCLK_OTPPHY 250
+#define PCLK_USB2PHY 251
+#define PCLK_PIPEPHY 252
+#define PCLK_SARADC 253
+#define CLK_SARADC 254
+#define PCLK_IOC_VCCIO234 255
+#define PCLK_PERI_GPIO1 256
+#define PCLK_PERI_GPIO2 257
+#define DCLK_PERI_GPIO 258
+#define DCLK_PERI_GPIO1 259
+#define DCLK_PERI_GPIO2 260
+#define ACLK_PHP 261
+#define PCLK_PHP 262
+#define ACLK_PCIE20_MST 263
+#define ACLK_PCIE20_SLV 264
+#define ACLK_PCIE20_DBI 265
+#define PCLK_PCIE20 266
+#define CLK_PCIE20_AUX 267
+#define ACLK_USB3OTG 268
+#define CLK_USB3OTG_SUSPEND 269
+#define CLK_USB3OTG_REF 270
+#define CLK_PIPEPHY_REF_FUNC 271
+#define CLK_200M_PMU 272
+#define CLK_RTC_32K 273
+#define CLK_RTC32K_FRAC 274
+#define BUSCLK_PDPMU0 275
+#define PCLK_PMU0_CRU 276
+#define PCLK_PMU0_PMU 277
+#define CLK_PMU0_PMU 278
+#define PCLK_PMU0_HP_TIMER 279
+#define CLK_PMU0_HP_TIMER 280
+#define CLK_PMU0_32K_HP_TIMER 281
+#define PCLK_PMU0_PVTM 282
+#define CLK_PMU0_PVTM 283
+#define PCLK_IOC_PMUIO 284
+#define PCLK_PMU0_GPIO0 285
+#define DBCLK_PMU0_GPIO0 286
+#define PCLK_PMU0_GRF 287
+#define PCLK_PMU0_SGRF 288
+#define CLK_DDR_FAIL_SAFE 289
+#define PCLK_PMU0_SCRKEYGEN 290
+#define PCLK_PMU1_CRU 291
+#define HCLK_PMU1_MEM 292
+#define PCLK_PMU0_I2C0 293
+#define CLK_PMU0_I2C0 294
+#define PCLK_PMU1_UART0 295
+#define CLK_PMU1_UART0_SRC 296
+#define CLK_PMU1_UART0_FRAC 297
+#define CLK_PMU1_UART0 298
+#define SCLK_PMU1_UART0 299
+#define PCLK_PMU1_SPI0 300
+#define CLK_PMU1_SPI0 301
+#define SCLK_IN_PMU1_SPI0 302
+#define PCLK_PMU1_PWM0 303
+#define CLK_PMU1_PWM0 304
+#define CLK_CAPTURE_PMU1_PWM0 305
+#define CLK_PMU1_WIFI 306
+#define FCLK_PMU1_CM0_CORE 307
+#define CLK_PMU1_CM0_RTC 308
+#define PCLK_PMU1_WDTNS 309
+#define CLK_PMU1_WDTNS 310
+#define PCLK_PMU1_MAILBOX 311
+#define CLK_PIPEPHY_DIV 312
+#define CLK_PIPEPHY_XIN24M 313
+#define CLK_PIPEPHY_REF 314
+#define CLK_24M_SSCSRC 315
+#define CLK_USB2PHY_XIN24M 316
+#define CLK_USB2PHY_REF 317
+#define CLK_MIPIDSIPHY_XIN24M 318
+#define CLK_MIPIDSIPHY_REF 319
+#define ACLK_RGA_PRE 320
+#define HCLK_RGA_PRE 321
+#define ACLK_RGA 322
+#define HCLK_RGA 323
+#define CLK_RGA_CORE 324
+#define ACLK_JDEC 325
+#define HCLK_JDEC 326
+#define ACLK_VDPU_PRE 327
+#define CLK_RKVDEC_HEVC_CA 328
+#define HCLK_VDPU_PRE 329
+#define ACLK_RKVDEC 330
+#define HCLK_RKVDEC 331
+#define CLK_RKVENC_CORE 332
+#define ACLK_VEPU_PRE 333
+#define HCLK_VEPU_PRE 334
+#define ACLK_RKVENC 335
+#define HCLK_RKVENC 336
+#define ACLK_VI 337
+#define HCLK_VI 338
+#define PCLK_VI 339
+#define ACLK_ISP 340
+#define HCLK_ISP 341
+#define CLK_ISP 342
+#define ACLK_VICAP 343
+#define HCLK_VICAP 344
+#define DCLK_VICAP 345
+#define CSIRX0_CLK_DATA 346
+#define CSIRX1_CLK_DATA 347
+#define CSIRX2_CLK_DATA 348
+#define CSIRX3_CLK_DATA 349
+#define PCLK_CSIHOST0 350
+#define PCLK_CSIHOST1 351
+#define PCLK_CSIHOST2 352
+#define PCLK_CSIHOST3 353
+#define PCLK_CSIPHY0 354
+#define PCLK_CSIPHY1 355
+#define ACLK_VO_PRE 356
+#define HCLK_VO_PRE 357
+#define ACLK_VOP 358
+#define HCLK_VOP 359
+#define DCLK_VOP 360
+#define DCLK_VOP1 361
+#define ACLK_CRYPTO_S 362
+#define PCLK_CRYPTO_S 363
+#define CLK_CORE_CRYPTO_S 364
+#define CLK_PKA_CRYPTO_S 365
+
+#endif
diff --git a/include/dt-bindings/clock/rockchip,rk3576-cru.h b/include/dt-bindings/clock/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ded5ce42e62a
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3576-cru.h
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_BPLL 0
+#define PLL_LPLL 1
+#define PLL_VPLL 2
+#define PLL_AUPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_PPLL 6
+#define ARMCLK_L 7
+#define ARMCLK_B 8
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 9
+#define CLK_CPLL_DIV10 10
+#define CLK_GPLL_DIV8 11
+#define CLK_GPLL_DIV6 12
+#define CLK_CPLL_DIV4 13
+#define CLK_GPLL_DIV4 14
+#define CLK_SPLL_DIV2 15
+#define CLK_GPLL_DIV3 16
+#define CLK_CPLL_DIV2 17
+#define CLK_GPLL_DIV2 18
+#define CLK_SPLL_DIV1 19
+#define PCLK_TOP_ROOT 20
+#define ACLK_TOP 21
+#define HCLK_TOP 22
+#define CLK_AUDIO_FRAC_0 23
+#define CLK_AUDIO_FRAC_1 24
+#define CLK_AUDIO_FRAC_2 25
+#define CLK_AUDIO_FRAC_3 26
+#define CLK_UART_FRAC_0 27
+#define CLK_UART_FRAC_1 28
+#define CLK_UART_FRAC_2 29
+#define CLK_UART1_SRC_TOP 30
+#define CLK_AUDIO_INT_0 31
+#define CLK_AUDIO_INT_1 32
+#define CLK_AUDIO_INT_2 33
+#define CLK_PDM0_SRC_TOP 34
+#define CLK_PDM1_OUT 35
+#define CLK_GMAC0_125M_SRC 36
+#define CLK_GMAC1_125M_SRC 37
+#define LCLK_ASRC_SRC_0 38
+#define LCLK_ASRC_SRC_1 39
+#define REF_CLK0_OUT_PLL 40
+#define REF_CLK1_OUT_PLL 41
+#define REF_CLK2_OUT_PLL 42
+#define REFCLKO25M_GMAC0_OUT 43
+#define REFCLKO25M_GMAC1_OUT 44
+#define CLK_CIFOUT_OUT 45
+#define CLK_GMAC0_RMII_CRU 46
+#define CLK_GMAC1_RMII_CRU 47
+#define CLK_OTPC_AUTO_RD_G 48
+#define CLK_OTP_PHY_G 49
+#define CLK_MIPI_CAMERAOUT_M0 50
+#define CLK_MIPI_CAMERAOUT_M1 51
+#define CLK_MIPI_CAMERAOUT_M2 52
+#define MCLK_PDM0_SRC_TOP 53
+#define HCLK_AUDIO_ROOT 54
+#define HCLK_ASRC_2CH_0 55
+#define HCLK_ASRC_2CH_1 56
+#define HCLK_ASRC_4CH_0 57
+#define HCLK_ASRC_4CH_1 58
+#define CLK_ASRC_2CH_0 59
+#define CLK_ASRC_2CH_1 60
+#define CLK_ASRC_4CH_0 61
+#define CLK_ASRC_4CH_1 62
+#define MCLK_SAI0_8CH_SRC 63
+#define MCLK_SAI0_8CH 64
+#define HCLK_SAI0_8CH 65
+#define HCLK_SPDIF_RX0 66
+#define MCLK_SPDIF_RX0 67
+#define HCLK_SPDIF_RX1 68
+#define MCLK_SPDIF_RX1 69
+#define MCLK_SAI1_8CH_SRC 70
+#define MCLK_SAI1_8CH 71
+#define HCLK_SAI1_8CH 72
+#define MCLK_SAI2_2CH_SRC 73
+#define MCLK_SAI2_2CH 74
+#define HCLK_SAI2_2CH 75
+#define MCLK_SAI3_2CH_SRC 76
+#define MCLK_SAI3_2CH 77
+#define HCLK_SAI3_2CH 78
+#define MCLK_SAI4_2CH_SRC 79
+#define MCLK_SAI4_2CH 80
+#define HCLK_SAI4_2CH 81
+#define HCLK_ACDCDIG_DSM 82
+#define MCLK_ACDCDIG_DSM 83
+#define CLK_PDM1 84
+#define HCLK_PDM1 85
+#define MCLK_PDM1 86
+#define HCLK_SPDIF_TX0 87
+#define MCLK_SPDIF_TX0 88
+#define HCLK_SPDIF_TX1 89
+#define MCLK_SPDIF_TX1 90
+#define CLK_SAI1_MCLKOUT 91
+#define CLK_SAI2_MCLKOUT 92
+#define CLK_SAI3_MCLKOUT 93
+#define CLK_SAI4_MCLKOUT 94
+#define CLK_SAI0_MCLKOUT 95
+#define HCLK_BUS_ROOT 96
+#define PCLK_BUS_ROOT 97
+#define ACLK_BUS_ROOT 98
+#define HCLK_CAN0 99
+#define CLK_CAN0 100
+#define HCLK_CAN1 101
+#define CLK_CAN1 102
+#define CLK_KEY_SHIFT 103
+#define PCLK_I2C1 104
+#define PCLK_I2C2 105
+#define PCLK_I2C3 106
+#define PCLK_I2C4 107
+#define PCLK_I2C5 108
+#define PCLK_I2C6 109
+#define PCLK_I2C7 110
+#define PCLK_I2C8 111
+#define PCLK_I2C9 112
+#define PCLK_WDT_BUSMCU 113
+#define TCLK_WDT_BUSMCU 114
+#define ACLK_GIC 115
+#define CLK_I2C1 116
+#define CLK_I2C2 117
+#define CLK_I2C3 118
+#define CLK_I2C4 119
+#define CLK_I2C5 120
+#define CLK_I2C6 121
+#define CLK_I2C7 122
+#define CLK_I2C8 123
+#define CLK_I2C9 124
+#define PCLK_SARADC 125
+#define CLK_SARADC 126
+#define PCLK_TSADC 127
+#define CLK_TSADC 128
+#define PCLK_UART0 129
+#define PCLK_UART2 130
+#define PCLK_UART3 131
+#define PCLK_UART4 132
+#define PCLK_UART5 133
+#define PCLK_UART6 134
+#define PCLK_UART7 135
+#define PCLK_UART8 136
+#define PCLK_UART9 137
+#define PCLK_UART10 138
+#define PCLK_UART11 139
+#define SCLK_UART0 140
+#define SCLK_UART2 141
+#define SCLK_UART3 142
+#define SCLK_UART4 143
+#define SCLK_UART5 144
+#define SCLK_UART6 145
+#define SCLK_UART7 146
+#define SCLK_UART8 147
+#define SCLK_UART9 148
+#define SCLK_UART10 149
+#define SCLK_UART11 150
+#define PCLK_SPI0 151
+#define PCLK_SPI1 152
+#define PCLK_SPI2 153
+#define PCLK_SPI3 154
+#define PCLK_SPI4 155
+#define CLK_SPI0 156
+#define CLK_SPI1 157
+#define CLK_SPI2 158
+#define CLK_SPI3 159
+#define CLK_SPI4 160
+#define PCLK_WDT0 161
+#define TCLK_WDT0 162
+#define PCLK_PWM1 163
+#define CLK_PWM1 164
+#define CLK_OSC_PWM1 165
+#define CLK_RC_PWM1 166
+#define PCLK_BUSTIMER0 167
+#define PCLK_BUSTIMER1 168
+#define CLK_TIMER0_ROOT 169
+#define CLK_TIMER0 170
+#define CLK_TIMER1 171
+#define CLK_TIMER2 172
+#define CLK_TIMER3 173
+#define CLK_TIMER4 174
+#define CLK_TIMER5 175
+#define PCLK_MAILBOX0 176
+#define PCLK_GPIO1 177
+#define DBCLK_GPIO1 178
+#define PCLK_GPIO2 179
+#define DBCLK_GPIO2 180
+#define PCLK_GPIO3 181
+#define DBCLK_GPIO3 182
+#define PCLK_GPIO4 183
+#define DBCLK_GPIO4 184
+#define ACLK_DECOM 185
+#define PCLK_DECOM 186
+#define DCLK_DECOM 187
+#define CLK_TIMER1_ROOT 188
+#define CLK_TIMER6 189
+#define CLK_TIMER7 190
+#define CLK_TIMER8 191
+#define CLK_TIMER9 192
+#define CLK_TIMER10 193
+#define CLK_TIMER11 194
+#define ACLK_DMAC0 195
+#define ACLK_DMAC1 196
+#define ACLK_DMAC2 197
+#define ACLK_SPINLOCK 198
+#define HCLK_I3C0 199
+#define HCLK_I3C1 200
+#define HCLK_BUS_CM0_ROOT 201
+#define FCLK_BUS_CM0_CORE 202
+#define CLK_BUS_CM0_RTC 203
+#define PCLK_PMU2 204
+#define PCLK_PWM2 205
+#define CLK_PWM2 206
+#define CLK_RC_PWM2 207
+#define CLK_OSC_PWM2 208
+#define CLK_FREQ_PWM1 209
+#define CLK_COUNTER_PWM1 210
+#define SAI_SCLKIN_FREQ 211
+#define SAI_SCLKIN_COUNTER 212
+#define CLK_I3C0 213
+#define CLK_I3C1 214
+#define PCLK_CSIDPHY1 215
+#define PCLK_DDR_ROOT 216
+#define PCLK_DDR_MON_CH0 217
+#define TMCLK_DDR_MON_CH0 218
+#define ACLK_DDR_ROOT 219
+#define HCLK_DDR_ROOT 220
+#define FCLK_DDR_CM0_CORE 221
+#define CLK_DDR_TIMER_ROOT 222
+#define CLK_DDR_TIMER0 223
+#define CLK_DDR_TIMER1 224
+#define TCLK_WDT_DDR 225
+#define PCLK_WDT 226
+#define PCLK_TIMER 227
+#define CLK_DDR_CM0_RTC 228
+#define ACLK_RKNN0 229
+#define ACLK_RKNN1 230
+#define HCLK_RKNN_ROOT 231
+#define CLK_RKNN_DSU0 232
+#define PCLK_NPUTOP_ROOT 233
+#define PCLK_NPU_TIMER 234
+#define CLK_NPUTIMER_ROOT 235
+#define CLK_NPUTIMER0 236
+#define CLK_NPUTIMER1 237
+#define PCLK_NPU_WDT 238
+#define TCLK_NPU_WDT 239
+#define ACLK_RKNN_CBUF 240
+#define HCLK_NPU_CM0_ROOT 241
+#define FCLK_NPU_CM0_CORE 242
+#define CLK_NPU_CM0_RTC 243
+#define HCLK_RKNN_CBUF 244
+#define HCLK_NVM_ROOT 245
+#define ACLK_NVM_ROOT 246
+#define SCLK_FSPI_X2 247
+#define HCLK_FSPI 248
+#define CCLK_SRC_EMMC 249
+#define HCLK_EMMC 250
+#define ACLK_EMMC 251
+#define BCLK_EMMC 252
+#define TCLK_EMMC 253
+#define PCLK_PHP_ROOT 254
+#define ACLK_PHP_ROOT 255
+#define PCLK_PCIE0 256
+#define CLK_PCIE0_AUX 257
+#define ACLK_PCIE0_MST 258
+#define ACLK_PCIE0_SLV 259
+#define ACLK_PCIE0_DBI 260
+#define ACLK_USB3OTG1 261
+#define CLK_REF_USB3OTG1 262
+#define CLK_SUSPEND_USB3OTG1 263
+#define ACLK_MMU0 264
+#define ACLK_SLV_MMU0 265
+#define ACLK_MMU1 266
+#define ACLK_SLV_MMU1 267
+#define PCLK_PCIE1 268
+#define CLK_PCIE1_AUX 269
+#define ACLK_PCIE1_MST 270
+#define ACLK_PCIE1_SLV 271
+#define ACLK_PCIE1_DBI 272
+#define CLK_RXOOB0 273
+#define CLK_RXOOB1 274
+#define CLK_PMALIVE0 275
+#define CLK_PMALIVE1 276
+#define ACLK_SATA0 277
+#define ACLK_SATA1 278
+#define CLK_USB3OTG1_PIPE_PCLK 279
+#define CLK_USB3OTG1_UTMI 280
+#define CLK_USB3OTG0_PIPE_PCLK 281
+#define CLK_USB3OTG0_UTMI 282
+#define HCLK_SDGMAC_ROOT 283
+#define ACLK_SDGMAC_ROOT 284
+#define PCLK_SDGMAC_ROOT 285
+#define ACLK_GMAC0 286
+#define ACLK_GMAC1 287
+#define PCLK_GMAC0 288
+#define PCLK_GMAC1 289
+#define CCLK_SRC_SDIO 290
+#define HCLK_SDIO 291
+#define CLK_GMAC1_PTP_REF 292
+#define CLK_GMAC0_PTP_REF 293
+#define CLK_GMAC1_PTP_REF_SRC 294
+#define CLK_GMAC0_PTP_REF_SRC 295
+#define CCLK_SRC_SDMMC0 296
+#define HCLK_SDMMC0 297
+#define SCLK_FSPI1_X2 298
+#define HCLK_FSPI1 299
+#define ACLK_DSMC_ROOT 300
+#define ACLK_DSMC 301
+#define PCLK_DSMC 302
+#define CLK_DSMC_SYS 303
+#define HCLK_HSGPIO 304
+#define CLK_HSGPIO_TX 305
+#define CLK_HSGPIO_RX 306
+#define ACLK_HSGPIO 307
+#define PCLK_PHPPHY_ROOT 308
+#define PCLK_PCIE2_COMBOPHY0 309
+#define PCLK_PCIE2_COMBOPHY1 310
+#define CLK_PCIE_100M_SRC 311
+#define CLK_PCIE_100M_NDUTY_SRC 312
+#define CLK_REF_PCIE0_PHY 313
+#define CLK_REF_PCIE1_PHY 314
+#define CLK_REF_MPHY_26M 315
+#define HCLK_RKVDEC_ROOT 316
+#define ACLK_RKVDEC_ROOT 317
+#define HCLK_RKVDEC 318
+#define CLK_RKVDEC_HEVC_CA 319
+#define CLK_RKVDEC_CORE 320
+#define ACLK_UFS_ROOT 321
+#define ACLK_USB_ROOT 322
+#define PCLK_USB_ROOT 323
+#define ACLK_USB3OTG0 324
+#define CLK_REF_USB3OTG0 325
+#define CLK_SUSPEND_USB3OTG0 326
+#define ACLK_MMU2 327
+#define ACLK_SLV_MMU2 328
+#define ACLK_UFS_SYS 329
+#define ACLK_VPU_ROOT 330
+#define ACLK_VPU_MID_ROOT 331
+#define HCLK_VPU_ROOT 332
+#define ACLK_JPEG_ROOT 333
+#define ACLK_VPU_LOW_ROOT 334
+#define HCLK_RGA2E_0 335
+#define ACLK_RGA2E_0 336
+#define CLK_CORE_RGA2E_0 337
+#define ACLK_JPEG 338
+#define HCLK_JPEG 339
+#define HCLK_VDPP 340
+#define ACLK_VDPP 341
+#define CLK_CORE_VDPP 342
+#define HCLK_RGA2E_1 343
+#define ACLK_RGA2E_1 344
+#define CLK_CORE_RGA2E_1 345
+#define DCLK_EBC_FRAC_SRC 346
+#define HCLK_EBC 347
+#define ACLK_EBC 348
+#define DCLK_EBC 349
+#define HCLK_VEPU0_ROOT 350
+#define ACLK_VEPU0_ROOT 351
+#define HCLK_VEPU0 352
+#define ACLK_VEPU0 353
+#define CLK_VEPU0_CORE 354
+#define ACLK_VI_ROOT 355
+#define HCLK_VI_ROOT 356
+#define PCLK_VI_ROOT 357
+#define DCLK_VICAP 358
+#define ACLK_VICAP 359
+#define HCLK_VICAP 360
+#define CLK_ISP_CORE 361
+#define CLK_ISP_CORE_MARVIN 362
+#define CLK_ISP_CORE_VICAP 363
+#define ACLK_ISP 364
+#define HCLK_ISP 365
+#define ACLK_VPSS 366
+#define HCLK_VPSS 367
+#define CLK_CORE_VPSS 368
+#define PCLK_CSI_HOST_0 369
+#define PCLK_CSI_HOST_1 370
+#define PCLK_CSI_HOST_2 371
+#define PCLK_CSI_HOST_3 372
+#define PCLK_CSI_HOST_4 373
+#define ICLK_CSIHOST01 374
+#define ICLK_CSIHOST0 375
+#define CLK_ISP_PVTPLL_SRC 376
+#define ACLK_VI_ROOT_INTER 377
+#define CLK_VICAP_I0CLK 378
+#define CLK_VICAP_I1CLK 379
+#define CLK_VICAP_I2CLK 380
+#define CLK_VICAP_I3CLK 381
+#define CLK_VICAP_I4CLK 382
+#define ACLK_VOP_ROOT 383
+#define HCLK_VOP_ROOT 384
+#define PCLK_VOP_ROOT 385
+#define HCLK_VOP 386
+#define ACLK_VOP 387
+#define DCLK_VP0_SRC 388
+#define DCLK_VP1_SRC 389
+#define DCLK_VP2_SRC 390
+#define DCLK_VP0 391
+#define DCLK_VP1 392
+#define DCLK_VP2 393
+#define PCLK_VOPGRF 394
+#define ACLK_VO0_ROOT 395
+#define HCLK_VO0_ROOT 396
+#define PCLK_VO0_ROOT 397
+#define PCLK_VO0_GRF 398
+#define ACLK_HDCP0 399
+#define HCLK_HDCP0 400
+#define PCLK_HDCP0 401
+#define CLK_TRNG0_SKP 402
+#define PCLK_DSIHOST0 403
+#define CLK_DSIHOST0 404
+#define PCLK_HDMITX0 405
+#define CLK_HDMITX0_EARC 406
+#define CLK_HDMITX0_REF 407
+#define PCLK_EDP0 408
+#define CLK_EDP0_24M 409
+#define CLK_EDP0_200M 410
+#define MCLK_SAI5_8CH_SRC 411
+#define MCLK_SAI5_8CH 412
+#define HCLK_SAI5_8CH 413
+#define MCLK_SAI6_8CH_SRC 414
+#define MCLK_SAI6_8CH 415
+#define HCLK_SAI6_8CH 416
+#define HCLK_SPDIF_TX2 417
+#define MCLK_SPDIF_TX2 418
+#define HCLK_SPDIF_RX2 419
+#define MCLK_SPDIF_RX2 420
+#define HCLK_SAI8_8CH 421
+#define MCLK_SAI8_8CH_SRC 422
+#define MCLK_SAI8_8CH 423
+#define ACLK_VO1_ROOT 424
+#define HCLK_VO1_ROOT 425
+#define PCLK_VO1_ROOT 426
+#define MCLK_SAI7_8CH_SRC 427
+#define MCLK_SAI7_8CH 428
+#define HCLK_SAI7_8CH 429
+#define HCLK_SPDIF_TX3 430
+#define HCLK_SPDIF_TX4 431
+#define HCLK_SPDIF_TX5 432
+#define MCLK_SPDIF_TX3 433
+#define CLK_AUX16MHZ_0 434
+#define ACLK_DP0 435
+#define PCLK_DP0 436
+#define PCLK_VO1_GRF 437
+#define ACLK_HDCP1 438
+#define HCLK_HDCP1 439
+#define PCLK_HDCP1 440
+#define CLK_TRNG1_SKP 441
+#define HCLK_SAI9_8CH 442
+#define MCLK_SAI9_8CH_SRC 443
+#define MCLK_SAI9_8CH 444
+#define MCLK_SPDIF_TX4 445
+#define MCLK_SPDIF_TX5 446
+#define CLK_GPU_SRC_PRE 447
+#define CLK_GPU 448
+#define PCLK_GPU_ROOT 449
+#define ACLK_CENTER_ROOT 450
+#define ACLK_CENTER_LOW_ROOT 451
+#define HCLK_CENTER_ROOT 452
+#define PCLK_CENTER_ROOT 453
+#define ACLK_DMA2DDR 454
+#define ACLK_DDR_SHAREMEM 455
+#define PCLK_DMA2DDR 456
+#define PCLK_SHAREMEM 457
+#define HCLK_VEPU1_ROOT 458
+#define ACLK_VEPU1_ROOT 459
+#define HCLK_VEPU1 460
+#define ACLK_VEPU1 461
+#define CLK_VEPU1_CORE 462
+#define CLK_JDBCK_DAP 463
+#define PCLK_MIPI_DCPHY 464
+#define CLK_32K_USB2DEBUG 465
+#define PCLK_CSIDPHY 466
+#define PCLK_USBDPPHY 467
+#define CLK_PMUPHY_REF_SRC 468
+#define CLK_USBDP_COMBO_PHY_IMMORTAL 469
+#define CLK_HDMITXHDP 470
+#define PCLK_MPHY 471
+#define CLK_REF_OSC_MPHY 472
+#define CLK_REF_UFS_CLKOUT 473
+#define HCLK_PMU1_ROOT 474
+#define HCLK_PMU_CM0_ROOT 475
+#define CLK_200M_PMU_SRC 476
+#define CLK_100M_PMU_SRC 477
+#define CLK_50M_PMU_SRC 478
+#define FCLK_PMU_CM0_CORE 479
+#define CLK_PMU_CM0_RTC 480
+#define PCLK_PMU1 481
+#define CLK_PMU1 482
+#define PCLK_PMU1WDT 483
+#define TCLK_PMU1WDT 484
+#define PCLK_PMUTIMER 485
+#define CLK_PMUTIMER_ROOT 486
+#define CLK_PMUTIMER0 487
+#define CLK_PMUTIMER1 488
+#define PCLK_PMU1PWM 489
+#define CLK_PMU1PWM 490
+#define CLK_PMU1PWM_OSC 491
+#define PCLK_PMUPHY_ROOT 492
+#define PCLK_I2C0 493
+#define CLK_I2C0 494
+#define SCLK_UART1 495
+#define PCLK_UART1 496
+#define CLK_PMU1PWM_RC 497
+#define CLK_PDM0 498
+#define HCLK_PDM0 499
+#define MCLK_PDM0 500
+#define HCLK_VAD 501
+#define CLK_OSCCHK_PVTM 502
+#define CLK_PDM0_OUT 503
+#define CLK_HPTIMER_SRC 504
+#define PCLK_PMU0_ROOT 505
+#define PCLK_PMU0 506
+#define PCLK_GPIO0 507
+#define DBCLK_GPIO0 508
+#define CLK_OSC0_PMU1 509
+#define PCLK_PMU1_ROOT 510
+#define XIN_OSC0_DIV 511
+#define ACLK_USB 512
+#define ACLK_UFS 513
+#define ACLK_SDGMAC 514
+#define HCLK_SDGMAC 515
+#define PCLK_SDGMAC 516
+#define HCLK_VO1 517
+#define HCLK_VO0 518
+#define PCLK_CCI_ROOT 519
+#define ACLK_CCI_ROOT 520
+#define HCLK_VO0VOP_CHANNEL 521
+#define ACLK_VO0VOP_CHANNEL 522
+#define ACLK_TOP_MID 523
+#define ACLK_SECURE_HIGH 524
+#define CLK_USBPHY_REF_SRC 525
+#define CLK_PHY_REF_SRC 526
+#define CLK_CPLL_REF_SRC 527
+#define CLK_AUPLL_REF_SRC 528
+#define PCLK_SECURE_NS 529
+#define HCLK_SECURE_NS 530
+#define ACLK_SECURE_NS 531
+#define PCLK_OTPC_NS 532
+#define HCLK_CRYPTO_NS 533
+#define HCLK_TRNG_NS 534
+#define CLK_OTPC_NS 535
+#define SCLK_DSU 536
+#define SCLK_DDR 537
+#define ACLK_CRYPTO_NS 538
+#define CLK_PKA_CRYPTO_NS 539
+#define ACLK_RKVDEC_ROOT_BAK 540
+#define CLK_AUDIO_FRAC_0_SRC 541
+#define CLK_AUDIO_FRAC_1_SRC 542
+#define CLK_AUDIO_FRAC_2_SRC 543
+#define CLK_AUDIO_FRAC_3_SRC 544
+#define PCLK_HDPTX_APB 545
+
+/* secure clk */
+#define CLK_STIMER0_ROOT 546
+#define CLK_STIMER1_ROOT 547
+#define PCLK_SECURE_S 548
+#define HCLK_SECURE_S 549
+#define ACLK_SECURE_S 550
+#define CLK_PKA_CRYPTO_S 551
+#define HCLK_VO1_S 552
+#define PCLK_VO1_S 553
+#define HCLK_VO0_S 554
+#define PCLK_VO0_S 555
+#define PCLK_KLAD 556
+#define HCLK_CRYPTO_S 557
+#define HCLK_KLAD 558
+#define ACLK_CRYPTO_S 559
+#define HCLK_TRNG_S 560
+#define PCLK_OTPC_S 561
+#define CLK_OTPC_S 562
+#define PCLK_WDT_S 563
+#define TCLK_WDT_S 564
+#define PCLK_HDCP0_TRNG 565
+#define PCLK_HDCP1_TRNG 566
+#define HCLK_HDCP_KEY0 567
+#define HCLK_HDCP_KEY1 568
+#define PCLK_EDP_S 569
+#define ACLK_KLAD 570
+
+/* SCMI clocks: use these IDs when changing clocks through the SCMI firmware interface (usage sketch follows this header) */
+#define SCMI_ARMCLK_L 10
+#define SCMI_ARMCLK_B 11
+#define SCMI_CLK_GPU 456
+
+/* IOC-controlled output clocks */
+#define CLK_SAI0_MCLKOUT_TO_IO 571
+#define CLK_SAI1_MCLKOUT_TO_IO 572
+#define CLK_SAI2_MCLKOUT_TO_IO 573
+#define CLK_SAI3_MCLKOUT_TO_IO 574
+#define CLK_SAI4_MCLKOUT_TO_IO 575
+#define CLK_FSPI0_TO_IO 576
+#define CLK_FSPI1_TO_IO 577
+
+#endif
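Usage sketch (not part of the patch): a board or SoC DTS includes this header and passes the indices above as clock-specifier cells of the CRU node, while rate changes for the clocks in the SCMI block are requested from firmware instead. The node labels (&cru, &scmi_clk, &uart2, &gpu) are illustrative assumptions, not taken from this series.

    #include <dt-bindings/clock/rockchip,rk3576-cru.h>

    &uart2 {
            /* baud and APB bus clocks resolved via the CRU indices above */
            clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
            clock-names = "baudclk", "apb_pclk";
    };

    &gpu {
            /* rate control is delegated to firmware via the SCMI clock id */
            clocks = <&scmi_clk SCMI_CLK_GPU>;
    };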
diff --git a/include/dt-bindings/clock/rockchip,rv1126b-cru.h b/include/dt-bindings/clock/rockchip,rv1126b-cru.h
new file mode 100644
index 000000000000..721d50a1419f
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126b-cru.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126B_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126B_H
+
+/* pll clocks */
+#define PLL_GPLL 0
+#define PLL_CPLL 1
+#define PLL_AUPLL 2
+#define ARMCLK 3
+#define SCLK_DDR 4
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 5
+#define CLK_CPLL_DIV10 6
+#define CLK_CPLL_DIV8 7
+#define CLK_GPLL_DIV8 8
+#define CLK_GPLL_DIV6 9
+#define CLK_GPLL_DIV4 10
+#define CLK_CPLL_DIV3 11
+#define CLK_GPLL_DIV3 12
+#define CLK_CPLL_DIV2 13
+#define CLK_GPLL_DIV2 14
+#define CLK_CM_FRAC0 15
+#define CLK_CM_FRAC1 16
+#define CLK_CM_FRAC2 17
+#define CLK_UART_FRAC0 18
+#define CLK_UART_FRAC1 19
+#define CLK_AUDIO_FRAC0 20
+#define CLK_AUDIO_FRAC1 21
+#define CLK_AUDIO_INT0 22
+#define CLK_AUDIO_INT1 23
+#define SCLK_UART0_SRC 24
+#define SCLK_UART1 25
+#define SCLK_UART2 26
+#define SCLK_UART3 27
+#define SCLK_UART4 28
+#define SCLK_UART5 29
+#define SCLK_UART6 30
+#define SCLK_UART7 31
+#define MCLK_SAI0 32
+#define MCLK_SAI1 33
+#define MCLK_SAI2 34
+#define MCLK_PDM 35
+#define CLKOUT_PDM 36
+#define MCLK_ASRC0 37
+#define MCLK_ASRC1 38
+#define MCLK_ASRC2 39
+#define MCLK_ASRC3 40
+#define CLK_ASRC0 41
+#define CLK_ASRC1 42
+#define CLK_CORE_PLL 43
+#define CLK_NPU_PLL 44
+#define CLK_VEPU_PLL 45
+#define CLK_ISP_PLL 46
+#define CLK_AISP_PLL 47
+#define CLK_SARADC0_SRC 48
+#define CLK_SARADC1_SRC 49
+#define CLK_SARADC2_SRC 50
+#define HCLK_NPU_ROOT 51
+#define PCLK_NPU_ROOT 52
+#define ACLK_VEPU_ROOT 53
+#define HCLK_VEPU_ROOT 54
+#define PCLK_VEPU_ROOT 55
+#define CLK_CORE_RGA_SRC 56
+#define ACLK_GMAC_ROOT 57
+#define ACLK_VI_ROOT 58
+#define HCLK_VI_ROOT 59
+#define PCLK_VI_ROOT 60
+#define DCLK_VICAP_ROOT 61
+#define CLK_SYS_DSMC_ROOT 62
+#define ACLK_VDO_ROOT 63
+#define ACLK_RKVDEC_ROOT 64
+#define HCLK_VDO_ROOT 65
+#define PCLK_VDO_ROOT 66
+#define DCLK_OOC_SRC 67
+#define DCLK_VOP 68
+#define DCLK_DECOM_SRC 69
+#define PCLK_DDR_ROOT 70
+#define ACLK_SYSMEM_SRC 71
+#define ACLK_TOP_ROOT 72
+#define ACLK_BUS_ROOT 73
+#define HCLK_BUS_ROOT 74
+#define PCLK_BUS_ROOT 75
+#define CCLK_SDMMC0 76
+#define CCLK_SDMMC1 77
+#define CCLK_EMMC 78
+#define SCLK_2X_FSPI0 79
+#define CLK_GMAC_PTP_REF_SRC 80
+#define CLK_GMAC_125M 81
+#define CLK_TIMER_ROOT 82
+#define TCLK_WDT_NS_SRC 83
+#define TCLK_WDT_S_SRC 84
+#define TCLK_WDT_HPMCU 85
+#define CLK_CAN0 86
+#define CLK_CAN1 87
+#define PCLK_PERI_ROOT 88
+#define ACLK_PERI_ROOT 89
+#define CLK_I2C_BUS_SRC 90
+#define CLK_SPI0 91
+#define CLK_SPI1 92
+#define BUSCLK_PMU_SRC 93
+#define CLK_PWM0 94
+#define CLK_PWM2 95
+#define CLK_PWM3 96
+#define CLK_PKA_RKCE_SRC 97
+#define ACLK_RKCE_SRC 98
+#define ACLK_VCP_ROOT 99
+#define HCLK_VCP_ROOT 100
+#define PCLK_VCP_ROOT 101
+#define CLK_CORE_FEC_SRC 102
+#define CLK_CORE_AVSP_SRC 103
+#define CLK_50M_GMAC_IOBUF_VI 104
+#define PCLK_TOP_ROOT 105
+#define CLK_MIPI0_OUT2IO 106
+#define CLK_MIPI1_OUT2IO 107
+#define CLK_MIPI2_OUT2IO 108
+#define CLK_MIPI3_OUT2IO 109
+#define CLK_CIF_OUT2IO 110
+#define CLK_MAC_OUT2IO 111
+#define MCLK_SAI0_OUT2IO 112
+#define MCLK_SAI1_OUT2IO 113
+#define MCLK_SAI2_OUT2IO 114
+#define CLK_CM_FRAC0_SRC 115
+#define CLK_CM_FRAC1_SRC 116
+#define CLK_CM_FRAC2_SRC 117
+#define CLK_UART_FRAC0_SRC 118
+#define CLK_UART_FRAC1_SRC 119
+#define CLK_AUDIO_FRAC0_SRC 120
+#define CLK_AUDIO_FRAC1_SRC 121
+#define ACLK_NPU_ROOT 122
+#define HCLK_RKNN 123
+#define ACLK_RKNN 124
+#define PCLK_GPIO3 125
+#define DBCLK_GPIO3 126
+#define PCLK_IOC_VCCIO3 127
+#define PCLK_SARADC0 128
+#define CLK_SARADC0 129
+#define HCLK_SDMMC1 130
+#define HCLK_VEPU 131
+#define ACLK_VEPU 132
+#define CLK_CORE_VEPU 133
+#define HCLK_FEC 134
+#define ACLK_FEC 135
+#define CLK_CORE_FEC 136
+#define HCLK_AVSP 137
+#define ACLK_AVSP 138
+#define BUSCLK_PMU1_ROOT 139
+#define HCLK_AISP 140
+#define ACLK_AISP 141
+#define CLK_CORE_AISP 142
+#define CLK_CORE_ISP_ROOT 143
+#define PCLK_DSMC 144
+#define ACLK_DSMC 145
+#define HCLK_CAN0 146
+#define HCLK_CAN1 147
+#define PCLK_GPIO2 148
+#define DBCLK_GPIO2 149
+#define PCLK_GPIO4 150
+#define DBCLK_GPIO4 151
+#define PCLK_GPIO5 152
+#define DBCLK_GPIO5 153
+#define PCLK_GPIO6 154
+#define DBCLK_GPIO6 155
+#define PCLK_GPIO7 156
+#define DBCLK_GPIO7 157
+#define PCLK_IOC_VCCIO2 158
+#define PCLK_IOC_VCCIO4 159
+#define PCLK_IOC_VCCIO5 160
+#define PCLK_IOC_VCCIO6 161
+#define PCLK_IOC_VCCIO7 162
+#define HCLK_ISP 163
+#define ACLK_ISP 164
+#define CLK_CORE_ISP 165
+#define HCLK_VICAP 166
+#define ACLK_VICAP 167
+#define DCLK_VICAP 168
+#define ISP0CLK_VICAP 169
+#define HCLK_VPSS 170
+#define ACLK_VPSS 171
+#define CLK_CORE_VPSS 172
+#define PCLK_CSI2HOST0 173
+#define DCLK_CSI2HOST0 174
+#define PCLK_CSI2HOST1 175
+#define DCLK_CSI2HOST1 176
+#define PCLK_CSI2HOST2 177
+#define DCLK_CSI2HOST2 178
+#define PCLK_CSI2HOST3 179
+#define DCLK_CSI2HOST3 180
+#define HCLK_SDMMC0 181
+#define ACLK_GMAC 182
+#define PCLK_GMAC 183
+#define CLK_GMAC_PTP_REF 184
+#define PCLK_CSIPHY0 185
+#define PCLK_CSIPHY1 186
+#define PCLK_MACPHY 187
+#define PCLK_SARADC1 188
+#define CLK_SARADC1 189
+#define PCLK_SARADC2 190
+#define CLK_SARADC2 191
+#define ACLK_RKVDEC 192
+#define HCLK_RKVDEC 193
+#define CLK_HEVC_CA_RKVDEC 194
+#define ACLK_VOP 195
+#define HCLK_VOP 196
+#define HCLK_RKJPEG 197
+#define ACLK_RKJPEG 198
+#define ACLK_RKMMU_DECOM 199
+#define HCLK_RKMMU_DECOM 200
+#define DCLK_DECOM 201
+#define ACLK_DECOM 202
+#define PCLK_DECOM 203
+#define PCLK_MIPI_DSI 204
+#define PCLK_DSIPHY 205
+#define ACLK_OOC 206
+#define ACLK_SYSMEM 207
+#define PCLK_DDRC 208
+#define PCLK_DDRMON 209
+#define CLK_TIMER_DDRMON 210
+#define PCLK_DFICTRL 211
+#define PCLK_DDRPHY 212
+#define PCLK_DMA2DDR 213
+#define CLK_RCOSC_SRC 214
+#define BUSCLK_PMU_MUX 215
+#define BUSCLK_PMU_ROOT 216
+#define PCLK_PMU 217
+#define CLK_XIN_RC_DIV 218
+#define CLK_32K 219
+#define PCLK_PMU_GPIO0 220
+#define DBCLK_PMU_GPIO0 221
+#define PCLK_PMU_HP_TIMER 222
+#define CLK_PMU_HP_TIMER 223
+#define CLK_PMU_32K_HP_TIMER 224
+#define PCLK_PWM1 225
+#define CLK_PWM1 226
+#define CLK_OSC_PWM1 227
+#define CLK_RC_PWM1 228
+#define CLK_FREQ_PWM1 229
+#define CLK_COUNTER_PWM1 230
+#define PCLK_I2C2 231
+#define CLK_I2C2 232
+#define PCLK_UART0 233
+#define SCLK_UART0 234
+#define PCLK_RCOSC_CTRL 235
+#define CLK_OSC_RCOSC_CTRL 236
+#define CLK_REF_RCOSC_CTRL 237
+#define PCLK_IOC_PMUIO0 238
+#define CLK_REFOUT 239
+#define CLK_PREROLL 240
+#define CLK_PREROLL_32K 241
+#define HCLK_PMU_SRAM 242
+#define PCLK_WDT_LPMCU 243
+#define TCLK_WDT_LPMCU 244
+#define CLK_LPMCU 245
+#define CLK_LPMCU_RTC 246
+#define PCLK_LPMCU_MAILBOX 247
+#define HCLK_OOC 248
+#define PCLK_SPI2AHB 249
+#define HCLK_SPI2AHB 250
+#define HCLK_FSPI1 251
+#define HCLK_XIP_FSPI1 252
+#define SCLK_1X_FSPI1 253
+#define PCLK_IOC_PMUIO1 254
+#define PCLK_AUDIO_ADC_PMU 255
+#define MCLK_AUDIO_ADC_PMU 256
+#define MCLK_AUDIO_ADC_DIV4_PMU 257
+#define MCLK_LPSAI 258
+#define ACLK_GIC400 259
+#define PCLK_WDT_NS 260
+#define TCLK_WDT_NS 261
+#define PCLK_WDT_HPMCU 262
+#define HCLK_CACHE 263
+#define PCLK_HPMCU_MAILBOX 264
+#define PCLK_HPMCU_INTMUX 265
+#define CLK_HPMCU 266
+#define CLK_HPMCU_RTC 267
+#define PCLK_RKDMA 268
+#define ACLK_RKDMA 269
+#define PCLK_DCF 270
+#define ACLK_DCF 271
+#define HCLK_RGA 272
+#define ACLK_RGA 273
+#define CLK_CORE_RGA 274
+#define PCLK_TIMER 275
+#define CLK_TIMER0 276
+#define CLK_TIMER1 277
+#define CLK_TIMER2 278
+#define CLK_TIMER3 279
+#define CLK_TIMER4 280
+#define CLK_TIMER5 281
+#define PCLK_I2C0 282
+#define CLK_I2C0 283
+#define PCLK_I2C1 284
+#define CLK_I2C1 285
+#define PCLK_I2C3 286
+#define CLK_I2C3 287
+#define PCLK_I2C4 288
+#define CLK_I2C4 289
+#define PCLK_I2C5 290
+#define CLK_I2C5 291
+#define PCLK_SPI0 292
+#define PCLK_SPI1 293
+#define PCLK_PWM0 294
+#define CLK_OSC_PWM0 295
+#define CLK_RC_PWM0 296
+#define PCLK_PWM2 297
+#define CLK_OSC_PWM2 298
+#define CLK_RC_PWM2 299
+#define PCLK_PWM3 300
+#define CLK_OSC_PWM3 301
+#define CLK_RC_PWM3 302
+#define PCLK_UART1 303
+#define PCLK_UART2 304
+#define PCLK_UART3 305
+#define PCLK_UART4 306
+#define PCLK_UART5 307
+#define PCLK_UART6 308
+#define PCLK_UART7 309
+#define PCLK_TSADC 310
+#define CLK_TSADC 311
+#define HCLK_SAI0 312
+#define HCLK_SAI1 313
+#define HCLK_SAI2 314
+#define HCLK_RKDSM 315
+#define MCLK_RKDSM 316
+#define HCLK_PDM 317
+#define HCLK_ASRC0 318
+#define HCLK_ASRC1 319
+#define PCLK_AUDIO_ADC_BUS 320
+#define MCLK_AUDIO_ADC_BUS 321
+#define MCLK_AUDIO_ADC_DIV4_BUS 322
+#define PCLK_RKCE 323
+#define HCLK_NS_RKCE 324
+#define PCLK_OTPC_NS 325
+#define CLK_SBPI_OTPC_NS 326
+#define CLK_USER_OTPC_NS 327
+#define CLK_OTPC_ARB 328
+#define PCLK_OTP_MASK 329
+#define CLK_TSADC_PHYCTRL 330
+#define LRCK_SRC_ASRC0 331
+#define LRCK_DST_ASRC0 332
+#define LRCK_SRC_ASRC1 333
+#define LRCK_DST_ASRC1 334
+#define PCLK_KEY_READER 335
+#define ACLK_NSRKCE 336
+#define CLK_PKA_NSRKCE 337
+#define PCLK_RTC_ROOT 338
+#define PCLK_GPIO1 339
+#define DBCLK_GPIO1 340
+#define PCLK_IOC_VCCIO1 341
+#define ACLK_USB3OTG 342
+#define CLK_REF_USB3OTG 343
+#define CLK_SUSPEND_USB3OTG 344
+#define HCLK_USB2HOST 345
+#define HCLK_ARB_USB2HOST 346
+#define PCLK_RTC_TEST 347
+#define HCLK_EMMC 348
+#define HCLK_FSPI0 349
+#define HCLK_XIP_FSPI0 350
+#define PCLK_PIPEPHY 351
+#define PCLK_USB2PHY 352
+#define CLK_REF_PIPEPHY_CPLL_SRC 353
+#define CLK_REF_PIPEPHY 354
+#define HCLK_VPSL 355
+#define ACLK_VPSL 356
+#define CLK_CORE_VPSL 357
+#define CLK_MACPHY 358
+#define HCLK_RKRNG_NS 359
+#define HCLK_RKRNG_S_NS 360
+#define CLK_AISP_PLL_SRC 361
+
+/* secure clks */
+#define CLK_USER_OTPC_S 362
+#define CLK_SBPI_OTPC_S 363
+#define PCLK_OTPC_S 364
+#define PCLK_KEY_READER_S 365
+#define HCLK_KL_RKCE_S 366
+#define HCLK_RKCE_S 367
+#define PCLK_WDT_S 368
+#define TCLK_WDT_S 369
+#define CLK_STIMER0 370
+#define CLK_STIMER1 371
+#define PCLK_STIMER 372
+#define HCLK_RKRNG_S 373
+#define CLK_PKA_RKCE_S 374
+#define ACLK_RKCE_S 375
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynos2200-cmu.h b/include/dt-bindings/clock/samsung,exynos2200-cmu.h
new file mode 100644
index 000000000000..310552be0c8c
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos2200-cmu.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Device Tree binding constants for Exynos2200 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS2200_H
+#define _DT_BINDINGS_CLOCK_EXYNOS2200_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_FOUT_MMC_PLL 6
+#define CLK_FOUT_SHARED_MIF_PLL 7
+
+#define CLK_MOUT_CMU_CP_MPLL_CLK_D2_USER 8
+#define CLK_MOUT_CMU_CP_MPLL_CLK_USER 9
+#define CLK_MOUT_CMU_AUD_AUDIF0 10
+#define CLK_MOUT_CMU_AUD_AUDIF1 11
+#define CLK_MOUT_CMU_AUD_CPU 12
+#define CLK_MOUT_CMU_CPUCL0_DBG_NOC 13
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 14
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 15
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 16
+#define CLK_MOUT_CMU_DNC_NOC 17
+#define CLK_MOUT_CMU_DPUB_NOC 18
+#define CLK_MOUT_CMU_DPUF_NOC 19
+#define CLK_MOUT_CMU_DSP_NOC 20
+#define CLK_MOUT_CMU_DSU_SWITCH 21
+#define CLK_MOUT_CMU_G3D_SWITCH 22
+#define CLK_MOUT_CMU_GNPU_NOC 23
+#define CLK_MOUT_CMU_UFS_MMC_CARD 24
+#define CLK_MOUT_CMU_M2M_NOC 25
+#define CLK_MOUT_CMU_NOCL0_NOC 26
+#define CLK_MOUT_CMU_NOCL1A_NOC 27
+#define CLK_MOUT_CMU_NOCL1B_NOC0 28
+#define CLK_MOUT_CMU_NOCL1C_NOC 29
+#define CLK_MOUT_CMU_SDMA_NOC 30
+#define CLK_MOUT_CMU_CP_HISPEEDY_CLK 31
+#define CLK_MOUT_CMU_CP_SHARED0_CLK 32
+#define CLK_MOUT_CMU_CP_SHARED2_CLK 33
+#define CLK_MOUT_CMU_MUX_ALIVE_NOC 34
+#define CLK_MOUT_CMU_MUX_AUD_AUDIF0 35
+#define CLK_MOUT_CMU_MUX_AUD_AUDIF1 36
+#define CLK_MOUT_CMU_MUX_AUD_CPU 37
+#define CLK_MOUT_CMU_MUX_AUD_NOC 38
+#define CLK_MOUT_CMU_MUX_BRP_NOC 39
+#define CLK_MOUT_CMU_MUX_CIS_CLK0 40
+#define CLK_MOUT_CMU_MUX_CIS_CLK1 41
+#define CLK_MOUT_CMU_MUX_CIS_CLK2 42
+#define CLK_MOUT_CMU_MUX_CIS_CLK3 43
+#define CLK_MOUT_CMU_MUX_CIS_CLK4 44
+#define CLK_MOUT_CMU_MUX_CIS_CLK5 45
+#define CLK_MOUT_CMU_MUX_CIS_CLK6 46
+#define CLK_MOUT_CMU_MUX_CIS_CLK7 47
+#define CLK_MOUT_CMU_MUX_CMU_BOOST 48
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_CAM 49
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_CPU 50
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_MIF 51
+#define CLK_MOUT_CMU_MUX_CPUCL0_DBG_NOC 52
+#define CLK_MOUT_CMU_MUX_CPUCL0_NOCP 53
+#define CLK_MOUT_CMU_MUX_CPUCL0_SWITCH 54
+#define CLK_MOUT_CMU_MUX_CPUCL1_SWITCH 55
+#define CLK_MOUT_CMU_MUX_CPUCL2_SWITCH 56
+#define CLK_MOUT_CMU_MUX_CSIS_DCPHY 57
+#define CLK_MOUT_CMU_MUX_CSIS_NOC 58
+#define CLK_MOUT_CMU_MUX_CSIS_OIS_MCU 59
+#define CLK_MOUT_CMU_MUX_CSTAT_NOC 60
+#define CLK_MOUT_CMU_MUX_DNC_NOC 61
+#define CLK_MOUT_CMU_MUX_DPUB 62
+#define CLK_MOUT_CMU_MUX_DPUB_ALT 63
+#define CLK_MOUT_CMU_MUX_DPUB_DSIM 64
+#define CLK_MOUT_CMU_MUX_DPUF 65
+#define CLK_MOUT_CMU_MUX_DPUF_ALT 66
+#define CLK_MOUT_CMU_MUX_DSP_NOC 67
+#define CLK_MOUT_CMU_MUX_DSU_SWITCH 68
+#define CLK_MOUT_CMU_MUX_G3D_NOCP 69
+#define CLK_MOUT_CMU_MUX_G3D_SWITCH 70
+#define CLK_MOUT_CMU_MUX_GNPU_NOC 71
+#define CLK_MOUT_CMU_MUX_HSI0_DPGTC 72
+#define CLK_MOUT_CMU_MUX_HSI0_DPOSC 73
+#define CLK_MOUT_CMU_MUX_HSI0_NOC 74
+#define CLK_MOUT_CMU_MUX_HSI0_USB32DRD 75
+#define CLK_MOUT_CMU_MUX_UFS_MMC_CARD 76
+#define CLK_MOUT_CMU_MUX_HSI1_NOC 77
+#define CLK_MOUT_CMU_MUX_HSI1_PCIE 78
+#define CLK_MOUT_CMU_MUX_UFS_UFS_EMBD 79
+#define CLK_MOUT_CMU_MUX_LME_LME 80
+#define CLK_MOUT_CMU_MUX_LME_NOC 81
+#define CLK_MOUT_CMU_MUX_M2M_NOC 82
+#define CLK_MOUT_CMU_MUX_MCSC_MCSC 83
+#define CLK_MOUT_CMU_MUX_MCSC_NOC 84
+#define CLK_MOUT_CMU_MUX_MFC0_MFC0 85
+#define CLK_MOUT_CMU_MUX_MFC0_WFD 86
+#define CLK_MOUT_CMU_MUX_MFC1_MFC1 87
+#define CLK_MOUT_CMU_MUX_MIF_NOCP 88
+#define CLK_MOUT_CMU_MUX_MIF_SWITCH 89
+#define CLK_MOUT_CMU_MUX_NOCL0_NOC 90
+#define CLK_MOUT_CMU_MUX_NOCL1A_NOC 91
+#define CLK_MOUT_CMU_MUX_NOCL1B_NOC0 92
+#define CLK_MOUT_CMU_MUX_NOCL1B_NOC1 93
+#define CLK_MOUT_CMU_MUX_NOCL1C_NOC 94
+#define CLK_MOUT_CMU_MUX_PERIC0_IP0 95
+#define CLK_MOUT_CMU_MUX_PERIC0_IP1 96
+#define CLK_MOUT_CMU_MUX_PERIC0_NOC 97
+#define CLK_MOUT_CMU_MUX_PERIC1_IP0 98
+#define CLK_MOUT_CMU_MUX_PERIC1_IP1 99
+#define CLK_MOUT_CMU_MUX_PERIC1_NOC 100
+#define CLK_MOUT_CMU_MUX_PERIC2_IP0 101
+#define CLK_MOUT_CMU_MUX_PERIC2_IP1 102
+#define CLK_MOUT_CMU_MUX_PERIC2_NOC 103
+#define CLK_MOUT_CMU_MUX_PERIS_GIC 104
+#define CLK_MOUT_CMU_MUX_PERIS_NOC 105
+#define CLK_MOUT_CMU_MUX_SDMA_NOC 106
+#define CLK_MOUT_CMU_MUX_SSP_NOC 107
+#define CLK_MOUT_CMU_MUX_VTS_DMIC 108
+#define CLK_MOUT_CMU_MUX_YUVP_NOC 109
+#define CLK_MOUT_CMU_MUX_CMU_CMUREF 110
+#define CLK_MOUT_CMU_MUX_CP_HISPEEDY_CLK 111
+#define CLK_MOUT_CMU_MUX_CP_SHARED0_CLK 112
+#define CLK_MOUT_CMU_MUX_CP_SHARED1_CLK 113
+#define CLK_MOUT_CMU_MUX_CP_SHARED2_CLK 114
+#define CLK_MOUT_CMU_M2M_FRC 115
+#define CLK_MOUT_CMU_MCSC_MCSC 116
+#define CLK_MOUT_CMU_MCSC_NOC 117
+#define CLK_MOUT_CMU_MUX_M2M_FRC 118
+#define CLK_MOUT_CMU_MUX_UFS_NOC 119
+
+#define CLK_DOUT_CMU_ALIVE_NOC 120
+#define CLK_DOUT_CMU_AUD_NOC 121
+#define CLK_DOUT_CMU_BRP_NOC 122
+#define CLK_DOUT_CMU_CMU_BOOST 123
+#define CLK_DOUT_CMU_CMU_BOOST_CAM 124
+#define CLK_DOUT_CMU_CMU_BOOST_CPU 125
+#define CLK_DOUT_CMU_CMU_BOOST_MIF 126
+#define CLK_DOUT_CMU_CPUCL0_NOCP 127
+#define CLK_DOUT_CMU_CSIS_DCPHY 128
+#define CLK_DOUT_CMU_CSIS_NOC 129
+#define CLK_DOUT_CMU_CSIS_OIS_MCU 130
+#define CLK_DOUT_CMU_CSTAT_NOC 131
+#define CLK_DOUT_CMU_DPUB_DSIM 132
+#define CLK_DOUT_CMU_LME_LME 133
+#define CLK_DOUT_CMU_G3D_NOCP 134
+#define CLK_DOUT_CMU_HSI0_DPGTC 135
+#define CLK_DOUT_CMU_HSI0_DPOSC 136
+#define CLK_DOUT_CMU_HSI0_NOC 137
+#define CLK_DOUT_CMU_HSI0_USB32DRD 138
+#define CLK_DOUT_CMU_HSI1_NOC 139
+#define CLK_DOUT_CMU_HSI1_PCIE 140
+#define CLK_DOUT_CMU_UFS_UFS_EMBD 141
+#define CLK_DOUT_CMU_LME_NOC 142
+#define CLK_DOUT_CMU_MFC0_MFC0 143
+#define CLK_DOUT_CMU_MFC0_WFD 144
+#define CLK_DOUT_CMU_MFC1_MFC1 145
+#define CLK_DOUT_CMU_MIF_NOCP 146
+#define CLK_DOUT_CMU_NOCL1B_NOC1 147
+#define CLK_DOUT_CMU_PERIC0_IP0 148
+#define CLK_DOUT_CMU_PERIC0_IP1 149
+#define CLK_DOUT_CMU_PERIC0_NOC 150
+#define CLK_DOUT_CMU_PERIC1_IP0 151
+#define CLK_DOUT_CMU_PERIC1_IP1 152
+#define CLK_DOUT_CMU_PERIC1_NOC 153
+#define CLK_DOUT_CMU_PERIC2_IP0 154
+#define CLK_DOUT_CMU_PERIC2_IP1 155
+#define CLK_DOUT_CMU_PERIC2_NOC 156
+#define CLK_DOUT_CMU_PERIS_GIC 157
+#define CLK_DOUT_CMU_PERIS_NOC 158
+#define CLK_DOUT_CMU_SSP_NOC 159
+#define CLK_DOUT_CMU_VTS_DMIC 160
+#define CLK_DOUT_CMU_YUVP_NOC 161
+#define CLK_DOUT_CMU_CP_SHARED1_CLK 162
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF0 163
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF0_SM 164
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF1 165
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF1_SM 166
+#define CLK_DOUT_CMU_DIV_AUD_CPU 167
+#define CLK_DOUT_CMU_DIV_AUD_CPU_SM 168
+#define CLK_DOUT_CMU_DIV_CIS_CLK0 169
+#define CLK_DOUT_CMU_DIV_CIS_CLK1 170
+#define CLK_DOUT_CMU_DIV_CIS_CLK2 171
+#define CLK_DOUT_CMU_DIV_CIS_CLK3 172
+#define CLK_DOUT_CMU_DIV_CIS_CLK4 173
+#define CLK_DOUT_CMU_DIV_CIS_CLK5 174
+#define CLK_DOUT_CMU_DIV_CIS_CLK6 175
+#define CLK_DOUT_CMU_DIV_CIS_CLK7 176
+#define CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC 177
+#define CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC_SM 178
+#define CLK_DOUT_CMU_DIV_CPUCL0_SWITCH 179
+#define CLK_DOUT_CMU_DIV_CPUCL0_SWITCH_SM 180
+#define CLK_DOUT_CMU_DIV_CPUCL1_SWITCH 181
+#define CLK_DOUT_CMU_DIV_CPUCL1_SWITCH_SM 182
+#define CLK_DOUT_CMU_DIV_CPUCL2_SWITCH 183
+#define CLK_DOUT_CMU_DIV_CPUCL2_SWITCH_SM 184
+#define CLK_DOUT_CMU_DIV_DNC_NOC 185
+#define CLK_DOUT_CMU_DIV_DNC_NOC_SM 186
+#define CLK_DOUT_CMU_DIV_DPUB 187
+#define CLK_DOUT_CMU_DIV_DPUB_ALT 188
+#define CLK_DOUT_CMU_DIV_DPUF 189
+#define CLK_DOUT_CMU_DIV_DPUF_ALT 190
+#define CLK_DOUT_CMU_DIV_DSP_NOC 191
+#define CLK_DOUT_CMU_DIV_DSP_NOC_SM 192
+#define CLK_DOUT_CMU_DIV_DSU_SWITCH 193
+#define CLK_DOUT_CMU_DIV_DSU_SWITCH_SM 194
+#define CLK_DOUT_CMU_DIV_G3D_SWITCH 195
+#define CLK_DOUT_CMU_DIV_G3D_SWITCH_SM 196
+#define CLK_DOUT_CMU_DIV_GNPU_NOC 197
+#define CLK_DOUT_CMU_DIV_GNPU_NOC_SM 198
+#define CLK_DOUT_CMU_DIV_UFS_MMC_CARD 199
+#define CLK_DOUT_CMU_DIV_UFS_MMC_CARD_SM 200
+#define CLK_DOUT_CMU_DIV_M2M_NOC 201
+#define CLK_DOUT_CMU_DIV_M2M_NOC_SM 202
+#define CLK_DOUT_CMU_DIV_NOCL0_NOC 203
+#define CLK_DOUT_CMU_DIV_NOCL0_NOC_SM 204
+#define CLK_DOUT_CMU_DIV_NOCL1A_NOC 205
+#define CLK_DOUT_CMU_DIV_NOCL1A_NOC_SM 206
+#define CLK_DOUT_CMU_DIV_NOCL1B_NOC0 207
+#define CLK_DOUT_CMU_DIV_NOCL1B_NOC0_SM 208
+#define CLK_DOUT_CMU_DIV_NOCL1C_NOC 209
+#define CLK_DOUT_CMU_DIV_NOCL1C_NOC_SM 210
+#define CLK_DOUT_CMU_DIV_SDMA_NOC 211
+#define CLK_DOUT_CMU_DIV_SDMA_NOC_SM 212
+#define CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK 213
+#define CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK_SM 214
+#define CLK_DOUT_CMU_DIV_CP_SHARED0_CLK 215
+#define CLK_DOUT_CMU_DIV_CP_SHARED0_CLK_SM 216
+#define CLK_DOUT_CMU_DIV_CP_SHARED2_CLK 217
+#define CLK_DOUT_CMU_DIV_CP_SHARED2_CLK_SM 218
+#define CLK_DOUT_CMU_UFS_NOC 219
+#define CLK_DOUT_CMU_DIV_M2M_FRC 220
+#define CLK_DOUT_CMU_DIV_M2M_FRC_SM 221
+#define CLK_DOUT_CMU_DIV_MCSC_MCSC 222
+#define CLK_DOUT_CMU_DIV_MCSC_MCSC_SM 223
+#define CLK_DOUT_CMU_DIV_MCSC_NOC 224
+#define CLK_DOUT_CMU_DIV_MCSC_NOC_SM 225
+#define CLK_DOUT_SHARED0_DIV1 226
+#define CLK_DOUT_SHARED0_DIV2 227
+#define CLK_DOUT_SHARED0_DIV4 228
+#define CLK_DOUT_SHARED1_DIV1 229
+#define CLK_DOUT_SHARED1_DIV2 230
+#define CLK_DOUT_SHARED1_DIV4 231
+#define CLK_DOUT_SHARED2_DIV1 232
+#define CLK_DOUT_SHARED2_DIV2 233
+#define CLK_DOUT_SHARED2_DIV4 234
+#define CLK_DOUT_SHARED3_DIV1 235
+#define CLK_DOUT_SHARED3_DIV2 236
+#define CLK_DOUT_SHARED3_DIV4 237
+#define CLK_DOUT_SHARED4_DIV1 238
+#define CLK_DOUT_SHARED4_DIV2 239
+#define CLK_DOUT_SHARED4_DIV4 240
+#define CLK_DOUT_SHARED_MIF_DIV1 241
+#define CLK_DOUT_SHARED_MIF_DIV2 242
+#define CLK_DOUT_SHARED_MIF_DIV4 243
+#define CLK_DOUT_TCXO_DIV3 244
+#define CLK_DOUT_TCXO_DIV4 245
+
+/* CMU_ALIVE */
+#define CLK_MOUT_ALIVE_NOC_USER 1
+#define CLK_MOUT_ALIVE_RCO_SPMI_USER 2
+#define CLK_MOUT_RCO_ALIVE_USER 3
+#define CLK_MOUT_ALIVE_CHUB_PERI 4
+#define CLK_MOUT_ALIVE_CMGP_NOC 5
+#define CLK_MOUT_ALIVE_CMGP_PERI 6
+#define CLK_MOUT_ALIVE_DBGCORE_NOC 7
+#define CLK_MOUT_ALIVE_DNC_NOC 8
+#define CLK_MOUT_ALIVE_CHUBVTS_NOC 9
+#define CLK_MOUT_ALIVE_GNPU_NOC 10
+#define CLK_MOUT_ALIVE_GNSS_NOC 11
+#define CLK_MOUT_ALIVE_SDMA_NOC 12
+#define CLK_MOUT_ALIVE_UFD_NOC 13
+#define CLK_MOUT_ALIVE_DBGCORE_UART 14
+#define CLK_MOUT_ALIVE_NOC 15
+#define CLK_MOUT_ALIVE_PMU_SUB 16
+#define CLK_MOUT_ALIVE_SPMI 17
+#define CLK_MOUT_ALIVE_TIMER 18
+#define CLK_MOUT_ALIVE_CSIS_NOC 19
+#define CLK_MOUT_ALIVE_DSP_NOC 20
+
+#define CLK_DOUT_ALIVE_CHUB_PERI 21
+#define CLK_DOUT_ALIVE_CMGP_NOC 22
+#define CLK_DOUT_ALIVE_CMGP_PERI 23
+#define CLK_DOUT_ALIVE_DBGCORE_NOC 24
+#define CLK_DOUT_ALIVE_DNC_NOC 25
+#define CLK_DOUT_ALIVE_CHUBVTS_NOC 26
+#define CLK_DOUT_ALIVE_GNPU_NOC 27
+#define CLK_DOUT_ALIVE_SDMA_NOC 28
+#define CLK_DOUT_ALIVE_UFD_NOC 29
+#define CLK_DOUT_ALIVE_DBGCORE_UART 30
+#define CLK_DOUT_ALIVE_NOC 31
+#define CLK_DOUT_ALIVE_PMU_SUB 32
+#define CLK_DOUT_ALIVE_SPMI 33
+#define CLK_DOUT_ALIVE_CSIS_NOC 34
+#define CLK_DOUT_ALIVE_DSP_NOC 35
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_GIC_USER 1
+#define CLK_MOUT_PERIS_NOC_USER 2
+#define CLK_MOUT_PERIS_GIC 3
+
+#define CLK_DOUT_PERIS_OTP 4
+#define CLK_DOUT_PERIS_DDD_CTRL 5
+
+/* CMU_CMGP */
+#define CLK_MOUT_CMGP_CLKALIVE_NOC_USER 1
+#define CLK_MOUT_CMGP_CLKALIVE_PERI_USER 2
+#define CLK_MOUT_CMGP_I2C 3
+#define CLK_MOUT_CMGP_SPI_I2C0 4
+#define CLK_MOUT_CMGP_SPI_I2C1 5
+#define CLK_MOUT_CMGP_SPI_MS_CTRL 6
+#define CLK_MOUT_CMGP_USI0 7
+#define CLK_MOUT_CMGP_USI1 8
+#define CLK_MOUT_CMGP_USI2 9
+#define CLK_MOUT_CMGP_USI3 10
+#define CLK_MOUT_CMGP_USI4 11
+#define CLK_MOUT_CMGP_USI5 12
+#define CLK_MOUT_CMGP_USI6 13
+
+#define CLK_DOUT_CMGP_I2C 14
+#define CLK_DOUT_CMGP_SPI_I2C0 15
+#define CLK_DOUT_CMGP_SPI_I2C1 16
+#define CLK_DOUT_CMGP_SPI_MS_CTRL 17
+#define CLK_DOUT_CMGP_USI0 18
+#define CLK_DOUT_CMGP_USI1 19
+#define CLK_DOUT_CMGP_USI2 20
+#define CLK_DOUT_CMGP_USI3 21
+#define CLK_DOUT_CMGP_USI4 22
+#define CLK_DOUT_CMGP_USI5 23
+#define CLK_DOUT_CMGP_USI6 24
+
+/* CMU_HSI0 */
+#define CLK_MOUT_CLKCMU_HSI0_DPGTC_USER 1
+#define CLK_MOUT_CLKCMU_HSI0_DPOSC_USER 2
+#define CLK_MOUT_CLKCMU_HSI0_NOC_USER 3
+#define CLK_MOUT_CLKCMU_HSI0_USB32DRD_USER 4
+#define CLK_MOUT_HSI0_NOC 5
+#define CLK_MOUT_HSI0_RTCCLK 6
+#define CLK_MOUT_HSI0_USB32DRD 7
+
+#define CLK_DOUT_DIV_CLK_HSI0_EUSB 8
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP0_USER 1
+#define CLK_MOUT_PERIC0_IP1_USER 2
+#define CLK_MOUT_PERIC0_NOC_USER 3
+#define CLK_MOUT_PERIC0_I2C 4
+#define CLK_MOUT_PERIC0_USI04 5
+
+#define CLK_DOUT_PERIC0_I2C 6
+#define CLK_DOUT_PERIC0_USI04 7
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_IP0_USER 1
+#define CLK_MOUT_PERIC1_IP1_USER 2
+#define CLK_MOUT_PERIC1_NOC_USER 3
+#define CLK_MOUT_PERIC1_I2C 4
+#define CLK_MOUT_PERIC1_SPI_MS_CTRL 5
+#define CLK_MOUT_PERIC1_UART_BT 6
+#define CLK_MOUT_PERIC1_USI07 7
+#define CLK_MOUT_PERIC1_USI07_SPI_I2C 8
+#define CLK_MOUT_PERIC1_USI08 9
+#define CLK_MOUT_PERIC1_USI08_SPI_I2C 10
+#define CLK_MOUT_PERIC1_USI09 11
+#define CLK_MOUT_PERIC1_USI10 12
+
+#define CLK_DOUT_PERIC1_I2C 13
+#define CLK_DOUT_PERIC1_SPI_MS_CTRL 14
+#define CLK_DOUT_PERIC1_UART_BT 15
+#define CLK_DOUT_PERIC1_USI07 16
+#define CLK_DOUT_PERIC1_USI07_SPI_I2C 17
+#define CLK_DOUT_PERIC1_USI08 18
+#define CLK_DOUT_PERIC1_USI08_SPI_I2C 19
+#define CLK_DOUT_PERIC1_USI09 20
+#define CLK_DOUT_PERIC1_USI10 21
+
+/* CMU_PERIC2 */
+#define CLK_MOUT_PERIC2_IP0_USER 1
+#define CLK_MOUT_PERIC2_IP1_USER 2
+#define CLK_MOUT_PERIC2_NOC_USER 3
+#define CLK_MOUT_PERIC2_I2C 4
+#define CLK_MOUT_PERIC2_SPI_MS_CTRL 5
+#define CLK_MOUT_PERIC2_UART_DBG 6
+#define CLK_MOUT_PERIC2_USI00 7
+#define CLK_MOUT_PERIC2_USI00_SPI_I2C 8
+#define CLK_MOUT_PERIC2_USI01 9
+#define CLK_MOUT_PERIC2_USI01_SPI_I2C 10
+#define CLK_MOUT_PERIC2_USI02 11
+#define CLK_MOUT_PERIC2_USI03 12
+#define CLK_MOUT_PERIC2_USI05 13
+#define CLK_MOUT_PERIC2_USI06 14
+#define CLK_MOUT_PERIC2_USI11 15
+
+#define CLK_DOUT_PERIC2_I2C 16
+#define CLK_DOUT_PERIC2_SPI_MS_CTRL 17
+#define CLK_DOUT_PERIC2_UART_DBG 18
+#define CLK_DOUT_PERIC2_USI00 19
+#define CLK_DOUT_PERIC2_USI00_SPI_I2C 20
+#define CLK_DOUT_PERIC2_USI01 21
+#define CLK_DOUT_PERIC2_USI01_SPI_I2C 22
+#define CLK_DOUT_PERIC2_USI02 23
+#define CLK_DOUT_PERIC2_USI03 24
+#define CLK_DOUT_PERIC2_USI05 25
+#define CLK_DOUT_PERIC2_USI06 26
+#define CLK_DOUT_PERIC2_USI11 27
+
+/* CMU_UFS */
+#define CLK_MOUT_UFS_MMC_CARD_USER 1
+#define CLK_MOUT_UFS_NOC_USER 2
+#define CLK_MOUT_UFS_UFS_EMBD_USER 3
+
+/* CMU_VTS */
+#define CLK_MOUT_CLKALIVE_VTS_NOC_USER 1
+#define CLK_MOUT_CLKALIVE_VTS_RCO_USER 2
+#define CLK_MOUT_CLKCMU_VTS_DMIC_USER 3
+#define CLK_MOUT_CLKVTS_AUD_DMIC1 4
+#define CLK_MOUT_CLKVTS_NOC 5
+#define CLK_MOUT_CLKVTS_DMIC_PAD 6
+
+#define CLK_DOUT_CLKVTS_AUD_DMIC0 7
+#define CLK_DOUT_CLKVTS_AUD_DMIC1 8
+#define CLK_DOUT_CLKVTS_CPU 9
+#define CLK_DOUT_CLKVTS_DMIC_IF 10
+#define CLK_DOUT_CLKVTS_DMIC_IF_DIV2 11
+#define CLK_DOUT_CLKVTS_NOC 12
+#define CLK_DOUT_CLKVTS_SERIAL_LIF 13
+#define CLK_DOUT_CLKVTS_SERIAL_LIF_CORE 14
+
+#endif
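As a hedged illustration of how mux (MOUT) and divider (DOUT) indices like these are consumed beyond plain clocks properties, the generic assigned-clocks bindings can pin a mux parent from DT. The cmu_top label and the particular mux/parent pairing below are assumptions for illustration only, not taken from this series.

    #include <dt-bindings/clock/samsung,exynos2200-cmu.h>

    &cmu_top {
            /* park the PERIC0 IP0 mux on a shared-PLL divider output */
            assigned-clocks = <&cmu_top CLK_MOUT_CMU_MUX_PERIC0_IP0>;
            assigned-clock-parents = <&cmu_top CLK_DOUT_SHARED0_DIV4>;
    };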
diff --git a/include/dt-bindings/clock/samsung,exynos7870-cmu.h b/include/dt-bindings/clock/samsung,exynos7870-cmu.h
new file mode 100644
index 000000000000..57d04bbe342d
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos7870-cmu.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Kaustabh Chakraborty <kauschluss@disroot.org>
+ *
+ * Device Tree binding constants for Exynos7870 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS7870_H
+#define _DT_BINDINGS_CLOCK_EXYNOS7870_H
+
+/* CMU_MIF */
+#define CLK_DOUT_MIF_APB 1
+#define CLK_DOUT_MIF_BUSD 2
+#define CLK_DOUT_MIF_CMU_DISPAUD_BUS 3
+#define CLK_DOUT_MIF_CMU_DISPAUD_DECON_ECLK 4
+#define CLK_DOUT_MIF_CMU_DISPAUD_DECON_VCLK 5
+#define CLK_DOUT_MIF_CMU_FSYS_BUS 6
+#define CLK_DOUT_MIF_CMU_FSYS_MMC0 7
+#define CLK_DOUT_MIF_CMU_FSYS_MMC1 8
+#define CLK_DOUT_MIF_CMU_FSYS_MMC2 9
+#define CLK_DOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 10
+#define CLK_DOUT_MIF_CMU_G3D_SWITCH 11
+#define CLK_DOUT_MIF_CMU_ISP_CAM 12
+#define CLK_DOUT_MIF_CMU_ISP_ISP 13
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR0 14
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR1 15
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR2 16
+#define CLK_DOUT_MIF_CMU_ISP_VRA 17
+#define CLK_DOUT_MIF_CMU_MFCMSCL_MFC 18
+#define CLK_DOUT_MIF_CMU_MFCMSCL_MSCL 19
+#define CLK_DOUT_MIF_CMU_PERI_BUS 20
+#define CLK_DOUT_MIF_CMU_PERI_SPI0 21
+#define CLK_DOUT_MIF_CMU_PERI_SPI1 22
+#define CLK_DOUT_MIF_CMU_PERI_SPI2 23
+#define CLK_DOUT_MIF_CMU_PERI_SPI3 24
+#define CLK_DOUT_MIF_CMU_PERI_SPI4 25
+#define CLK_DOUT_MIF_CMU_PERI_UART0 26
+#define CLK_DOUT_MIF_CMU_PERI_UART1 27
+#define CLK_DOUT_MIF_CMU_PERI_UART2 28
+#define CLK_DOUT_MIF_HSI2C 29
+#define CLK_FOUT_MIF_BUS_PLL 30
+#define CLK_FOUT_MIF_MEDIA_PLL 31
+#define CLK_FOUT_MIF_MEM_PLL 32
+#define CLK_GOUT_MIF_CMU_DISPAUD_BUS 33
+#define CLK_GOUT_MIF_CMU_DISPAUD_DECON_ECLK 34
+#define CLK_GOUT_MIF_CMU_DISPAUD_DECON_VCLK 35
+#define CLK_GOUT_MIF_CMU_FSYS_BUS 36
+#define CLK_GOUT_MIF_CMU_FSYS_MMC0 37
+#define CLK_GOUT_MIF_CMU_FSYS_MMC1 38
+#define CLK_GOUT_MIF_CMU_FSYS_MMC2 39
+#define CLK_GOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 40
+#define CLK_GOUT_MIF_CMU_G3D_SWITCH 41
+#define CLK_GOUT_MIF_CMU_ISP_CAM 42
+#define CLK_GOUT_MIF_CMU_ISP_ISP 43
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR0 44
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR1 45
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR2 46
+#define CLK_GOUT_MIF_CMU_ISP_VRA 47
+#define CLK_GOUT_MIF_CMU_MFCMSCL_MFC 48
+#define CLK_GOUT_MIF_CMU_MFCMSCL_MSCL 49
+#define CLK_GOUT_MIF_CMU_PERI_BUS 50
+#define CLK_GOUT_MIF_CMU_PERI_SPI0 51
+#define CLK_GOUT_MIF_CMU_PERI_SPI1 52
+#define CLK_GOUT_MIF_CMU_PERI_SPI2 53
+#define CLK_GOUT_MIF_CMU_PERI_SPI3 54
+#define CLK_GOUT_MIF_CMU_PERI_SPI4 55
+#define CLK_GOUT_MIF_CMU_PERI_UART0 56
+#define CLK_GOUT_MIF_CMU_PERI_UART1 57
+#define CLK_GOUT_MIF_CMU_PERI_UART2 58
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C 59
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_0 60
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_1 61
+#define CLK_GOUT_MIF_HSI2C_AP_PCLKM 62
+#define CLK_GOUT_MIF_HSI2C_AP_PCLKS 63
+#define CLK_GOUT_MIF_HSI2C_CP_PCLKM 64
+#define CLK_GOUT_MIF_HSI2C_CP_PCLKS 65
+#define CLK_GOUT_MIF_HSI2C_IPCLK 66
+#define CLK_GOUT_MIF_HSI2C_ITCLK 67
+#define CLK_GOUT_MIF_MUX_BUSD 68
+#define CLK_GOUT_MIF_MUX_BUS_PLL 69
+#define CLK_GOUT_MIF_MUX_BUS_PLL_CON 70
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_BUS 71
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_ECLK 72
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_VCLK 73
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_BUS 74
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC0 75
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC1 76
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC2 77
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK 78
+#define CLK_GOUT_MIF_MUX_CMU_ISP_CAM 79
+#define CLK_GOUT_MIF_MUX_CMU_ISP_ISP 80
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR0 81
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR1 82
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR2 83
+#define CLK_GOUT_MIF_MUX_CMU_ISP_VRA 84
+#define CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MFC 85
+#define CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MSCL 86
+#define CLK_GOUT_MIF_MUX_CMU_PERI_BUS 87
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI0 88
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI1 89
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI2 90
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI3 91
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI4 92
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART0 93
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART1 94
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART2 95
+#define CLK_GOUT_MIF_MUX_MEDIA_PLL 96
+#define CLK_GOUT_MIF_MUX_MEDIA_PLL_CON 97
+#define CLK_GOUT_MIF_MUX_MEM_PLL 98
+#define CLK_GOUT_MIF_MUX_MEM_PLL_CON 99
+#define CLK_GOUT_MIF_WRAP_ADC_IF_OSC_SYS 100
+#define CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S0 101
+#define CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S1 102
+#define CLK_MOUT_MIF_BUSD 103
+#define CLK_MOUT_MIF_CMU_DISPAUD_BUS 104
+#define CLK_MOUT_MIF_CMU_DISPAUD_DECON_ECLK 105
+#define CLK_MOUT_MIF_CMU_DISPAUD_DECON_VCLK 106
+#define CLK_MOUT_MIF_CMU_FSYS_BUS 107
+#define CLK_MOUT_MIF_CMU_FSYS_MMC0 108
+#define CLK_MOUT_MIF_CMU_FSYS_MMC1 109
+#define CLK_MOUT_MIF_CMU_FSYS_MMC2 110
+#define CLK_MOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 111
+#define CLK_MOUT_MIF_CMU_ISP_CAM 112
+#define CLK_MOUT_MIF_CMU_ISP_ISP 113
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR0 114
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR1 115
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR2 116
+#define CLK_MOUT_MIF_CMU_ISP_VRA 117
+#define CLK_MOUT_MIF_CMU_MFCMSCL_MFC 118
+#define CLK_MOUT_MIF_CMU_MFCMSCL_MSCL 119
+#define CLK_MOUT_MIF_CMU_PERI_BUS 120
+#define CLK_MOUT_MIF_CMU_PERI_SPI0 121
+#define CLK_MOUT_MIF_CMU_PERI_SPI1 122
+#define CLK_MOUT_MIF_CMU_PERI_SPI2 123
+#define CLK_MOUT_MIF_CMU_PERI_SPI3 124
+#define CLK_MOUT_MIF_CMU_PERI_SPI4 125
+#define CLK_MOUT_MIF_CMU_PERI_UART0 126
+#define CLK_MOUT_MIF_CMU_PERI_UART1 127
+#define CLK_MOUT_MIF_CMU_PERI_UART2 128
+#define MIF_NR_CLK 129
+
+/* CMU_DISPAUD */
+#define CLK_DOUT_DISPAUD_APB 1
+#define CLK_DOUT_DISPAUD_DECON_ECLK 2
+#define CLK_DOUT_DISPAUD_DECON_VCLK 3
+#define CLK_DOUT_DISPAUD_MI2S 4
+#define CLK_DOUT_DISPAUD_MIXER 5
+#define CLK_FOUT_DISPAUD_AUD_PLL 6
+#define CLK_FOUT_DISPAUD_PLL 7
+#define CLK_GOUT_DISPAUD_APB_AUD 8
+#define CLK_GOUT_DISPAUD_APB_AUD_AMP 9
+#define CLK_GOUT_DISPAUD_APB_DISP 10
+#define CLK_GOUT_DISPAUD_BUS 11
+#define CLK_GOUT_DISPAUD_BUS_DISP 12
+#define CLK_GOUT_DISPAUD_BUS_PPMU 13
+#define CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN 14
+#define CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN 15
+#define CLK_GOUT_DISPAUD_CON_CP2AUD_BCK 16
+#define CLK_GOUT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S 17
+#define CLK_GOUT_DISPAUD_DECON_ECLK 18
+#define CLK_GOUT_DISPAUD_DECON_VCLK 19
+#define CLK_GOUT_DISPAUD_MI2S_AMP_I2SCODCLKI 20
+#define CLK_GOUT_DISPAUD_MI2S_AUD_I2SCODCLKI 21
+#define CLK_GOUT_DISPAUD_MIXER_AUD_SYSCLK 22
+#define CLK_GOUT_DISPAUD_MUX_AUD_PLL 23
+#define CLK_GOUT_DISPAUD_MUX_AUD_PLL_CON 24
+#define CLK_GOUT_DISPAUD_MUX_BUS_USER 25
+#define CLK_GOUT_DISPAUD_MUX_DECON_ECLK 26
+#define CLK_GOUT_DISPAUD_MUX_DECON_ECLK_USER 27
+#define CLK_GOUT_DISPAUD_MUX_DECON_VCLK 28
+#define CLK_GOUT_DISPAUD_MUX_DECON_VCLK_USER 29
+#define CLK_GOUT_DISPAUD_MUX_MI2S 30
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER 31
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON 32
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER 33
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON 34
+#define CLK_GOUT_DISPAUD_MUX_PLL 35
+#define CLK_GOUT_DISPAUD_MUX_PLL_CON 36
+#define CLK_MOUT_DISPAUD_BUS_USER 37
+#define CLK_MOUT_DISPAUD_DECON_ECLK 38
+#define CLK_MOUT_DISPAUD_DECON_ECLK_USER 39
+#define CLK_MOUT_DISPAUD_DECON_VCLK 40
+#define CLK_MOUT_DISPAUD_DECON_VCLK_USER 41
+#define CLK_MOUT_DISPAUD_MI2S 42
+#define DISPAUD_NR_CLK 43
+
+/* CMU_FSYS */
+#define CLK_FOUT_FSYS_USB_PLL 1
+#define CLK_GOUT_FSYS_BUSP3_HCLK 2
+#define CLK_GOUT_FSYS_MMC0_ACLK 3
+#define CLK_GOUT_FSYS_MMC1_ACLK 4
+#define CLK_GOUT_FSYS_MMC2_ACLK 5
+#define CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER 6
+#define CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON 7
+#define CLK_GOUT_FSYS_MUX_USB_PLL 8
+#define CLK_GOUT_FSYS_MUX_USB_PLL_CON 9
+#define CLK_GOUT_FSYS_PDMA0_ACLK_PDMA0 10
+#define CLK_GOUT_FSYS_PPMU_ACLK 11
+#define CLK_GOUT_FSYS_PPMU_PCLK 12
+#define CLK_GOUT_FSYS_SROMC_HCLK 13
+#define CLK_GOUT_FSYS_UPSIZER_BUS1_ACLK 14
+#define CLK_GOUT_FSYS_USB20DRD_ACLK_HSDRD 15
+#define CLK_GOUT_FSYS_USB20DRD_HCLK_USB20_CTRL 16
+#define CLK_GOUT_FSYS_USB20DRD_HSDRD_REF_CLK 17
+#define FSYS_NR_CLK 18
+
+/* CMU_G3D */
+#define CLK_DOUT_G3D_APB 1
+#define CLK_DOUT_G3D_BUS 2
+#define CLK_FOUT_G3D_PLL 3
+#define CLK_GOUT_G3D_ASYNCS_D0_CLK 4
+#define CLK_GOUT_G3D_ASYNC_PCLKM 5
+#define CLK_GOUT_G3D_CLK 6
+#define CLK_GOUT_G3D_MUX 7
+#define CLK_GOUT_G3D_MUX_PLL 8
+#define CLK_GOUT_G3D_MUX_PLL_CON 9
+#define CLK_GOUT_G3D_MUX_SWITCH_USER 10
+#define CLK_GOUT_G3D_PPMU_ACLK 11
+#define CLK_GOUT_G3D_PPMU_PCLK 12
+#define CLK_GOUT_G3D_QE_ACLK 13
+#define CLK_GOUT_G3D_QE_PCLK 14
+#define CLK_GOUT_G3D_SYSREG_PCLK 15
+#define CLK_MOUT_G3D 16
+#define CLK_MOUT_G3D_SWITCH_USER 17
+#define G3D_NR_CLK 18
+
+/* CMU_ISP */
+#define CLK_DOUT_ISP_APB 1
+#define CLK_DOUT_ISP_CAM_HALF 2
+#define CLK_FOUT_ISP_PLL 3
+#define CLK_GOUT_ISP_CAM 4
+#define CLK_GOUT_ISP_CAM_HALF 5
+#define CLK_GOUT_ISP_ISPD 6
+#define CLK_GOUT_ISP_ISPD_PPMU 7
+#define CLK_GOUT_ISP_MUX_CAM 8
+#define CLK_GOUT_ISP_MUX_CAM_USER 9
+#define CLK_GOUT_ISP_MUX_ISP 10
+#define CLK_GOUT_ISP_MUX_ISPD 11
+#define CLK_GOUT_ISP_MUX_PLL 12
+#define CLK_GOUT_ISP_MUX_PLL_CON 13
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER 14
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON 15
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER 16
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON 17
+#define CLK_GOUT_ISP_MUX_USER 18
+#define CLK_GOUT_ISP_MUX_VRA 19
+#define CLK_GOUT_ISP_MUX_VRA_USER 20
+#define CLK_GOUT_ISP_VRA 21
+#define CLK_MOUT_ISP_CAM 22
+#define CLK_MOUT_ISP_CAM_USER 23
+#define CLK_MOUT_ISP_ISP 24
+#define CLK_MOUT_ISP_ISPD 25
+#define CLK_MOUT_ISP_USER 26
+#define CLK_MOUT_ISP_VRA 27
+#define CLK_MOUT_ISP_VRA_USER 28
+#define ISP_NR_CLK 29
+
+/* CMU_MFCMSCL */
+#define CLK_DOUT_MFCMSCL_APB 1
+#define CLK_GOUT_MFCMSCL_MFC 2
+#define CLK_GOUT_MFCMSCL_MSCL 3
+#define CLK_GOUT_MFCMSCL_MSCL_BI 4
+#define CLK_GOUT_MFCMSCL_MSCL_D 5
+#define CLK_GOUT_MFCMSCL_MSCL_JPEG 6
+#define CLK_GOUT_MFCMSCL_MSCL_POLY 7
+#define CLK_GOUT_MFCMSCL_MSCL_PPMU 8
+#define CLK_GOUT_MFCMSCL_MUX_MFC_USER 9
+#define CLK_GOUT_MFCMSCL_MUX_MSCL_USER 10
+#define CLK_MOUT_MFCMSCL_MFC_USER 11
+#define CLK_MOUT_MFCMSCL_MSCL_USER 12
+#define MFCMSCL_NR_CLK 13
+
+/* CMU_PERI */
+#define CLK_GOUT_PERI_BUSP1_PERIC0_HCLK 1
+#define CLK_GOUT_PERI_GPIO2_PCLK 2
+#define CLK_GOUT_PERI_GPIO5_PCLK 3
+#define CLK_GOUT_PERI_GPIO6_PCLK 4
+#define CLK_GOUT_PERI_GPIO7_PCLK 5
+#define CLK_GOUT_PERI_HSI2C1_IPCLK 6
+#define CLK_GOUT_PERI_HSI2C2_IPCLK 7
+#define CLK_GOUT_PERI_HSI2C3_IPCLK 8
+#define CLK_GOUT_PERI_HSI2C4_IPCLK 9
+#define CLK_GOUT_PERI_HSI2C5_IPCLK 10
+#define CLK_GOUT_PERI_HSI2C6_IPCLK 11
+#define CLK_GOUT_PERI_I2C0_PCLK 12
+#define CLK_GOUT_PERI_I2C1_PCLK 13
+#define CLK_GOUT_PERI_I2C2_PCLK 14
+#define CLK_GOUT_PERI_I2C3_PCLK 15
+#define CLK_GOUT_PERI_I2C4_PCLK 16
+#define CLK_GOUT_PERI_I2C5_PCLK 17
+#define CLK_GOUT_PERI_I2C6_PCLK 18
+#define CLK_GOUT_PERI_I2C7_PCLK 19
+#define CLK_GOUT_PERI_I2C8_PCLK 20
+#define CLK_GOUT_PERI_MCT_PCLK 21
+#define CLK_GOUT_PERI_PWM_MOTOR_OSCCLK 22
+#define CLK_GOUT_PERI_PWM_MOTOR_PCLK_S0 23
+#define CLK_GOUT_PERI_SFRIF_TMU_CPUCL0_PCLK 24
+#define CLK_GOUT_PERI_SFRIF_TMU_CPUCL1_PCLK 25
+#define CLK_GOUT_PERI_SFRIF_TMU_PCLK 26
+#define CLK_GOUT_PERI_SPI0_PCLK 27
+#define CLK_GOUT_PERI_SPI0_SPI_EXT_CLK 28
+#define CLK_GOUT_PERI_SPI1_PCLK 29
+#define CLK_GOUT_PERI_SPI1_SPI_EXT_CLK 30
+#define CLK_GOUT_PERI_SPI2_PCLK 31
+#define CLK_GOUT_PERI_SPI2_SPI_EXT_CLK 32
+#define CLK_GOUT_PERI_SPI3_PCLK 33
+#define CLK_GOUT_PERI_SPI3_SPI_EXT_CLK 34
+#define CLK_GOUT_PERI_SPI4_PCLK 35
+#define CLK_GOUT_PERI_SPI4_SPI_EXT_CLK 36
+#define CLK_GOUT_PERI_TMU_CLK 37
+#define CLK_GOUT_PERI_TMU_CPUCL0_CLK 38
+#define CLK_GOUT_PERI_TMU_CPUCL1_CLK 39
+#define CLK_GOUT_PERI_UART0_EXT_UCLK 40
+#define CLK_GOUT_PERI_UART0_PCLK 41
+#define CLK_GOUT_PERI_UART1_EXT_UCLK 42
+#define CLK_GOUT_PERI_UART1_PCLK 43
+#define CLK_GOUT_PERI_UART2_EXT_UCLK 44
+#define CLK_GOUT_PERI_UART2_PCLK 45
+#define CLK_GOUT_PERI_WDT_CPUCL0_PCLK 46
+#define CLK_GOUT_PERI_WDT_CPUCL1_PCLK 47
+#define PERI_NR_CLK 48
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS7870_H */
diff --git a/include/dt-bindings/clock/samsung,exynos8895.h b/include/dt-bindings/clock/samsung,exynos8895.h
new file mode 100644
index 000000000000..27998c53f929
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos8895.h
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Device Tree binding constants for Exynos8895 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS8895_H
+#define _DT_BINDINGS_CLOCK_EXYNOS8895_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SHARED4 10
+#define CLK_MOUT_CP2AP_MIF_CLK_USER 11
+#define CLK_MOUT_CMU_ABOX_CPUABOX 12
+#define CLK_MOUT_CMU_APM_BUS 13
+#define CLK_MOUT_CMU_BUS1_BUS 14
+#define CLK_MOUT_CMU_BUSC_BUS 15
+#define CLK_MOUT_CMU_BUSC_BUSPHSI2C 16
+#define CLK_MOUT_CMU_CAM_BUS 17
+#define CLK_MOUT_CMU_CAM_TPU0 18
+#define CLK_MOUT_CMU_CAM_TPU1 19
+#define CLK_MOUT_CMU_CAM_VRA 20
+#define CLK_MOUT_CMU_CIS_CLK0 21
+#define CLK_MOUT_CMU_CIS_CLK1 22
+#define CLK_MOUT_CMU_CIS_CLK2 23
+#define CLK_MOUT_CMU_CIS_CLK3 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 26
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 27
+#define CLK_MOUT_CMU_DBG_BUS 28
+#define CLK_MOUT_CMU_DCAM_BUS 29
+#define CLK_MOUT_CMU_DCAM_IMGD 30
+#define CLK_MOUT_CMU_DPU_BUS 31
+#define CLK_MOUT_CMU_DROOPDETECTOR 32
+#define CLK_MOUT_CMU_DSP_BUS 33
+#define CLK_MOUT_CMU_FSYS0_BUS 34
+#define CLK_MOUT_CMU_FSYS0_DPGTC 35
+#define CLK_MOUT_CMU_FSYS0_MMC_EMBD 36
+#define CLK_MOUT_CMU_FSYS0_UFS_EMBD 37
+#define CLK_MOUT_CMU_FSYS0_USBDRD30 38
+#define CLK_MOUT_CMU_FSYS1_BUS 39
+#define CLK_MOUT_CMU_FSYS1_MMC_CARD 40
+#define CLK_MOUT_CMU_FSYS1_PCIE 41
+#define CLK_MOUT_CMU_FSYS1_UFS_CARD 42
+#define CLK_MOUT_CMU_G2D_G2D 43
+#define CLK_MOUT_CMU_G2D_JPEG 44
+#define CLK_MOUT_CMU_HPM 45
+#define CLK_MOUT_CMU_IMEM_BUS 46
+#define CLK_MOUT_CMU_ISPHQ_BUS 47
+#define CLK_MOUT_CMU_ISPLP_BUS 48
+#define CLK_MOUT_CMU_IVA_BUS 49
+#define CLK_MOUT_CMU_MFC_BUS 50
+#define CLK_MOUT_CMU_MIF_SWITCH 51
+#define CLK_MOUT_CMU_PERIC0_BUS 52
+#define CLK_MOUT_CMU_PERIC0_UART_DBG 53
+#define CLK_MOUT_CMU_PERIC0_USI00 54
+#define CLK_MOUT_CMU_PERIC0_USI01 55
+#define CLK_MOUT_CMU_PERIC0_USI02 56
+#define CLK_MOUT_CMU_PERIC0_USI03 57
+#define CLK_MOUT_CMU_PERIC1_BUS 58
+#define CLK_MOUT_CMU_PERIC1_SPEEDY2 59
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM0 60
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM1 61
+#define CLK_MOUT_CMU_PERIC1_UART_BT 62
+#define CLK_MOUT_CMU_PERIC1_USI04 63
+#define CLK_MOUT_CMU_PERIC1_USI05 64
+#define CLK_MOUT_CMU_PERIC1_USI06 65
+#define CLK_MOUT_CMU_PERIC1_USI07 66
+#define CLK_MOUT_CMU_PERIC1_USI08 67
+#define CLK_MOUT_CMU_PERIC1_USI09 68
+#define CLK_MOUT_CMU_PERIC1_USI10 69
+#define CLK_MOUT_CMU_PERIC1_USI11 70
+#define CLK_MOUT_CMU_PERIC1_USI12 71
+#define CLK_MOUT_CMU_PERIC1_USI13 72
+#define CLK_MOUT_CMU_PERIS_BUS 73
+#define CLK_MOUT_CMU_SRDZ_BUS 74
+#define CLK_MOUT_CMU_SRDZ_IMGD 75
+#define CLK_MOUT_CMU_VPU_BUS 76
+#define CLK_DOUT_CMU_ABOX_CPUABOX 77
+#define CLK_DOUT_CMU_APM_BUS 78
+#define CLK_DOUT_CMU_BUS1_BUS 79
+#define CLK_DOUT_CMU_BUSC_BUS 80
+#define CLK_DOUT_CMU_BUSC_BUSPHSI2C 81
+#define CLK_DOUT_CMU_CAM_BUS 82
+#define CLK_DOUT_CMU_CAM_TPU0 83
+#define CLK_DOUT_CMU_CAM_TPU1 84
+#define CLK_DOUT_CMU_CAM_VRA 85
+#define CLK_DOUT_CMU_CIS_CLK0 86
+#define CLK_DOUT_CMU_CIS_CLK1 87
+#define CLK_DOUT_CMU_CIS_CLK2 88
+#define CLK_DOUT_CMU_CIS_CLK3 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 91
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 92
+#define CLK_DOUT_CMU_DBG_BUS 93
+#define CLK_DOUT_CMU_DCAM_BUS 94
+#define CLK_DOUT_CMU_DCAM_IMGD 95
+#define CLK_DOUT_CMU_DPU_BUS 96
+#define CLK_DOUT_CMU_DSP_BUS 97
+#define CLK_DOUT_CMU_FSYS0_BUS 98
+#define CLK_DOUT_CMU_FSYS0_DPGTC 99
+#define CLK_DOUT_CMU_FSYS0_MMC_EMBD 100
+#define CLK_DOUT_CMU_FSYS0_UFS_EMBD 101
+#define CLK_DOUT_CMU_FSYS0_USBDRD30 102
+#define CLK_DOUT_CMU_FSYS1_BUS 103
+#define CLK_DOUT_CMU_FSYS1_MMC_CARD 104
+#define CLK_DOUT_CMU_FSYS1_UFS_CARD 105
+#define CLK_DOUT_CMU_G2D_G2D 106
+#define CLK_DOUT_CMU_G2D_JPEG 107
+#define CLK_DOUT_CMU_G3D_SWITCH 108
+#define CLK_DOUT_CMU_HPM 109
+#define CLK_DOUT_CMU_IMEM_BUS 110
+#define CLK_DOUT_CMU_ISPHQ_BUS 111
+#define CLK_DOUT_CMU_ISPLP_BUS 112
+#define CLK_DOUT_CMU_IVA_BUS 113
+#define CLK_DOUT_CMU_MFC_BUS 114
+#define CLK_DOUT_CMU_MODEM_SHARED0 115
+#define CLK_DOUT_CMU_MODEM_SHARED1 116
+#define CLK_DOUT_CMU_PERIC0_BUS 117
+#define CLK_DOUT_CMU_PERIC0_UART_DBG 118
+#define CLK_DOUT_CMU_PERIC0_USI00 119
+#define CLK_DOUT_CMU_PERIC0_USI01 120
+#define CLK_DOUT_CMU_PERIC0_USI02 121
+#define CLK_DOUT_CMU_PERIC0_USI03 122
+#define CLK_DOUT_CMU_PERIC1_BUS 123
+#define CLK_DOUT_CMU_PERIC1_SPEEDY2 124
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM0 125
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM1 126
+#define CLK_DOUT_CMU_PERIC1_UART_BT 127
+#define CLK_DOUT_CMU_PERIC1_USI04 128
+#define CLK_DOUT_CMU_PERIC1_USI05 129
+#define CLK_DOUT_CMU_PERIC1_USI06 130
+#define CLK_DOUT_CMU_PERIC1_USI07 131
+#define CLK_DOUT_CMU_PERIC1_USI08 132
+#define CLK_DOUT_CMU_PERIC1_USI09 133
+#define CLK_DOUT_CMU_PERIC1_USI10 134
+#define CLK_DOUT_CMU_PERIC1_USI11 135
+#define CLK_DOUT_CMU_PERIC1_USI12 136
+#define CLK_DOUT_CMU_PERIC1_USI13 137
+#define CLK_DOUT_CMU_PERIS_BUS 138
+#define CLK_DOUT_CMU_SRDZ_BUS 139
+#define CLK_DOUT_CMU_SRDZ_IMGD 140
+#define CLK_DOUT_CMU_VPU_BUS 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV4 143
+#define CLK_DOUT_CMU_SHARED1_DIV2 144
+#define CLK_DOUT_CMU_SHARED1_DIV4 145
+#define CLK_DOUT_CMU_SHARED2_DIV2 146
+#define CLK_DOUT_CMU_SHARED3_DIV2 147
+#define CLK_DOUT_CMU_SHARED4_DIV2 148
+#define CLK_DOUT_CMU_FSYS1_PCIE 149
+#define CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2 150
+#define CLK_DOUT_CMU_CMU_OTP 151
+#define CLK_GOUT_CMU_DROOPDETECTOR 152
+#define CLK_GOUT_CMU_MIF_SWITCH 153
+#define CLK_GOUT_CMU_ABOX_CPUABOX 154
+#define CLK_GOUT_CMU_APM_BUS 155
+#define CLK_GOUT_CMU_BUS1_BUS 156
+#define CLK_GOUT_CMU_BUSC_BUS 157
+#define CLK_GOUT_CMU_BUSC_BUSPHSI2C 158
+#define CLK_GOUT_CMU_CAM_BUS 159
+#define CLK_GOUT_CMU_CAM_TPU0 160
+#define CLK_GOUT_CMU_CAM_TPU1 161
+#define CLK_GOUT_CMU_CAM_VRA 162
+#define CLK_GOUT_CMU_CIS_CLK0 163
+#define CLK_GOUT_CMU_CIS_CLK1 164
+#define CLK_GOUT_CMU_CIS_CLK2 165
+#define CLK_GOUT_CMU_CIS_CLK3 166
+#define CLK_GOUT_CMU_CORE_BUS 167
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 168
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 169
+#define CLK_GOUT_CMU_DBG_BUS 170
+#define CLK_GOUT_CMU_DCAM_BUS 171
+#define CLK_GOUT_CMU_DCAM_IMGD 172
+#define CLK_GOUT_CMU_DPU_BUS 173
+#define CLK_GOUT_CMU_DSP_BUS 174
+#define CLK_GOUT_CMU_FSYS0_BUS 175
+#define CLK_GOUT_CMU_FSYS0_DPGTC 176
+#define CLK_GOUT_CMU_FSYS0_MMC_EMBD 177
+#define CLK_GOUT_CMU_FSYS0_UFS_EMBD 178
+#define CLK_GOUT_CMU_FSYS0_USBDRD30 179
+#define CLK_GOUT_CMU_FSYS1_BUS 180
+#define CLK_GOUT_CMU_FSYS1_MMC_CARD 181
+#define CLK_GOUT_CMU_FSYS1_PCIE 182
+#define CLK_GOUT_CMU_FSYS1_UFS_CARD 183
+#define CLK_GOUT_CMU_G2D_G2D 184
+#define CLK_GOUT_CMU_G2D_JPEG 185
+#define CLK_GOUT_CMU_G3D_SWITCH 186
+#define CLK_GOUT_CMU_HPM 187
+#define CLK_GOUT_CMU_IMEM_BUS 188
+#define CLK_GOUT_CMU_ISPHQ_BUS 189
+#define CLK_GOUT_CMU_ISPLP_BUS 190
+#define CLK_GOUT_CMU_IVA_BUS 191
+#define CLK_GOUT_CMU_MFC_BUS 192
+#define CLK_GOUT_CMU_MODEM_SHARED0 193
+#define CLK_GOUT_CMU_MODEM_SHARED1 194
+#define CLK_GOUT_CMU_PERIC0_BUS 195
+#define CLK_GOUT_CMU_PERIC0_UART_DBG 196
+#define CLK_GOUT_CMU_PERIC0_USI00 197
+#define CLK_GOUT_CMU_PERIC0_USI01 198
+#define CLK_GOUT_CMU_PERIC0_USI02 199
+#define CLK_GOUT_CMU_PERIC0_USI03 200
+#define CLK_GOUT_CMU_PERIC1_BUS 201
+#define CLK_GOUT_CMU_PERIC1_SPEEDY2 202
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM0 203
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM1 204
+#define CLK_GOUT_CMU_PERIC1_UART_BT 205
+#define CLK_GOUT_CMU_PERIC1_USI04 206
+#define CLK_GOUT_CMU_PERIC1_USI05 207
+#define CLK_GOUT_CMU_PERIC1_USI06 208
+#define CLK_GOUT_CMU_PERIC1_USI07 209
+#define CLK_GOUT_CMU_PERIC1_USI08 210
+#define CLK_GOUT_CMU_PERIC1_USI09 211
+#define CLK_GOUT_CMU_PERIC1_USI10 212
+#define CLK_GOUT_CMU_PERIC1_USI11 213
+#define CLK_GOUT_CMU_PERIC1_USI12 214
+#define CLK_GOUT_CMU_PERIC1_USI13 215
+#define CLK_GOUT_CMU_PERIS_BUS 216
+#define CLK_GOUT_CMU_SRDZ_BUS 217
+#define CLK_GOUT_CMU_SRDZ_IMGD 218
+#define CLK_GOUT_CMU_VPU_BUS 219
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_MOUT_PERIS_GIC 2
+#define CLK_GOUT_PERIS_CMU_PERIS_PCLK 3
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 4
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS 5
+#define CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK 6
+#define CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK 7
+#define CLK_GOUT_PERIS_BUSIF_TMU_PCLK 8
+#define CLK_GOUT_PERIS_GIC_CLK 9
+#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK 10
+#define CLK_GOUT_PERIS_MCT_PCLK 11
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 12
+#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 13
+#define CLK_GOUT_PERIS_PMU_PERIS_PCLK 14
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK 15
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK 16
+#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 17
+#define CLK_GOUT_PERIS_TZPC00_PCLK 18
+#define CLK_GOUT_PERIS_TZPC01_PCLK 19
+#define CLK_GOUT_PERIS_TZPC02_PCLK 20
+#define CLK_GOUT_PERIS_TZPC03_PCLK 21
+#define CLK_GOUT_PERIS_TZPC04_PCLK 22
+#define CLK_GOUT_PERIS_TZPC05_PCLK 23
+#define CLK_GOUT_PERIS_TZPC06_PCLK 24
+#define CLK_GOUT_PERIS_TZPC07_PCLK 25
+#define CLK_GOUT_PERIS_TZPC08_PCLK 26
+#define CLK_GOUT_PERIS_TZPC09_PCLK 27
+#define CLK_GOUT_PERIS_TZPC10_PCLK 28
+#define CLK_GOUT_PERIS_TZPC11_PCLK 29
+#define CLK_GOUT_PERIS_TZPC12_PCLK 30
+#define CLK_GOUT_PERIS_TZPC13_PCLK 31
+#define CLK_GOUT_PERIS_TZPC14_PCLK 32
+#define CLK_GOUT_PERIS_TZPC15_PCLK 33
+#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 34
+#define CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK 35
+#define CLK_GOUT_PERIS_XIU_P_PERIS_ACLK 36
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_DPGTC_USER 2
+#define CLK_MOUT_FSYS0_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS0_UFS_EMBD_USER 4
+#define CLK_MOUT_FSYS0_USBDRD30_USER 5
+#define CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK 6
+#define CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK 7
+#define CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK 8
+#define CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK 9
+#define CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK 10
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK 11
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK 12
+#define CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK 13
+#define CLK_GOUT_FSYS0_DP_LINK_I_PCLK 14
+#define CLK_GOUT_FSYS0_ETR_MIU_I_ACLK 15
+#define CLK_GOUT_FSYS0_ETR_MIU_I_PCLK 16
+#define CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK 17
+#define CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK 18
+#define CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK 19
+#define CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK 20
+#define CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK 21
+#define CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK 22
+#define CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN 23
+#define CLK_GOUT_FSYS0_PMU_FSYS0_PCLK 24
+#define CLK_GOUT_FSYS0_BCM_FSYS0_ACLK 25
+#define CLK_GOUT_FSYS0_BCM_FSYS0_PCLK 26
+#define CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK 27
+#define CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK 28
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK 29
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO 30
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK 31
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK 32
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK 33
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK 34
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK 35
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK 36
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK 37
+#define CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK 38
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK 39
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK 40
+#define CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK 41
+
+/* CMU_FSYS1 */
+#define CLK_MOUT_FSYS1_BUS_USER 1
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS1_PCIE_USER 3
+#define CLK_MOUT_FSYS1_UFS_CARD_USER 4
+#define CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN 5
+#define CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM 6
+#define CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK 7
+#define CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK 8
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK 9
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK 10
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK 11
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK 12
+#define CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK 13
+#define CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK 14
+#define CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK 15
+#define CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK 16
+#define CLK_GOUT_FSYS1_MMC_CARD_I_ACLK 17
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 18
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0 19
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1 20
+#define CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 21
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0 22
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1 23
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 24
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 25
+#define CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 26
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0 27
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1 28
+#define CLK_GOUT_FSYS1_PMU_FSYS1_PCLK 29
+#define CLK_GOUT_FSYS1_BCM_FSYS1_ACLK 30
+#define CLK_GOUT_FSYS1_BCM_FSYS1_PCLK 31
+#define CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK 32
+#define CLK_GOUT_FSYS1_RTIC_I_ACLK 33
+#define CLK_GOUT_FSYS1_RTIC_I_PCLK 34
+#define CLK_GOUT_FSYS1_SSS_I_ACLK 35
+#define CLK_GOUT_FSYS1_SSS_I_PCLK 36
+#define CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK 37
+#define CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK 38
+#define CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK 39
+#define CLK_GOUT_FSYS1_UFS_CARD_I_ACLK 40
+#define CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO 41
+#define CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK 42
+#define CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK 43
+#define CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK 44
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_UART_DBG_USER 2
+#define CLK_MOUT_PERIC0_USI00_USER 3
+#define CLK_MOUT_PERIC0_USI01_USER 4
+#define CLK_MOUT_PERIC0_USI02_USER 5
+#define CLK_MOUT_PERIC0_USI03_USER 6
+#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 7
+#define CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK 8
+#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 9
+#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 10
+#define CLK_GOUT_PERIC0_PMU_PERIC0_PCLK 11
+#define CLK_GOUT_PERIC0_PWM_I_PCLK_S0 12
+#define CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK 13
+#define CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK 14
+#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 15
+#define CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK 16
+#define CLK_GOUT_PERIC0_UART_DBG_PCLK 17
+#define CLK_GOUT_PERIC0_USI00_I_PCLK 18
+#define CLK_GOUT_PERIC0_USI00_I_SCLK_USI 19
+#define CLK_GOUT_PERIC0_USI01_I_PCLK 20
+#define CLK_GOUT_PERIC0_USI01_I_SCLK_USI 21
+#define CLK_GOUT_PERIC0_USI02_I_PCLK 22
+#define CLK_GOUT_PERIC0_USI02_I_SCLK_USI 23
+#define CLK_GOUT_PERIC0_USI03_I_PCLK 24
+#define CLK_GOUT_PERIC0_USI03_I_SCLK_USI 25
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_SPEEDY2_USER 2
+#define CLK_MOUT_PERIC1_SPI_CAM0_USER 3
+#define CLK_MOUT_PERIC1_SPI_CAM1_USER 4
+#define CLK_MOUT_PERIC1_UART_BT_USER 5
+#define CLK_MOUT_PERIC1_USI04_USER 6
+#define CLK_MOUT_PERIC1_USI05_USER 7
+#define CLK_MOUT_PERIC1_USI06_USER 8
+#define CLK_MOUT_PERIC1_USI07_USER 9
+#define CLK_MOUT_PERIC1_USI08_USER 10
+#define CLK_MOUT_PERIC1_USI09_USER 11
+#define CLK_MOUT_PERIC1_USI10_USER 12
+#define CLK_MOUT_PERIC1_USI11_USER 13
+#define CLK_MOUT_PERIC1_USI12_USER 14
+#define CLK_MOUT_PERIC1_USI13_USER 15
+#define CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK 16
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK 17
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK 18
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK 19
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK 20
+#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 21
+#define CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK 22
+#define CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK 23
+#define CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK 24
+#define CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK 25
+#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 26
+#define CLK_GOUT_PERIC1_PMU_PERIC1_PCLK 27
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK 28
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK 29
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK 30
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK 31
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK 32
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK 33
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK 34
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK 35
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK 36
+#define CLK_GOUT_PERIC1_SPI_CAM0_PCLK 37
+#define CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK 38
+#define CLK_GOUT_PERIC1_SPI_CAM1_PCLK 39
+#define CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK 40
+#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 41
+#define CLK_GOUT_PERIC1_UART_BT_EXT_UCLK 42
+#define CLK_GOUT_PERIC1_UART_BT_PCLK 43
+#define CLK_GOUT_PERIC1_USI04_I_PCLK 44
+#define CLK_GOUT_PERIC1_USI04_I_SCLK_USI 45
+#define CLK_GOUT_PERIC1_USI05_I_PCLK 46
+#define CLK_GOUT_PERIC1_USI05_I_SCLK_USI 47
+#define CLK_GOUT_PERIC1_USI06_I_PCLK 48
+#define CLK_GOUT_PERIC1_USI06_I_SCLK_USI 49
+#define CLK_GOUT_PERIC1_USI07_I_PCLK 50
+#define CLK_GOUT_PERIC1_USI07_I_SCLK_USI 51
+#define CLK_GOUT_PERIC1_USI08_I_PCLK 52
+#define CLK_GOUT_PERIC1_USI08_I_SCLK_USI 53
+#define CLK_GOUT_PERIC1_USI09_I_PCLK 54
+#define CLK_GOUT_PERIC1_USI09_I_SCLK_USI 55
+#define CLK_GOUT_PERIC1_USI10_I_PCLK 56
+#define CLK_GOUT_PERIC1_USI10_I_SCLK_USI 57
+#define CLK_GOUT_PERIC1_USI11_I_PCLK 58
+#define CLK_GOUT_PERIC1_USI11_I_SCLK_USI 59
+#define CLK_GOUT_PERIC1_USI12_I_PCLK 60
+#define CLK_GOUT_PERIC1_USI12_I_SCLK_USI 61
+#define CLK_GOUT_PERIC1_USI13_I_PCLK 62
+#define CLK_GOUT_PERIC1_USI13_I_SCLK_USI 63
+#define CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK 64
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS8895_H */
diff --git a/include/dt-bindings/clock/samsung,exynos990.h b/include/dt-bindings/clock/samsung,exynos990.h
new file mode 100644
index 000000000000..47540307cb52
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos990.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Device Tree binding constants for Exynos990 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_990_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_990_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_FOUT_G3D_PLL 6
+#define CLK_FOUT_MMC_PLL 7
+#define CLK_MOUT_PLL_SHARED0 8
+#define CLK_MOUT_PLL_SHARED1 9
+#define CLK_MOUT_PLL_SHARED2 10
+#define CLK_MOUT_PLL_SHARED3 11
+#define CLK_MOUT_PLL_SHARED4 12
+#define CLK_MOUT_PLL_MMC 13
+#define CLK_MOUT_PLL_G3D 14
+#define CLK_MOUT_CMU_APM_BUS 15
+#define CLK_MOUT_CMU_AUD_CPU 16
+#define CLK_MOUT_CMU_BUS0_BUS 17
+#define CLK_MOUT_CMU_BUS1_BUS 18
+#define CLK_MOUT_CMU_BUS1_SSS 19
+#define CLK_MOUT_CMU_CIS_CLK0 20
+#define CLK_MOUT_CMU_CIS_CLK1 21
+#define CLK_MOUT_CMU_CIS_CLK2 22
+#define CLK_MOUT_CMU_CIS_CLK3 23
+#define CLK_MOUT_CMU_CIS_CLK4 24
+#define CLK_MOUT_CMU_CIS_CLK5 25
+#define CLK_MOUT_CMU_CMU_BOOST 26
+#define CLK_MOUT_CMU_CORE_BUS 27
+#define CLK_MOUT_CMU_CPUCL0_DBG_BUS 28
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 29
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 30
+#define CLK_MOUT_CMU_CPUCL2_BUSP 31
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 32
+#define CLK_MOUT_CMU_CSIS_BUS 33
+#define CLK_MOUT_CMU_CSIS_OIS_MCU 34
+#define CLK_MOUT_CMU_DNC_BUS 35
+#define CLK_MOUT_CMU_DNC_BUSM 36
+#define CLK_MOUT_CMU_DNS_BUS 37
+#define CLK_MOUT_CMU_DPU 38
+#define CLK_MOUT_CMU_DPU_ALT 39
+#define CLK_MOUT_CMU_DSP_BUS 40
+#define CLK_MOUT_CMU_G2D_G2D 41
+#define CLK_MOUT_CMU_G2D_MSCL 42
+#define CLK_MOUT_CMU_HPM 43
+#define CLK_MOUT_CMU_HSI0_BUS 44
+#define CLK_MOUT_CMU_HSI0_DPGTC 45
+#define CLK_MOUT_CMU_HSI0_USB31DRD 46
+#define CLK_MOUT_CMU_HSI0_USBDP_DEBUG 47
+#define CLK_MOUT_CMU_HSI1_BUS 48
+#define CLK_MOUT_CMU_HSI1_MMC_CARD 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI1_UFS_CARD 51
+#define CLK_MOUT_CMU_HSI1_UFS_EMBD 52
+#define CLK_MOUT_CMU_HSI2_BUS 53
+#define CLK_MOUT_CMU_HSI2_PCIE 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_BUS 57
+#define CLK_MOUT_CMU_MCSC_GDC 58
+#define CLK_MOUT_CMU_CMU_BOOST_CPU 59
+#define CLK_MOUT_CMU_MFC0_MFC0 60
+#define CLK_MOUT_CMU_MFC0_WFD 61
+#define CLK_MOUT_CMU_MIF_BUSP 62
+#define CLK_MOUT_CMU_MIF_SWITCH 63
+#define CLK_MOUT_CMU_NPU_BUS 64
+#define CLK_MOUT_CMU_PERIC0_BUS 65
+#define CLK_MOUT_CMU_PERIC0_IP 66
+#define CLK_MOUT_CMU_PERIC1_BUS 67
+#define CLK_MOUT_CMU_PERIC1_IP 68
+#define CLK_MOUT_CMU_PERIS_BUS 69
+#define CLK_MOUT_CMU_SSP_BUS 70
+#define CLK_MOUT_CMU_TNR_BUS 71
+#define CLK_MOUT_CMU_VRA_BUS 72
+#define CLK_DOUT_CMU_APM_BUS 73
+#define CLK_DOUT_CMU_AUD_CPU 74
+#define CLK_DOUT_CMU_BUS0_BUS 75
+#define CLK_DOUT_CMU_BUS1_BUS 76
+#define CLK_DOUT_CMU_BUS1_SSS 77
+#define CLK_DOUT_CMU_CIS_CLK0 78
+#define CLK_DOUT_CMU_CIS_CLK1 79
+#define CLK_DOUT_CMU_CIS_CLK2 80
+#define CLK_DOUT_CMU_CIS_CLK3 81
+#define CLK_DOUT_CMU_CIS_CLK4 82
+#define CLK_DOUT_CMU_CIS_CLK5 83
+#define CLK_DOUT_CMU_CMU_BOOST 84
+#define CLK_DOUT_CMU_CORE_BUS 85
+#define CLK_DOUT_CMU_CPUCL0_DBG_BUS 86
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 87
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 88
+#define CLK_DOUT_CMU_CPUCL2_BUSP 89
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 90
+#define CLK_DOUT_CMU_CSIS_BUS 91
+#define CLK_DOUT_CMU_CSIS_OIS_MCU 92
+#define CLK_DOUT_CMU_DNC_BUS 93
+#define CLK_DOUT_CMU_DNC_BUSM 94
+#define CLK_DOUT_CMU_DNS_BUS 95
+#define CLK_DOUT_CMU_DSP_BUS 96
+#define CLK_DOUT_CMU_G2D_G2D 97
+#define CLK_DOUT_CMU_G2D_MSCL 98
+#define CLK_DOUT_CMU_G3D_SWITCH 99
+#define CLK_DOUT_CMU_HPM 100
+#define CLK_DOUT_CMU_HSI0_BUS 101
+#define CLK_DOUT_CMU_HSI0_DPGTC 102
+#define CLK_DOUT_CMU_HSI0_USB31DRD 103
+#define CLK_DOUT_CMU_HSI0_USBDP_DEBUG 104
+#define CLK_DOUT_CMU_HSI1_BUS 105
+#define CLK_DOUT_CMU_HSI1_MMC_CARD 106
+#define CLK_DOUT_CMU_HSI1_PCIE 107
+#define CLK_DOUT_CMU_HSI1_UFS_CARD 108
+#define CLK_DOUT_CMU_HSI1_UFS_EMBD 109
+#define CLK_DOUT_CMU_HSI2_BUS 110
+#define CLK_DOUT_CMU_HSI2_PCIE 111
+#define CLK_DOUT_CMU_IPP_BUS 112
+#define CLK_DOUT_CMU_ITP_BUS 113
+#define CLK_DOUT_CMU_MCSC_BUS 114
+#define CLK_DOUT_CMU_MCSC_GDC 115
+#define CLK_DOUT_CMU_CMU_BOOST_CPU 116
+#define CLK_DOUT_CMU_MFC0_MFC0 117
+#define CLK_DOUT_CMU_MFC0_WFD 118
+#define CLK_DOUT_CMU_MIF_BUSP 119
+#define CLK_DOUT_CMU_NPU_BUS 120
+#define CLK_DOUT_CMU_OTP 121
+#define CLK_DOUT_CMU_PERIC0_BUS 122
+#define CLK_DOUT_CMU_PERIC0_IP 123
+#define CLK_DOUT_CMU_PERIC1_BUS 124
+#define CLK_DOUT_CMU_PERIC1_IP 125
+#define CLK_DOUT_CMU_PERIS_BUS 126
+#define CLK_DOUT_CMU_SSP_BUS 127
+#define CLK_DOUT_CMU_TNR_BUS 128
+#define CLK_DOUT_CMU_VRA_BUS 129
+#define CLK_DOUT_CMU_DPU 130
+#define CLK_DOUT_CMU_DPU_ALT 131
+#define CLK_DOUT_CMU_SHARED0_DIV2 132
+#define CLK_DOUT_CMU_SHARED0_DIV3 133
+#define CLK_DOUT_CMU_SHARED0_DIV4 134
+#define CLK_DOUT_CMU_SHARED1_DIV2 135
+#define CLK_DOUT_CMU_SHARED1_DIV3 136
+#define CLK_DOUT_CMU_SHARED1_DIV4 137
+#define CLK_DOUT_CMU_SHARED2_DIV2 138
+#define CLK_DOUT_CMU_SHARED4_DIV2 139
+#define CLK_DOUT_CMU_SHARED4_DIV3 140
+#define CLK_DOUT_CMU_SHARED4_DIV4 141
+#define CLK_GOUT_CMU_G3D_BUS 142
+#define CLK_GOUT_CMU_MIF_SWITCH 143
+#define CLK_GOUT_CMU_APM_BUS 144
+#define CLK_GOUT_CMU_AUD_CPU 145
+#define CLK_GOUT_CMU_BUS0_BUS 146
+#define CLK_GOUT_CMU_BUS1_BUS 147
+#define CLK_GOUT_CMU_BUS1_SSS 148
+#define CLK_GOUT_CMU_CIS_CLK0 149
+#define CLK_GOUT_CMU_CIS_CLK1 150
+#define CLK_GOUT_CMU_CIS_CLK2 151
+#define CLK_GOUT_CMU_CIS_CLK3 152
+#define CLK_GOUT_CMU_CIS_CLK4 153
+#define CLK_GOUT_CMU_CIS_CLK5 154
+#define CLK_GOUT_CMU_CORE_BUS 155
+#define CLK_GOUT_CMU_CPUCL0_DBG_BUS 156
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 157
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 158
+#define CLK_GOUT_CMU_CPUCL2_BUSP 159
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 160
+#define CLK_GOUT_CMU_CSIS_BUS 161
+#define CLK_GOUT_CMU_CSIS_OIS_MCU 162
+#define CLK_GOUT_CMU_DNC_BUS 163
+#define CLK_GOUT_CMU_DNC_BUSM 164
+#define CLK_GOUT_CMU_DNS_BUS 165
+#define CLK_GOUT_CMU_DPU 166
+#define CLK_GOUT_CMU_DPU_BUS 167
+#define CLK_GOUT_CMU_DSP_BUS 168
+#define CLK_GOUT_CMU_G2D_G2D 169
+#define CLK_GOUT_CMU_G2D_MSCL 170
+#define CLK_GOUT_CMU_G3D_SWITCH 171
+#define CLK_GOUT_CMU_HPM 172
+#define CLK_GOUT_CMU_HSI0_BUS 173
+#define CLK_GOUT_CMU_HSI0_DPGTC 174
+#define CLK_GOUT_CMU_HSI0_USB31DRD 175
+#define CLK_GOUT_CMU_HSI0_USBDP_DEBUG 176
+#define CLK_GOUT_CMU_HSI1_BUS 177
+#define CLK_GOUT_CMU_HSI1_MMC_CARD 178
+#define CLK_GOUT_CMU_HSI1_PCIE 179
+#define CLK_GOUT_CMU_HSI1_UFS_CARD 180
+#define CLK_GOUT_CMU_HSI1_UFS_EMBD 181
+#define CLK_GOUT_CMU_HSI2_BUS 182
+#define CLK_GOUT_CMU_HSI2_PCIE 183
+#define CLK_GOUT_CMU_IPP_BUS 184
+#define CLK_GOUT_CMU_ITP_BUS 185
+#define CLK_GOUT_CMU_MCSC_BUS 186
+#define CLK_GOUT_CMU_MCSC_GDC 187
+#define CLK_GOUT_CMU_MFC0_MFC0 188
+#define CLK_GOUT_CMU_MFC0_WFD 189
+#define CLK_GOUT_CMU_MIF_BUSP 190
+#define CLK_GOUT_CMU_NPU_BUS 191
+#define CLK_GOUT_CMU_PERIC0_BUS 192
+#define CLK_GOUT_CMU_PERIC0_IP 193
+#define CLK_GOUT_CMU_PERIC1_BUS 194
+#define CLK_GOUT_CMU_PERIC1_IP 195
+#define CLK_GOUT_CMU_PERIS_BUS 196
+#define CLK_GOUT_CMU_SSP_BUS 197
+#define CLK_GOUT_CMU_TNR_BUS 198
+#define CLK_GOUT_CMU_VRA_BUS 199
+#define CLK_MOUT_CMU_CMUREF 200
+#define CLK_MOUT_CMU_DPU_BUS 201
+#define CLK_MOUT_CMU_CLK_CMUREF 202
+#define CLK_DOUT_CMU_CLK_CMUREF 203
+
+/* CMU_HSI0 */
+#define CLK_MOUT_HSI0_BUS_USER 1
+#define CLK_MOUT_HSI0_USB31DRD_USER 2
+#define CLK_MOUT_HSI0_USBDP_DEBUG_USER 3
+#define CLK_MOUT_HSI0_DPGTC_USER 4
+#define CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK 5
+#define CLK_GOUT_HSI0_DP_LINK_PCLK 6
+#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 7
+#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK 8
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK 9
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK 10
+#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 11
+#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 12
+#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 13
+#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 14
+#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 15
+#define CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40 16
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL 17
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB 18
+#define CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK 19
+#define CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK 20
+#define CLK_GOUT_HSI0_CMU_HSI0_PCLK 21
+#define CLK_GOUT_HSI0_XIU_D_HSI0_ACLK 22
+#define CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK 23
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_UART_DBG 2
+#define CLK_MOUT_PERIC0_USI00_USI_USER 3
+#define CLK_MOUT_PERIC0_USI01_USI_USER 4
+#define CLK_MOUT_PERIC0_USI02_USI_USER 5
+#define CLK_MOUT_PERIC0_USI03_USI_USER 6
+#define CLK_MOUT_PERIC0_USI04_USI_USER 7
+#define CLK_MOUT_PERIC0_USI05_USI_USER 8
+#define CLK_MOUT_PERIC0_USI13_USI_USER 9
+#define CLK_MOUT_PERIC0_USI14_USI_USER 10
+#define CLK_MOUT_PERIC0_USI15_USI_USER 11
+#define CLK_MOUT_PERIC0_USI_I2C_USER 12
+#define CLK_DOUT_PERIC0_UART_DBG 13
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI13_USI 20
+#define CLK_DOUT_PERIC0_USI14_USI 21
+#define CLK_DOUT_PERIC0_USI15_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_GOUT_PERIC0_CMU_PCLK 24
+#define CLK_GOUT_PERIC0_OSCCLK_CLK 25
+#define CLK_GOUT_PERIC0_D_TZPC_PCLK 26
+#define CLK_GOUT_PERIC0_GPIO_PCLK 27
+#define CLK_GOUT_PERIC0_LHM_AXI_P_CLK 28
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_10 29
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_11 30
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_12 31
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_13 32
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_14 33
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_15 34
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_4 35
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_5 36
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_6 37
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_7 38
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_8 39
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_9 40
+#define CLK_GOUT_PERIC0_TOP0_PCLK_10 41
+#define CLK_GOUT_PERIC0_TOP0_PCLK_11 42
+#define CLK_GOUT_PERIC0_TOP0_PCLK_12 43
+#define CLK_GOUT_PERIC0_TOP0_PCLK_13 44
+#define CLK_GOUT_PERIC0_TOP0_PCLK_14 45
+#define CLK_GOUT_PERIC0_TOP0_PCLK_15 46
+#define CLK_GOUT_PERIC0_TOP0_PCLK_4 47
+#define CLK_GOUT_PERIC0_TOP0_PCLK_5 48
+#define CLK_GOUT_PERIC0_TOP0_PCLK_6 49
+#define CLK_GOUT_PERIC0_TOP0_PCLK_7 50
+#define CLK_GOUT_PERIC0_TOP0_PCLK_8 51
+#define CLK_GOUT_PERIC0_TOP0_PCLK_9 52
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_0 53
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_3 54
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_4 55
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_5 56
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_6 57
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_7 58
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_8 59
+#define CLK_GOUT_PERIC0_TOP1_PCLK_0 60
+#define CLK_GOUT_PERIC0_TOP1_PCLK_15 61
+#define CLK_GOUT_PERIC0_TOP1_PCLK_3 62
+#define CLK_GOUT_PERIC0_TOP1_PCLK_4 63
+#define CLK_GOUT_PERIC0_TOP1_PCLK_5 64
+#define CLK_GOUT_PERIC0_TOP1_PCLK_6 65
+#define CLK_GOUT_PERIC0_TOP1_PCLK_7 66
+#define CLK_GOUT_PERIC0_TOP1_PCLK_8 67
+#define CLK_GOUT_PERIC0_BUSP_CLK 68
+#define CLK_GOUT_PERIC0_UART_DBG_CLK 69
+#define CLK_GOUT_PERIC0_USI00_USI_CLK 70
+#define CLK_GOUT_PERIC0_USI01_USI_CLK 71
+#define CLK_GOUT_PERIC0_USI02_USI_CLK 72
+#define CLK_GOUT_PERIC0_USI03_USI_CLK 73
+#define CLK_GOUT_PERIC0_USI04_USI_CLK 74
+#define CLK_GOUT_PERIC0_USI05_USI_CLK 75
+#define CLK_GOUT_PERIC0_USI13_USI_CLK 76
+#define CLK_GOUT_PERIC0_USI14_USI_CLK 77
+#define CLK_GOUT_PERIC0_USI15_USI_CLK 78
+#define CLK_GOUT_PERIC0_USI_I2C_CLK 79
+#define CLK_GOUT_PERIC0_SYSREG_PCLK 80
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_UART_BT_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI_USER 3
+#define CLK_MOUT_PERIC1_USI07_USI_USER 4
+#define CLK_MOUT_PERIC1_USI08_USI_USER 5
+#define CLK_MOUT_PERIC1_USI09_USI_USER 6
+#define CLK_MOUT_PERIC1_USI10_USI_USER 7
+#define CLK_MOUT_PERIC1_USI11_USI_USER 8
+#define CLK_MOUT_PERIC1_USI12_USI_USER 9
+#define CLK_MOUT_PERIC1_USI18_USI_USER 10
+#define CLK_MOUT_PERIC1_USI16_USI_USER 11
+#define CLK_MOUT_PERIC1_USI17_USI_USER 12
+#define CLK_MOUT_PERIC1_USI_I2C_USER 13
+#define CLK_DOUT_PERIC1_UART_BT 14
+#define CLK_DOUT_PERIC1_USI06_USI 15
+#define CLK_DOUT_PERIC1_USI07_USI 16
+#define CLK_DOUT_PERIC1_USI08_USI 17
+#define CLK_DOUT_PERIC1_USI18_USI 18
+#define CLK_DOUT_PERIC1_USI12_USI 19
+#define CLK_DOUT_PERIC1_USI09_USI 20
+#define CLK_DOUT_PERIC1_USI10_USI 21
+#define CLK_DOUT_PERIC1_USI11_USI 22
+#define CLK_DOUT_PERIC1_USI16_USI 23
+#define CLK_DOUT_PERIC1_USI17_USI 24
+#define CLK_DOUT_PERIC1_USI_I2C 25
+#define CLK_GOUT_PERIC1_CMU_PCLK 26
+#define CLK_GOUT_PERIC1_UART_BT_CLK 27
+#define CLK_GOUT_PERIC1_USI12_USI_CLK 28
+#define CLK_GOUT_PERIC1_USI18_USI_CLK 29
+#define CLK_GOUT_PERIC1_D_TZPC_PCLK 30
+#define CLK_GOUT_PERIC1_GPIO_PCLK 31
+#define CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK 32
+#define CLK_GOUT_PERIC1_LHM_AXI_P_CLK 33
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_10 34
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_11 35
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_12 36
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_13 37
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_14 38
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_15 39
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_4 40
+#define CLK_GOUT_PERIC1_TOP0_PCLK_10 41
+#define CLK_GOUT_PERIC1_TOP0_PCLK_11 42
+#define CLK_GOUT_PERIC1_TOP0_PCLK_12 43
+#define CLK_GOUT_PERIC1_TOP0_PCLK_13 44
+#define CLK_GOUT_PERIC1_TOP0_PCLK_14 45
+#define CLK_GOUT_PERIC1_TOP0_PCLK_15 46
+#define CLK_GOUT_PERIC1_TOP0_PCLK_4 47
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_0 48
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_1 49
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_10 50
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_12 51
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_13 52
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_14 53
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_15 54
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_2 55
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_3 56
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_4 57
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_5 58
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_6 59
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_7 60
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_9 61
+#define CLK_GOUT_PERIC1_TOP1_PCLK_0 62
+#define CLK_GOUT_PERIC1_TOP1_PCLK_1 63
+#define CLK_GOUT_PERIC1_TOP1_PCLK_10 64
+#define CLK_GOUT_PERIC1_TOP1_PCLK_12 65
+#define CLK_GOUT_PERIC1_TOP1_PCLK_13 66
+#define CLK_GOUT_PERIC1_TOP1_PCLK_14 67
+#define CLK_GOUT_PERIC1_TOP1_PCLK_15 68
+#define CLK_GOUT_PERIC1_TOP1_PCLK_2 69
+#define CLK_GOUT_PERIC1_TOP1_PCLK_3 70
+#define CLK_GOUT_PERIC1_TOP1_PCLK_4 71
+#define CLK_GOUT_PERIC1_TOP1_PCLK_5 72
+#define CLK_GOUT_PERIC1_TOP1_PCLK_6 73
+#define CLK_GOUT_PERIC1_TOP1_PCLK_7 74
+#define CLK_GOUT_PERIC1_TOP1_PCLK_9 75
+#define CLK_GOUT_PERIC1_BUSP_CLK 76
+#define CLK_GOUT_PERIC1_OSCCLK_CLK 77
+#define CLK_GOUT_PERIC1_USI06_USI_CLK 78
+#define CLK_GOUT_PERIC1_USI07_USI_CLK 79
+#define CLK_GOUT_PERIC1_USI08_USI_CLK 80
+#define CLK_GOUT_PERIC1_USI09_USI_CLK 81
+#define CLK_GOUT_PERIC1_USI10_USI_CLK 82
+#define CLK_GOUT_PERIC1_USI11_USI_CLK 83
+#define CLK_GOUT_PERIC1_USI16_USI_CLK 84
+#define CLK_GOUT_PERIC1_USI17_USI_CLK 85
+#define CLK_GOUT_PERIC1_USI_I2C_CLK 86
+#define CLK_GOUT_PERIC1_SYSREG_PCLK 87
+#define CLK_GOUT_PERIC1_USI16_I3C_PCLK 88
+#define CLK_GOUT_PERIC1_USI16_I3C_SCLK 89
+#define CLK_GOUT_PERIC1_USI17_I3C_PCLK 90
+#define CLK_GOUT_PERIC1_USI17_I3C_SCLK 91
+#define CLK_GOUT_PERIC1_XIU_P_ACLK 92
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_MOUT_PERIS_CLK_PERIS_GIC 2
+#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 3
+#define CLK_GOUT_PERIS_WDT_CLUSTER2_PCLK 4
+#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 5
+#define CLK_CLK_PERIS_PERIS_CMU_PERIS_PCLK 6
+#define CLK_GOUT_PERIS_CLK_PERIS_BUSP_CLK 7
+#define CLK_GOUT_PERIS_CLK_PERIS_OSCCLK_CLK 8
+#define CLK_GOUT_PERIS_CLK_PERIS_GIC_CLK 9
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 10
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 11
+#define CLK_GOUT_PERIS_GIC_CLK 12
+#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_CLK 13
+#define CLK_GOUT_PERIS_MCT_PCLK 14
+#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 15
+#define CLK_GOUT_PERIS_D_TZPC_PERIS_PCLK 16
+#define CLK_GOUT_PERIS_TMU_TOP_PCLK 17
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_OSCCLK 18
+#define CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK 19
+
+#endif
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 3065375c2d8b..ce8fb8f7d718 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -179,6 +179,17 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5

+/* CMU_DPUM */
+#define CLK_MOUT_DPUM_BUS_USER 1
+#define CLK_DOUT_DPUM_BUSP 2
+#define CLK_GOUT_DPUM_ACLK_DECON 3
+#define CLK_GOUT_DPUM_ACLK_DMA 4
+#define CLK_GOUT_DPUM_ACLK_DPP 5
+#define CLK_GOUT_DPUM_SYSMMU_D0_CLK 6
+#define CLK_GOUT_DPUM_SYSMMU_D1_CLK 7
+#define CLK_GOUT_DPUM_SYSMMU_D2_CLK 8
+#define CLK_GOUT_DPUM_SYSMMU_D3_CLK 9
+
/* CMU_FSYS0 */
#define CLK_MOUT_FSYS0_BUS_USER 1
#define CLK_MOUT_FSYS0_PCIE_USER 2
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
new file mode 100644
index 000000000000..970d05167fc6
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Device Tree binding constants for ExynosAuto v920 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+#define FOUT_SHARED5_PLL 6
+#define FOUT_MMC_PLL 7
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 8
+#define MOUT_SHARED1_PLL 9
+#define MOUT_SHARED2_PLL 10
+#define MOUT_SHARED3_PLL 11
+#define MOUT_SHARED4_PLL 12
+#define MOUT_SHARED5_PLL 13
+#define MOUT_MMC_PLL 14
+#define MOUT_CLKCMU_CMU_BOOST 15
+#define MOUT_CLKCMU_CMU_CMUREF 16
+#define MOUT_CLKCMU_ACC_NOC 17
+#define MOUT_CLKCMU_ACC_ORB 18
+#define MOUT_CLKCMU_APM_NOC 19
+#define MOUT_CLKCMU_AUD_CPU 20
+#define MOUT_CLKCMU_AUD_NOC 21
+#define MOUT_CLKCMU_CPUCL0_SWITCH 22
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 23
+#define MOUT_CLKCMU_CPUCL0_DBG 24
+#define MOUT_CLKCMU_CPUCL1_SWITCH 25
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 26
+#define MOUT_CLKCMU_CPUCL2_SWITCH 27
+#define MOUT_CLKCMU_CPUCL2_CLUSTER 28
+#define MOUT_CLKCMU_DNC_NOC 29
+#define MOUT_CLKCMU_DPTX_NOC 30
+#define MOUT_CLKCMU_DPTX_DPGTC 31
+#define MOUT_CLKCMU_DPTX_DPOSC 32
+#define MOUT_CLKCMU_DPUB_NOC 33
+#define MOUT_CLKCMU_DPUB_DSIM 34
+#define MOUT_CLKCMU_DPUF0_NOC 35
+#define MOUT_CLKCMU_DPUF1_NOC 36
+#define MOUT_CLKCMU_DPUF2_NOC 37
+#define MOUT_CLKCMU_DSP_NOC 38
+#define MOUT_CLKCMU_G3D_SWITCH 39
+#define MOUT_CLKCMU_G3D_NOCP 40
+#define MOUT_CLKCMU_GNPU_NOC 41
+#define MOUT_CLKCMU_HSI0_NOC 42
+#define MOUT_CLKCMU_HSI1_NOC 43
+#define MOUT_CLKCMU_HSI1_USBDRD 44
+#define MOUT_CLKCMU_HSI1_MMC_CARD 45
+#define MOUT_CLKCMU_HSI2_NOC 46
+#define MOUT_CLKCMU_HSI2_NOC_UFS 47
+#define MOUT_CLKCMU_HSI2_UFS_EMBD 48
+#define MOUT_CLKCMU_HSI2_ETHERNET 49
+#define MOUT_CLKCMU_ISP_NOC 50
+#define MOUT_CLKCMU_M2M_NOC 51
+#define MOUT_CLKCMU_M2M_JPEG 52
+#define MOUT_CLKCMU_MFC_MFC 53
+#define MOUT_CLKCMU_MFC_WFD 54
+#define MOUT_CLKCMU_MFD_NOC 55
+#define MOUT_CLKCMU_MIF_SWITCH 56
+#define MOUT_CLKCMU_MIF_NOCP 57
+#define MOUT_CLKCMU_MISC_NOC 58
+#define MOUT_CLKCMU_NOCL0_NOC 59
+#define MOUT_CLKCMU_NOCL1_NOC 60
+#define MOUT_CLKCMU_NOCL2_NOC 61
+#define MOUT_CLKCMU_PERIC0_NOC 62
+#define MOUT_CLKCMU_PERIC0_IP 63
+#define MOUT_CLKCMU_PERIC1_NOC 64
+#define MOUT_CLKCMU_PERIC1_IP 65
+#define MOUT_CLKCMU_SDMA_NOC 66
+#define MOUT_CLKCMU_SNW_NOC 67
+#define MOUT_CLKCMU_SSP_NOC 68
+#define MOUT_CLKCMU_TAA_NOC 69
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV1 70
+#define DOUT_SHARED0_DIV2 71
+#define DOUT_SHARED0_DIV3 72
+#define DOUT_SHARED0_DIV4 73
+#define DOUT_SHARED1_DIV1 74
+#define DOUT_SHARED1_DIV2 75
+#define DOUT_SHARED1_DIV3 76
+#define DOUT_SHARED1_DIV4 77
+#define DOUT_SHARED2_DIV1 78
+#define DOUT_SHARED2_DIV2 79
+#define DOUT_SHARED2_DIV3 80
+#define DOUT_SHARED2_DIV4 81
+#define DOUT_SHARED3_DIV1 82
+#define DOUT_SHARED3_DIV2 83
+#define DOUT_SHARED3_DIV3 84
+#define DOUT_SHARED3_DIV4 85
+#define DOUT_SHARED4_DIV1 86
+#define DOUT_SHARED4_DIV2 87
+#define DOUT_SHARED4_DIV3 88
+#define DOUT_SHARED4_DIV4 89
+#define DOUT_SHARED5_DIV1 90
+#define DOUT_SHARED5_DIV2 91
+#define DOUT_SHARED5_DIV3 92
+#define DOUT_SHARED5_DIV4 93
+#define DOUT_CLKCMU_CMU_BOOST 94
+#define DOUT_CLKCMU_ACC_NOC 95
+#define DOUT_CLKCMU_ACC_ORB 96
+#define DOUT_CLKCMU_APM_NOC 97
+#define DOUT_CLKCMU_AUD_CPU 98
+#define DOUT_CLKCMU_AUD_NOC 99
+#define DOUT_CLKCMU_CPUCL0_SWITCH 100
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 101
+#define DOUT_CLKCMU_CPUCL0_DBG 102
+#define DOUT_CLKCMU_CPUCL1_SWITCH 103
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 104
+#define DOUT_CLKCMU_CPUCL2_SWITCH 105
+#define DOUT_CLKCMU_CPUCL2_CLUSTER 106
+#define DOUT_CLKCMU_DNC_NOC 107
+#define DOUT_CLKCMU_DPTX_NOC 108
+#define DOUT_CLKCMU_DPTX_DPGTC 109
+#define DOUT_CLKCMU_DPTX_DPOSC 110
+#define DOUT_CLKCMU_DPUB_NOC 111
+#define DOUT_CLKCMU_DPUB_DSIM 112
+#define DOUT_CLKCMU_DPUF0_NOC 113
+#define DOUT_CLKCMU_DPUF1_NOC 114
+#define DOUT_CLKCMU_DPUF2_NOC 115
+#define DOUT_CLKCMU_DSP_NOC 116
+#define DOUT_CLKCMU_G3D_SWITCH 117
+#define DOUT_CLKCMU_G3D_NOCP 118
+#define DOUT_CLKCMU_GNPU_NOC 119
+#define DOUT_CLKCMU_HSI0_NOC 120
+#define DOUT_CLKCMU_HSI1_NOC 121
+#define DOUT_CLKCMU_HSI1_USBDRD 122
+#define DOUT_CLKCMU_HSI1_MMC_CARD 123
+#define DOUT_CLKCMU_HSI2_NOC 124
+#define DOUT_CLKCMU_HSI2_NOC_UFS 125
+#define DOUT_CLKCMU_HSI2_UFS_EMBD 126
+#define DOUT_CLKCMU_HSI2_ETHERNET 127
+#define DOUT_CLKCMU_ISP_NOC 128
+#define DOUT_CLKCMU_M2M_NOC 129
+#define DOUT_CLKCMU_M2M_JPEG 130
+#define DOUT_CLKCMU_MFC_MFC 131
+#define DOUT_CLKCMU_MFC_WFD 132
+#define DOUT_CLKCMU_MFD_NOC 133
+#define DOUT_CLKCMU_MIF_NOCP 134
+#define DOUT_CLKCMU_MISC_NOC 135
+#define DOUT_CLKCMU_NOCL0_NOC 136
+#define DOUT_CLKCMU_NOCL1_NOC 137
+#define DOUT_CLKCMU_NOCL2_NOC 138
+#define DOUT_CLKCMU_PERIC0_NOC 139
+#define DOUT_CLKCMU_PERIC0_IP 140
+#define DOUT_CLKCMU_PERIC1_NOC 141
+#define DOUT_CLKCMU_PERIC1_IP 142
+#define DOUT_CLKCMU_SDMA_NOC 143
+#define DOUT_CLKCMU_SNW_NOC 144
+#define DOUT_CLKCMU_SSP_NOC 145
+#define DOUT_CLKCMU_TAA_NOC 146
+#define DOUT_TCXO_DIV2 147
+
+/* CMU_CPUCL0 */
+#define CLK_FOUT_CPUCL0_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL0 2
+#define CLK_MOUT_CPUCL0_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL0_DBG_USER 4
+#define CLK_MOUT_CPUCL0_SWITCH_USER 5
+#define CLK_MOUT_CPUCL0_CLUSTER 6
+#define CLK_MOUT_CPUCL0_CORE 7
+
+#define CLK_DOUT_CLUSTER0_ACLK 8
+#define CLK_DOUT_CLUSTER0_ATCLK 9
+#define CLK_DOUT_CLUSTER0_MPCLK 10
+#define CLK_DOUT_CLUSTER0_PCLK 11
+#define CLK_DOUT_CLUSTER0_PERIPHCLK 12
+#define CLK_DOUT_CPUCL0_DBG_NOC 13
+#define CLK_DOUT_CPUCL0_DBG_PCLKDBG 14
+#define CLK_DOUT_CPUCL0_NOCP 15
+
+/* CMU_CPUCL1 */
+#define CLK_FOUT_CPUCL1_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL1 2
+#define CLK_MOUT_CPUCL1_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL1_SWITCH_USER 4
+#define CLK_MOUT_CPUCL1_CLUSTER 5
+#define CLK_MOUT_CPUCL1_CORE 6
+
+#define CLK_DOUT_CLUSTER1_ACLK 7
+#define CLK_DOUT_CLUSTER1_ATCLK 8
+#define CLK_DOUT_CLUSTER1_MPCLK 9
+#define CLK_DOUT_CLUSTER1_PCLK 10
+#define CLK_DOUT_CLUSTER1_PERIPHCLK 11
+#define CLK_DOUT_CPUCL1_NOCP 12
+
+/* CMU_CPUCL2 */
+#define CLK_FOUT_CPUCL2_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL2 2
+#define CLK_MOUT_CPUCL2_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL2_SWITCH_USER 4
+#define CLK_MOUT_CPUCL2_CLUSTER 5
+#define CLK_MOUT_CPUCL2_CORE 6
+
+#define CLK_DOUT_CLUSTER2_ACLK 7
+#define CLK_DOUT_CLUSTER2_ATCLK 8
+#define CLK_DOUT_CLUSTER2_MPCLK 9
+#define CLK_DOUT_CLUSTER2_PCLK 10
+#define CLK_DOUT_CLUSTER2_PERIPHCLK 11
+#define CLK_DOUT_CPUCL2_NOCP 12
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP_USER 1
+#define CLK_MOUT_PERIC0_NOC_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI06_USI 9
+#define CLK_MOUT_PERIC0_USI07_USI 10
+#define CLK_MOUT_PERIC0_USI08_USI 11
+#define CLK_MOUT_PERIC0_USI_I2C 12
+#define CLK_MOUT_PERIC0_I3C 13
+
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI06_USI 20
+#define CLK_DOUT_PERIC0_USI07_USI 21
+#define CLK_DOUT_PERIC0_USI08_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_DOUT_PERIC0_I3C 24
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_IP_USER 1
+#define CLK_MOUT_PERIC1_NOC_USER 2
+#define CLK_MOUT_PERIC1_USI09_USI 3
+#define CLK_MOUT_PERIC1_USI10_USI 4
+#define CLK_MOUT_PERIC1_USI11_USI 5
+#define CLK_MOUT_PERIC1_USI12_USI 6
+#define CLK_MOUT_PERIC1_USI13_USI 7
+#define CLK_MOUT_PERIC1_USI14_USI 8
+#define CLK_MOUT_PERIC1_USI15_USI 9
+#define CLK_MOUT_PERIC1_USI16_USI 10
+#define CLK_MOUT_PERIC1_USI17_USI 11
+#define CLK_MOUT_PERIC1_USI_I2C 12
+#define CLK_MOUT_PERIC1_I3C 13
+
+#define CLK_DOUT_PERIC1_USI09_USI 14
+#define CLK_DOUT_PERIC1_USI10_USI 15
+#define CLK_DOUT_PERIC1_USI11_USI 16
+#define CLK_DOUT_PERIC1_USI12_USI 17
+#define CLK_DOUT_PERIC1_USI13_USI 18
+#define CLK_DOUT_PERIC1_USI14_USI 19
+#define CLK_DOUT_PERIC1_USI15_USI 20
+#define CLK_DOUT_PERIC1_USI16_USI 21
+#define CLK_DOUT_PERIC1_USI17_USI 22
+#define CLK_DOUT_PERIC1_USI_I2C 23
+#define CLK_DOUT_PERIC1_I3C 24
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_NOC_USER 1
+#define CLK_MOUT_MISC_GIC 2
+
+#define CLK_DOUT_MISC_OTP 3
+#define CLK_DOUT_MISC_NOCP 4
+#define CLK_DOUT_MISC_OSC_DIV2 5
+
+/* CMU_HSI0 */
+#define CLK_MOUT_HSI0_NOC_USER 1
+
+#define CLK_DOUT_HSI0_PCIE_APB 2
+
+/* CMU_HSI1 */
+#define CLK_MOUT_HSI1_MMC_CARD_USER 1
+#define CLK_MOUT_HSI1_NOC_USER 2
+#define CLK_MOUT_HSI1_USBDRD_USER 3
+#define CLK_MOUT_HSI1_USBDRD 4
+
+/* CMU_HSI2 */
+#define FOUT_PLL_ETH 1
+#define CLK_MOUT_HSI2_NOC_UFS_USER 2
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 3
+#define CLK_MOUT_HSI2_ETHERNET 4
+#define CLK_MOUT_HSI2_ETHERNET_USER 5
+#define CLK_DOUT_HSI2_ETHERNET 6
+#define CLK_DOUT_HSI2_ETHERNET_PTP 7
+
+/* CMU_M2M */
+#define CLK_MOUT_M2M_JPEG_USER 1
+#define CLK_MOUT_M2M_NOC_USER 2
+#define CLK_DOUT_M2M_NOCP 3
+
+/* CMU_MFC */
+#define CLK_MOUT_MFC_MFC_USER 1
+#define CLK_MOUT_MFC_WFD_USER 2
+#define CLK_DOUT_MFC_NOCP 3
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/clock/sophgo,sg2042-clkgen.h b/include/dt-bindings/clock/sophgo,sg2042-clkgen.h
new file mode 100644
index 000000000000..84f7857317a2
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-clkgen.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__
+
+#define DIV_CLK_MPLL_RP_CPU_NORMAL_0 0
+#define DIV_CLK_MPLL_AXI_DDR_0 1
+#define DIV_CLK_FPLL_DDR01_1 2
+#define DIV_CLK_FPLL_DDR23_1 3
+#define DIV_CLK_FPLL_RP_CPU_NORMAL_1 4
+#define DIV_CLK_FPLL_50M_A53 5
+#define DIV_CLK_FPLL_TOP_RP_CMN_DIV2 6
+#define DIV_CLK_FPLL_UART_500M 7
+#define DIV_CLK_FPLL_AHB_LPC 8
+#define DIV_CLK_FPLL_EFUSE 9
+#define DIV_CLK_FPLL_TX_ETH0 10
+#define DIV_CLK_FPLL_PTP_REF_I_ETH0 11
+#define DIV_CLK_FPLL_REF_ETH0 12
+#define DIV_CLK_FPLL_EMMC 13
+#define DIV_CLK_FPLL_SD 14
+#define DIV_CLK_FPLL_TOP_AXI0 15
+#define DIV_CLK_FPLL_TOP_AXI_HSPERI 16
+#define DIV_CLK_FPLL_AXI_DDR_1 17
+#define DIV_CLK_FPLL_DIV_TIMER1 18
+#define DIV_CLK_FPLL_DIV_TIMER2 19
+#define DIV_CLK_FPLL_DIV_TIMER3 20
+#define DIV_CLK_FPLL_DIV_TIMER4 21
+#define DIV_CLK_FPLL_DIV_TIMER5 22
+#define DIV_CLK_FPLL_DIV_TIMER6 23
+#define DIV_CLK_FPLL_DIV_TIMER7 24
+#define DIV_CLK_FPLL_DIV_TIMER8 25
+#define DIV_CLK_FPLL_100K_EMMC 26
+#define DIV_CLK_FPLL_100K_SD 27
+#define DIV_CLK_FPLL_GPIO_DB 28
+#define DIV_CLK_DPLL0_DDR01_0 29
+#define DIV_CLK_DPLL1_DDR23_0 30
+
+#define GATE_CLK_RP_CPU_NORMAL_DIV0 31
+#define GATE_CLK_AXI_DDR_DIV0 32
+
+#define GATE_CLK_RP_CPU_NORMAL_DIV1 33
+#define GATE_CLK_A53_50M 34
+#define GATE_CLK_TOP_RP_CMN_DIV2 35
+#define GATE_CLK_HSDMA 36
+#define GATE_CLK_EMMC_100M 37
+#define GATE_CLK_SD_100M 38
+#define GATE_CLK_TX_ETH0 39
+#define GATE_CLK_PTP_REF_I_ETH0 40
+#define GATE_CLK_REF_ETH0 41
+#define GATE_CLK_UART_500M 42
+#define GATE_CLK_EFUSE 43
+
+#define GATE_CLK_AHB_LPC 44
+#define GATE_CLK_AHB_ROM 45
+#define GATE_CLK_AHB_SF 46
+
+#define GATE_CLK_APB_UART 47
+#define GATE_CLK_APB_TIMER 48
+#define GATE_CLK_APB_EFUSE 49
+#define GATE_CLK_APB_GPIO 50
+#define GATE_CLK_APB_GPIO_INTR 51
+#define GATE_CLK_APB_SPI 52
+#define GATE_CLK_APB_I2C 53
+#define GATE_CLK_APB_WDT 54
+#define GATE_CLK_APB_PWM 55
+#define GATE_CLK_APB_RTC 56
+
+#define GATE_CLK_AXI_PCIE0 57
+#define GATE_CLK_AXI_PCIE1 58
+#define GATE_CLK_SYSDMA_AXI 59
+#define GATE_CLK_AXI_DBG_I2C 60
+#define GATE_CLK_AXI_SRAM 61
+#define GATE_CLK_AXI_ETH0 62
+#define GATE_CLK_AXI_EMMC 63
+#define GATE_CLK_AXI_SD 64
+#define GATE_CLK_TOP_AXI0 65
+#define GATE_CLK_TOP_AXI_HSPERI 66
+
+#define GATE_CLK_TIMER1 67
+#define GATE_CLK_TIMER2 68
+#define GATE_CLK_TIMER3 69
+#define GATE_CLK_TIMER4 70
+#define GATE_CLK_TIMER5 71
+#define GATE_CLK_TIMER6 72
+#define GATE_CLK_TIMER7 73
+#define GATE_CLK_TIMER8 74
+#define GATE_CLK_100K_EMMC 75
+#define GATE_CLK_100K_SD 76
+#define GATE_CLK_GPIO_DB 77
+
+#define GATE_CLK_AXI_DDR_DIV1 78
+#define GATE_CLK_DDR01_DIV1 79
+#define GATE_CLK_DDR23_DIV1 80
+
+#define GATE_CLK_DDR01_DIV0 81
+#define GATE_CLK_DDR23_DIV0 82
+
+#define GATE_CLK_DDR01 83
+#define GATE_CLK_DDR23 84
+#define GATE_CLK_RP_CPU_NORMAL 85
+#define GATE_CLK_AXI_DDR 86
+
+#define MUX_CLK_DDR01 87
+#define MUX_CLK_DDR23 88
+#define MUX_CLK_RP_CPU_NORMAL 89
+#define MUX_CLK_AXI_DDR 90
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2042-pll.h b/include/dt-bindings/clock/sophgo,sg2042-pll.h
new file mode 100644
index 000000000000..2d519b3bf51c
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-pll.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_PLL_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_PLL_H__
+
+#define MPLL_CLK 0
+#define FPLL_CLK 1
+#define DPLL0_CLK 2
+#define DPLL1_CLK 3
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_PLL_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2042-rpgate.h b/include/dt-bindings/clock/sophgo,sg2042-rpgate.h
new file mode 100644
index 000000000000..8b4522d5f559
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-rpgate.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__
+
+#define GATE_CLK_RXU0 0
+#define GATE_CLK_RXU1 1
+#define GATE_CLK_RXU2 2
+#define GATE_CLK_RXU3 3
+#define GATE_CLK_RXU4 4
+#define GATE_CLK_RXU5 5
+#define GATE_CLK_RXU6 6
+#define GATE_CLK_RXU7 7
+#define GATE_CLK_RXU8 8
+#define GATE_CLK_RXU9 9
+#define GATE_CLK_RXU10 10
+#define GATE_CLK_RXU11 11
+#define GATE_CLK_RXU12 12
+#define GATE_CLK_RXU13 13
+#define GATE_CLK_RXU14 14
+#define GATE_CLK_RXU15 15
+#define GATE_CLK_RXU16 16
+#define GATE_CLK_RXU17 17
+#define GATE_CLK_RXU18 18
+#define GATE_CLK_RXU19 19
+#define GATE_CLK_RXU20 20
+#define GATE_CLK_RXU21 21
+#define GATE_CLK_RXU22 22
+#define GATE_CLK_RXU23 23
+#define GATE_CLK_RXU24 24
+#define GATE_CLK_RXU25 25
+#define GATE_CLK_RXU26 26
+#define GATE_CLK_RXU27 27
+#define GATE_CLK_RXU28 28
+#define GATE_CLK_RXU29 29
+#define GATE_CLK_RXU30 30
+#define GATE_CLK_RXU31 31
+#define GATE_CLK_MP0 32
+#define GATE_CLK_MP1 33
+#define GATE_CLK_MP2 34
+#define GATE_CLK_MP3 35
+#define GATE_CLK_MP4 36
+#define GATE_CLK_MP5 37
+#define GATE_CLK_MP6 38
+#define GATE_CLK_MP7 39
+#define GATE_CLK_MP8 40
+#define GATE_CLK_MP9 41
+#define GATE_CLK_MP10 42
+#define GATE_CLK_MP11 43
+#define GATE_CLK_MP12 44
+#define GATE_CLK_MP13 45
+#define GATE_CLK_MP14 46
+#define GATE_CLK_MP15 47
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2044-clk.h b/include/dt-bindings/clock/sophgo,sg2044-clk.h
new file mode 100644
index 000000000000..d9adca42548e
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2044-clk.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2044_CLK_H__
+#define __DT_BINDINGS_SOPHGO_SG2044_CLK_H__
+
+#define CLK_DIV_AP_SYS_FIXED 0
+#define CLK_DIV_AP_SYS_MAIN 1
+#define CLK_DIV_RP_SYS_FIXED 2
+#define CLK_DIV_RP_SYS_MAIN 3
+#define CLK_DIV_TPU_SYS_FIXED 4
+#define CLK_DIV_TPU_SYS_MAIN 5
+#define CLK_DIV_NOC_SYS_FIXED 6
+#define CLK_DIV_NOC_SYS_MAIN 7
+#define CLK_DIV_VC_SRC0_FIXED 8
+#define CLK_DIV_VC_SRC0_MAIN 9
+#define CLK_DIV_VC_SRC1_FIXED 10
+#define CLK_DIV_VC_SRC1_MAIN 11
+#define CLK_DIV_CXP_MAC_FIXED 12
+#define CLK_DIV_CXP_MAC_MAIN 13
+#define CLK_DIV_DDR0_FIXED 14
+#define CLK_DIV_DDR0_MAIN 15
+#define CLK_DIV_DDR1_FIXED 16
+#define CLK_DIV_DDR1_MAIN 17
+#define CLK_DIV_DDR2_FIXED 18
+#define CLK_DIV_DDR2_MAIN 19
+#define CLK_DIV_DDR3_FIXED 20
+#define CLK_DIV_DDR3_MAIN 21
+#define CLK_DIV_DDR4_FIXED 22
+#define CLK_DIV_DDR4_MAIN 23
+#define CLK_DIV_DDR5_FIXED 24
+#define CLK_DIV_DDR5_MAIN 25
+#define CLK_DIV_DDR6_FIXED 26
+#define CLK_DIV_DDR6_MAIN 27
+#define CLK_DIV_DDR7_FIXED 28
+#define CLK_DIV_DDR7_MAIN 29
+#define CLK_DIV_TOP_50M 30
+#define CLK_DIV_TOP_AXI0 31
+#define CLK_DIV_TOP_AXI_HSPERI 32
+#define CLK_DIV_TIMER0 33
+#define CLK_DIV_TIMER1 34
+#define CLK_DIV_TIMER2 35
+#define CLK_DIV_TIMER3 36
+#define CLK_DIV_TIMER4 37
+#define CLK_DIV_TIMER5 38
+#define CLK_DIV_TIMER6 39
+#define CLK_DIV_TIMER7 40
+#define CLK_DIV_CXP_TEST_PHY 41
+#define CLK_DIV_CXP_TEST_ETH_PHY 42
+#define CLK_DIV_C2C0_TEST_PHY 43
+#define CLK_DIV_C2C1_TEST_PHY 44
+#define CLK_DIV_PCIE_1G 45
+#define CLK_DIV_UART_500M 46
+#define CLK_DIV_GPIO_DB 47
+#define CLK_DIV_SD 48
+#define CLK_DIV_SD_100K 49
+#define CLK_DIV_EMMC 50
+#define CLK_DIV_EMMC_100K 51
+#define CLK_DIV_EFUSE 52
+#define CLK_DIV_TX_ETH0 53
+#define CLK_DIV_PTP_REF_I_ETH0 54
+#define CLK_DIV_REF_ETH0 55
+#define CLK_DIV_PKA 56
+#define CLK_MUX_DDR0 57
+#define CLK_MUX_DDR1 58
+#define CLK_MUX_DDR2 59
+#define CLK_MUX_DDR3 60
+#define CLK_MUX_DDR4 61
+#define CLK_MUX_DDR5 62
+#define CLK_MUX_DDR6 63
+#define CLK_MUX_DDR7 64
+#define CLK_MUX_NOC_SYS 65
+#define CLK_MUX_TPU_SYS 66
+#define CLK_MUX_RP_SYS 67
+#define CLK_MUX_AP_SYS 68
+#define CLK_MUX_VC_SRC0 69
+#define CLK_MUX_VC_SRC1 70
+#define CLK_MUX_CXP_MAC 71
+#define CLK_GATE_AP_SYS 72
+#define CLK_GATE_RP_SYS 73
+#define CLK_GATE_TPU_SYS 74
+#define CLK_GATE_NOC_SYS 75
+#define CLK_GATE_VC_SRC0 76
+#define CLK_GATE_VC_SRC1 77
+#define CLK_GATE_DDR0 78
+#define CLK_GATE_DDR1 79
+#define CLK_GATE_DDR2 80
+#define CLK_GATE_DDR3 81
+#define CLK_GATE_DDR4 82
+#define CLK_GATE_DDR5 83
+#define CLK_GATE_DDR6 84
+#define CLK_GATE_DDR7 85
+#define CLK_GATE_TOP_50M 86
+#define CLK_GATE_SC_RX 87
+#define CLK_GATE_SC_RX_X0Y1 88
+#define CLK_GATE_TOP_AXI0 89
+#define CLK_GATE_INTC0 90
+#define CLK_GATE_INTC1 91
+#define CLK_GATE_INTC2 92
+#define CLK_GATE_INTC3 93
+#define CLK_GATE_MAILBOX0 94
+#define CLK_GATE_MAILBOX1 95
+#define CLK_GATE_MAILBOX2 96
+#define CLK_GATE_MAILBOX3 97
+#define CLK_GATE_TOP_AXI_HSPERI 98
+#define CLK_GATE_APB_TIMER 99
+#define CLK_GATE_TIMER0 100
+#define CLK_GATE_TIMER1 101
+#define CLK_GATE_TIMER2 102
+#define CLK_GATE_TIMER3 103
+#define CLK_GATE_TIMER4 104
+#define CLK_GATE_TIMER5 105
+#define CLK_GATE_TIMER6 106
+#define CLK_GATE_TIMER7 107
+#define CLK_GATE_CXP_CFG 108
+#define CLK_GATE_CXP_MAC 109
+#define CLK_GATE_CXP_TEST_PHY 110
+#define CLK_GATE_CXP_TEST_ETH_PHY 111
+#define CLK_GATE_PCIE_1G 112
+#define CLK_GATE_C2C0_TEST_PHY 113
+#define CLK_GATE_C2C1_TEST_PHY 114
+#define CLK_GATE_UART_500M 115
+#define CLK_GATE_APB_UART 116
+#define CLK_GATE_APB_SPI 117
+#define CLK_GATE_AHB_SPIFMC 118
+#define CLK_GATE_APB_I2C 119
+#define CLK_GATE_AXI_DBG_I2C 120
+#define CLK_GATE_GPIO_DB 121
+#define CLK_GATE_APB_GPIO_INTR 122
+#define CLK_GATE_APB_GPIO 123
+#define CLK_GATE_SD 124
+#define CLK_GATE_AXI_SD 125
+#define CLK_GATE_SD_100K 126
+#define CLK_GATE_EMMC 127
+#define CLK_GATE_AXI_EMMC 128
+#define CLK_GATE_EMMC_100K 129
+#define CLK_GATE_EFUSE 130
+#define CLK_GATE_APB_EFUSE 131
+#define CLK_GATE_SYSDMA_AXI 132
+#define CLK_GATE_TX_ETH0 133
+#define CLK_GATE_AXI_ETH0 134
+#define CLK_GATE_PTP_REF_I_ETH0 135
+#define CLK_GATE_REF_ETH0 136
+#define CLK_GATE_APB_RTC 137
+#define CLK_GATE_APB_PWM 138
+#define CLK_GATE_APB_WDT 139
+#define CLK_GATE_AXI_SRAM 140
+#define CLK_GATE_AHB_ROM 141
+#define CLK_GATE_PKA 142
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2044_CLK_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2044-pll.h b/include/dt-bindings/clock/sophgo,sg2044-pll.h
new file mode 100644
index 000000000000..817d45e700cc
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2044-pll.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2044_PLL_H__
+#define __DT_BINDINGS_SOPHGO_SG2044_PLL_H__
+
+#define CLK_FPLL0 0
+#define CLK_FPLL1 1
+#define CLK_FPLL2 2
+#define CLK_DPLL0 3
+#define CLK_DPLL1 4
+#define CLK_DPLL2 5
+#define CLK_DPLL3 6
+#define CLK_DPLL4 7
+#define CLK_DPLL5 8
+#define CLK_DPLL6 9
+#define CLK_DPLL7 10
+#define CLK_MPLL0 11
+#define CLK_MPLL1 12
+#define CLK_MPLL2 13
+#define CLK_MPLL3 14
+#define CLK_MPLL4 15
+#define CLK_MPLL5 16
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2044_PLL_H__ */
diff --git a/include/dt-bindings/clock/spacemit,k1-syscon.h b/include/dt-bindings/clock/spacemit,k1-syscon.h
new file mode 100644
index 000000000000..0f8b59d6753c
--- /dev/null
+++ b/include/dt-bindings/clock/spacemit,k1-syscon.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024-2025 Haylen Chu <heylenay@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_SPACEMIT_CCU_H_
+#define _DT_BINDINGS_SPACEMIT_CCU_H_
+
+/* APBS (PLL) clocks */
+#define CLK_PLL1 0
+#define CLK_PLL2 1
+#define CLK_PLL3 2
+#define CLK_PLL1_D2 3
+#define CLK_PLL1_D3 4
+#define CLK_PLL1_D4 5
+#define CLK_PLL1_D5 6
+#define CLK_PLL1_D6 7
+#define CLK_PLL1_D7 8
+#define CLK_PLL1_D8 9
+#define CLK_PLL1_D11 10
+#define CLK_PLL1_D13 11
+#define CLK_PLL1_D23 12
+#define CLK_PLL1_D64 13
+#define CLK_PLL1_D10_AUD 14
+#define CLK_PLL1_D100_AUD 15
+#define CLK_PLL2_D1 16
+#define CLK_PLL2_D2 17
+#define CLK_PLL2_D3 18
+#define CLK_PLL2_D4 19
+#define CLK_PLL2_D5 20
+#define CLK_PLL2_D6 21
+#define CLK_PLL2_D7 22
+#define CLK_PLL2_D8 23
+#define CLK_PLL3_D1 24
+#define CLK_PLL3_D2 25
+#define CLK_PLL3_D3 26
+#define CLK_PLL3_D4 27
+#define CLK_PLL3_D5 28
+#define CLK_PLL3_D6 29
+#define CLK_PLL3_D7 30
+#define CLK_PLL3_D8 31
+#define CLK_PLL3_80 32
+#define CLK_PLL3_40 33
+#define CLK_PLL3_20 34
+
+/* MPMU clocks */
+#define CLK_PLL1_307P2 0
+#define CLK_PLL1_76P8 1
+#define CLK_PLL1_61P44 2
+#define CLK_PLL1_153P6 3
+#define CLK_PLL1_102P4 4
+#define CLK_PLL1_51P2 5
+#define CLK_PLL1_51P2_AP 6
+#define CLK_PLL1_57P6 7
+#define CLK_PLL1_25P6 8
+#define CLK_PLL1_12P8 9
+#define CLK_PLL1_12P8_WDT 10
+#define CLK_PLL1_6P4 11
+#define CLK_PLL1_3P2 12
+#define CLK_PLL1_1P6 13
+#define CLK_PLL1_0P8 14
+#define CLK_PLL1_409P6 15
+#define CLK_PLL1_204P8 16
+#define CLK_PLL1_491 17
+#define CLK_PLL1_245P76 18
+#define CLK_PLL1_614 19
+#define CLK_PLL1_47P26 20
+#define CLK_PLL1_31P5 21
+#define CLK_PLL1_819 22
+#define CLK_PLL1_1228 23
+#define CLK_SLOW_UART 24
+#define CLK_SLOW_UART1 25
+#define CLK_SLOW_UART2 26
+#define CLK_WDT 27
+#define CLK_RIPC 28
+#define CLK_I2S_SYSCLK 29
+#define CLK_I2S_BCLK 30
+#define CLK_APB 31
+#define CLK_WDT_BUS 32
+#define CLK_I2S_153P6 33
+#define CLK_I2S_153P6_BASE 34
+#define CLK_I2S_SYSCLK_SRC 35
+#define CLK_I2S_BCLK_FACTOR 36
+
+/* MPMU resets */
+#define RESET_WDT 0
+
+/* APBC clocks */
+#define CLK_UART0 0
+#define CLK_UART2 1
+#define CLK_UART3 2
+#define CLK_UART4 3
+#define CLK_UART5 4
+#define CLK_UART6 5
+#define CLK_UART7 6
+#define CLK_UART8 7
+#define CLK_UART9 8
+#define CLK_GPIO 9
+#define CLK_PWM0 10
+#define CLK_PWM1 11
+#define CLK_PWM2 12
+#define CLK_PWM3 13
+#define CLK_PWM4 14
+#define CLK_PWM5 15
+#define CLK_PWM6 16
+#define CLK_PWM7 17
+#define CLK_PWM8 18
+#define CLK_PWM9 19
+#define CLK_PWM10 20
+#define CLK_PWM11 21
+#define CLK_PWM12 22
+#define CLK_PWM13 23
+#define CLK_PWM14 24
+#define CLK_PWM15 25
+#define CLK_PWM16 26
+#define CLK_PWM17 27
+#define CLK_PWM18 28
+#define CLK_PWM19 29
+#define CLK_SSP3 30
+#define CLK_RTC 31
+#define CLK_TWSI0 32
+#define CLK_TWSI1 33
+#define CLK_TWSI2 34
+#define CLK_TWSI4 35
+#define CLK_TWSI5 36
+#define CLK_TWSI6 37
+#define CLK_TWSI7 38
+#define CLK_TWSI8 39
+#define CLK_TIMERS1 40
+#define CLK_TIMERS2 41
+#define CLK_AIB 42
+#define CLK_ONEWIRE 43
+#define CLK_SSPA0 44
+#define CLK_SSPA1 45
+#define CLK_DRO 46
+#define CLK_IR 47
+#define CLK_TSEN 48
+#define CLK_IPC_AP2AUD 49
+#define CLK_CAN0 50
+#define CLK_CAN0_BUS 51
+#define CLK_UART0_BUS 52
+#define CLK_UART2_BUS 53
+#define CLK_UART3_BUS 54
+#define CLK_UART4_BUS 55
+#define CLK_UART5_BUS 56
+#define CLK_UART6_BUS 57
+#define CLK_UART7_BUS 58
+#define CLK_UART8_BUS 59
+#define CLK_UART9_BUS 60
+#define CLK_GPIO_BUS 61
+#define CLK_PWM0_BUS 62
+#define CLK_PWM1_BUS 63
+#define CLK_PWM2_BUS 64
+#define CLK_PWM3_BUS 65
+#define CLK_PWM4_BUS 66
+#define CLK_PWM5_BUS 67
+#define CLK_PWM6_BUS 68
+#define CLK_PWM7_BUS 69
+#define CLK_PWM8_BUS 70
+#define CLK_PWM9_BUS 71
+#define CLK_PWM10_BUS 72
+#define CLK_PWM11_BUS 73
+#define CLK_PWM12_BUS 74
+#define CLK_PWM13_BUS 75
+#define CLK_PWM14_BUS 76
+#define CLK_PWM15_BUS 77
+#define CLK_PWM16_BUS 78
+#define CLK_PWM17_BUS 79
+#define CLK_PWM18_BUS 80
+#define CLK_PWM19_BUS 81
+#define CLK_SSP3_BUS 82
+#define CLK_RTC_BUS 83
+#define CLK_TWSI0_BUS 84
+#define CLK_TWSI1_BUS 85
+#define CLK_TWSI2_BUS 86
+#define CLK_TWSI4_BUS 87
+#define CLK_TWSI5_BUS 88
+#define CLK_TWSI6_BUS 89
+#define CLK_TWSI7_BUS 90
+#define CLK_TWSI8_BUS 91
+#define CLK_TIMERS1_BUS 92
+#define CLK_TIMERS2_BUS 93
+#define CLK_AIB_BUS 94
+#define CLK_ONEWIRE_BUS 95
+#define CLK_SSPA0_BUS 96
+#define CLK_SSPA1_BUS 97
+#define CLK_TSEN_BUS 98
+#define CLK_IPC_AP2AUD_BUS 99
+#define CLK_SSPA0_I2S_BCLK 100
+#define CLK_SSPA1_I2S_BCLK 101
+
+/* APBC resets */
+#define RESET_UART0 0
+#define RESET_UART2 1
+#define RESET_UART3 2
+#define RESET_UART4 3
+#define RESET_UART5 4
+#define RESET_UART6 5
+#define RESET_UART7 6
+#define RESET_UART8 7
+#define RESET_UART9 8
+#define RESET_GPIO 9
+#define RESET_PWM0 10
+#define RESET_PWM1 11
+#define RESET_PWM2 12
+#define RESET_PWM3 13
+#define RESET_PWM4 14
+#define RESET_PWM5 15
+#define RESET_PWM6 16
+#define RESET_PWM7 17
+#define RESET_PWM8 18
+#define RESET_PWM9 19
+#define RESET_PWM10 20
+#define RESET_PWM11 21
+#define RESET_PWM12 22
+#define RESET_PWM13 23
+#define RESET_PWM14 24
+#define RESET_PWM15 25
+#define RESET_PWM16 26
+#define RESET_PWM17 27
+#define RESET_PWM18 28
+#define RESET_PWM19 29
+#define RESET_SSP3 30
+#define RESET_RTC 31
+#define RESET_TWSI0 32
+#define RESET_TWSI1 33
+#define RESET_TWSI2 34
+#define RESET_TWSI4 35
+#define RESET_TWSI5 36
+#define RESET_TWSI6 37
+#define RESET_TWSI7 38
+#define RESET_TWSI8 39
+#define RESET_TIMERS1 40
+#define RESET_TIMERS2 41
+#define RESET_AIB 42
+#define RESET_ONEWIRE 43
+#define RESET_SSPA0 44
+#define RESET_SSPA1 45
+#define RESET_DRO 46
+#define RESET_IR 47
+#define RESET_TSEN 48
+#define RESET_IPC_AP2AUD 49
+#define RESET_CAN0 50
+
+/* APMU clocks */
+#define CLK_CCI550 0
+#define CLK_CPU_C0_HI 1
+#define CLK_CPU_C0_CORE 2
+#define CLK_CPU_C0_ACE 3
+#define CLK_CPU_C0_TCM 4
+#define CLK_CPU_C1_HI 5
+#define CLK_CPU_C1_CORE 6
+#define CLK_CPU_C1_ACE 7
+#define CLK_CCIC_4X 8
+#define CLK_CCIC1PHY 9
+#define CLK_SDH_AXI 10
+#define CLK_SDH0 11
+#define CLK_SDH1 12
+#define CLK_SDH2 13
+#define CLK_USB_P1 14
+#define CLK_USB_AXI 15
+#define CLK_USB30 16
+#define CLK_QSPI 17
+#define CLK_QSPI_BUS 18
+#define CLK_DMA 19
+#define CLK_AES 20
+#define CLK_VPU 21
+#define CLK_GPU 22
+#define CLK_EMMC 23
+#define CLK_EMMC_X 24
+#define CLK_AUDIO 25
+#define CLK_HDMI 26
+#define CLK_PMUA_ACLK 27
+#define CLK_PCIE0_MASTER 28
+#define CLK_PCIE0_SLAVE 29
+#define CLK_PCIE0_DBI 30
+#define CLK_PCIE1_MASTER 31
+#define CLK_PCIE1_SLAVE 32
+#define CLK_PCIE1_DBI 33
+#define CLK_PCIE2_MASTER 34
+#define CLK_PCIE2_SLAVE 35
+#define CLK_PCIE2_DBI 36
+#define CLK_EMAC0_BUS 37
+#define CLK_EMAC0_PTP 38
+#define CLK_EMAC1_BUS 39
+#define CLK_EMAC1_PTP 40
+#define CLK_JPG 41
+#define CLK_CCIC2PHY 42
+#define CLK_CCIC3PHY 43
+#define CLK_CSI 44
+#define CLK_CAMM0 45
+#define CLK_CAMM1 46
+#define CLK_CAMM2 47
+#define CLK_ISP_CPP 48
+#define CLK_ISP_BUS 49
+#define CLK_ISP 50
+#define CLK_DPU_MCLK 51
+#define CLK_DPU_ESC 52
+#define CLK_DPU_BIT 53
+#define CLK_DPU_PXCLK 54
+#define CLK_DPU_HCLK 55
+#define CLK_DPU_SPI 56
+#define CLK_DPU_SPI_HBUS 57
+#define CLK_DPU_SPIBUS 58
+#define CLK_DPU_SPI_ACLK 59
+#define CLK_V2D 60
+#define CLK_EMMC_BUS 61
+
+/* APMU resets */
+#define RESET_CCIC_4X 0
+#define RESET_CCIC1_PHY 1
+#define RESET_SDH_AXI 2
+#define RESET_SDH0 3
+#define RESET_SDH1 4
+#define RESET_SDH2 5
+#define RESET_USBP1_AXI 6
+#define RESET_USB_AXI 7
+#define RESET_USB30_AHB 8
+#define RESET_USB30_VCC 9
+#define RESET_USB30_PHY 10
+#define RESET_QSPI 11
+#define RESET_QSPI_BUS 12
+#define RESET_DMA 13
+#define RESET_AES 14
+#define RESET_VPU 15
+#define RESET_GPU 16
+#define RESET_EMMC 17
+#define RESET_EMMC_X 18
+#define RESET_AUDIO_SYS 19
+#define RESET_AUDIO_MCU 20
+#define RESET_AUDIO_APMU 21
+#define RESET_HDMI 22
+#define RESET_PCIE0_MASTER 23
+#define RESET_PCIE0_SLAVE 24
+#define RESET_PCIE0_DBI 25
+#define RESET_PCIE0_GLOBAL 26
+#define RESET_PCIE1_MASTER 27
+#define RESET_PCIE1_SLAVE 28
+#define RESET_PCIE1_DBI 29
+#define RESET_PCIE1_GLOBAL 30
+#define RESET_PCIE2_MASTER 31
+#define RESET_PCIE2_SLAVE 32
+#define RESET_PCIE2_DBI 33
+#define RESET_PCIE2_GLOBAL 34
+#define RESET_EMAC0 35
+#define RESET_EMAC1 36
+#define RESET_JPG 37
+#define RESET_CCIC2PHY 38
+#define RESET_CCIC3PHY 39
+#define RESET_CSI 40
+#define RESET_ISP_CPP 41
+#define RESET_ISP_BUS 42
+#define RESET_ISP 43
+#define RESET_ISP_CI 44
+#define RESET_DPU_MCLK 45
+#define RESET_DPU_ESC 46
+#define RESET_DPU_HCLK 47
+#define RESET_DPU_SPIBUS 48
+#define RESET_DPU_SPI_HBUS 49
+#define RESET_V2D 50
+#define RESET_MIPI 51
+#define RESET_MC 52
+
+/* RCPU resets */
+#define RESET_RCPU_SSP0 0
+#define RESET_RCPU_I2C0 1
+#define RESET_RCPU_UART1 2
+#define RESET_RCPU_IR 3
+#define RESET_RCPU_CAN 4
+#define RESET_RCPU_UART0 5
+#define RESET_RCPU_HDMI_AUDIO 6
+
+/* RCPU2 resets */
+#define RESET_RCPU2_PWM0 0
+#define RESET_RCPU2_PWM1 1
+#define RESET_RCPU2_PWM2 2
+#define RESET_RCPU2_PWM3 3
+#define RESET_RCPU2_PWM4 4
+#define RESET_RCPU2_PWM5 5
+#define RESET_RCPU2_PWM6 6
+#define RESET_RCPU2_PWM7 7
+#define RESET_RCPU2_PWM8 8
+#define RESET_RCPU2_PWM9 9
+
+/* APBC2 resets */
+#define RESET_APBC2_UART1 0
+#define RESET_APBC2_SSP2 1
+#define RESET_APBC2_TWSI3 2
+#define RESET_APBC2_RTC 3
+#define RESET_APBC2_TIMERS0 4
+#define RESET_APBC2_KPC 5
+#define RESET_APBC2_GPIO 6
+
+#endif /* _DT_BINDINGS_SPACEMIT_CCU_H_ */
diff --git a/include/dt-bindings/clock/st,stm32mp21-rcc.h b/include/dt-bindings/clock/st,stm32mp21-rcc.h
new file mode 100644
index 000000000000..054b785f2796
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp21-rcc.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP21_CLKS_H_
+#define _DT_BINDINGS_STM32MP21_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL4_CK 10
+#define PLL5_CK 11
+#define PLL6_CK 12
+#define PLL7_CK 13
+#define PLL8_CK 14
+
+#define CK_CPU1 15
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 16
+#define CK_ICN_APB2 17
+#define CK_ICN_APB3 18
+#define CK_ICN_APB4 19
+#define CK_ICN_APB5 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM 89
+#define CK_BUS_FMC 90
+#define CK_BUS_ETH1 91
+#define CK_BUS_ETH2 92
+#define CK_BUS_DDRPHYC 93
+#define CK_BUS_SYSCPU1 94
+#define CK_BUS_HPDMA1 95
+#define CK_BUS_HPDMA2 96
+#define CK_BUS_HPDMA3 97
+#define CK_BUS_ADC1 98
+#define CK_BUS_ADC2 99
+#define CK_BUS_IPCC1 100
+#define CK_BUS_DCMIPSSI 101
+#define CK_BUS_CRC 102
+#define CK_BUS_MDF1 103
+#define CK_BUS_BKPSRAM 104
+#define CK_BUS_HASH1 105
+#define CK_BUS_HASH2 106
+#define CK_BUS_RNG1 107
+#define CK_BUS_RNG2 108
+#define CK_BUS_CRYP1 109
+#define CK_BUS_CRYP2 110
+#define CK_BUS_SAES 111
+#define CK_BUS_PKA 112
+#define CK_BUS_GPIOA 113
+#define CK_BUS_GPIOB 114
+#define CK_BUS_GPIOC 115
+#define CK_BUS_GPIOD 116
+#define CK_BUS_GPIOE 117
+#define CK_BUS_GPIOF 118
+#define CK_BUS_GPIOG 119
+#define CK_BUS_GPIOH 120
+#define CK_BUS_GPIOI 121
+#define CK_BUS_GPIOZ 122
+#define CK_BUS_RTC 124
+#define CK_BUS_LPUART1 125
+#define CK_BUS_LPTIM3 126
+#define CK_BUS_LPTIM4 127
+#define CK_BUS_LPTIM5 128
+#define CK_BUS_TIM2 129
+#define CK_BUS_TIM3 130
+#define CK_BUS_TIM4 131
+#define CK_BUS_TIM5 132
+#define CK_BUS_TIM6 133
+#define CK_BUS_TIM7 134
+#define CK_BUS_TIM10 135
+#define CK_BUS_TIM11 136
+#define CK_BUS_TIM12 137
+#define CK_BUS_TIM13 138
+#define CK_BUS_TIM14 139
+#define CK_BUS_LPTIM1 140
+#define CK_BUS_LPTIM2 141
+#define CK_BUS_SPI2 142
+#define CK_BUS_SPI3 143
+#define CK_BUS_SPDIFRX 144
+#define CK_BUS_USART2 145
+#define CK_BUS_USART3 146
+#define CK_BUS_UART4 147
+#define CK_BUS_UART5 148
+#define CK_BUS_I2C1 149
+#define CK_BUS_I2C2 150
+#define CK_BUS_I2C3 151
+#define CK_BUS_I3C1 152
+#define CK_BUS_I3C2 153
+#define CK_BUS_I3C3 154
+#define CK_BUS_TIM1 155
+#define CK_BUS_TIM8 156
+#define CK_BUS_TIM15 157
+#define CK_BUS_TIM16 158
+#define CK_BUS_TIM17 159
+#define CK_BUS_SAI1 160
+#define CK_BUS_SAI2 161
+#define CK_BUS_SAI3 162
+#define CK_BUS_SAI4 163
+#define CK_BUS_USART1 164
+#define CK_BUS_USART6 165
+#define CK_BUS_UART7 166
+#define CK_BUS_FDCAN 167
+#define CK_BUS_SPI1 168
+#define CK_BUS_SPI4 169
+#define CK_BUS_SPI5 170
+#define CK_BUS_SPI6 171
+#define CK_BUS_BSEC 172
+#define CK_BUS_IWDG1 173
+#define CK_BUS_IWDG2 174
+#define CK_BUS_IWDG3 175
+#define CK_BUS_IWDG4 176
+#define CK_BUS_WWDG1 177
+#define CK_BUS_VREF 178
+#define CK_BUS_DTS 179
+#define CK_BUS_SERC 180
+#define CK_BUS_HDP 181
+#define CK_BUS_DDRPERFM 182
+#define CK_BUS_OTG 183
+#define CK_BUS_LTDC 184
+#define CK_BUS_CSI 185
+#define CK_BUS_DCMIPP 186
+#define CK_BUS_DDRC 187
+#define CK_BUS_DDRCFG 188
+#define CK_BUS_STGEN 189
+#define CK_SYSDBG 190
+#define CK_KER_TIM2 191
+#define CK_KER_TIM3 192
+#define CK_KER_TIM4 193
+#define CK_KER_TIM5 194
+#define CK_KER_TIM6 195
+#define CK_KER_TIM7 196
+#define CK_KER_TIM10 197
+#define CK_KER_TIM11 198
+#define CK_KER_TIM12 199
+#define CK_KER_TIM13 200
+#define CK_KER_TIM14 201
+#define CK_KER_TIM1 202
+#define CK_KER_TIM8 203
+#define CK_KER_TIM15 204
+#define CK_KER_TIM16 205
+#define CK_KER_TIM17 206
+#define CK_BUS_SYSRAM 207
+#define CK_BUS_RETRAM 208
+#define CK_BUS_OSPI1 209
+#define CK_BUS_OTFD1 210
+#define CK_BUS_SRAM1 211
+#define CK_BUS_SDMMC1 212
+#define CK_BUS_SDMMC2 213
+#define CK_BUS_SDMMC3 214
+#define CK_BUS_DDR 215
+#define CK_BUS_RISAF4 216
+#define CK_BUS_USBHOHCI 217
+#define CK_BUS_USBHEHCI 218
+#define CK_KER_LPTIM1 219
+#define CK_KER_LPTIM2 220
+#define CK_KER_USART2 221
+#define CK_KER_UART4 222
+#define CK_KER_USART3 223
+#define CK_KER_UART5 224
+#define CK_KER_SPI2 225
+#define CK_KER_SPI3 226
+#define CK_KER_SPDIFRX 227
+#define CK_KER_I2C1 228
+#define CK_KER_I2C2 229
+#define CK_KER_I3C1 230
+#define CK_KER_I3C2 231
+#define CK_KER_I2C3 232
+#define CK_KER_I3C3 233
+#define CK_KER_SPI1 234
+#define CK_KER_SPI4 235
+#define CK_KER_SPI5 236
+#define CK_KER_SPI6 237
+#define CK_KER_USART1 238
+#define CK_KER_USART6 239
+#define CK_KER_UART7 240
+#define CK_KER_MDF1 241
+#define CK_KER_SAI1 242
+#define CK_KER_SAI2 243
+#define CK_KER_SAI3 244
+#define CK_KER_SAI4 245
+#define CK_KER_FDCAN 246
+#define CK_KER_CSI 247
+#define CK_KER_CSITXESC 248
+#define CK_KER_CSIPHY 249
+#define CK_KER_STGEN 250
+#define CK_KER_USB2PHY2EN 251
+#define CK_KER_LPUART1 252
+#define CK_KER_LPTIM3 253
+#define CK_KER_LPTIM4 254
+#define CK_KER_LPTIM5 255
+#define CK_KER_TSDBG 256
+#define CK_KER_TPIU 257
+#define CK_BUS_ETR 258
+#define CK_BUS_SYSATB 259
+#define CK_KER_ADC1 260
+#define CK_KER_ADC2 261
+#define CK_KER_OSPI1 262
+#define CK_KER_FMC 263
+#define CK_KER_SDMMC1 264
+#define CK_KER_SDMMC2 265
+#define CK_KER_SDMMC3 266
+#define CK_KER_ETH1 267
+#define CK_KER_ETH2 268
+#define CK_KER_ETH1PTP 269
+#define CK_KER_ETH2PTP 270
+#define CK_KER_USB2PHY1 271
+#define CK_KER_USB2PHY2 272
+#define CK_MCO1 273
+#define CK_MCO2 274
+#define CK_KER_DTS 275
+#define CK_ETH1_RX 276
+#define CK_ETH1_TX 277
+#define CK_ETH1_MAC 278
+#define CK_ETH2_RX 279
+#define CK_ETH2_TX 280
+#define CK_ETH2_MAC 281
+#define CK_ETH1_STP 282
+#define CK_ETH2_STP 283
+#define CK_KER_LTDC 284
+#define HSE_DIV2_CK 285
+#define CK_DBGMCU 286
+#define CK_DAP 287
+#define CK_KER_ETR 288
+#define CK_KER_STM 289
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_RTC 74
+#define CK_SCMI_RTCCK 75
+#define CK_SCMI_ICN_APB1 76
+#define CK_SCMI_ICN_APB2 77
+#define CK_SCMI_ICN_APB3 78
+#define CK_SCMI_ICN_APB4 79
+#define CK_SCMI_ICN_APB5 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_BUS_ETR 86
+#define CK_SCMI_FMC 87
+#define CK_SCMI_GPIOA 88
+#define CK_SCMI_GPIOB 89
+#define CK_SCMI_GPIOC 90
+#define CK_SCMI_GPIOD 91
+#define CK_SCMI_GPIOE 92
+#define CK_SCMI_GPIOF 93
+#define CK_SCMI_GPIOG 94
+#define CK_SCMI_GPIOH 95
+#define CK_SCMI_GPIOI 96
+#define CK_SCMI_GPIOZ 97
+#define CK_SCMI_HPDMA1 98
+#define CK_SCMI_HPDMA2 99
+#define CK_SCMI_HPDMA3 100
+#define CK_SCMI_IPCC1 101
+#define CK_SCMI_RETRAM 102
+#define CK_SCMI_SRAM1 103
+#define CK_SCMI_SYSRAM 104
+#define CK_SCMI_OSPI1 105
+#define CK_SCMI_TPIU 106
+#define CK_SCMI_SYSDBG 107
+#define CK_SCMI_SYSATB 108
+#define CK_SCMI_TSDBG 109
+#define CK_SCMI_BUS_STM 110
+#define CK_SCMI_KER_STM 111
+#define CK_SCMI_KER_ETR 112
+
+#endif /* _DT_BINDINGS_STM32MP21_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index e5dad050d518..b6ff9c68cb3f 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -10,7 +10,7 @@
* List of clocks which are not derived from system clock (SYSCLOCK)
*
* The index of these clocks is the secondary index of DT bindings
- * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt)
+ * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.yaml)
*
* e.g:
<assigned-clocks = <&rcc 1 CLK_LSE>;
diff --git a/include/dt-bindings/clock/stm32h7-clks.h b/include/dt-bindings/clock/stm32h7-clks.h
index 6637272b3242..330b39c2c303 100644
--- a/include/dt-bindings/clock/stm32h7-clks.h
+++ b/include/dt-bindings/clock/stm32h7-clks.h
@@ -126,8 +126,8 @@
#define ADC3_CK 128
#define DSI_CK 129
#define LTDC_CK 130
-#define USART8_CK 131
-#define USART7_CK 132
+#define UART8_CK 131
+#define UART7_CK 132
#define HDMICEC_CK 133
#define I2C3_CK 134
#define I2C2_CK 135
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 175892189e9d..4f220ea7a23c 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -44,7 +44,9 @@
#define _DT_BINDINGS_CLK_SUN50I_A64_H_
#define CLK_PLL_VIDEO0 7
+#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_PERIPH0 11
+#define CLK_PLL_MIPI 17
#define CLK_CPUX 21
#define CLK_BUS_MIPI_DSI 28
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 6f8f01e67628..6889405f9fec 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -112,5 +112,10 @@
#define CLK_HDCP 126
#define CLK_BUS_HDCP 127
#define CLK_PLL_SYSTEM_32K 128
+#define CLK_BUS_GPADC 129
+#define CLK_TCON_LCD0 130
+#define CLK_BUS_TCON_LCD0 131
+#define CLK_TCON_LCD1 132
+#define CLK_BUS_TCON_LCD1 133
#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-ccu.h b/include/dt-bindings/clock/sun55i-a523-ccu.h
new file mode 100644
index 000000000000..54808fcfd556
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-ccu.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_
+
+#define CLK_PLL_DDR0 0
+#define CLK_PLL_PERIPH0_4X 1
+#define CLK_PLL_PERIPH0_2X 2
+#define CLK_PLL_PERIPH0_800M 3
+#define CLK_PLL_PERIPH0_480M 4
+#define CLK_PLL_PERIPH0_600M 5
+#define CLK_PLL_PERIPH0_400M 6
+#define CLK_PLL_PERIPH0_300M 7
+#define CLK_PLL_PERIPH0_200M 8
+#define CLK_PLL_PERIPH0_160M 9
+#define CLK_PLL_PERIPH0_150M 10
+#define CLK_PLL_PERIPH1_4X 11
+#define CLK_PLL_PERIPH1_2X 12
+#define CLK_PLL_PERIPH1_800M 13
+#define CLK_PLL_PERIPH1_480M 14
+#define CLK_PLL_PERIPH1_600M 15
+#define CLK_PLL_PERIPH1_400M 16
+#define CLK_PLL_PERIPH1_300M 17
+#define CLK_PLL_PERIPH1_200M 18
+#define CLK_PLL_PERIPH1_160M 19
+#define CLK_PLL_PERIPH1_150M 20
+#define CLK_PLL_GPU 21
+#define CLK_PLL_VIDEO0_8X 22
+#define CLK_PLL_VIDEO0_4X 23
+#define CLK_PLL_VIDEO0_3X 24
+#define CLK_PLL_VIDEO1_8X 25
+#define CLK_PLL_VIDEO1_4X 26
+#define CLK_PLL_VIDEO1_3X 27
+#define CLK_PLL_VIDEO2_8X 28
+#define CLK_PLL_VIDEO2_4X 29
+#define CLK_PLL_VIDEO2_3X 30
+#define CLK_PLL_VIDEO3_8X 31
+#define CLK_PLL_VIDEO3_4X 32
+#define CLK_PLL_VIDEO3_3X 33
+#define CLK_PLL_VE 34
+#define CLK_PLL_AUDIO0_4X 35
+#define CLK_PLL_AUDIO0_2X 36
+#define CLK_PLL_AUDIO0 37
+#define CLK_PLL_NPU_4X 38
+#define CLK_PLL_NPU_2X 39
+#define CLK_PLL_NPU 40
+#define CLK_AHB 41
+#define CLK_APB0 42
+#define CLK_APB1 43
+#define CLK_MBUS 44
+#define CLK_DE 45
+#define CLK_BUS_DE 46
+#define CLK_DI 47
+#define CLK_BUS_DI 48
+#define CLK_G2D 49
+#define CLK_BUS_G2D 50
+#define CLK_GPU 51
+#define CLK_BUS_GPU 52
+#define CLK_CE 53
+#define CLK_BUS_CE 54
+#define CLK_BUS_CE_SYS 55
+#define CLK_VE 56
+#define CLK_BUS_VE 57
+#define CLK_BUS_DMA 58
+#define CLK_BUS_MSGBOX 59
+#define CLK_BUS_SPINLOCK 60
+#define CLK_HSTIMER0 61
+#define CLK_HSTIMER1 62
+#define CLK_HSTIMER2 63
+#define CLK_HSTIMER3 64
+#define CLK_HSTIMER4 65
+#define CLK_HSTIMER5 66
+#define CLK_BUS_HSTIMER 67
+#define CLK_BUS_DBG 68
+#define CLK_BUS_PWM0 69
+#define CLK_BUS_PWM1 70
+#define CLK_IOMMU 71
+#define CLK_BUS_IOMMU 72
+#define CLK_DRAM 73
+#define CLK_MBUS_DMA 74
+#define CLK_MBUS_VE 75
+#define CLK_MBUS_CE 76
+#define CLK_MBUS_CSI 77
+#define CLK_MBUS_ISP 78
+#define CLK_MBUS_EMAC1 79
+#define CLK_BUS_DRAM 80
+#define CLK_NAND0 81
+#define CLK_NAND1 82
+#define CLK_BUS_NAND 83
+#define CLK_MMC0 84
+#define CLK_MMC1 85
+#define CLK_MMC2 86
+#define CLK_BUS_SYSDAP 87
+#define CLK_BUS_MMC0 88
+#define CLK_BUS_MMC1 89
+#define CLK_BUS_MMC2 90
+#define CLK_BUS_UART0 91
+#define CLK_BUS_UART1 92
+#define CLK_BUS_UART2 93
+#define CLK_BUS_UART3 94
+#define CLK_BUS_UART4 95
+#define CLK_BUS_UART5 96
+#define CLK_BUS_UART6 97
+#define CLK_BUS_UART7 98
+#define CLK_BUS_I2C0 99
+#define CLK_BUS_I2C1 100
+#define CLK_BUS_I2C2 101
+#define CLK_BUS_I2C3 102
+#define CLK_BUS_I2C4 103
+#define CLK_BUS_I2C5 104
+#define CLK_BUS_CAN 105
+#define CLK_SPI0 106
+#define CLK_SPI1 107
+#define CLK_SPI2 108
+#define CLK_SPIFC 109
+#define CLK_BUS_SPI0 110
+#define CLK_BUS_SPI1 111
+#define CLK_BUS_SPI2 112
+#define CLK_BUS_SPIFC 113
+#define CLK_EMAC0_25M 114
+#define CLK_EMAC1_25M 115
+#define CLK_BUS_EMAC0 116
+#define CLK_BUS_EMAC1 117
+#define CLK_IR_RX 118
+#define CLK_BUS_IR_RX 119
+#define CLK_IR_TX 120
+#define CLK_BUS_IR_TX 121
+#define CLK_GPADC0 122
+#define CLK_GPADC1 123
+#define CLK_BUS_GPADC0 124
+#define CLK_BUS_GPADC1 125
+#define CLK_BUS_THS 126
+#define CLK_USB_OHCI0 127
+#define CLK_USB_OHCI1 128
+#define CLK_BUS_OHCI0 129
+#define CLK_BUS_OHCI1 130
+#define CLK_BUS_EHCI0 131
+#define CLK_BUS_EHCI1 132
+#define CLK_BUS_OTG 133
+#define CLK_BUS_LRADC 134
+#define CLK_PCIE_AUX 135
+#define CLK_BUS_DISPLAY0_TOP 136
+#define CLK_BUS_DISPLAY1_TOP 137
+#define CLK_HDMI_24M 138
+#define CLK_HDMI_CEC_32K 139
+#define CLK_HDMI_CEC 140
+#define CLK_BUS_HDMI 141
+#define CLK_MIPI_DSI0 142
+#define CLK_MIPI_DSI1 143
+#define CLK_BUS_MIPI_DSI0 144
+#define CLK_BUS_MIPI_DSI1 145
+#define CLK_TCON_LCD0 146
+#define CLK_TCON_LCD1 147
+#define CLK_TCON_LCD2 148
+#define CLK_COMBOPHY_DSI0 149
+#define CLK_COMBOPHY_DSI1 150
+#define CLK_BUS_TCON_LCD0 151
+#define CLK_BUS_TCON_LCD1 152
+#define CLK_BUS_TCON_LCD2 153
+#define CLK_TCON_TV0 154
+#define CLK_TCON_TV1 155
+#define CLK_BUS_TCON_TV0 156
+#define CLK_BUS_TCON_TV1 157
+#define CLK_EDP 158
+#define CLK_BUS_EDP 159
+#define CLK_LEDC 160
+#define CLK_BUS_LEDC 161
+#define CLK_CSI_TOP 162
+#define CLK_CSI_MCLK0 163
+#define CLK_CSI_MCLK1 164
+#define CLK_CSI_MCLK2 165
+#define CLK_CSI_MCLK3 166
+#define CLK_BUS_CSI 167
+#define CLK_ISP 168
+#define CLK_DSP 169
+#define CLK_FANOUT_24M 170
+#define CLK_FANOUT_12M 171
+#define CLK_FANOUT_16M 172
+#define CLK_FANOUT_25M 173
+#define CLK_FANOUT_27M 174
+#define CLK_FANOUT_PCLK 175
+#define CLK_FANOUT0 176
+#define CLK_FANOUT1 177
+#define CLK_FANOUT2 178
+#define CLK_NPU 179
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..6efc6bc7e11a
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+
+#define CLK_MCU_PLL_AUDIO1 0
+#define CLK_MCU_PLL_AUDIO1_DIV2 1
+#define CLK_MCU_PLL_AUDIO1_DIV5 2
+#define CLK_MCU_AUDIO_OUT 3
+#define CLK_MCU_DSP 4
+#define CLK_MCU_I2S0 5
+#define CLK_MCU_I2S1 6
+#define CLK_MCU_I2S2 7
+#define CLK_MCU_I2S3 8
+#define CLK_MCU_I2S3_ASRC 9
+#define CLK_BUS_MCU_I2S0 10
+#define CLK_BUS_MCU_I2S1 11
+#define CLK_BUS_MCU_I2S2 12
+#define CLK_BUS_MCU_I2S3 13
+#define CLK_MCU_SPDIF_TX 14
+#define CLK_MCU_SPDIF_RX 15
+#define CLK_BUS_MCU_SPDIF 16
+#define CLK_MCU_DMIC 17
+#define CLK_BUS_MCU_DMIC 18
+#define CLK_MCU_AUDIO_CODEC_DAC 19
+#define CLK_MCU_AUDIO_CODEC_ADC 20
+#define CLK_BUS_MCU_AUDIO_CODEC 21
+#define CLK_BUS_MCU_DSP_MSGBOX 22
+#define CLK_BUS_MCU_DSP_CFG 23
+#define CLK_BUS_MCU_NPU_HCLK 24
+#define CLK_BUS_MCU_NPU_ACLK 25
+#define CLK_MCU_TIMER0 26
+#define CLK_MCU_TIMER1 27
+#define CLK_MCU_TIMER2 28
+#define CLK_MCU_TIMER3 29
+#define CLK_MCU_TIMER4 30
+#define CLK_MCU_TIMER5 31
+#define CLK_BUS_MCU_TIMER 32
+#define CLK_BUS_MCU_DMA 33
+#define CLK_MCU_TZMA0 34
+#define CLK_MCU_TZMA1 35
+#define CLK_BUS_MCU_PUBSRAM 36
+#define CLK_MCU_MBUS_DMA 37
+#define CLK_MCU_MBUS 38
+#define CLK_MCU_RISCV 39
+#define CLK_BUS_MCU_RISCV_CFG 40
+#define CLK_BUS_MCU_RISCV_MSGBOX 41
+#define CLK_MCU_PWM0 42
+#define CLK_BUS_MCU_PWM0 43
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-r-ccu.h b/include/dt-bindings/clock/sun55i-a523-r-ccu.h
new file mode 100644
index 000000000000..365647499b9a
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-r-ccu.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_
+
+#define CLK_R_AHB 0
+#define CLK_R_APB0 1
+#define CLK_R_APB1 2
+#define CLK_R_TIMER0 3
+#define CLK_R_TIMER1 4
+#define CLK_R_TIMER2 5
+#define CLK_BUS_R_TIMER 6
+#define CLK_BUS_R_TWD 7
+#define CLK_R_PWMCTRL 8
+#define CLK_BUS_R_PWMCTRL 9
+#define CLK_R_SPI 10
+#define CLK_BUS_R_SPI 11
+#define CLK_BUS_R_SPINLOCK 12
+#define CLK_BUS_R_MSGBOX 13
+#define CLK_BUS_R_UART0 14
+#define CLK_BUS_R_UART1 15
+#define CLK_BUS_R_I2C0 16
+#define CLK_BUS_R_I2C1 17
+#define CLK_BUS_R_I2C2 18
+#define CLK_BUS_R_PPU0 19
+#define CLK_BUS_R_PPU1 20
+#define CLK_BUS_R_CPU_BIST 21
+#define CLK_R_IR_RX 22
+#define CLK_BUS_R_IR_RX 23
+#define CLK_BUS_R_DMA 24
+#define CLK_BUS_R_RTC 25
+#define CLK_BUS_R_CPUCFG 26
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h
index 014ac6123d17..c4055629c9f9 100644
--- a/include/dt-bindings/clock/sun8i-v3s-ccu.h
+++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h
@@ -96,7 +96,7 @@
#define CLK_TCON0 64
#define CLK_CSI_MISC 65
#define CLK_CSI0_MCLK 66
-#define CLK_CSI1_SCLK 67
+#define CLK_CSI_SCLK 67
#define CLK_CSI1_MCLK 68
#define CLK_VE 69
#define CLK_AC_DIG 70
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index f193663e6f28..763b81f80908 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -271,6 +271,7 @@
#define TEGRA30_CLK_AUDIO3_MUX 306
#define TEGRA30_CLK_AUDIO4_MUX 307
#define TEGRA30_CLK_SPDIF_MUX 308
-#define TEGRA30_CLK_CLK_MAX 309
+#define TEGRA30_CLK_CSIA_PAD 309
+#define TEGRA30_CLK_CSIB_PAD 310
#endif /* _DT_BINDINGS_CLOCK_TEGRA30_CAR_H */
diff --git a/include/dt-bindings/clock/thead,th1520-clk-ap.h b/include/dt-bindings/clock/thead,th1520-clk-ap.h
new file mode 100644
index 000000000000..09a9aa7b3ab1
--- /dev/null
+++ b/include/dt-bindings/clock/thead,th1520-clk-ap.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Vivo Communication Technology Co. Ltd.
+ * Authors: Yangtao Li <frank.li@vivo.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_TH1520_H_
+#define _DT_BINDINGS_CLK_TH1520_H_
+
+#define CLK_CPU_PLL0 0
+#define CLK_CPU_PLL1 1
+#define CLK_GMAC_PLL 2
+#define CLK_VIDEO_PLL 3
+#define CLK_DPU0_PLL 4
+#define CLK_DPU1_PLL 5
+#define CLK_TEE_PLL 6
+#define CLK_C910_I0 7
+#define CLK_C910 8
+#define CLK_BROM 9
+#define CLK_BMU 10
+#define CLK_AHB2_CPUSYS_HCLK 11
+#define CLK_APB3_CPUSYS_PCLK 12
+#define CLK_AXI4_CPUSYS2_ACLK 13
+#define CLK_AON2CPU_A2X 14
+#define CLK_X2X_CPUSYS 15
+#define CLK_AXI_ACLK 16
+#define CLK_CPU2AON_X2H 17
+#define CLK_PERI_AHB_HCLK 18
+#define CLK_CPU2PERI_X2H 19
+#define CLK_PERI_APB_PCLK 20
+#define CLK_PERI2APB_PCLK 21
+#define CLK_PERISYS_APB1_HCLK 22
+#define CLK_PERISYS_APB2_HCLK 23
+#define CLK_PERISYS_APB3_HCLK 24
+#define CLK_PERISYS_APB4_HCLK 25
+#define CLK_OSC12M 26
+#define CLK_OUT1 27
+#define CLK_OUT2 28
+#define CLK_OUT3 29
+#define CLK_OUT4 30
+#define CLK_APB_PCLK 31
+#define CLK_NPU 32
+#define CLK_NPU_AXI 33
+#define CLK_VI 34
+#define CLK_VI_AHB 35
+#define CLK_VO_AXI 36
+#define CLK_VP_APB 37
+#define CLK_VP_AXI 38
+#define CLK_CPU2VP 39
+#define CLK_VENC 40
+#define CLK_DPU0 41
+#define CLK_DPU1 42
+#define CLK_EMMC_SDIO 43
+#define CLK_GMAC1 44
+#define CLK_PADCTRL1 45
+#define CLK_DSMART 46
+#define CLK_PADCTRL0 47
+#define CLK_GMAC_AXI 48
+#define CLK_GPIO3 49
+#define CLK_GMAC0 50
+#define CLK_PWM 51
+#define CLK_QSPI0 52
+#define CLK_QSPI1 53
+#define CLK_SPI 54
+#define CLK_UART0_PCLK 55
+#define CLK_UART1_PCLK 56
+#define CLK_UART2_PCLK 57
+#define CLK_UART3_PCLK 58
+#define CLK_UART4_PCLK 59
+#define CLK_UART5_PCLK 60
+#define CLK_GPIO0 61
+#define CLK_GPIO1 62
+#define CLK_GPIO2 63
+#define CLK_I2C0 64
+#define CLK_I2C1 65
+#define CLK_I2C2 66
+#define CLK_I2C3 67
+#define CLK_I2C4 68
+#define CLK_I2C5 69
+#define CLK_SPINLOCK 70
+#define CLK_DMA 71
+#define CLK_MBOX0 72
+#define CLK_MBOX1 73
+#define CLK_MBOX2 74
+#define CLK_MBOX3 75
+#define CLK_WDT0 76
+#define CLK_WDT1 77
+#define CLK_TIMER0 78
+#define CLK_TIMER1 79
+#define CLK_SRAM0 80
+#define CLK_SRAM1 81
+#define CLK_SRAM2 82
+#define CLK_SRAM3 83
+#define CLK_PLL_GMAC_100M 84
+#define CLK_UART_SCLK 85
+
+/* VO clocks */
+#define CLK_AXI4_VO_ACLK 0
+#define CLK_GPU_MEM 1
+#define CLK_GPU_CORE 2
+#define CLK_GPU_CFG_ACLK 3
+#define CLK_DPU_PIXELCLK0 4
+#define CLK_DPU_PIXELCLK1 5
+#define CLK_DPU_HCLK 6
+#define CLK_DPU_ACLK 7
+#define CLK_DPU_CCLK 8
+#define CLK_HDMI_SFR 9
+#define CLK_HDMI_PCLK 10
+#define CLK_HDMI_CEC 11
+#define CLK_MIPI_DSI0_PCLK 12
+#define CLK_MIPI_DSI1_PCLK 13
+#define CLK_MIPI_DSI0_CFG 14
+#define CLK_MIPI_DSI1_CFG 15
+#define CLK_MIPI_DSI0_REFCLK 16
+#define CLK_MIPI_DSI1_REFCLK 17
+#define CLK_HDMI_I2S 18
+#define CLK_X2H_DPU1_ACLK 19
+#define CLK_X2H_DPU_ACLK 20
+#define CLK_AXI4_VO_PCLK 21
+#define CLK_IOPMP_VOSYS_DPU_PCLK 22
+#define CLK_IOPMP_VOSYS_DPU1_PCLK 23
+#define CLK_IOPMP_VOSYS_GPU_PCLK 24
+#define CLK_IOPMP_DPU1_ACLK 25
+#define CLK_IOPMP_DPU_ACLK 26
+#define CLK_IOPMP_GPU_ACLK 27
+#define CLK_MIPIDSI0_PIXCLK 28
+#define CLK_MIPIDSI1_PIXCLK 29
+#define CLK_HDMI_PIXCLK 30
+
+#endif
diff --git a/include/dt-bindings/clock/toshiba,tmpv770x.h b/include/dt-bindings/clock/toshiba,tmpv770x.h
index 5fce713001fd..a36c89266686 100644
--- a/include/dt-bindings/clock/toshiba,tmpv770x.h
+++ b/include/dt-bindings/clock/toshiba,tmpv770x.h
@@ -11,7 +11,6 @@
#define TMPV770X_PLL_PIDDRCPLL 4
#define TMPV770X_PLL_PIVOIFPLL 5
#define TMPV770X_PLL_PIIMGERPLL 6
-#define TMPV770X_NR_PLL 7
/* Clocks */
#define TMPV770X_CLK_PIPLL1_DIV1 0
@@ -141,7 +140,9 @@
#define TMPV770X_CLK_PIREFCLK 124
#define TMPV770X_CLK_SBUS 125
#define TMPV770X_CLK_BUSLCK 126
-#define TMPV770X_NR_CLK 127
+#define TMPV770X_CLK_VIIFBS1_L2ISP 127
+#define TMPV770X_CLK_VIIFBS1_L1ISP 128
+#define TMPV770X_CLK_VIIFBS1_PROC 129
/* Reset */
#define TMPV770X_RESET_PIETHER_2P5M 0
@@ -176,6 +177,13 @@
#define TMPV770X_RESET_PIPCMIF 29
#define TMPV770X_RESET_PICKMON 30
#define TMPV770X_RESET_SBUSCLK 31
-#define TMPV770X_NR_RESET 32
+#define TMPV770X_RESET_VIIFBS0 32
+#define TMPV770X_RESET_VIIFBS0_APB 33
+#define TMPV770X_RESET_VIIFBS0_L2ISP 34
+#define TMPV770X_RESET_VIIFBS0_L1ISP 35
+#define TMPV770X_RESET_VIIFBS1 36
+#define TMPV770X_RESET_VIIFBS1_APB 37
+#define TMPV770X_RESET_VIIFBS1_L2ISP 38
+#define TMPV770X_RESET_VIIFBS1_L1ISP 39
#endif /*_DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/clock/xlnx-zynqmp-clk.h b/include/dt-bindings/clock/xlnx-zynqmp-clk.h
index cdc4c0b9a374..f0f7ddd3dcbd 100644
--- a/include/dt-bindings/clock/xlnx-zynqmp-clk.h
+++ b/include/dt-bindings/clock/xlnx-zynqmp-clk.h
@@ -9,6 +9,13 @@
#ifndef _DT_BINDINGS_CLK_ZYNQMP_H
#define _DT_BINDINGS_CLK_ZYNQMP_H
+/*
+ * These bindings are deprecated: rather than describing hardware, they
+ * only mirror raw firmware clock IDs. Include the header from the DTS
+ * source directory instead.
+ */
+#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
+
#define IOPLL 0
#define RPLL 1
#define APLL 2
diff --git a/include/dt-bindings/gpio/tegra256-gpio.h b/include/dt-bindings/gpio/tegra256-gpio.h
new file mode 100644
index 000000000000..a0353a302aeb
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra256-gpio.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for the nvidia,tegra256-gpio DT binding.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID.
+ * The macros below provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA256_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA256_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA256_MAIN_GPIO_PORT_A 0
+#define TEGRA256_MAIN_GPIO_PORT_B 1
+#define TEGRA256_MAIN_GPIO_PORT_C 2
+#define TEGRA256_MAIN_GPIO_PORT_D 3
+
+#define TEGRA256_MAIN_GPIO(port, offset) \
+ ((TEGRA256_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+#endif
+
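The comment block above defines a two-cell GPIO specifier: the first cell is the GPIO ID built from a port and an offset, and the second cell carries the standard flags from gpio.h. As a usage sketch in DTS (the property, the controller label &gpio_main, and the pin choice are illustrative assumptions, not part of this patch):

	#include <dt-bindings/gpio/tegra256-gpio.h>

	/* Port D, offset 2 => GPIO ID 26; active-low flag comes from gpio.h */
	reset-gpios = <&gpio_main TEGRA256_MAIN_GPIO(D, 2) GPIO_ACTIVE_LOW>;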
diff --git a/include/dt-bindings/i3c/i3c.h b/include/dt-bindings/i3c/i3c.h
new file mode 100644
index 000000000000..373439218bba
--- /dev/null
+++ b/include/dt-bindings/i3c/i3c.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _DT_BINDINGS_I3C_I3C_H
+#define _DT_BINDINGS_I3C_I3C_H
+
+#define I2C_FM (1 << 4)
+#define I2C_FM_PLUS (0 << 4)
+
+#define I2C_FILTER (0 << 5)
+#define I2C_NO_FILTER_HIGH_FREQUENCY (1 << 5)
+#define I2C_NO_FILTER_LOW_FREQUENCY (2 << 5)
+
+#endif
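These values appear to follow the I3C specification's LVR (Legacy Virtual Register) encoding for I2C devices attached to an I3C bus: bit 4 distinguishes Fast Mode from Fast Mode Plus, and bits 7:5 carry the legacy device index describing spike-filter capability. They would go in the third reg cell of such a device. A hedged sketch (the EEPROM node and address are illustrative):

	#include <dt-bindings/i3c/i3c.h>

	eeprom@57 {
		compatible = "atmel,24c01";
		/* third cell: LVR for an FM-mode device with a 50 ns spike filter */
		reg = <0x57 0x0 (I2C_FM | I2C_FILTER)>;
	};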
diff --git a/include/dt-bindings/iio/adc/adi,ad4695.h b/include/dt-bindings/iio/adc/adi,ad4695.h
new file mode 100644
index 000000000000..fea4525d2710
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad4695.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD4695_H
+#define _DT_BINDINGS_ADI_AD4695_H
+
+#define AD4695_COMMON_MODE_REFGND 0xFF
+#define AD4695_COMMON_MODE_COM 0xFE
+
+#define AD4695_TRIGGER_EVENT_BUSY 0
+#define AD4695_TRIGGER_EVENT_ALERT 1
+
+#define AD4695_TRIGGER_PIN_GP0 0
+#define AD4695_TRIGGER_PIN_GP2 2
+#define AD4695_TRIGGER_PIN_GP3 3
+
+#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/dt-bindings/iio/adc/adi,ad7606.h b/include/dt-bindings/iio/adc/adi,ad7606.h
new file mode 100644
index 000000000000..f38a6d72b6dc
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad7606.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD7606_H
+#define _DT_BINDINGS_ADI_AD7606_H
+
+#define AD7606_TRIGGER_EVENT_BUSY 0
+#define AD7606_TRIGGER_EVENT_FRSTDATA 1
+
+#endif /* _DT_BINDINGS_ADI_AD7606_H */
diff --git a/include/dt-bindings/iio/adc/adi,ad7768-1.h b/include/dt-bindings/iio/adc/adi,ad7768-1.h
new file mode 100644
index 000000000000..34d92856a50b
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad7768-1.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD7768_1_H
+#define _DT_BINDINGS_ADI_AD7768_1_H
+
+#define AD7768_TRIGGER_SOURCE_SYNC_OUT 0
+#define AD7768_TRIGGER_SOURCE_GPIO3 1
+#define AD7768_TRIGGER_SOURCE_DRDY 2
+
+#endif /* _DT_BINDINGS_ADI_AD7768_1_H */
diff --git a/include/dt-bindings/iio/adc/gehc,pmc-adc.h b/include/dt-bindings/iio/adc/gehc,pmc-adc.h
new file mode 100644
index 000000000000..2f291e3c76ae
--- /dev/null
+++ b/include/dt-bindings/iio/adc/gehc,pmc-adc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H
+#define _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H
+
+/* ADC channel type */
+#define GEHC_PMC_ADC_VOLTAGE 0
+#define GEHC_PMC_ADC_CURRENT 1
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
new file mode 100644
index 000000000000..03ebb1d23953
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6357_AUXADC_BATADC 0
+#define MT6357_AUXADC_ISENSE 1
+#define MT6357_AUXADC_VCDT 2
+#define MT6357_AUXADC_BAT_TEMP 3
+#define MT6357_AUXADC_CHIP_TEMP 4
+#define MT6357_AUXADC_ACCDET 5
+#define MT6357_AUXADC_VDCXO 6
+#define MT6357_AUXADC_TSX_TEMP 7
+#define MT6357_AUXADC_HPOFS_CAL 8
+#define MT6357_AUXADC_DCXO_TEMP 9
+#define MT6357_AUXADC_VCORE_TEMP 10
+#define MT6357_AUXADC_VPROC_TEMP 11
+#define MT6357_AUXADC_VBAT 12
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
new file mode 100644
index 000000000000..efa08398fafd
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6358_AUXADC_BATADC 0
+#define MT6358_AUXADC_VCDT 1
+#define MT6358_AUXADC_BAT_TEMP 2
+#define MT6358_AUXADC_CHIP_TEMP 3
+#define MT6358_AUXADC_ACCDET 4
+#define MT6358_AUXADC_VDCXO 5
+#define MT6358_AUXADC_TSX_TEMP 6
+#define MT6358_AUXADC_HPOFS_CAL 7
+#define MT6358_AUXADC_DCXO_TEMP 8
+#define MT6358_AUXADC_VBIF 9
+#define MT6358_AUXADC_VCORE_TEMP 10
+#define MT6358_AUXADC_VPROC_TEMP 11
+#define MT6358_AUXADC_VGPU_TEMP 12
+#define MT6358_AUXADC_VBAT 13
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
new file mode 100644
index 000000000000..59826393ee7e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6359_AUXADC_BATADC 0
+#define MT6359_AUXADC_BAT_TEMP 1
+#define MT6359_AUXADC_CHIP_TEMP 2
+#define MT6359_AUXADC_ACCDET 3
+#define MT6359_AUXADC_VDCXO 4
+#define MT6359_AUXADC_TSX_TEMP 5
+#define MT6359_AUXADC_HPOFS_CAL 6
+#define MT6359_AUXADC_DCXO_TEMP 7
+#define MT6359_AUXADC_VBIF 8
+#define MT6359_AUXADC_VCORE_TEMP 9
+#define MT6359_AUXADC_VPROC_TEMP 10
+#define MT6359_AUXADC_VGPU_TEMP 11
+#define MT6359_AUXADC_VBAT 12
+#define MT6359_AUXADC_IBAT 13
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h
new file mode 100644
index 000000000000..92d135477d0e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6363_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6363_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6363_AUXADC_BATADC 0
+#define MT6363_AUXADC_VCDT 1
+#define MT6363_AUXADC_BAT_TEMP 2
+#define MT6363_AUXADC_CHIP_TEMP 3
+#define MT6363_AUXADC_VSYSSNS 4
+#define MT6363_AUXADC_VTREF 5
+#define MT6363_AUXADC_VCORE_TEMP 6
+#define MT6363_AUXADC_VPROC_TEMP 7
+#define MT6363_AUXADC_VGPU_TEMP 8
+#define MT6363_AUXADC_VIN1 9
+#define MT6363_AUXADC_VIN2 10
+#define MT6363_AUXADC_VIN3 11
+#define MT6363_AUXADC_VIN4 12
+#define MT6363_AUXADC_VIN5 13
+#define MT6363_AUXADC_VIN6 14
+#define MT6363_AUXADC_VIN7 15
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h
new file mode 100644
index 000000000000..17cab86d355e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6373_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6373_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6373_AUXADC_CHIP_TEMP 0
+#define MT6373_AUXADC_VCORE_TEMP 1
+#define MT6373_AUXADC_VPROC_TEMP 2
+#define MT6373_AUXADC_VGPU_TEMP 3
+#define MT6373_AUXADC_VIN1 4
+#define MT6373_AUXADC_VIN2 5
+#define MT6373_AUXADC_VIN3 6
+#define MT6373_AUXADC_VIN4 7
+#define MT6373_AUXADC_VIN5 8
+#define MT6373_AUXADC_VIN6 9
+#define MT6373_AUXADC_VIN7 10
+
+#endif
diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h
index f0ae03634a96..afc12f6aa642 100644
--- a/include/dt-bindings/input/cros-ec-keyboard.h
+++ b/include/dt-bindings/input/cros-ec-keyboard.h
@@ -100,4 +100,108 @@
MATRIX_KEY(0x07, 0x0b, KEY_UP) \
MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+/* No numpad */
+#define CROS_TOP_ROW_KEYMAP_V30 \
+ MATRIX_KEY(0x00, 0x01, KEY_F11) /* T11 */ \
+ MATRIX_KEY(0x00, 0x02, KEY_F1) /* T1 */ \
+ MATRIX_KEY(0x00, 0x04, KEY_F10) /* T10 */ \
+ MATRIX_KEY(0x00, 0x0b, KEY_F14) /* T14 */ \
+ MATRIX_KEY(0x00, 0x0c, KEY_F15) /* T15 */ \
+ MATRIX_KEY(0x01, 0x02, KEY_F4) /* T4 */ \
+ MATRIX_KEY(0x01, 0x04, KEY_F7) /* T7 */ \
+ MATRIX_KEY(0x01, 0x05, KEY_F12) /* T12 */ \
+ MATRIX_KEY(0x01, 0x09, KEY_F9) /* T9 */ \
+ MATRIX_KEY(0x02, 0x02, KEY_F3) /* T3 */ \
+ MATRIX_KEY(0x02, 0x04, KEY_F6) /* T6 */ \
+ MATRIX_KEY(0x02, 0x0b, KEY_F8) /* T8 */ \
+ MATRIX_KEY(0x03, 0x02, KEY_F2) /* T2 */ \
+ MATRIX_KEY(0x03, 0x05, KEY_F13) /* T13 */ \
+ MATRIX_KEY(0x04, 0x04, KEY_F5) /* T5 */
+
+#define CROS_MAIN_KEYMAP_V30 /* Keycode */ \
+ MATRIX_KEY(0x00, 0x03, KEY_B) /* 50 */ \
+ MATRIX_KEY(0x00, 0x05, KEY_N) /* 51 */ \
+ MATRIX_KEY(0x00, 0x06, KEY_RO) /* 56 (JIS) */ \
+ MATRIX_KEY(0x00, 0x08, KEY_EQUAL) /* 13 */ \
+ MATRIX_KEY(0x00, 0x09, KEY_HOME) /* 80 (Numpad) */ \
+ MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) /* 62 */ \
+ MATRIX_KEY(0x00, 0x10, KEY_FN) /* 127 */ \
+ \
+ MATRIX_KEY(0x01, 0x01, KEY_ESC) /* 110 */ \
+ MATRIX_KEY(0x01, 0x03, KEY_G) /* 35 */ \
+ MATRIX_KEY(0x01, 0x06, KEY_H) /* 36 */ \
+ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) /* 41 */ \
+ MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) /* 15 */ \
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) /* 65 (JIS) */ \
+ MATRIX_KEY(0x01, 0x0e, KEY_LEFTCTRL) /* 58 */ \
+ \
+ MATRIX_KEY(0x02, 0x01, KEY_TAB) /* 16 */ \
+ MATRIX_KEY(0x02, 0x03, KEY_T) /* 21 */ \
+ MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) /* 28 */ \
+ MATRIX_KEY(0x02, 0x06, KEY_Y) /* 22 */ \
+ MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) /* 27 */ \
+ MATRIX_KEY(0x02, 0x09, KEY_DELETE) /* 76 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x0c, KEY_PAGEUP) /* 85 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x11, KEY_YEN) /* 14 (JIS) */ \
+ \
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) /* Launcher */ \
+ MATRIX_KEY(0x03, 0x01, KEY_GRAVE) /* 1 */ \
+ MATRIX_KEY(0x03, 0x03, KEY_5) /* 6 */ \
+ MATRIX_KEY(0x03, 0x04, KEY_S) /* 32 */ \
+ MATRIX_KEY(0x03, 0x06, KEY_MINUS) /* 12 */ \
+ MATRIX_KEY(0x03, 0x08, KEY_6) /* 7 */ \
+ MATRIX_KEY(0x03, 0x09, KEY_SLEEP) /* Lock */ \
+ MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) /* 29 */ \
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) /* 63 (JIS) */ \
+ MATRIX_KEY(0x03, 0x0e, KEY_RIGHTCTRL) /* 64 */ \
+ \
+ MATRIX_KEY(0x04, 0x01, KEY_A) /* 31 */ \
+ MATRIX_KEY(0x04, 0x02, KEY_D) /* 33 */ \
+ MATRIX_KEY(0x04, 0x03, KEY_F) /* 34 */ \
+ MATRIX_KEY(0x04, 0x05, KEY_K) /* 38 */ \
+ MATRIX_KEY(0x04, 0x06, KEY_J) /* 37 */ \
+ MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) /* 40 */ \
+ MATRIX_KEY(0x04, 0x09, KEY_L) /* 39 */ \
+ MATRIX_KEY(0x04, 0x0b, KEY_ENTER) /* 43 */ \
+ MATRIX_KEY(0x04, 0x0c, KEY_END) /* 81 (Numpad) */ \
+ \
+ MATRIX_KEY(0x05, 0x01, KEY_1) /* 2 */ \
+ MATRIX_KEY(0x05, 0x02, KEY_COMMA) /* 53 */ \
+ MATRIX_KEY(0x05, 0x03, KEY_DOT) /* 54 */ \
+ MATRIX_KEY(0x05, 0x04, KEY_SLASH) /* 55 */ \
+ MATRIX_KEY(0x05, 0x05, KEY_C) /* 48 */ \
+ MATRIX_KEY(0x05, 0x06, KEY_SPACE) /* 61 */ \
+ MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) /* 44 */ \
+ MATRIX_KEY(0x05, 0x08, KEY_X) /* 47 */ \
+ MATRIX_KEY(0x05, 0x09, KEY_V) /* 49 */ \
+ MATRIX_KEY(0x05, 0x0b, KEY_M) /* 52 */ \
+ MATRIX_KEY(0x05, 0x0c, KEY_PAGEDOWN) /* 86 (Numpad) */ \
+ \
+ MATRIX_KEY(0x06, 0x01, KEY_Z) /* 46 */ \
+ MATRIX_KEY(0x06, 0x02, KEY_3) /* 4 */ \
+ MATRIX_KEY(0x06, 0x03, KEY_4) /* 5 */ \
+ MATRIX_KEY(0x06, 0x04, KEY_2) /* 3 */ \
+ MATRIX_KEY(0x06, 0x05, KEY_8) /* 9 */ \
+ MATRIX_KEY(0x06, 0x06, KEY_0) /* 11 */ \
+ MATRIX_KEY(0x06, 0x08, KEY_7) /* 8 */ \
+ MATRIX_KEY(0x06, 0x09, KEY_9) /* 10 */ \
+ MATRIX_KEY(0x06, 0x0b, KEY_DOWN) /* 84 */ \
+ MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) /* 89 */ \
+ MATRIX_KEY(0x06, 0x0d, KEY_LEFTALT) /* 60 */ \
+ MATRIX_KEY(0x06, 0x0f, KEY_ASSISTANT) /* 128 */ \
+ MATRIX_KEY(0x06, 0x11, KEY_BACKSLASH) /* 42 (JIS, ISO) */ \
+ \
+ MATRIX_KEY(0x07, 0x01, KEY_U) /* 23 */ \
+ MATRIX_KEY(0x07, 0x02, KEY_I) /* 24 */ \
+ MATRIX_KEY(0x07, 0x03, KEY_O) /* 25 */ \
+ MATRIX_KEY(0x07, 0x04, KEY_P) /* 26 */ \
+ MATRIX_KEY(0x07, 0x05, KEY_Q) /* 17 */ \
+ MATRIX_KEY(0x07, 0x06, KEY_W) /* 18 */ \
+ MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) /* 57 */ \
+ MATRIX_KEY(0x07, 0x08, KEY_E) /* 19 */ \
+ MATRIX_KEY(0x07, 0x09, KEY_R) /* 20 */ \
+ MATRIX_KEY(0x07, 0x0b, KEY_UP) /* 83 */ \
+ MATRIX_KEY(0x07, 0x0c, KEY_LEFT) /* 79 */ \
+ MATRIX_KEY(0x07, 0x11, KEY_102ND) /* 45 (ISO) */
+
#endif /* _CROS_EC_KEYBOARD_H */
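Each keymap macro above expands to a list of MATRIX_KEY(row, column, keycode) cells, so a board DTS splices them directly into the keyboard's linux,keymap property. A minimal sketch (the node layout, matrix dimensions, and the pairing of the two V30 macros are assumptions, not taken from this patch):

	#include <dt-bindings/input/cros-ec-keyboard.h>

	keyboard-controller {
		compatible = "google,cros-ec-keyb";
		keypad,num-rows = <8>;
		keypad,num-columns = <18>;
		linux,keymap = <
			CROS_TOP_ROW_KEYMAP_V30
			CROS_MAIN_KEYMAP_V30
		>;
	};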
diff --git a/include/dt-bindings/interconnect/mediatek,mt8183.h b/include/dt-bindings/interconnect/mediatek,mt8183.h
new file mode 100644
index 000000000000..1088c350258d
--- /dev/null
+++ b/include/dt-bindings/interconnect/mediatek,mt8183.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8183_H
+#define __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8183_H
+
+#define SLAVE_DDR_EMI 0
+#define MASTER_MCUSYS 1
+#define MASTER_MFG 2
+#define MASTER_MMSYS 3
+#define MASTER_MM_VPU 4
+#define MASTER_MM_DISP 5
+#define MASTER_MM_VDEC 6
+#define MASTER_MM_VENC 7
+#define MASTER_MM_CAM 8
+#define MASTER_MM_IMG 9
+#define MASTER_MM_MDP 10
+
+#endif
diff --git a/include/dt-bindings/interconnect/mediatek,mt8195.h b/include/dt-bindings/interconnect/mediatek,mt8195.h
new file mode 100644
index 000000000000..33e0e6cde732
--- /dev/null
+++ b/include/dt-bindings/interconnect/mediatek,mt8195.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8195_H
+#define __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8195_H
+
+#define SLAVE_DDR_EMI 0
+#define MASTER_MCUSYS 1
+#define MASTER_GPUSYS 2
+#define MASTER_MMSYS 3
+#define MASTER_MM_VPU 4
+#define MASTER_MM_DISP 5
+#define MASTER_MM_VDEC 6
+#define MASTER_MM_VENC 7
+#define MASTER_MM_CAM 8
+#define MASTER_MM_IMG 9
+#define MASTER_MM_MDP 10
+#define MASTER_VPUSYS 11
+#define MASTER_VPU_0 12
+#define MASTER_VPU_1 13
+#define MASTER_MDLASYS 14
+#define MASTER_MDLA_0 15
+#define MASTER_UFS 16
+#define MASTER_PCIE_0 17
+#define MASTER_PCIE_1 18
+#define MASTER_USB 19
+#define MASTER_DBGIF 20
+#define SLAVE_HRT_DDR_EMI 21
+#define MASTER_HRT_MMSYS 22
+#define MASTER_HRT_MM_DISP 23
+#define MASTER_HRT_MM_VDEC 24
+#define MASTER_HRT_MM_VENC 25
+#define MASTER_HRT_MM_CAM 26
+#define MASTER_HRT_MM_IMG 27
+#define MASTER_HRT_MM_MDP 28
+#define MASTER_HRT_DBGIF 29
+#define MASTER_WIFI 30
+#define MASTER_BT 31
+#define MASTER_NETSYS 32
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,glymur-rpmh.h b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
new file mode 100644
index 000000000000..6a0e754345e4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+
+#define MASTER_CRYPTO 0
+#define MASTER_SOCCP_PROC 1
+#define MASTER_QDSS_ETR 2
+#define MASTER_QDSS_ETR_1 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_UFS_MEM 0
+#define MASTER_USB3_2 1
+#define MASTER_USB4_2 2
+#define SLAVE_A2NOC_SNOC 3
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_SP 4
+#define MASTER_SDCC_2 5
+#define MASTER_SDCC_4 6
+#define MASTER_USB2 7
+#define MASTER_USB3_MP 8
+#define SLAVE_A3NOC_SNOC 9
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB4_0 2
+#define MASTER_USB4_1 3
+#define SLAVE_A4NOC_HSCNOC 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AHB2PHY_3 4
+#define SLAVE_AV1_ENC_CFG 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CRYPTO_0_CFG 8
+#define SLAVE_DISPLAY_CFG 9
+#define SLAVE_GFX3D_CFG 10
+#define SLAVE_IMEM_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3A_CFG 15
+#define SLAVE_PCIE_3B_CFG 16
+#define SLAVE_PCIE_4_CFG 17
+#define SLAVE_PCIE_5_CFG 18
+#define SLAVE_PCIE_6_CFG 19
+#define SLAVE_PCIE_RSCC 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_CNOC_PCIE_SLAVE_EAST_CFG 43
+#define SLAVE_CNOC_PCIE_SLAVE_WEST_CFG 44
+#define SLAVE_LPASS_QTB_CFG 45
+#define SLAVE_CNOC_MNOC_CFG 46
+#define SLAVE_NSP_QTB_CFG 47
+#define SLAVE_PCIE_EAST_ANOC_CFG 48
+#define SLAVE_PCIE_WEST_ANOC_CFG 49
+#define SLAVE_QDSS_STM 50
+#define SLAVE_TCU 51
+
+#define MASTER_HSCNOC_CNOC 0
+#define SLAVE_AOSS 1
+#define SLAVE_IPC_ROUTER_CFG 2
+#define SLAVE_SOCCP 3
+#define SLAVE_TME_CFG 4
+#define SLAVE_APPSS 5
+#define SLAVE_CNOC_CFG 6
+#define SLAVE_BOOT_IMEM 7
+#define SLAVE_IMEM 8
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_AGGRE_NOC_EAST 4
+#define MASTER_GFX3D 5
+#define MASTER_LPASS_GEM_NOC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_PCIE_EAST 10
+#define MASTER_PCIE_WEST 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define MASTER_WLAN_Q6 13
+#define MASTER_GIC 14
+#define SLAVE_HSCNOC_CNOC 15
+#define SLAVE_LLCC 16
+#define SLAVE_PCIE_EAST 17
+#define SLAVE_PCIE_WEST 18
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CPUCP 0
+#define SLAVE_NSINOC_SYSTEM_NOC 1
+#define SLAVE_SERVICE_NSINOC 2
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_NSP0_HSC_NOC 1
+
+#define MASTER_OOBMSS_SP_PROC 0
+#define SLAVE_OOBMSS_SNOC 1
+
+#define MASTER_PCIE_EAST_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define MASTER_PCIE_5 3
+#define SLAVE_PCIE_EAST_MEM_NOC 4
+#define SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC 5
+
+#define MASTER_HSCNOC_PCIE_EAST 0
+#define MASTER_CNOC_PCIE_EAST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_EAST 3
+#define SLAVE_PCIE_0 4
+#define SLAVE_PCIE_1 5
+#define SLAVE_PCIE_5 6
+
+#define MASTER_PCIE_WEST_ANOC_CFG 0
+#define MASTER_PCIE_2 1
+#define MASTER_PCIE_3A 2
+#define MASTER_PCIE_3B 3
+#define MASTER_PCIE_4 4
+#define MASTER_PCIE_6 5
+#define SLAVE_PCIE_WEST_MEM_NOC 6
+#define SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC 7
+
+#define MASTER_HSCNOC_PCIE_WEST 0
+#define MASTER_CNOC_PCIE_WEST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_WEST 3
+#define SLAVE_PCIE_2 4
+#define SLAVE_PCIE_3A 5
+#define SLAVE_PCIE_3B 6
+#define SLAVE_PCIE_4 7
+#define SLAVE_PCIE_6 8
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_A3NOC_SNOC 2
+#define MASTER_NSINOC_SNOC 3
+#define MASTER_OOBMSS 4
+#define SLAVE_SNOC_GEM_NOC_SF 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,ipq5332.h b/include/dt-bindings/interconnect/qcom,ipq5332.h
new file mode 100644
index 000000000000..16475bb07a48
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5332.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5332_H
+#define INTERCONNECT_QCOM_IPQ5332_H
+
+#define MASTER_SNOC_PCIE3_1_M 0
+#define SLAVE_SNOC_PCIE3_1_M 1
+#define MASTER_ANOC_PCIE3_1_S 2
+#define SLAVE_ANOC_PCIE3_1_S 3
+#define MASTER_SNOC_PCIE3_2_M 4
+#define SLAVE_SNOC_PCIE3_2_M 5
+#define MASTER_ANOC_PCIE3_2_S 6
+#define SLAVE_ANOC_PCIE3_2_S 7
+#define MASTER_SNOC_USB 8
+#define SLAVE_SNOC_USB 9
+#define MASTER_NSSNOC_NSSCC 10
+#define SLAVE_NSSNOC_NSSCC 11
+#define MASTER_NSSNOC_SNOC_0 12
+#define SLAVE_NSSNOC_SNOC_0 13
+#define MASTER_NSSNOC_SNOC_1 14
+#define SLAVE_NSSNOC_SNOC_1 15
+#define MASTER_NSSNOC_ATB 16
+#define SLAVE_NSSNOC_ATB 17
+#define MASTER_NSSNOC_PCNOC_1 18
+#define SLAVE_NSSNOC_PCNOC_1 19
+#define MASTER_NSSNOC_QOSGEN_REF 20
+#define SLAVE_NSSNOC_QOSGEN_REF 21
+#define MASTER_NSSNOC_TIMEOUT_REF 22
+#define SLAVE_NSSNOC_TIMEOUT_REF 23
+#define MASTER_NSSNOC_XO_DCD 24
+#define SLAVE_NSSNOC_XO_DCD 25
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_APB 6
+#define SLAVE_NSSNOC_CE_APB 7
+#define MASTER_NSSNOC_CE_AXI 8
+#define SLAVE_NSSNOC_CE_AXI 9
+
+#define MASTER_CNOC_AHB 0
+#define SLAVE_CNOC_AHB 1
+
+#endif /* INTERCONNECT_QCOM_IPQ5332_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq5424.h b/include/dt-bindings/interconnect/qcom,ipq5424.h
new file mode 100644
index 000000000000..07b786bee7d6
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5424.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5424_H
+#define INTERCONNECT_QCOM_IPQ5424_H
+
+#define MASTER_ANOC_PCIE0 0
+#define SLAVE_ANOC_PCIE0 1
+#define MASTER_CNOC_PCIE0 2
+#define SLAVE_CNOC_PCIE0 3
+#define MASTER_ANOC_PCIE1 4
+#define SLAVE_ANOC_PCIE1 5
+#define MASTER_CNOC_PCIE1 6
+#define SLAVE_CNOC_PCIE1 7
+#define MASTER_ANOC_PCIE2 8
+#define SLAVE_ANOC_PCIE2 9
+#define MASTER_CNOC_PCIE2 10
+#define SLAVE_CNOC_PCIE2 11
+#define MASTER_ANOC_PCIE3 12
+#define SLAVE_ANOC_PCIE3 13
+#define MASTER_CNOC_PCIE3 14
+#define SLAVE_CNOC_PCIE3 15
+#define MASTER_CNOC_USB 16
+#define SLAVE_CNOC_USB 17
+#define MASTER_NSSNOC_NSSCC 18
+#define SLAVE_NSSNOC_NSSCC 19
+#define MASTER_NSSNOC_SNOC_0 20
+#define SLAVE_NSSNOC_SNOC_0 21
+#define MASTER_NSSNOC_SNOC_1 22
+#define SLAVE_NSSNOC_SNOC_1 23
+#define MASTER_NSSNOC_PCNOC_1 24
+#define SLAVE_NSSNOC_PCNOC_1 25
+#define MASTER_NSSNOC_QOSGEN_REF 26
+#define SLAVE_NSSNOC_QOSGEN_REF 27
+#define MASTER_NSSNOC_TIMEOUT_REF 28
+#define SLAVE_NSSNOC_TIMEOUT_REF 29
+#define MASTER_NSSNOC_XO_DCD 30
+#define SLAVE_NSSNOC_XO_DCD 31
+#define MASTER_NSSNOC_ATB 32
+#define SLAVE_NSSNOC_ATB 33
+#define MASTER_CNOC_LPASS_CFG 34
+#define SLAVE_CNOC_LPASS_CFG 35
+#define MASTER_SNOC_LPASS 36
+#define SLAVE_SNOC_LPASS 37
+
+#define MASTER_CPU 0
+#define SLAVE_L3 1
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_AXI 6
+#define SLAVE_NSSNOC_CE_AXI 7
+#define MASTER_NSSNOC_CE_APB 8
+#define SLAVE_NSSNOC_CE_APB 9
+#define MASTER_NSSNOC_EIP 10
+#define SLAVE_NSSNOC_EIP 11
+
+#endif /* INTERCONNECT_QCOM_IPQ5424_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq9574.h b/include/dt-bindings/interconnect/qcom,ipq9574.h
new file mode 100644
index 000000000000..42019335c7dd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq9574.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ9574_H
+#define INTERCONNECT_QCOM_IPQ9574_H
+
+#define MASTER_ANOC_PCIE0 0
+#define SLAVE_ANOC_PCIE0 1
+#define MASTER_SNOC_PCIE0 2
+#define SLAVE_SNOC_PCIE0 3
+#define MASTER_ANOC_PCIE1 4
+#define SLAVE_ANOC_PCIE1 5
+#define MASTER_SNOC_PCIE1 6
+#define SLAVE_SNOC_PCIE1 7
+#define MASTER_ANOC_PCIE2 8
+#define SLAVE_ANOC_PCIE2 9
+#define MASTER_SNOC_PCIE2 10
+#define SLAVE_SNOC_PCIE2 11
+#define MASTER_ANOC_PCIE3 12
+#define SLAVE_ANOC_PCIE3 13
+#define MASTER_SNOC_PCIE3 14
+#define SLAVE_SNOC_PCIE3 15
+#define MASTER_USB 16
+#define SLAVE_USB 17
+#define MASTER_USB_AXI 18
+#define SLAVE_USB_AXI 19
+#define MASTER_NSSNOC_NSSCC 20
+#define SLAVE_NSSNOC_NSSCC 21
+#define MASTER_NSSNOC_SNOC_0 22
+#define SLAVE_NSSNOC_SNOC_0 23
+#define MASTER_NSSNOC_SNOC_1 24
+#define SLAVE_NSSNOC_SNOC_1 25
+#define MASTER_NSSNOC_PCNOC_1 26
+#define SLAVE_NSSNOC_PCNOC_1 27
+#define MASTER_NSSNOC_QOSGEN_REF 28
+#define SLAVE_NSSNOC_QOSGEN_REF 29
+#define MASTER_NSSNOC_TIMEOUT_REF 30
+#define SLAVE_NSSNOC_TIMEOUT_REF 31
+#define MASTER_NSSNOC_XO_DCD 32
+#define SLAVE_NSSNOC_XO_DCD 33
+#define MASTER_NSSNOC_ATB 34
+#define SLAVE_NSSNOC_ATB 35
+#define MASTER_MEM_NOC_NSSNOC 36
+#define SLAVE_MEM_NOC_NSSNOC 37
+#define MASTER_NSSNOC_MEMNOC 38
+#define SLAVE_NSSNOC_MEMNOC 39
+#define MASTER_NSSNOC_MEM_NOC_1 40
+#define SLAVE_NSSNOC_MEM_NOC_1 41
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_IMEM_QSB 6
+#define SLAVE_NSSNOC_IMEM_QSB 7
+#define MASTER_NSSNOC_IMEM_AHB 8
+#define SLAVE_NSSNOC_IMEM_AHB 9
+
+#endif /* INTERCONNECT_QCOM_IPQ9574_H */
diff --git a/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h b/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h
new file mode 100644
index 000000000000..dde3f9abd677
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_KAANAPALI_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_KAANAPALI_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_CRYPTO 1
+#define MASTER_QUP_1 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3 5
+#define MASTER_QUP_2 6
+#define MASTER_QUP_3 7
+#define MASTER_QUP_4 8
+#define MASTER_IPA 9
+#define MASTER_SOCCP_PROC 10
+#define MASTER_SP 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_QDSS_ETR_1 13
+#define MASTER_SDCC_2 14
+#define SLAVE_A1NOC_SNOC 15
+#define SLAVE_A2NOC_SNOC 16
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define MASTER_QUP_CORE_3 3
+#define MASTER_QUP_CORE_4 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+#define SLAVE_QUP_CORE_3 8
+#define SLAVE_QUP_CORE_4 9
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_EVA_CFG 7
+#define SLAVE_GFX3D_CFG 8
+#define SLAVE_I2C 9
+#define SLAVE_I3C_IBI0_CFG 10
+#define SLAVE_I3C_IBI1_CFG 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPC_ROUTER_CFG 13
+#define SLAVE_CNOC_MSS 14
+#define SLAVE_PCIE_CFG 15
+#define SLAVE_PRNG 16
+#define SLAVE_QDSS_CFG 17
+#define SLAVE_QSPI_0 18
+#define SLAVE_QUP_1 19
+#define SLAVE_QUP_2 20
+#define SLAVE_QUP_3 21
+#define SLAVE_QUP_4 22
+#define SLAVE_SDCC_2 23
+#define SLAVE_SDCC_4 24
+#define SLAVE_SPSS_CFG 25
+#define SLAVE_TCSR 26
+#define SLAVE_TLMM 27
+#define SLAVE_UFS_MEM_CFG 28
+#define SLAVE_USB3 29
+#define SLAVE_VENUS_CFG 30
+#define SLAVE_VSENSE_CTRL_CFG 31
+#define SLAVE_CNOC_MNOC_CFG 32
+#define SLAVE_PCIE_ANOC_CFG 33
+#define SLAVE_QDSS_STM 34
+#define SLAVE_TCU 35
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_FENCE 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_PCIE_0 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_QPACE 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_WLAN_Q6 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_NRT_ICP_SF 1
+#define MASTER_CAMNOC_RT_CDM_SF 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP 4
+#define MASTER_MDSS_DCP 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_EVA 8
+#define MASTER_VIDEO_MVP 9
+#define MASTER_VIDEO_V_PROC 10
+#define MASTER_CNOC_MNOC_CFG 11
+#define SLAVE_MNOC_HF_MEM_NOC 12
+#define SLAVE_MNOC_SF_MEM_NOC 13
+#define SLAVE_SERVICE_MNOC 14
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define SLAVE_SERVICE_PCIE_ANOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_APSS_NOC 2
+#define MASTER_CNOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#endif
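
Unlike the clock-based IPQ providers above, RPMh interconnect providers take a two-cell specifier per endpoint: the node ID plus a bus tag. A sketch of a consumer path, assuming provider phandle names from a typical platform dtsi (QCOM_ICC_TAG_ALWAYS comes from qcom,icc.h; the node shown is illustrative):

  #include <dt-bindings/interconnect/qcom,icc.h>
  #include <dt-bindings/interconnect/qcom,kaanapali-rpmh.h>

  ufs@1d84000 {
          interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
                           &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
                          <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
                           &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ALWAYS>;
          interconnect-names = "ufs-ddr", "cpu-ufs";
  };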
diff --git a/include/dt-bindings/interconnect/qcom,milos-rpmh.h b/include/dt-bindings/interconnect/qcom,milos-rpmh.h
new file mode 100644
index 000000000000..9326d7d9c2a3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,milos-rpmh.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MILOS_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MILOS_H
+
+#define MASTER_QUP_1 0
+#define MASTER_UFS_MEM 1
+#define MASTER_USB3_0 2
+#define SLAVE_A1NOC_SNOC 3
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QSPI_0 1
+#define MASTER_QUP_0 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_1 7
+#define MASTER_SDCC_2 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_RBCPR_MXA_CFG 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_CX_RDPM 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_CNOC_MSS 11
+#define SLAVE_MX_2_RDPM 12
+#define SLAVE_MX_RDPM 13
+#define SLAVE_PDM 14
+#define SLAVE_QDSS_CFG 15
+#define SLAVE_QSPI_0 16
+#define SLAVE_QUP_0 17
+#define SLAVE_QUP_1 18
+#define SLAVE_SDC1 19
+#define SLAVE_SDCC_2 20
+#define SLAVE_TCSR 21
+#define SLAVE_TLMM 22
+#define SLAVE_UFS_MEM_CFG 23
+#define SLAVE_USB3_0 24
+#define SLAVE_VENUS_CFG 25
+#define SLAVE_VSENSE_CTRL_CFG 26
+#define SLAVE_WLAN 27
+#define SLAVE_CNOC_MNOC_HF_CFG 28
+#define SLAVE_CNOC_MNOC_SF_CFG 29
+#define SLAVE_NSP_QTB_CFG 30
+#define SLAVE_PCIE_ANOC_CFG 31
+#define SLAVE_WLAN_Q6_THROTTLE_CFG 32
+#define SLAVE_SERVICE_CNOC_CFG 33
+#define SLAVE_QDSS_STM 34
+#define SLAVE_TCU 35
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_DISPLAY_CFG 3
+#define SLAVE_IPA_CFG 4
+#define SLAVE_IPC_ROUTER_CFG 5
+#define SLAVE_PCIE_0_CFG 6
+#define SLAVE_PCIE_1_CFG 7
+#define SLAVE_PRNG 8
+#define SLAVE_TME_CFG 9
+#define SLAVE_APPSS 10
+#define SLAVE_CNOC_CFG 11
+#define SLAVE_DDRSS_CFG 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_SERVICE_CNOC 15
+#define SLAVE_PCIE_0 16
+#define SLAVE_PCIE_1 17
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_GC_MEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_WLAN_Q6 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_VIDEO 4
+#define MASTER_CNOC_MNOC_HF_CFG 5
+#define MASTER_CNOC_MNOC_SF_CFG 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC_HF 9
+#define SLAVE_SERVICE_MNOC_SF 10
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_APSS_NOC 2
+#define MASTER_CNOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_SNOC_GEM_NOC_GC 6
+#define SLAVE_SNOC_GEM_NOC_SF 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8937.h b/include/dt-bindings/interconnect/qcom,msm8937.h
new file mode 100644
index 000000000000..98b8a4637aab
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8937.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8937 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB_HS1 3
+#define MAS_XI_USB_HS1 4
+#define MAS_CRYPTO 5
+#define MAS_SDCC_1 6
+#define MAS_SDCC_2 7
+#define MAS_SNOC_PCNOC 8
+#define PCNOC_M_0 9
+#define PCNOC_M_1 10
+#define PCNOC_INT_0 11
+#define PCNOC_INT_1 12
+#define PCNOC_INT_2 13
+#define PCNOC_INT_3 14
+#define PCNOC_S_0 15
+#define PCNOC_S_1 16
+#define PCNOC_S_2 17
+#define PCNOC_S_3 18
+#define PCNOC_S_4 19
+#define PCNOC_S_6 20
+#define PCNOC_S_7 21
+#define PCNOC_S_8 22
+#define SLV_SDCC_2 23
+#define SLV_SPDM 24
+#define SLV_PDM 25
+#define SLV_PRNG 26
+#define SLV_TCSR 27
+#define SLV_SNOC_CFG 28
+#define SLV_MESSAGE_RAM 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_TLMM 34
+#define SLV_BLSP_1 35
+#define SLV_BLSP_2 36
+#define SLV_PMIC_ARB 37
+#define SLV_SDCC_1 38
+#define SLV_CRYPTO_0_CFG 39
+#define SLV_USB_HS 40
+#define SLV_TCU 41
+#define SLV_PCNOC_SNOC 42
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define QDSS_INT 4
+#define SNOC_INT_0 5
+#define SNOC_INT_1 6
+#define SNOC_INT_2 7
+#define SLV_KPSS_AHB 8
+#define SLV_WCSS 9
+#define SLV_SNOC_BIMC_1 10
+#define SLV_IMEM 11
+#define SLV_SNOC_PCNOC 12
+#define SLV_QDSS_STM 13
+#define SLV_CATS_1 14
+#define SLV_LPASS 15
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8953.h b/include/dt-bindings/interconnect/qcom,msm8953.h
new file mode 100644
index 000000000000..12564c434af7
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8953.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8953 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB3 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SNOC_PCNOC 7
+#define PCNOC_M_0 8
+#define PCNOC_M_1 9
+#define PCNOC_INT_1 10
+#define PCNOC_INT_2 11
+#define PCNOC_S_0 12
+#define PCNOC_S_1 13
+#define PCNOC_S_2 14
+#define PCNOC_S_3 15
+#define PCNOC_S_4 16
+#define PCNOC_S_6 17
+#define PCNOC_S_7 18
+#define PCNOC_S_8 19
+#define PCNOC_S_9 20
+#define SLV_SPDM 21
+#define SLV_PDM 22
+#define SLV_TCSR 23
+#define SLV_SNOC_CFG 24
+#define SLV_TLMM 25
+#define SLV_MESSAGE_RAM 26
+#define SLV_BLSP_1 27
+#define SLV_BLSP_2 28
+#define SLV_PRNG 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_SDCC_1 34
+#define SLV_SDCC_2 35
+#define SLV_CRYPTO_0_CFG 36
+#define SLV_PMIC_ARB 37
+#define SLV_USB3 38
+#define SLV_IPA_CFG 39
+#define SLV_TCU 40
+#define SLV_PCNOC_SNOC 41
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_IPA 3
+#define MAS_QDSS_ETR 4
+#define QDSS_INT 5
+#define SNOC_INT_0 6
+#define SNOC_INT_1 7
+#define SNOC_INT_2 8
+#define SLV_KPSS_AHB 9
+#define SLV_WCSS 10
+#define SLV_SNOC_BIMC_1 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_1 15
+#define SLV_LPASS 16
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8976.h b/include/dt-bindings/interconnect/qcom,msm8976.h
new file mode 100644
index 000000000000..4ea90f22320e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8976.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8976 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_SMMNOC_BIMC 1
+#define MAS_SNOC_BIMC 2
+#define MAS_TCU_0 3
+#define SLV_EBI 4
+#define SLV_BIMC_SNOC 5
+
+/* PCNOC fabric */
+#define MAS_USB_HS2 0
+#define MAS_BLSP_1 1
+#define MAS_USB_HS1 2
+#define MAS_BLSP_2 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SDCC_3 7
+#define MAS_SNOC_PCNOC 8
+#define MAS_LPASS_AHB 9
+#define MAS_SPDM 10
+#define MAS_DEHR 11
+#define MAS_XM_USB_HS1 12
+#define PCNOC_M_0 13
+#define PCNOC_M_1 14
+#define PCNOC_INT_0 15
+#define PCNOC_INT_1 16
+#define PCNOC_INT_2 17
+#define PCNOC_S_1 18
+#define PCNOC_S_2 19
+#define PCNOC_S_3 20
+#define PCNOC_S_4 21
+#define PCNOC_S_8 22
+#define PCNOC_S_9 23
+#define SLV_TCSR 24
+#define SLV_TLMM 25
+#define SLV_CRYPTO_0_CFG 26
+#define SLV_MESSAGE_RAM 27
+#define SLV_PDM 28
+#define SLV_PRNG 29
+#define SLV_PMIC_ARB 30
+#define SLV_SNOC_CFG 31
+#define SLV_DCC_CFG 32
+#define SLV_CAMERA_SS_CFG 33
+#define SLV_DISP_SS_CFG 34
+#define SLV_VENUS_CFG 35
+#define SLV_SDCC_1 36
+#define SLV_BLSP_1 37
+#define SLV_USB_HS 38
+#define SLV_SDCC_3 39
+#define SLV_SDCC_2 40
+#define SLV_GPU_CFG 41
+#define SLV_USB_HS2 42
+#define SLV_BLSP_2 43
+#define SLV_PCNOC_SNOC 44
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define MAS_LPASS_PROC 4
+#define MAS_IPA 5
+#define QDSS_INT 6
+#define SNOC_INT_0 7
+#define SNOC_INT_1 8
+#define SNOC_INT_2 9
+#define SLV_KPSS_AHB 10
+#define SLV_SNOC_BIMC 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_0 15
+#define SLV_CATS_1 16
+#define SLV_LPASS 17
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_OXILI 1
+#define MAS_MDP0 2
+#define MAS_MDP1 3
+#define MAS_VENUS_0 4
+#define MAS_VENUS_1 5
+#define MAS_VFE_0 6
+#define MAS_VFE_1 7
+#define MAS_CPP 8
+#define MM_INT_0 9
+#define SLV_SMMNOC_BIMC 10
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H */
diff --git a/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h
new file mode 100644
index 000000000000..84ae0d39e73c
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H
+
+#define MASTER_A1NOC_CFG 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QSPI 3
+#define MASTER_QUP_0 4
+#define MASTER_BLSP_1 5
+#define MASTER_CNOC_A2NOC 6
+#define MASTER_CRYPTO 7
+#define MASTER_IPA 8
+#define MASTER_EMAC_EVB 9
+#define MASTER_PCIE 10
+#define MASTER_QDSS_ETR 11
+#define MASTER_SDCC_1 12
+#define MASTER_SDCC_2 13
+#define MASTER_UFS_MEM 14
+#define MASTER_USB2 15
+#define MASTER_USB3_0 16
+#define SLAVE_A1NOC_SNOC 17
+#define SLAVE_LPASS_SNOC 18
+#define SLAVE_ANOC_PCIE_SNOC 19
+#define SLAVE_SERVICE_A2NOC 20
+
+#define MASTER_CAMNOC_HF0_UNCOMP 1
+#define MASTER_CAMNOC_HF1_UNCOMP 2
+#define MASTER_CAMNOC_SF_UNCOMP 3
+#define SLAVE_CAMNOC_UNCOMP 4
+
+#define MASTER_SPDM 1
+#define MASTER_SNOC_CNOC 2
+#define MASTER_QDSS_DAP 3
+#define SLAVE_A1NOC_CFG 4
+#define SLAVE_AHB2PHY_EAST 5
+#define SLAVE_AHB2PHY_WEST 6
+#define SLAVE_AOP 7
+#define SLAVE_AOSS 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CLK_CTL 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CNOC_DDRSS 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_EMAC_AVB_CFG 16
+#define SLAVE_GLM 17
+#define SLAVE_GFX3D_CFG 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_CNOC_MNOC_CFG 21
+#define SLAVE_PCIE_CFG 22
+#define SLAVE_PIMEM_CFG 23
+#define SLAVE_PRNG 24
+#define SLAVE_QDSS_CFG 25
+#define SLAVE_QSPI 26
+#define SLAVE_QUP_0 27
+#define SLAVE_QUP_1 28
+#define SLAVE_SDCC_1 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SNOC_CFG 31
+#define SLAVE_SPDM_WRAPPER 32
+#define SLAVE_TCSR 33
+#define SLAVE_TLMM_EAST 34
+#define SLAVE_TLMM_SOUTH 35
+#define SLAVE_TLMM_WEST 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_CNOC_A2NOC 42
+#define SLAVE_SERVICE_CNOC 43
+
+#define MASTER_CNOC_DC_NOC 1
+#define SLAVE_DC_NOC_GEMNOC 2
+#define SLAVE_LLCC_CFG 3
+
+#define MASTER_APPSS_PROC 1
+#define MASTER_GPU_TCU 2
+#define MASTER_SYS_TCU 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_SNOC_GC_MEM_NOC 8
+#define MASTER_SNOC_SF_MEM_NOC 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define SLAVE_SERVICE_GEM_NOC 14
+
+#define MASTER_IPA_CORE 1
+#define SLAVE_IPA_CORE 2
+
+#define MASTER_LLCC 1
+#define SLAVE_EBI1 2
+
+#define MASTER_CNOC_MNOC_CFG 1
+#define MASTER_CAMNOC_HF0 2
+#define MASTER_CAMNOC_HF1 3
+#define MASTER_CAMNOC_SF 4
+#define MASTER_MDP0 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_PROC 8
+#define SLAVE_MNOC_SF_MEM_NOC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_SERVICE_MNOC 11
+
+#define MASTER_SNOC_CFG 1
+#define MASTER_A1NOC_SNOC 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_GEM_NOC_PCIE_SNOC 4
+#define MASTER_LPASS_ANOC 5
+#define MASTER_ANOC_PCIE_SNOC 6
+#define MASTER_PIMEM 7
+#define MASTER_GIC 8
+#define SLAVE_APPSS 9
+#define SLAVE_SNOC_CNOC 10
+#define SLAVE_SNOC_GEM_NOC_SF 11
+#define SLAVE_SNOC_MEM_NOC_GC 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_SERVICE_SNOC 15
+#define SLAVE_PCIE_0 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h
new file mode 100644
index 000000000000..c5eeafa1b1dd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H
+
+#define MASTER_QUP_3 0
+#define MASTER_EMAC 1
+#define MASTER_SDC 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB2 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE0 4
+#define MASTER_CRYPTO_CORE1 5
+#define MASTER_IPA 6
+#define MASTER_QDSS_ETR_0 7
+#define MASTER_QDSS_ETR_1 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_3 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_3 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_2 2
+#define SLAVE_AHB2PHY_3 3
+#define SLAVE_ANOC_THROTTLE_CFG 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_BOOT_ROM 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 10
+#define SLAVE_CLK_CTL 11
+#define SLAVE_CDSP_CFG 12
+#define SLAVE_RBCPR_CX_CFG 13
+#define SLAVE_RBCPR_MMCX_CFG 14
+#define SLAVE_RBCPR_MX_CFG 15
+#define SLAVE_CPR_NSPCX 16
+#define SLAVE_CPR_NSPHMX 17
+#define SLAVE_CRYPTO_0_CFG 18
+#define SLAVE_CX_RDPM 19
+#define SLAVE_DISPLAY_CFG 20
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 21
+#define SLAVE_EMAC_CFG 22
+#define SLAVE_GP_DSP0_CFG 23
+#define SLAVE_GPDSP0_THROTTLE_CFG 24
+#define SLAVE_GPU_TCU_THROTTLE_CFG 25
+#define SLAVE_GFX3D_CFG 26
+#define SLAVE_HWKM 27
+#define SLAVE_IMEM_CFG 28
+#define SLAVE_IPA_CFG 29
+#define SLAVE_IPC_ROUTER_CFG 30
+#define SLAVE_LPASS 31
+#define SLAVE_LPASS_THROTTLE_CFG 32
+#define SLAVE_MX_RDPM 33
+#define SLAVE_MXC_RDPM 34
+#define SLAVE_PCIE_0_CFG 35
+#define SLAVE_PCIE_1_CFG 36
+#define SLAVE_PCIE_TCU_THROTTLE_CFG 37
+#define SLAVE_PCIE_THROTTLE_CFG 38
+#define SLAVE_PDM 39
+#define SLAVE_PIMEM_CFG 40
+#define SLAVE_PKA_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QM_CFG 43
+#define SLAVE_QM_MPU_CFG 44
+#define SLAVE_QUP_0 45
+#define SLAVE_QUP_1 46
+#define SLAVE_QUP_3 47
+#define SLAVE_SAIL_THROTTLE_CFG 48
+#define SLAVE_SDC1 49
+#define SLAVE_SECURITY 50
+#define SLAVE_SNOC_THROTTLE_CFG 51
+#define SLAVE_TCSR 52
+#define SLAVE_TLMM 53
+#define SLAVE_TSC_CFG 54
+#define SLAVE_UFS_MEM_CFG 55
+#define SLAVE_USB2 56
+#define SLAVE_USB3_0 57
+#define SLAVE_VENUS_CFG 58
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 59
+#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 60
+#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 61
+#define SLAVE_DDRSS_CFG 62
+#define SLAVE_GPDSP_NOC_CFG 63
+#define SLAVE_CNOC_MNOC_HF_CFG 64
+#define SLAVE_CNOC_MNOC_SF_CFG 65
+#define SLAVE_PCIE_ANOC_CFG 66
+#define SLAVE_SNOC_CFG 67
+#define SLAVE_BOOT_IMEM 68
+#define SLAVE_IMEM 69
+#define SLAVE_PIMEM 70
+#define SLAVE_PCIE_0 71
+#define SLAVE_PCIE_1 72
+#define SLAVE_QDSS_STM 73
+#define SLAVE_TCU 74
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GEM_NOC_CFG 5
+#define MASTER_GPDSP_SAIL 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+#define SLAVE_SERVICE_GEM_NOC2 19
+
+#define MASTER_SAILSS_MD0 0
+#define MASTER_DSP0 1
+#define SLAVE_GP_DSP_SAIL_NOC 2
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP0 3
+#define MASTER_MDP1 4
+#define MASTER_CNOC_MNOC_HF_CFG 5
+#define MASTER_CNOC_MNOC_SF_CFG 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC_HF 12
+#define SLAVE_SERVICE_MNOC_SF 13
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_HCP_A 2
+#define SLAVE_CDSP_MEM_NOC 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h
new file mode 100644
index 000000000000..aec7cbb7cd70
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_AHB2PHY_SOUTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CPR_NSPCX 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_GFX3D_CFG 16
+#define SLAVE_IMEM_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PRNG 25
+#define SLAVE_QDSS_CFG 26
+#define SLAVE_QSPI_0 27
+#define SLAVE_QUP_0 28
+#define SLAVE_QUP_1 29
+#define SLAVE_SDCC_1 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_TME_CFG 33
+#define SLAVE_USB3_0 34
+#define SLAVE_VENUS_CFG 35
+#define SLAVE_VSENSE_CTRL_CFG 36
+#define SLAVE_WLAN_Q6_CFG 37
+#define SLAVE_DDRSS_CFG 38
+#define SLAVE_CNOC_MNOC_CFG 39
+#define SLAVE_SNOC_CFG 40
+#define SLAVE_IMEM 41
+#define SLAVE_PIMEM 42
+#define SLAVE_SERVICE_CNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_PCIE_1 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_COMPUTE_NOC 6
+#define MASTER_ANOC_PCIE_GEM_NOC 7
+#define MASTER_SNOC_GC_MEM_NOC 8
+#define MASTER_SNOC_SF_MEM_NOC 9
+#define MASTER_WLAN_Q6 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_LSR 3
+#define MASTER_MDP 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_GIC_AHB 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI_0 2
+#define MASTER_QUP_0 3
+#define MASTER_QUP_1 4
+#define MASTER_A2NOC_SNOC 5
+#define MASTER_CNOC_DATAPATH 6
+#define MASTER_LPASS_ANOC 7
+#define MASTER_SNOC_CFG 8
+#define MASTER_CRYPTO 9
+#define MASTER_PIMEM 10
+#define MASTER_GIC 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_QDSS_ETR_1 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3_0 15
+#define SLAVE_A2NOC_SNOC 16
+#define SLAVE_SNOC_GEM_NOC_GC 17
+#define SLAVE_SNOC_GEM_NOC_SF 18
+#define SLAVE_SERVICE_SNOC 19
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h
index e903f5f3dd8f..0e19ee8f1687 100644
--- a/include/dt-bindings/interconnect/qcom,sdx75.h
+++ b/include/dt-bindings/interconnect/qcom,sdx75.h
@@ -6,9 +6,7 @@
#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
-#define MASTER_QPIC_CORE 0
#define MASTER_QUP_CORE_0 1
-#define SLAVE_QPIC_CORE 2
#define SLAVE_QUP_CORE_0 3
#define MASTER_LLCC 0
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
index c7f7ed315aeb..2282f93607bc 100644
--- a/include/dt-bindings/interconnect/qcom,sm8350.h
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -119,9 +119,6 @@
#define SLAVE_SERVICE_GEM_NOC_1 16
#define SLAVE_SERVICE_GEM_NOC_2 17
#define SLAVE_SERVICE_GEM_NOC 18
-#define MASTER_MNOC_HF_MEM_NOC_DISP 19
-#define MASTER_MNOC_SF_MEM_NOC_DISP 20
-#define SLAVE_LLCC_DISP 21
#define MASTER_CNOC_LPASS_AG_NOC 0
#define SLAVE_LPASS_CORE_CFG 1
@@ -133,8 +130,6 @@
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
-#define MASTER_LLCC_DISP 2
-#define SLAVE_EBI1_DISP 3
#define MASTER_CAMNOC_HF 0
#define MASTER_CAMNOC_ICP 1
@@ -149,11 +144,6 @@
#define SLAVE_MNOC_HF_MEM_NOC 10
#define SLAVE_MNOC_SF_MEM_NOC 11
#define SLAVE_SERVICE_MNOC 12
-#define MASTER_MDP0_DISP 13
-#define MASTER_MDP1_DISP 14
-#define MASTER_ROTATOR_DISP 15
-#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
-#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
#define MASTER_CDSP_NOC_CFG 0
#define MASTER_CDSP_PROC 1
diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
index 6c1eaf04e241..1216aa352d55 100644
--- a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
+++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
@@ -150,5 +150,6 @@
#define MASTER_A1NOC_SNOC 0
#define MASTER_A2NOC_SNOC 1
#define SLAVE_SNOC_GEM_NOC_SF 2
+#define MASTER_APSS_NOC 3
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
new file mode 100644
index 000000000000..30563952a646
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SOCCP_AGGR_NOC 4
+#define MASTER_SP 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_QDSS_ETR_1 7
+#define MASTER_SDCC_2 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_EVA_CFG 7
+#define SLAVE_GFX3D_CFG 8
+#define SLAVE_I2C 9
+#define SLAVE_I3C_IBI0_CFG 10
+#define SLAVE_I3C_IBI1_CFG 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_CNOC_MSS 13
+#define SLAVE_PCIE_CFG 14
+#define SLAVE_PRNG 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_QSPI_0 17
+#define SLAVE_QUP_3 18
+#define SLAVE_QUP_1 19
+#define SLAVE_QUP_2 20
+#define SLAVE_SDCC_2 21
+#define SLAVE_SDCC_4 22
+#define SLAVE_SPSS_CFG 23
+#define SLAVE_TCSR 24
+#define SLAVE_TLMM 25
+#define SLAVE_UFS_MEM_CFG 26
+#define SLAVE_USB3_0 27
+#define SLAVE_VENUS_CFG 28
+#define SLAVE_VSENSE_CTRL_CFG 29
+#define SLAVE_CNOC_MNOC_CFG 30
+#define SLAVE_PCIE_ANOC_CFG 31
+#define SLAVE_QDSS_STM 32
+#define SLAVE_TCU 33
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_BOOT_IMEM_2 12
+#define SLAVE_SERVICE_CNOC 13
+#define SLAVE_PCIE_0 14
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_UBWC_P 11
+#define MASTER_GIC 12
+#define SLAVE_UBWC_P 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_NRT_ICP_SF 1
+#define MASTER_CAMNOC_RT_CDM_SF 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP 4
+#define MASTER_CDSP_HCP 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_EVA 7
+#define MASTER_VIDEO_MVP 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define SLAVE_SERVICE_PCIE_ANOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 35b6f69b7db6..887f53363e8a 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -12,6 +12,8 @@
#define GIC_SPI 0
#define GIC_PPI 1
+#define GIC_ESPI 2
+#define GIC_EPPI 3
/*
* Interrupt specifier cell 2.
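
GIC_ESPI and GIC_EPPI extend the first interrupt specifier cell beyond the existing GIC_SPI/GIC_PPI values, selecting the GIC's extended SPI/PPI ranges. A sketch of a consumer (the device node and interrupt number are illustrative):

  #include <dt-bindings/interrupt-controller/arm-gic.h>
  #include <dt-bindings/interrupt-controller/irq.h>

  serial@9000000 {
          /* extended SPI 12, level triggered, active high */
          interrupts = <GIC_ESPI 12 IRQ_TYPE_LEVEL_HIGH>;
  };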
diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
index f315d5a7f5ee..7dd04424afcc 100644
--- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
@@ -20,4 +20,18 @@
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0
+
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0
+
#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
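
These AST2700 constants are interrupt numbers within the SCU interrupt controller's own domain. A sketch of a consumer wired to one of the new SCU ICs (the &scu_ic0 phandle and the node are illustrative):

  #include <dt-bindings/interrupt-controller/aspeed-scu-ic.h>

  pcie-phy {
          interrupt-parent = <&scu_ic0>;
          interrupts = <ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>;
  };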
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index fbfa3febc66d..fd85a79381b3 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -33,5 +33,7 @@
#define IPCC_CLIENT_NSP1 18
#define IPCC_CLIENT_TME 23
#define IPCC_CLIENT_WPSS 24
+#define IPCC_CLIENT_GPDSP0 31
+#define IPCC_CLIENT_GPDSP1 32
#endif
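
The new GPDSP client IDs slot into the IPCC mailbox specifier alongside a signal type defined earlier in this header. A sketch (the &ipcc phandle and the remoteproc node are illustrative):

  #include <dt-bindings/mailbox/qcom-ipcc.h>

  remoteproc-gpdsp0 {
          mboxes = <&ipcc IPCC_CLIENT_GPDSP0 IPCC_MPROC_SIGNAL_GLINK_QMP>;
  };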
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
deleted file mode 100644
index 6b1fb6f5413b..000000000000
--- a/include/dt-bindings/media/c8sectpfe.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_C8SECTPFE_H
-#define __DT_C8SECTPFE_H
-
-#define STV0367_TDA18212_NIMA_1 0
-#define STV0367_TDA18212_NIMA_2 1
-#define STV0367_TDA18212_NIMB_1 2
-#define STV0367_TDA18212_NIMB_2 3
-
-#define STV0903_6110_LNB24_NIMA 4
-#define STV0903_6110_LNB24_NIMB 5
-
-#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
index dda00c038530..ba34c420c303 100644
--- a/include/dt-bindings/media/tvp5150.h
+++ b/include/dt-bindings/media/tvp5150.h
@@ -2,7 +2,7 @@
/*
tvp5150.h - definition for tvp5150 inputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/dt-bindings/media/video-interfaces.h b/include/dt-bindings/media/video-interfaces.h
index 68ac4e05e37f..0b19c9b2e627 100644
--- a/include/dt-bindings/media/video-interfaces.h
+++ b/include/dt-bindings/media/video-interfaces.h
@@ -13,4 +13,15 @@
#define MEDIA_BUS_TYPE_PARALLEL 5
#define MEDIA_BUS_TYPE_BT656 6
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC 0
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ACB 1
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BAC 2
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA 3
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CAB 4
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CBA 5
+
+#define MEDIA_PCLK_SAMPLE_FALLING_EDGE 0
+#define MEDIA_PCLK_SAMPLE_RISING_EDGE 1
+#define MEDIA_PCLK_SAMPLE_DUAL_EDGE 2
+
#endif /* __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__ */
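
The C-PHY line-order and pclk-sample constants map to endpoint properties in the video-interfaces binding. A sketch of a CSI-2 C-PHY sensor endpoint (the line-orders property name and node layout are assumptions based on that binding; values are illustrative):

  #include <dt-bindings/media/video-interfaces.h>

  endpoint {
          remote-endpoint = <&csiphy0_in>;
          data-lanes = <1 2 3>;
          /* one entry per data lane */
          line-orders = <MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC
                         MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ACB
                         MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC>;
  };

A parallel-bus endpoint would use the new dual-edge value as, e.g., pclk-sample = <MEDIA_PCLK_SAMPLE_DUAL_EDGE>;.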
diff --git a/include/dt-bindings/memory/mediatek,mt6893-memory-port.h b/include/dt-bindings/memory/mediatek,mt6893-memory-port.h
new file mode 100644
index 000000000000..26e8b400db0d
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt6893-memory-port.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT6893_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6893_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space.
+ *
+ * The address ranges are preassigned as follows:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/2
+ * vcodec 4G ~ 8G larb4/5/7
+ * cam/mdp 8G ~ 12G larb9/11/13/14/16/17/18/19/20
+ * CCU0 0x4000_0000 ~ 0x43ff_ffff larb13: port 9/10
+ * CCU1 0x4400_0000 ~ 0x47ff_ffff larb14: port 4/5
+ *
+ * larb3/6/8/10/12/15 are null.
+ */
+
+/* larb0 */
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_DOM_ID(0, 0)
+#define M4U_PORT_L0_MDP_RDMA4 MTK_M4U_DOM_ID(0, 1)
+#define M4U_PORT_L0_OVL_RDMA0_HDR MTK_M4U_DOM_ID(0, 2)
+#define M4U_PORT_L0_OVL_2L_RDMA1_HDR MTK_M4U_DOM_ID(0, 3)
+#define M4U_PORT_L0_OVL_2L_RDMA3_HDR MTK_M4U_DOM_ID(0, 4)
+#define M4U_PORT_L0_OVL_RDMA0 MTK_M4U_DOM_ID(0, 5)
+#define M4U_PORT_L0_OVL_2L_RDMA1 MTK_M4U_DOM_ID(0, 6)
+#define M4U_PORT_L0_OVL_2L_RDMA3 MTK_M4U_DOM_ID(0, 7)
+#define M4U_PORT_L0_OVL_RDMA1_SYSRAM MTK_M4U_DOM_ID(0, 8)
+#define M4U_PORT_L0_OVL_2L_RDMA0_SYSRAM MTK_M4U_DOM_ID(0, 9)
+#define M4U_PORT_L0_OVL_2L_RDMA2_SYSRAM MTK_M4U_DOM_ID(0, 10)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_DOM_ID(0, 11)
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_DOM_ID(0, 12)
+#define M4U_PORT_L0_DISP_UFBC_WDMA0 MTK_M4U_DOM_ID(0, 13)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_DOM_ID(0, 14)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_POSTMASK1 MTK_M4U_DOM_ID(1, 0)
+#define M4U_PORT_L1_MDP_RDMA5 MTK_M4U_DOM_ID(1, 1)
+#define M4U_PORT_L1_OVL_RDMA1_HDR MTK_M4U_DOM_ID(1, 2)
+#define M4U_PORT_L1_OVL_2L_RDMA0_HDR MTK_M4U_DOM_ID(1, 3)
+#define M4U_PORT_L1_OVL_2L_RDMA2_HDR MTK_M4U_DOM_ID(1, 4)
+#define M4U_PORT_L1_OVL_RDMA1 MTK_M4U_DOM_ID(1, 5)
+#define M4U_PORT_L1_OVL_2L_RDMA0 MTK_M4U_DOM_ID(1, 6)
+#define M4U_PORT_L1_OVL_2L_RDMA2 MTK_M4U_DOM_ID(1, 7)
+#define M4U_PORT_L1_OVL_RDMA0_SYSRAM MTK_M4U_DOM_ID(1, 8)
+#define M4U_PORT_L1_OVL_2L_RDMA1_SYSRAM MTK_M4U_DOM_ID(1, 9)
+#define M4U_PORT_L1_OVL_2L_RDMA3_SYSRAM MTK_M4U_DOM_ID(1, 10)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_DOM_ID(1, 11)
+#define M4U_PORT_L1_DISP_RDMA1 MTK_M4U_DOM_ID(1, 12)
+#define M4U_PORT_L1_DISP_UFBC_WDMA1 MTK_M4U_DOM_ID(1, 13)
+#define M4U_PORT_L1_DISP_FAKE1 MTK_M4U_DOM_ID(1, 14)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_DOM_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_DOM_ID(2, 1)
+#define M4U_PORT_L2_MDP_WROT0 MTK_M4U_DOM_ID(2, 2)
+#define M4U_PORT_L2_MDP_WROT2 MTK_M4U_DOM_ID(2, 3)
+#define M4U_PORT_L2_MDP_FILMGRAIN0 MTK_M4U_DOM_ID(2, 4)
+#define M4U_PORT_L2_MDP_FAKE0 MTK_M4U_DOM_ID(2, 5)
+
+/* larb3: null */
+
+/* larb4 */
+#define M4U_PORT_L4_VDEC_MC_EXT_MDP MTK_M4U_DOM_ID(4, 0)
+#define M4U_PORT_L4_VDEC_UFO_EXT_MDP MTK_M4U_DOM_ID(4, 1)
+#define M4U_PORT_L4_VDEC_PP_EXT_MDP MTK_M4U_DOM_ID(4, 2)
+#define M4U_PORT_L4_VDEC_PRED_RD_EXT_MDP MTK_M4U_DOM_ID(4, 3)
+#define M4U_PORT_L4_VDEC_PRED_WR_EXT_MDP MTK_M4U_DOM_ID(4, 4)
+#define M4U_PORT_L4_VDEC_PPWRAP_EXT_MDP MTK_M4U_DOM_ID(4, 5)
+#define M4U_PORT_L4_VDEC_TILE_EXT_MDP MTK_M4U_DOM_ID(4, 6)
+#define M4U_PORT_L4_VDEC_VLD_EXT_MDP MTK_M4U_DOM_ID(4, 7)
+#define M4U_PORT_L4_VDEC_VLD2_EXT_MDP MTK_M4U_DOM_ID(4, 8)
+#define M4U_PORT_L4_VDEC_AVC_MV_EXT_MDP MTK_M4U_DOM_ID(4, 9)
+#define M4U_PORT_L4_VDEC_RG_CTRL_DMA_EXT_MDP MTK_M4U_DOM_ID(4, 10)
+
+/* larb5 */
+#define M4U_PORT_L5_VDEC_LAT0_VLD_EXT_DISP MTK_M4U_DOM_ID(5, 0)
+#define M4U_PORT_L5_VDEC_LAT0_VLD2_EXT_DISP MTK_M4U_DOM_ID(5, 1)
+#define M4U_PORT_L5_VDEC_LAT0_AVC_MV_EXT_DISP MTK_M4U_DOM_ID(5, 2)
+#define M4U_PORT_L5_VDEC_LAT0_PRED_RD_EXT_DISP MTK_M4U_DOM_ID(5, 3)
+#define M4U_PORT_L5_VDEC_LAT0_TILE_EXT_DISP MTK_M4U_DOM_ID(5, 4)
+#define M4U_PORT_L5_VDEC_LAT0_WDMA_EXT_DISP MTK_M4U_DOM_ID(5, 5)
+#define M4U_PORT_L5_VDEC_LAT0_RG_CTRL_DMA_EXT_DISP MTK_M4U_DOM_ID(5, 6)
+#define M4U_PORT_L5_VDEC_UFO_ENC_EXT_DISP MTK_M4U_DOM_ID(5, 7)
+
+/* larb6: null */
+
+/* larb7 */
+#define M4U_PORT_L7_VENC_RCPU_DISP MTK_M4U_DOM_ID(7, 0)
+#define M4U_PORT_L7_VENC_REC_DISP MTK_M4U_DOM_ID(7, 1)
+#define M4U_PORT_L7_VENC_BSDMA_DISP MTK_M4U_DOM_ID(7, 2)
+#define M4U_PORT_L7_VENC_SV_COMV_DISP MTK_M4U_DOM_ID(7, 3)
+#define M4U_PORT_L7_VENC_RD_COMV_DISP MTK_M4U_DOM_ID(7, 4)
+#define M4U_PORT_L7_VENC_NBM_RDMA_DISP MTK_M4U_DOM_ID(7, 5)
+#define M4U_PORT_L7_VENC_NBM_RDMA_LITE_DISP MTK_M4U_DOM_ID(7, 6)
+#define M4U_PORT_L7_JPGENC_Y_RDMA_DISP MTK_M4U_DOM_ID(7, 7)
+#define M4U_PORT_L7_JPGENC_C_RDMA_DISP MTK_M4U_DOM_ID(7, 8)
+#define M4U_PORT_L7_JPGENC_Q_TABLE_DISP MTK_M4U_DOM_ID(7, 9)
+#define M4U_PORT_L7_JPGENC_BSDMA_DISP MTK_M4U_DOM_ID(7, 10)
+#define M4U_PORT_L7_JPGENC_WDMA0_DISP MTK_M4U_DOM_ID(7, 11)
+#define M4U_PORT_L7_JPGENC_BSDMA0_DISP MTK_M4U_DOM_ID(7, 12)
+#define M4U_PORT_L7_VENC_NBM_WDMA_DISP MTK_M4U_DOM_ID(7, 13)
+#define M4U_PORT_L7_VENC_NBM_WDMA_LITE_DISP MTK_M4U_DOM_ID(7, 14)
+#define M4U_PORT_L7_VENC_CUR_LUMA_DISP MTK_M4U_DOM_ID(7, 15)
+#define M4U_PORT_L7_VENC_CUR_CHROMA_DISP MTK_M4U_DOM_ID(7, 16)
+#define M4U_PORT_L7_VENC_REF_LUMA_DISP MTK_M4U_DOM_ID(7, 17)
+#define M4U_PORT_L7_VENC_REF_CHROMA_DISP MTK_M4U_DOM_ID(7, 18)
+#define M4U_PORT_L7_VENC_SUB_R_LUMA_DISP MTK_M4U_DOM_ID(7, 19)
+#define M4U_PORT_L7_VENC_SUB_W_LUMA_DISP MTK_M4U_DOM_ID(7, 20)
+#define M4U_PORT_L7_VENC_FCS_NBM_RDMA_DISP MTK_M4U_DOM_ID(7, 21)
+#define M4U_PORT_L7_VENC_FCS_NBM_WDMA_DISP MTK_M4U_DOM_ID(7, 22)
+#define M4U_PORT_L7_JPGENC_WDMA1_DISP MTK_M4U_DOM_ID(7, 23)
+#define M4U_PORT_L7_JPGENC_BSDMA1_DISP MTK_M4U_DOM_ID(7, 24)
+#define M4U_PORT_L7_JPGENC_HUFF_OFFSET1_DISP MTK_M4U_DOM_ID(7, 25)
+#define M4U_PORT_L7_JPGENC_HUFF_OFFSET0_DISP MTK_M4U_DOM_ID(7, 26)
+
+/* larb8: null */
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_D1_MDP MTK_M4U_DOM_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_D1_MDP MTK_M4U_DOM_ID(9, 1)
+#define M4U_PORT_L9_IMG_DMGI_D1_MDP MTK_M4U_DOM_ID(9, 2)
+#define M4U_PORT_L9_IMG_DEPI_D1_MDP MTK_M4U_DOM_ID(9, 3)
+#define M4U_PORT_L9_IMG_ICE_D1_MDP MTK_M4U_DOM_ID(9, 4)
+#define M4U_PORT_L9_IMG_SMTI_D1_MDP MTK_M4U_DOM_ID(9, 5)
+#define M4U_PORT_L9_IMG_SMTO_D2_MDP MTK_M4U_DOM_ID(9, 6)
+#define M4U_PORT_L9_IMG_SMTO_D1_MDP MTK_M4U_DOM_ID(9, 7)
+#define M4U_PORT_L9_IMG_CRZO_D1_MDP MTK_M4U_DOM_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMG3O_D1_MDP MTK_M4U_DOM_ID(9, 9)
+#define M4U_PORT_L9_IMG_VIPI_D1_MDP MTK_M4U_DOM_ID(9, 10)
+#define M4U_PORT_L9_IMG_SMTI_D5_MDP MTK_M4U_DOM_ID(9, 11)
+#define M4U_PORT_L9_IMG_TIMGO_D1_MDP MTK_M4U_DOM_ID(9, 12)
+#define M4U_PORT_L9_IMG_UFBC_W0_MDP MTK_M4U_DOM_ID(9, 13)
+#define M4U_PORT_L9_IMG_UFBC_R0_MDP MTK_M4U_DOM_ID(9, 14)
+#define M4U_PORT_L9_IMG_WPE_RDMA1_MDP MTK_M4U_DOM_ID(9, 15)
+#define M4U_PORT_L9_IMG_WPE_RDMA0_MDP MTK_M4U_DOM_ID(9, 16)
+#define M4U_PORT_L9_IMG_WPE_WDMA_MDP MTK_M4U_DOM_ID(9, 17)
+#define M4U_PORT_L9_IMG_MFB_RDMA0_MDP MTK_M4U_DOM_ID(9, 18)
+#define M4U_PORT_L9_IMG_MFB_RDMA1_MDP MTK_M4U_DOM_ID(9, 19)
+#define M4U_PORT_L9_IMG_MFB_RDMA2_MDP MTK_M4U_DOM_ID(9, 20)
+#define M4U_PORT_L9_IMG_MFB_RDMA3_MDP MTK_M4U_DOM_ID(9, 21)
+#define M4U_PORT_L9_IMG_MFB_RDMA4_MDP MTK_M4U_DOM_ID(9, 22)
+#define M4U_PORT_L9_IMG_MFB_RDMA5_MDP MTK_M4U_DOM_ID(9, 23)
+#define M4U_PORT_L9_IMG_MFB_WDMA0_MDP MTK_M4U_DOM_ID(9, 24)
+#define M4U_PORT_L9_IMG_MFB_WDMA1_MDP MTK_M4U_DOM_ID(9, 25)
+#define M4U_PORT_L9_IMG_RESERVE6_MDP MTK_M4U_DOM_ID(9, 26)
+#define M4U_PORT_L9_IMG_RESERVE7_MDP MTK_M4U_DOM_ID(9, 27)
+#define M4U_PORT_L9_IMG_RESERVE8_MDP MTK_M4U_DOM_ID(9, 28)
+
+/* larb10: null */
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_IMGI_D1_DISP MTK_M4U_DOM_ID(11, 0)
+#define M4U_PORT_L11_IMG_IMGBI_D1_DISP MTK_M4U_DOM_ID(11, 1)
+#define M4U_PORT_L11_IMG_DMGI_D1_DISP MTK_M4U_DOM_ID(11, 2)
+#define M4U_PORT_L11_IMG_DEPI_D1_DISP MTK_M4U_DOM_ID(11, 3)
+#define M4U_PORT_L11_IMG_ICE_D1_DISP MTK_M4U_DOM_ID(11, 4)
+#define M4U_PORT_L11_IMG_SMTI_D1_DISP MTK_M4U_DOM_ID(11, 5)
+#define M4U_PORT_L11_IMG_SMTO_D2_DISP MTK_M4U_DOM_ID(11, 6)
+#define M4U_PORT_L11_IMG_SMTO_D1_DISP MTK_M4U_DOM_ID(11, 7)
+#define M4U_PORT_L11_IMG_CRZO_D1_DISP MTK_M4U_DOM_ID(11, 8)
+#define M4U_PORT_L11_IMG_IMG3O_D1_DISP MTK_M4U_DOM_ID(11, 9)
+#define M4U_PORT_L11_IMG_VIPI_D1_DISP MTK_M4U_DOM_ID(11, 10)
+#define M4U_PORT_L11_IMG_SMTI_D5_DISP MTK_M4U_DOM_ID(11, 11)
+#define M4U_PORT_L11_IMG_TIMGO_D1_DISP MTK_M4U_DOM_ID(11, 12)
+#define M4U_PORT_L11_IMG_UFBC_W0_DISP MTK_M4U_DOM_ID(11, 13)
+#define M4U_PORT_L11_IMG_UFBC_R0_DISP MTK_M4U_DOM_ID(11, 14)
+#define M4U_PORT_L11_IMG_WPE_RDMA1_DISP MTK_M4U_DOM_ID(11, 15)
+#define M4U_PORT_L11_IMG_WPE_RDMA0_DISP MTK_M4U_DOM_ID(11, 16)
+#define M4U_PORT_L11_IMG_WPE_WDMA_DISP MTK_M4U_DOM_ID(11, 17)
+#define M4U_PORT_L11_IMG_MFB_RDMA0_DISP MTK_M4U_DOM_ID(11, 18)
+#define M4U_PORT_L11_IMG_MFB_RDMA1_DISP MTK_M4U_DOM_ID(11, 19)
+#define M4U_PORT_L11_IMG_MFB_RDMA2_DISP MTK_M4U_DOM_ID(11, 20)
+#define M4U_PORT_L11_IMG_MFB_RDMA3_DISP MTK_M4U_DOM_ID(11, 21)
+#define M4U_PORT_L11_IMG_MFB_RDMA4_DISP MTK_M4U_DOM_ID(11, 22)
+#define M4U_PORT_L11_IMG_MFB_RDMA5_DISP MTK_M4U_DOM_ID(11, 23)
+#define M4U_PORT_L11_IMG_MFB_WDMA0_DISP MTK_M4U_DOM_ID(11, 24)
+#define M4U_PORT_L11_IMG_MFB_WDMA1_DISP MTK_M4U_DOM_ID(11, 25)
+#define M4U_PORT_L11_IMG_RESERVE6_DISP MTK_M4U_DOM_ID(11, 26)
+#define M4U_PORT_L11_IMG_RESERVE7_DISP MTK_M4U_DOM_ID(11, 27)
+#define M4U_PORT_L11_IMG_RESERVE8_DISP MTK_M4U_DOM_ID(11, 28)
+
+/* larb12: null */
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_MRAWI_MDP MTK_M4U_DOM_ID(13, 0)
+#define M4U_PORT_L13_CAM_MRAWO0_MDP MTK_M4U_DOM_ID(13, 1)
+#define M4U_PORT_L13_CAM_MRAWO1_MDP MTK_M4U_DOM_ID(13, 2)
+#define M4U_PORT_L13_CAM_CAMSV1_MDP MTK_M4U_DOM_ID(13, 3)
+#define M4U_PORT_L13_CAM_CAMSV2_MDP MTK_M4U_DOM_ID(13, 4)
+#define M4U_PORT_L13_CAM_CAMSV3_MDP MTK_M4U_DOM_ID(13, 5)
+#define M4U_PORT_L13_CAM_CAMSV4_MDP MTK_M4U_DOM_ID(13, 6)
+#define M4U_PORT_L13_CAM_CAMSV5_MDP MTK_M4U_DOM_ID(13, 7)
+#define M4U_PORT_L13_CAM_CAMSV6_MDP MTK_M4U_DOM_ID(13, 8)
+#define M4U_PORT_L13_CAM_CCUI_MDP MTK_M4U_DOM_ID(13, 9)
+#define M4U_PORT_L13_CAM_CCUO_MDP MTK_M4U_DOM_ID(13, 10)
+#define M4U_PORT_L13_CAM_FAKE_MDP MTK_M4U_DOM_ID(13, 11)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_MRAWI_DISP MTK_M4U_DOM_ID(14, 0)
+#define M4U_PORT_L14_CAM_MRAWO0_DISP MTK_M4U_DOM_ID(14, 1)
+#define M4U_PORT_L14_CAM_MRAWO1_DISP MTK_M4U_DOM_ID(14, 2)
+#define M4U_PORT_L14_CAM_CAMSV0_DISP MTK_M4U_DOM_ID(14, 3)
+#define M4U_PORT_L14_CAM_CCUI_DISP MTK_M4U_DOM_ID(14, 4)
+#define M4U_PORT_L14_CAM_CCUO_DISP MTK_M4U_DOM_ID(14, 5)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1_A_MDP MTK_M4U_DOM_ID(16, 0)
+#define M4U_PORT_L16_CAM_RRZO_R1_A_MDP MTK_M4U_DOM_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R1_A_MDP MTK_M4U_DOM_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1_A_MDP MTK_M4U_DOM_ID(16, 3)
+#define M4U_PORT_L16_CAM_YUVO_R1_A_MDP MTK_M4U_DOM_ID(16, 4)
+#define M4U_PORT_L16_CAM_UFDI_R2_A_MDP MTK_M4U_DOM_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R2_A_MDP MTK_M4U_DOM_ID(16, 6)
+#define M4U_PORT_L16_CAM_RAWI_R3_A_MDP MTK_M4U_DOM_ID(16, 7)
+#define M4U_PORT_L16_CAM_AAO_R1_A_MDP MTK_M4U_DOM_ID(16, 8)
+#define M4U_PORT_L16_CAM_AFO_R1_A_MDP MTK_M4U_DOM_ID(16, 9)
+#define M4U_PORT_L16_CAM_FLKO_R1_A_MDP MTK_M4U_DOM_ID(16, 10)
+#define M4U_PORT_L16_CAM_LCESO_R1_A_MDP MTK_M4U_DOM_ID(16, 11)
+#define M4U_PORT_L16_CAM_CRZO_R1_A_MDP MTK_M4U_DOM_ID(16, 12)
+#define M4U_PORT_L16_CAM_LTMSO_R1_A_MDP MTK_M4U_DOM_ID(16, 13)
+#define M4U_PORT_L16_CAM_RSSO_R1_A_MDP MTK_M4U_DOM_ID(16, 14)
+#define M4U_PORT_L16_CAM_AAHO_R1_A_MDP MTK_M4U_DOM_ID(16, 15)
+#define M4U_PORT_L16_CAM_LSCI_R1_A_MDP MTK_M4U_DOM_ID(16, 16)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_IMGO_R1_B_DISP MTK_M4U_DOM_ID(17, 0)
+#define M4U_PORT_L17_CAM_RRZO_R1_B_DISP MTK_M4U_DOM_ID(17, 1)
+#define M4U_PORT_L17_CAM_CQI_R1_B_DISP MTK_M4U_DOM_ID(17, 2)
+#define M4U_PORT_L17_CAM_BPCI_R1_B_DISP MTK_M4U_DOM_ID(17, 3)
+#define M4U_PORT_L17_CAM_YUVO_R1_B_DISP MTK_M4U_DOM_ID(17, 4)
+#define M4U_PORT_L17_CAM_UFDI_R2_B_DISP MTK_M4U_DOM_ID(17, 5)
+#define M4U_PORT_L17_CAM_RAWI_R2_B_DISP MTK_M4U_DOM_ID(17, 6)
+#define M4U_PORT_L17_CAM_RAWI_R3_B_DISP MTK_M4U_DOM_ID(17, 7)
+#define M4U_PORT_L17_CAM_AAO_R1_B_DISP MTK_M4U_DOM_ID(17, 8)
+#define M4U_PORT_L17_CAM_AFO_R1_B_DISP MTK_M4U_DOM_ID(17, 9)
+#define M4U_PORT_L17_CAM_FLKO_R1_B_DISP MTK_M4U_DOM_ID(17, 10)
+#define M4U_PORT_L17_CAM_LCESO_R1_B_DISP MTK_M4U_DOM_ID(17, 11)
+#define M4U_PORT_L17_CAM_CRZO_R1_B_DISP MTK_M4U_DOM_ID(17, 12)
+#define M4U_PORT_L17_CAM_LTMSO_R1_B_DISP MTK_M4U_DOM_ID(17, 13)
+#define M4U_PORT_L17_CAM_RSSO_R1_B_DISP MTK_M4U_DOM_ID(17, 14)
+#define M4U_PORT_L17_CAM_AAHO_R1_B_DISP MTK_M4U_DOM_ID(17, 15)
+#define M4U_PORT_L17_CAM_LSCI_R1_B_DISP MTK_M4U_DOM_ID(17, 16)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_IMGO_R1_C_MDP MTK_M4U_DOM_ID(18, 0)
+#define M4U_PORT_L18_CAM_RRZO_R1_C_MDP MTK_M4U_DOM_ID(18, 1)
+#define M4U_PORT_L18_CAM_CQI_R1_C_MDP MTK_M4U_DOM_ID(18, 2)
+#define M4U_PORT_L18_CAM_BPCI_R1_C_MDP MTK_M4U_DOM_ID(18, 3)
+#define M4U_PORT_L18_CAM_YUVO_R1_C_MDP MTK_M4U_DOM_ID(18, 4)
+#define M4U_PORT_L18_CAM_UFDI_R2_C_MDP MTK_M4U_DOM_ID(18, 5)
+#define M4U_PORT_L18_CAM_RAWI_R2_C_MDP MTK_M4U_DOM_ID(18, 6)
+#define M4U_PORT_L18_CAM_RAWI_R3_C_MDP MTK_M4U_DOM_ID(18, 7)
+#define M4U_PORT_L18_CAM_AAO_R1_C_MDP MTK_M4U_DOM_ID(18, 8)
+#define M4U_PORT_L18_CAM_AFO_R1_C_MDP MTK_M4U_DOM_ID(18, 9)
+#define M4U_PORT_L18_CAM_FLKO_R1_C_MDP MTK_M4U_DOM_ID(18, 10)
+#define M4U_PORT_L18_CAM_LCESO_R1_C_MDP MTK_M4U_DOM_ID(18, 11)
+#define M4U_PORT_L18_CAM_CRZO_R1_C_MDP MTK_M4U_DOM_ID(18, 12)
+#define M4U_PORT_L18_CAM_LTMSO_R1_C_MDP MTK_M4U_DOM_ID(18, 13)
+#define M4U_PORT_L18_CAM_RSSO_R1_C_MDP MTK_M4U_DOM_ID(18, 14)
+#define M4U_PORT_L18_CAM_AAHO_R1_C_MDP MTK_M4U_DOM_ID(18, 15)
+#define M4U_PORT_L18_CAM_LSCI_R1_C_MDP MTK_M4U_DOM_ID(18, 16)
+
+/* larb19 */
+#define M4U_PORT_L19_IPE_DVS_RDMA_DISP MTK_M4U_DOM_ID(19, 0)
+#define M4U_PORT_L19_IPE_DVS_WDMA_DISP MTK_M4U_DOM_ID(19, 1)
+#define M4U_PORT_L19_IPE_DVP_RDMA_DISP MTK_M4U_DOM_ID(19, 2)
+#define M4U_PORT_L19_IPE_DVP_WDMA_DISP MTK_M4U_DOM_ID(19, 3)
+
+/* larb20 */
+#define M4U_PORT_L20_IPE_FDVT_RDA_DISP MTK_M4U_DOM_ID(20, 0)
+#define M4U_PORT_L20_IPE_FDVT_RDB_DISP MTK_M4U_DOM_ID(20, 1)
+#define M4U_PORT_L20_IPE_FDVT_WRA_DISP MTK_M4U_DOM_ID(20, 2)
+#define M4U_PORT_L20_IPE_FDVT_WRB_DISP MTK_M4U_DOM_ID(20, 3)
+#define M4U_PORT_L20_IPE_RSC_RDMA0_DISP MTK_M4U_DOM_ID(20, 4)
+#define M4U_PORT_L20_IPE_RSC_WDMA_DISP MTK_M4U_DOM_ID(20, 5)
+
+#endif
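
Each port macro encodes a (larb, port) pair via MTK_M4U_DOM_ID, and the dma-address regions in the header comment above determine which IOVA range a master is allotted. A sketch of a display master attaching to the MM IOMMU (the &iommu_mm phandle and the node are illustrative):

  #include <dt-bindings/memory/mediatek,mt6893-memory-port.h>

  display@14005000 {
          iommus = <&iommu_mm M4U_PORT_L0_OVL_RDMA0>,
                   <&iommu_mm M4U_PORT_L0_OVL_RDMA0_HDR>;
  };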
diff --git a/include/dt-bindings/memory/mediatek,mt8189-memory-port.h b/include/dt-bindings/memory/mediatek,mt8189-memory-port.h
new file mode 100644
index 000000000000..849fead3d0f7
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8189-memory-port.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Author: Zhengnan chen <zhengnan.chen@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8189_MEMORY_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8189_MEMORY_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define SMI_L0_ID (0)
+#define SMI_L1_ID (1)
+#define SMI_L2_ID (2)
+#define SMI_L4_ID (3)
+#define SMI_L7_ID (4)
+#define SMI_L9_ID (5)
+#define SMI_L11_ID (6)
+#define SMI_L13_ID (7)
+#define SMI_L14_ID (8)
+#define SMI_L16_ID (9)
+#define SMI_L17_ID (10)
+#define SMI_L19_ID (11)
+#define SMI_L20_ID (12)
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, separated into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Masters may be
+ * placed in any of these regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp/mdp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * imgsys/cam/ipesys 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ */
+
+/* Larb0 -- disp */
+#define M4U_L0_P0_DISP_OVL0_4L_HDR MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_L0_P1_DISP_OVL0_4L_RDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_L0_P2_DISP_OVL1_4L_RDMA1 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_L0_P3_DISP_OVL0_4L_RDMA2 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_L0_P4_DISP_OVL1_4L_RDMA3 MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_L0_P5_DISP_RDMA0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_L0_P6_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 6)
+#define M4U_L0_P7_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 7)
+
+/* Larb1 -- disp */
+#define M4U_L1_P0_DISP_OVL1_4L_HDR MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_L1_P1_DISP_OVL1_4L_RDMA0 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_L1_P2_DISP_OVL0_4L_RDMA1 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_L1_P3_DISP_OVL1_4L_RDMA2 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_L1_P4_DISP_OVL0_4L_RDMA3 MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_L1_P5_DISP_RDMA1 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_L1_P6_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 6)
+#define M4U_L1_P7_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 7)
+
+/* Larb2 -- mmlsys(mdp) */
+#define M4U_L2_P0_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_L2_P1_MDP_RDMA1 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_L2_P2_MDP_WROT0 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_L2_P3_MDP_WROT1 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_L2_P4_MDP_DUMMY0 MTK_M4U_ID(SMI_L2_ID, 4)
+#define M4U_L2_P5_MDP_DUMMY1 MTK_M4U_ID(SMI_L2_ID, 5)
+#define M4U_L2_P6_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 6)
+#define M4U_L2_P7_MDP_RDMA3 MTK_M4U_ID(SMI_L2_ID, 7)
+#define M4U_L2_P8_MDP_WROT2 MTK_M4U_ID(SMI_L2_ID, 8)
+#define M4U_L2_P9_MDP_WROT3 MTK_M4U_ID(SMI_L2_ID, 9)
+#define M4U_L2_P10_DISP_FAKE0 MTK_M4U_ID(SMI_L2_ID, 10)
+
+/* Larb3: null */
+
+/* Larb4 -- vdec */
+#define M4U_L4_P0_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_L4_P1_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_L4_P2_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_L4_P3_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_L4_P4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_L4_P5_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_L4_P6_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L4_ID, 6)
+#define M4U_L4_P7_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L4_ID, 7)
+#define M4U_L4_P8_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L4_ID, 8)
+#define M4U_L4_P9_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L4_ID, 9)
+#define M4U_L4_P10_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(SMI_L4_ID, 10)
+#define M4U_L4_P11_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L4_ID, 11)
+
+/* Larb5: null */
+
+/* Larb6: null */
+
+/* Larb7 -- venc */
+#define M4U_L7_P0_VENC_RCPU MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_L7_P1_VENC_REC MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_L7_P2_VENC_BSDMA MTK_M4U_ID(SMI_L7_ID, 2)
+#define M4U_L7_P3_VENC_SV_COMV MTK_M4U_ID(SMI_L7_ID, 3)
+#define M4U_L7_P4_VENC_RD_COMV MTK_M4U_ID(SMI_L7_ID, 4)
+#define M4U_L7_P5_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L7_ID, 5)
+#define M4U_L7_P6_JPGENC_C_RDMA MTK_M4U_ID(SMI_L7_ID, 6)
+#define M4U_L7_P7_JPGENC_Q_RDMA MTK_M4U_ID(SMI_L7_ID, 7)
+#define M4U_L7_P8_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L7_ID, 8)
+#define M4U_L7_P9_JPGENC_BSDMA MTK_M4U_ID(SMI_L7_ID, 9)
+#define M4U_L7_P10_VENC_CUR_LUMA MTK_M4U_ID(SMI_L7_ID, 10)
+#define M4U_L7_P11_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L7_ID, 11)
+#define M4U_L7_P12_VENC_REF_LUMA MTK_M4U_ID(SMI_L7_ID, 12)
+#define M4U_L7_P13_VENC_REF_CHROMA MTK_M4U_ID(SMI_L7_ID, 13)
+#define M4U_L7_P14_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L7_ID, 14)
+#define M4U_L7_P15_JPGDEC_WDMA MTK_M4U_ID(SMI_L7_ID, 15)
+#define M4U_L7_P16_JPGDEC_BSDMA MTK_M4U_ID(SMI_L7_ID, 16)
+#define M4U_L7_P17_JPGDEC_HUFF_OFFSET MTK_M4U_ID(SMI_L7_ID, 17)
+
+/* Larb8: null */
+
+/* Larb9 -- imgsys */
+#define M4U_L9_P0_IMGI_D1 MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_L9_P1_IMGBI_D1 MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_L9_P2_DMGI_D1 MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_L9_P3_DEPI_D1 MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_L9_P4_LCE_D1 MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_L9_P5_SMTI_D1 MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_L9_P6_SMTO_D2 MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_L9_P7_SMTO_D1 MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_L9_P8_CRZO_D1 MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_L9_P9_IMG3O_D1 MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_L9_P10_VIPI_D1 MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_L9_P11_SMTI_D5 MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_L9_P12_TIMGO_D1 MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_L9_P13_UFBC_W0 MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_L9_P14_UFBC_R0 MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_L9_P15_WPE_RDMA1 MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_L9_P16_WPE_RDMA0 MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_L9_P17_WPE_WDMA MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_L9_P18_MFB_RDMA0 MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_L9_P19_MFB_RDMA1 MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_L9_P20_MFB_RDMA2 MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_L9_P21_MFB_RDMA3 MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_L9_P22_MFB_RDMA4 MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_L9_P23_MFB_RDMA5 MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_L9_P24_MFB_WDMA0 MTK_M4U_ID(SMI_L9_ID, 24)
+#define M4U_L9_P25_MFB_WDMA1 MTK_M4U_ID(SMI_L9_ID, 25)
+#define M4U_L9_P26_RESERVE6 MTK_M4U_ID(SMI_L9_ID, 26)
+#define M4U_L9_P27_RESERVE7 MTK_M4U_ID(SMI_L9_ID, 27)
+#define M4U_L9_P28_RESERVE8 MTK_M4U_ID(SMI_L9_ID, 28)
+
+/* Larb10: null */
+
+/* Larb11 -- imgsys */
+#define M4U_L11_P0_IMGI_D1 MTK_M4U_ID(SMI_L11_ID, 0)
+#define M4U_L11_P1_IMGBI_D1 MTK_M4U_ID(SMI_L11_ID, 1)
+#define M4U_L11_P2_DMGI_D1 MTK_M4U_ID(SMI_L11_ID, 2)
+#define M4U_L11_P3_DEPI_D1 MTK_M4U_ID(SMI_L11_ID, 3)
+#define M4U_L11_P4_LCE_D1 MTK_M4U_ID(SMI_L11_ID, 4)
+#define M4U_L11_P5_SMTI_D1 MTK_M4U_ID(SMI_L11_ID, 5)
+#define M4U_L11_P6_SMTO_D2 MTK_M4U_ID(SMI_L11_ID, 6)
+#define M4U_L11_P7_SMTO_D1 MTK_M4U_ID(SMI_L11_ID, 7)
+#define M4U_L11_P8_CRZO_D1 MTK_M4U_ID(SMI_L11_ID, 8)
+#define M4U_L11_P9_IMG3O_D1 MTK_M4U_ID(SMI_L11_ID, 9)
+#define M4U_L11_P10_VIPI_D1 MTK_M4U_ID(SMI_L11_ID, 10)
+#define M4U_L11_P11_SMTI_D5 MTK_M4U_ID(SMI_L11_ID, 11)
+#define M4U_L11_P12_TIMGO_D1 MTK_M4U_ID(SMI_L11_ID, 12)
+#define M4U_L11_P13_UFBC_W0 MTK_M4U_ID(SMI_L11_ID, 13)
+#define M4U_L11_P14_UFBC_R0 MTK_M4U_ID(SMI_L11_ID, 14)
+#define M4U_L11_P15_WPE_RDMA1 MTK_M4U_ID(SMI_L11_ID, 15)
+#define M4U_L11_P16_WPE_RDMA0 MTK_M4U_ID(SMI_L11_ID, 16)
+#define M4U_L11_P17_WPE_WDMA MTK_M4U_ID(SMI_L11_ID, 17)
+#define M4U_L11_P18_MFB_RDMA0 MTK_M4U_ID(SMI_L11_ID, 18)
+#define M4U_L11_P19_MFB_RDMA1 MTK_M4U_ID(SMI_L11_ID, 19)
+#define M4U_L11_P20_MFB_RDMA2 MTK_M4U_ID(SMI_L11_ID, 20)
+#define M4U_L11_P21_MFB_RDMA3 MTK_M4U_ID(SMI_L11_ID, 21)
+#define M4U_L11_P22_MFB_RDMA4 MTK_M4U_ID(SMI_L11_ID, 22)
+#define M4U_L11_P23_MFB_RDMA5 MTK_M4U_ID(SMI_L11_ID, 23)
+#define M4U_L11_P24_MFB_WDMA0 MTK_M4U_ID(SMI_L11_ID, 24)
+#define M4U_L11_P25_MFB_WDMA1 MTK_M4U_ID(SMI_L11_ID, 25)
+#define M4U_L11_P26_RESERVE6 MTK_M4U_ID(SMI_L11_ID, 26)
+#define M4U_L11_P27_RESERVE7 MTK_M4U_ID(SMI_L11_ID, 27)
+#define M4U_L11_P28_RESERVE8 MTK_M4U_ID(SMI_L11_ID, 28)
+
+/* Larb12: null */
+
+/* Larb13 -- cam */
+#define M4U_L13_P0_MRAWI MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_L13_P1_MRAWO_0 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_L13_P2_MRAWO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_L13_P3_CAMSV_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_L13_P4_CAMSV_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_L13_P5_CAMSV_3 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_L13_P6_CAMSV_4 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_L13_P7_CAMSV_5 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_L13_P8_CAMSV_6 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_L13_P9_CCUI MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_L13_P10_CCUO MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_L13_P11_FAKE MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_L13_P12_PDAI_0 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_L13_P13_PDAI_1 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_L13_P14_PDAO MTK_M4U_ID(SMI_L13_ID, 14)
+
+/* Larb14 -- cam */
+#define M4U_L14_P0_RESERVE MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_L14_P1_RESERVE MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_L14_P2_RESERVE MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_L14_P3_CAMSV_0 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_L14_P4_CCUI MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_L14_P5_CCUO MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_L14_P6_CAMSV_7 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_L14_P7_CAMSV_8 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_L14_P8_CAMSV_9 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_L14_P9_CAMSV_10 MTK_M4U_ID(SMI_L14_ID, 9)
+
+/* Larb15: null */
+
+/* Larb16 -- cam */
+#define M4U_L16_P0_IMGO_R1_A MTK_M4U_ID(SMI_L16_ID, 0)
+#define M4U_L16_P1_RRZO_R1_A MTK_M4U_ID(SMI_L16_ID, 1)
+#define M4U_L16_P2_CQI_R1_A MTK_M4U_ID(SMI_L16_ID, 2)
+#define M4U_L16_P3_BPCI_R1_A MTK_M4U_ID(SMI_L16_ID, 3)
+#define M4U_L16_P4_YUVO_R1_A MTK_M4U_ID(SMI_L16_ID, 4)
+#define M4U_L16_P5_UFDI_R2_A MTK_M4U_ID(SMI_L16_ID, 5)
+#define M4U_L16_P6_RAWI_R2_A MTK_M4U_ID(SMI_L16_ID, 6)
+#define M4U_L16_P7_RAWI_R3_A MTK_M4U_ID(SMI_L16_ID, 7)
+#define M4U_L16_P8_AAO_R1_A MTK_M4U_ID(SMI_L16_ID, 8)
+#define M4U_L16_P9_AFO_R1_A MTK_M4U_ID(SMI_L16_ID, 9)
+#define M4U_L16_P10_FLKO_R1_A MTK_M4U_ID(SMI_L16_ID, 10)
+#define M4U_L16_P11_LCESO_R1_A MTK_M4U_ID(SMI_L16_ID, 11)
+#define M4U_L16_P12_CRZO_R1_A MTK_M4U_ID(SMI_L16_ID, 12)
+#define M4U_L16_P13_LTMSO_R1_A MTK_M4U_ID(SMI_L16_ID, 13)
+#define M4U_L16_P14_RSSO_R1_A MTK_M4U_ID(SMI_L16_ID, 14)
+#define M4U_L16_P15_AAHO_R1_A MTK_M4U_ID(SMI_L16_ID, 15)
+#define M4U_L16_P16_LSCI_R1_A MTK_M4U_ID(SMI_L16_ID, 16)
+
+/* Larb17 -- cam */
+#define M4U_L17_P0_IMGO_R1_B MTK_M4U_ID(SMI_L17_ID, 0)
+#define M4U_L17_P1_RRZO_R1_B MTK_M4U_ID(SMI_L17_ID, 1)
+#define M4U_L17_P2_CQI_R1_B MTK_M4U_ID(SMI_L17_ID, 2)
+#define M4U_L17_P3_BPCI_R1_B MTK_M4U_ID(SMI_L17_ID, 3)
+#define M4U_L17_P4_YUVO_R1_B MTK_M4U_ID(SMI_L17_ID, 4)
+#define M4U_L17_P5_UFDI_R2_B MTK_M4U_ID(SMI_L17_ID, 5)
+#define M4U_L17_P6_RAWI_R2_B MTK_M4U_ID(SMI_L17_ID, 6)
+#define M4U_L17_P7_RAWI_R3_B MTK_M4U_ID(SMI_L17_ID, 7)
+#define M4U_L17_P8_AAO_R1_B MTK_M4U_ID(SMI_L17_ID, 8)
+#define M4U_L17_P9_AFO_R1_B MTK_M4U_ID(SMI_L17_ID, 9)
+#define M4U_L17_P10_FLKO_R1_B MTK_M4U_ID(SMI_L17_ID, 10)
+#define M4U_L17_P11_LCESO_R1_B MTK_M4U_ID(SMI_L17_ID, 11)
+#define M4U_L17_P12_CRZO_R1_B MTK_M4U_ID(SMI_L17_ID, 12)
+#define M4U_L17_P13_LTMSO_R1_B MTK_M4U_ID(SMI_L17_ID, 13)
+#define M4U_L17_P14_RSSO_R1_B MTK_M4U_ID(SMI_L17_ID, 14)
+#define M4U_L17_P15_AAHO_R1_B MTK_M4U_ID(SMI_L17_ID, 15)
+#define M4U_L17_P16_LSCI_R1_B MTK_M4U_ID(SMI_L17_ID, 16)
+
+/* Larb19 -- ipesys */
+#define M4U_L19_P0_DVS_RDMA MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_L19_P1_DVS_WDMA MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_L19_P2_DVP_RDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_L19_P3_DVP_WDMA MTK_M4U_ID(SMI_L19_ID, 3)
+
+/* Larb20 -- ipesys */
+#define M4U_L20_P0_FDVT_RDA_0 MTK_M4U_ID(SMI_L20_ID, 0)
+#define M4U_L20_P1_FDVT_RDB_0 MTK_M4U_ID(SMI_L20_ID, 1)
+#define M4U_L20_P2_FDVT_WRA_0 MTK_M4U_ID(SMI_L20_ID, 2)
+#define M4U_L20_P3_FDVT_WRB_0 MTK_M4U_ID(SMI_L20_ID, 3)
+#define M4U_L20_P4_RSC_RDMA MTK_M4U_ID(SMI_L20_ID, 4)
+#define M4U_L20_P5_RSC_WDMA MTK_M4U_ID(SMI_L20_ID, 5)
+
+/* fake larb21 for gce */
+#define M4U_L21_GCE_DM MTK_M4U_ID(21, 0)
+#define M4U_L21_GCE_MM MTK_M4U_ID(21, 1)
+
+/* fake larb & ports for SVP, dual SVP and WFD */
+#define M4U_PORT_SVP_HEAP MTK_M4U_ID(22, 0)
+#define M4U_PORT_DUAL_SVP_HEAP MTK_M4U_ID(22, 1)
+#define M4U_PORT_WFD_HEAP MTK_M4U_ID(22, 2)
+
+/* fake larb0 for apu */
+#define M4U_L0_APU_DATA MTK_M4U_ID(0, 0)
+#define M4U_L0_APU_CODE MTK_M4U_ID(0, 1)
+#define M4U_L0_APU_SECURE MTK_M4U_ID(0, 2)
+#define M4U_L0_APU_VLM MTK_M4U_ID(0, 3)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0, 26)
+
+#endif
diff --git a/include/dt-bindings/memory/nvidia,tegra264.h b/include/dt-bindings/memory/nvidia,tegra264.h
new file mode 100644
index 000000000000..521405c01f84
--- /dev/null
+++ b/include/dt-bindings/memory/nvidia,tegra264.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H
+
+#define TEGRA264_SID(x) ((x) << 8)
+
+/*
+ * SMMU stream IDs
+ */
+
+#define TEGRA264_SID_AON TEGRA264_SID(0x01)
+#define TEGRA264_SID_APE TEGRA264_SID(0x02)
+#define TEGRA264_SID_ETR TEGRA264_SID(0x03)
+#define TEGRA264_SID_BPMP TEGRA264_SID(0x04)
+#define TEGRA264_SID_DCE TEGRA264_SID(0x05)
+#define TEGRA264_SID_EQOS TEGRA264_SID(0x06)
+#define TEGRA264_SID_GPCDMA TEGRA264_SID(0x08)
+#define TEGRA264_SID_DISP TEGRA264_SID(0x09)
+#define TEGRA264_SID_HDA TEGRA264_SID(0x0a)
+#define TEGRA264_SID_HOST1X TEGRA264_SID(0x0b)
+#define TEGRA264_SID_ISP0 TEGRA264_SID(0x0c)
+#define TEGRA264_SID_ISP1 TEGRA264_SID(0x0d)
+#define TEGRA264_SID_PMA0 TEGRA264_SID(0x0e)
+#define TEGRA264_SID_FSI0 TEGRA264_SID(0x0f)
+#define TEGRA264_SID_FSI1 TEGRA264_SID(0x10)
+#define TEGRA264_SID_PVA TEGRA264_SID(0x11)
+#define TEGRA264_SID_SDMMC0 TEGRA264_SID(0x12)
+#define TEGRA264_SID_MGBE0 TEGRA264_SID(0x13)
+#define TEGRA264_SID_MGBE1 TEGRA264_SID(0x14)
+#define TEGRA264_SID_MGBE2 TEGRA264_SID(0x15)
+#define TEGRA264_SID_MGBE3 TEGRA264_SID(0x16)
+#define TEGRA264_SID_MSSSEQ TEGRA264_SID(0x17)
+#define TEGRA264_SID_SE TEGRA264_SID(0x18)
+#define TEGRA264_SID_SEU1 TEGRA264_SID(0x19)
+#define TEGRA264_SID_SEU2 TEGRA264_SID(0x1a)
+#define TEGRA264_SID_SEU3 TEGRA264_SID(0x1b)
+#define TEGRA264_SID_PSC TEGRA264_SID(0x1c)
+#define TEGRA264_SID_OESP TEGRA264_SID(0x23)
+#define TEGRA264_SID_SB TEGRA264_SID(0x24)
+#define TEGRA264_SID_XSPI0 TEGRA264_SID(0x25)
+#define TEGRA264_SID_TSEC TEGRA264_SID(0x29)
+#define TEGRA264_SID_UFS TEGRA264_SID(0x2a)
+#define TEGRA264_SID_RCE TEGRA264_SID(0x2b)
+#define TEGRA264_SID_RCE1 TEGRA264_SID(0x2c)
+#define TEGRA264_SID_VI TEGRA264_SID(0x2e)
+#define TEGRA264_SID_VI1 TEGRA264_SID(0x2f)
+#define TEGRA264_SID_VIC TEGRA264_SID(0x30)
+#define TEGRA264_SID_XUSB_DEV TEGRA264_SID(0x32)
+#define TEGRA264_SID_XUSB_DEV1 TEGRA264_SID(0x33)
+#define TEGRA264_SID_XUSB_DEV2 TEGRA264_SID(0x34)
+#define TEGRA264_SID_XUSB_DEV3 TEGRA264_SID(0x35)
+#define TEGRA264_SID_XUSB_DEV4 TEGRA264_SID(0x36)
+#define TEGRA264_SID_XUSB_DEV5 TEGRA264_SID(0x37)
+
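Since TEGRA264_SID(x) simply shifts the hardware ID into bits [15:8], stream IDs and raw IDs convert both ways with a shift; a quick compile-time check of the encoding:

	#define TEGRA264_SID(x) ((x) << 8)	/* as defined above */
	_Static_assert(TEGRA264_SID(0x0a) == 0x0a00, "TEGRA264_SID_HDA");
	_Static_assert((TEGRA264_SID(0x2a) >> 8) == 0x2a, "UFS round-trip");
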
+/*
+ * memory client IDs
+ */
+
+/* HOST1X read client */
+#define TEGRA264_MEMORY_CLIENT_HOST1XR 0x16
+/* VIC read client */
+#define TEGRA264_MEMORY_CLIENT_VICR 0x6c
+/* VIC write client */
+#define TEGRA264_MEMORY_CLIENT_VICW 0x6d
+/* VI R5 Write client */
+#define TEGRA264_MEMORY_CLIENT_VIW 0x72
+#define TEGRA264_MEMORY_CLIENT_NVDECSRD2MC 0x78
+#define TEGRA264_MEMORY_CLIENT_NVDECSWR2MC 0x79
+/* Audio processor (APE) Read client */
+#define TEGRA264_MEMORY_CLIENT_APER 0x7a
+/* Audio processor (APE) Write client */
+#define TEGRA264_MEMORY_CLIENT_APEW 0x7b
+/* Audio DMA Read client */
+#define TEGRA264_MEMORY_CLIENT_APEDMAR 0x9f
+/* Audio DMA Write client */
+#define TEGRA264_MEMORY_CLIENT_APEDMAW 0xa0
+#define TEGRA264_MEMORY_CLIENT_GPUR02MC 0xb6
+#define TEGRA264_MEMORY_CLIENT_GPUW02MC 0xb7
+/* VI Falcon Read client */
+#define TEGRA264_MEMORY_CLIENT_VIFALCONR 0xbc
+/* VI Falcon Write client */
+#define TEGRA264_MEMORY_CLIENT_VIFALCONW 0xbd
+/* RCE Read client */
+#define TEGRA264_MEMORY_CLIENT_RCER 0xd2
+/* RCE Write client */
+#define TEGRA264_MEMORY_CLIENT_RCEW 0xd3
+/* PCIE0/MSI Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1/RPX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1/RPX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2/DMX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2/DMX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3/RPX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3/RPX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4/DMX8 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4/DMX8 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5/DMX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5/DMX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE5W 0xe3
+/* UFS Read client */
+#define TEGRA264_MEMORY_CLIENT_UFSR 0x15c
+/* UFS write client */
+#define TEGRA264_MEMORY_CLIENT_UFSW 0x15d
+/* HDA Read client */
+#define TEGRA264_MEMORY_CLIENT_HDAR 0x17c
+/* HDA Write client */
+#define TEGRA264_MEMORY_CLIENT_HDAW 0x17d
+/* Disp ISO Read Client */
+#define TEGRA264_MEMORY_CLIENT_DISPR 0x182
+/* MGBE0 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE0R 0x1a2
+/* MGBE0 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE0W 0x1a3
+/* MGBE1 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE1R 0x1a4
+/* MGBE1 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE1W 0x1a5
+/* VI1 R5 Write client */
+#define TEGRA264_MEMORY_CLIENT_VI1W 0x1a6
+/* SDMMC0 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_SDMMC0R 0x1c2
+/* SDMMC0 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_SDMMC0W 0x1c3
+
+#endif /* DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H */
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index 5e082547f179..881bf78aa8b2 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -75,4 +75,78 @@
#define TEGRA210_MC_RESET_ETR 28
#define TEGRA210_MC_RESET_TSECB 29
+#define TEGRA210_MC_PTCR 0
+#define TEGRA210_MC_DISPLAY0A 1
+#define TEGRA210_MC_DISPLAY0AB 2
+#define TEGRA210_MC_DISPLAY0B 3
+#define TEGRA210_MC_DISPLAY0BB 4
+#define TEGRA210_MC_DISPLAY0C 5
+#define TEGRA210_MC_DISPLAY0CB 6
+#define TEGRA210_MC_AFIR 14
+#define TEGRA210_MC_AVPCARM7R 15
+#define TEGRA210_MC_DISPLAYHC 16
+#define TEGRA210_MC_DISPLAYHCB 17
+#define TEGRA210_MC_HDAR 21
+#define TEGRA210_MC_HOST1XDMAR 22
+#define TEGRA210_MC_HOST1XR 23
+#define TEGRA210_MC_NVENCSRD 28
+#define TEGRA210_MC_PPCSAHBDMAR 29
+#define TEGRA210_MC_PPCSAHBSLVR 30
+#define TEGRA210_MC_SATAR 31
+#define TEGRA210_MC_MPCORER 39
+#define TEGRA210_MC_NVENCSWR 43
+#define TEGRA210_MC_AFIW 49
+#define TEGRA210_MC_AVPCARM7W 50
+#define TEGRA210_MC_HDAW 53
+#define TEGRA210_MC_HOST1XW 54
+#define TEGRA210_MC_MPCOREW 57
+#define TEGRA210_MC_PPCSAHBDMAW 59
+#define TEGRA210_MC_PPCSAHBSLVW 60
+#define TEGRA210_MC_SATAW 61
+#define TEGRA210_MC_ISPRA 68
+#define TEGRA210_MC_ISPWA 70
+#define TEGRA210_MC_ISPWB 71
+#define TEGRA210_MC_XUSB_HOSTR 74
+#define TEGRA210_MC_XUSB_HOSTW 75
+#define TEGRA210_MC_XUSB_DEVR 76
+#define TEGRA210_MC_XUSB_DEVW 77
+#define TEGRA210_MC_ISPRAB 78
+#define TEGRA210_MC_ISPWAB 80
+#define TEGRA210_MC_ISPWBB 81
+#define TEGRA210_MC_TSECSRD 84
+#define TEGRA210_MC_TSECSWR 85
+#define TEGRA210_MC_A9AVPSCR 86
+#define TEGRA210_MC_A9AVPSCW 87
+#define TEGRA210_MC_GPUSRD 88
+#define TEGRA210_MC_GPUSWR 89
+#define TEGRA210_MC_DISPLAYT 90
+#define TEGRA210_MC_SDMMCRA 96
+#define TEGRA210_MC_SDMMCRAA 97
+#define TEGRA210_MC_SDMMCR 98
+#define TEGRA210_MC_SDMMCRAB 99
+#define TEGRA210_MC_SDMMCWA 100
+#define TEGRA210_MC_SDMMCWAA 101
+#define TEGRA210_MC_SDMMCW 102
+#define TEGRA210_MC_SDMMCWAB 103
+#define TEGRA210_MC_VICSRD 108
+#define TEGRA210_MC_VICSWR 109
+#define TEGRA210_MC_VIW 114
+#define TEGRA210_MC_DISPLAYD 115
+#define TEGRA210_MC_NVDECSRD 120
+#define TEGRA210_MC_NVDECSWR 121
+#define TEGRA210_MC_APER 122
+#define TEGRA210_MC_APEW 123
+#define TEGRA210_MC_NVJPGRD 126
+#define TEGRA210_MC_NVJPGWR 127
+#define TEGRA210_MC_SESRD 128
+#define TEGRA210_MC_SESWR 129
+#define TEGRA210_MC_AXIAPR 130
+#define TEGRA210_MC_AXIAPW 131
+#define TEGRA210_MC_ETRR 132
+#define TEGRA210_MC_ETRW 133
+#define TEGRA210_MC_TSECSRDB 134
+#define TEGRA210_MC_TSECSWRB 135
+#define TEGRA210_MC_GPUSRD2 136
+#define TEGRA210_MC_GPUSWR2 137
+
#endif
diff --git a/include/dt-bindings/mfd/qcom-pm8008.h b/include/dt-bindings/mfd/qcom-pm8008.h
deleted file mode 100644
index eca9448df228..000000000000
--- a/include/dt-bindings/mfd/qcom-pm8008.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2021 The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DT_BINDINGS_MFD_QCOM_PM8008_H
-#define __DT_BINDINGS_MFD_QCOM_PM8008_H
-
-/* PM8008 IRQ numbers */
-#define PM8008_IRQ_MISC_UVLO 0
-#define PM8008_IRQ_MISC_OVLO 1
-#define PM8008_IRQ_MISC_OTST2 2
-#define PM8008_IRQ_MISC_OTST3 3
-#define PM8008_IRQ_MISC_LDO_OCP 4
-#define PM8008_IRQ_TEMP_ALARM 5
-#define PM8008_IRQ_GPIO1 6
-#define PM8008_IRQ_GPIO2 7
-
-#endif
diff --git a/include/dt-bindings/mfd/st,stpmic1.h b/include/dt-bindings/mfd/st,stpmic1.h
index 321cd08797d9..9dd15b9c743e 100644
--- a/include/dt-bindings/mfd/st,stpmic1.h
+++ b/include/dt-bindings/mfd/st,stpmic1.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Philippe Peurichard <philippe.peurichard@st.com>,
diff --git a/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
new file mode 100644
index 000000000000..43a2b5743a63
--- /dev/null
+++ b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef _DT_BINDINGS_RENASAS_R9A09G077_PCS_MIIC_H
+#define _DT_BINDINGS_RENASAS_R9A09G077_PCS_MIIC_H
+
+/*
+ * Media Interface Connection Matrix
+ * ===========================================================
+ *
+ * Selects the function of the Media interface of the MAC to be used
+ *
+ * SW_MODE[2:0] | Port 0 | Port 1 | Port 2 | Port 3
+ * -------------|-------------|-------------|-------------|-------------
+ * 000b | ETHSW Port0 | ETHSW Port1 | ETHSW Port2 | GMAC1
+ * 001b | ESC Port0 | ESC Port1 | GMAC2 | GMAC1
+ * 010b | ESC Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 011b | ESC Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 100b | ETHSW Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 101b | ETHSW Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 110b | ETHSW Port0 | ETHSW Port1 | GMAC2 | GMAC1
+ * 111b | GMAC0 | GMAC1 | GMAC2 | -
+ */
+#define ETHSS_GMAC0_PORT 0
+#define ETHSS_GMAC1_PORT 1
+#define ETHSS_GMAC2_PORT 2
+#define ETHSS_ESC_PORT0 3
+#define ETHSS_ESC_PORT1 4
+#define ETHSS_ESC_PORT2 5
+#define ETHSS_ETHSW_PORT0 6
+#define ETHSS_ETHSW_PORT1 7
+#define ETHSS_ETHSW_PORT2 8
+
+#endif
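The SW_MODE matrix above reads as a lookup table from mode to the per-port ETHSS_* function; a hedged C rendering (the ETHSS_PORT_NONE sentinel for the unconnected Port 3 in mode 111b is hypothetical):

	#include <dt-bindings/net/renesas,r9a09g077-pcs-miic.h>

	#define ETHSS_PORT_NONE 0xff	/* hypothetical: the "-" cell in the matrix */

	/* sw_mode_matrix[SW_MODE][port] -> ETHSS_* function, per the table above */
	static const unsigned char sw_mode_matrix[8][4] = {
		{ ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1, ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT },
		{ ETHSS_ESC_PORT0,   ETHSS_ESC_PORT1,   ETHSS_GMAC2_PORT,  ETHSS_GMAC1_PORT },
		{ ETHSS_ESC_PORT0,   ETHSS_ESC_PORT1,   ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT },
		{ ETHSS_ESC_PORT0,   ETHSS_ESC_PORT1,   ETHSS_ESC_PORT2,   ETHSS_GMAC1_PORT },
		{ ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,   ETHSS_ESC_PORT2,   ETHSS_GMAC1_PORT },
		{ ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,   ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT },
		{ ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1, ETHSS_GMAC2_PORT,  ETHSS_GMAC1_PORT },
		{ ETHSS_GMAC0_PORT,  ETHSS_GMAC1_PORT,  ETHSS_GMAC2_PORT,  ETHSS_PORT_NONE  },
	};
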
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
index 6fc4b445d3a1..b8a4f3ff4a3b 100644
--- a/include/dt-bindings/net/ti-dp83867.h
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83867 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2015 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83867_H
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
index 218b1a64e975..917114aad7d0 100644
--- a/include/dt-bindings/net/ti-dp83869.h
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83869 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2019 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83869_H
diff --git a/include/dt-bindings/pinctrl/amlogic,pinctrl.h b/include/dt-bindings/pinctrl/amlogic,pinctrl.h
new file mode 100644
index 000000000000..7d40aecc7147
--- /dev/null
+++ b/include/dt-bindings/pinctrl/amlogic,pinctrl.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ * Author: Xianwei Zhao <xianwei.zhao@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_PINCTRL_H
+#define _DT_BINDINGS_AMLOGIC_PINCTRL_H
+/* Normal PIN bank */
+#define AMLOGIC_GPIO_A 0
+#define AMLOGIC_GPIO_B 1
+#define AMLOGIC_GPIO_C 2
+#define AMLOGIC_GPIO_D 3
+#define AMLOGIC_GPIO_E 4
+#define AMLOGIC_GPIO_F 5
+#define AMLOGIC_GPIO_G 6
+#define AMLOGIC_GPIO_H 7
+#define AMLOGIC_GPIO_I 8
+#define AMLOGIC_GPIO_J 9
+#define AMLOGIC_GPIO_K 10
+#define AMLOGIC_GPIO_L 11
+#define AMLOGIC_GPIO_M 12
+#define AMLOGIC_GPIO_N 13
+#define AMLOGIC_GPIO_O 14
+#define AMLOGIC_GPIO_P 15
+#define AMLOGIC_GPIO_Q 16
+#define AMLOGIC_GPIO_R 17
+#define AMLOGIC_GPIO_S 18
+#define AMLOGIC_GPIO_T 19
+#define AMLOGIC_GPIO_U 20
+#define AMLOGIC_GPIO_V 21
+#define AMLOGIC_GPIO_W 22
+#define AMLOGIC_GPIO_X 23
+#define AMLOGIC_GPIO_Y 24
+#define AMLOGIC_GPIO_Z 25
+
+/* Special PIN bank */
+#define AMLOGIC_GPIO_DV 26
+#define AMLOGIC_GPIO_AO 27
+#define AMLOGIC_GPIO_CC 28
+#define AMLOGIC_GPIO_TEST_N 29
+#define AMLOGIC_GPIO_ANALOG 30
+
+#define AML_PINMUX(bank, offset, mode) (((((bank) << 8) + (offset)) << 8) | (mode))
+
+#endif /* _DT_BINDINGS_AMLOGIC_PINCTRL_H */
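AML_PINMUX packs the bank in bits [23:16] and above, the in-bank pin offset in bits [15:8] and the mux mode in the low byte; a quick check:

	#include <dt-bindings/pinctrl/amlogic,pinctrl.h>

	_Static_assert(AML_PINMUX(AMLOGIC_GPIO_B, 2, 3) == 0x10203,
		       "bank B (1) in bits [23:16], pin offset 2, mux mode 3");
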
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1800b.h b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
new file mode 100644
index 000000000000..0593fc33d470
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1800B_H
+#define _DT_BINDINGS_PINCTRL_CV1800B_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AOUTR 1
+#define PIN_SD0_CLK 3
+#define PIN_SD0_CMD 4
+#define PIN_SD0_D0 5
+#define PIN_SD0_D1 7
+#define PIN_SD0_D2 8
+#define PIN_SD0_D3 9
+#define PIN_SD0_CD 11
+#define PIN_SD0_PWR_EN 12
+#define PIN_SPK_EN 14
+#define PIN_UART0_TX 15
+#define PIN_UART0_RX 16
+#define PIN_SPINOR_HOLD_X 17
+#define PIN_SPINOR_SCK 18
+#define PIN_SPINOR_MOSI 19
+#define PIN_SPINOR_WP_X 20
+#define PIN_SPINOR_MISO 21
+#define PIN_SPINOR_CS_X 22
+#define PIN_IIC0_SCL 23
+#define PIN_IIC0_SDA 24
+#define PIN_AUX0 25
+#define PIN_PWR_VBAT_DET 30
+#define PIN_PWR_SEQ2 31
+#define PIN_XTAL_XIN 33
+#define PIN_SD1_GPIO0 35
+#define PIN_SD1_GPIO1 36
+#define PIN_SD1_D3 38
+#define PIN_SD1_D2 39
+#define PIN_SD1_D1 40
+#define PIN_SD1_D0 41
+#define PIN_SD1_CMD 42
+#define PIN_SD1_CLK 43
+#define PIN_ADC1 44
+#define PIN_USB_VBUS_DET 45
+#define PIN_ETH_TXP 47
+#define PIN_ETH_TXM 48
+#define PIN_ETH_RXP 49
+#define PIN_ETH_RXM 50
+#define PIN_MIPIRX4N 56
+#define PIN_MIPIRX4P 57
+#define PIN_MIPIRX3N 58
+#define PIN_MIPIRX3P 59
+#define PIN_MIPIRX2N 60
+#define PIN_MIPIRX2P 61
+#define PIN_MIPIRX1N 62
+#define PIN_MIPIRX1P 63
+#define PIN_MIPIRX0N 64
+#define PIN_MIPIRX0P 65
+#define PIN_AUD_AINL_MIC 67
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1800B_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1812h.h b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
new file mode 100644
index 000000000000..2908de347919
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1812H_H
+#define _DT_BINDINGS_PINCTRL_CV1812H_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
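PINPOS maps a BGA ball name to a number: the 1-based row letter lands in bits [15:8] and the zero-based column in the low byte; for example:

	_Static_assert(PINPOS('A', 2) == 0x101, "ball A2 (PIN_MIPI_TXM4)");
	_Static_assert(PINPOS('L', 1) == 0xc00, "ball L1 (PIN_SD0_CLK)");
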
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1812H_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv18xx.h b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..bc92ad1067ec
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ *
+ * Author: Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV18XX_H
+#define _DT_BINDINGS_PINCTRL_CV18XX_H
+
+#define PIN_MUX_INVALD 0xff
+
+#define PINMUX2(pin, mux, mux2) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))
+
+#define PINMUX(pin, mux) \
+ PINMUX2(pin, mux, PIN_MUX_INVALD)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV18XX_H */
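PINMUX2 keeps the pin number in the low 16 bits and the two mux levels in bytes 2 and 3; PINMUX marks the second level unused with PIN_MUX_INVALD. A quick check (the SG2042/SG2044 headers below use a single-level variant of the same packing):

	#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>

	_Static_assert(PINMUX2(0xc00, 1, 2) == 0x02010c00,
		       "pin 0xc00, mux 1 in bits [23:16], mux2 2 in bits [31:24]");
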
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2000.h b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
new file mode 100644
index 000000000000..4871f9a7c6c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2000_H
+#define _DT_BINDINGS_PINCTRL_SG2000_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2000_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2002.h b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
new file mode 100644
index 000000000000..3c36cfa0a550
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2002_H
+#define _DT_BINDINGS_PINCTRL_SG2002_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AINL_MIC 2
+#define PIN_AUD_AOUTR 4
+#define PIN_SD0_CLK 6
+#define PIN_SD0_CMD 7
+#define PIN_SD0_D0 8
+#define PIN_SD0_D1 10
+#define PIN_SD0_D2 11
+#define PIN_SD0_D3 12
+#define PIN_SD0_CD 14
+#define PIN_SD0_PWR_EN 15
+#define PIN_SPK_EN 17
+#define PIN_UART0_TX 18
+#define PIN_UART0_RX 19
+#define PIN_EMMC_DAT2 20
+#define PIN_EMMC_CLK 21
+#define PIN_EMMC_DAT0 22
+#define PIN_EMMC_DAT3 23
+#define PIN_EMMC_CMD 24
+#define PIN_EMMC_DAT1 25
+#define PIN_JTAG_CPU_TMS 26
+#define PIN_JTAG_CPU_TCK 27
+#define PIN_IIC0_SCL 28
+#define PIN_IIC0_SDA 29
+#define PIN_AUX0 30
+#define PIN_GPIO_ZQ 35
+#define PIN_PWR_VBAT_DET 38
+#define PIN_PWR_RSTN 39
+#define PIN_PWR_SEQ1 40
+#define PIN_PWR_SEQ2 41
+#define PIN_PWR_WAKEUP0 43
+#define PIN_PWR_BUTTON1 44
+#define PIN_XTAL_XIN 45
+#define PIN_PWR_GPIO0 47
+#define PIN_PWR_GPIO1 48
+#define PIN_PWR_GPIO2 49
+#define PIN_SD1_D3 51
+#define PIN_SD1_D2 52
+#define PIN_SD1_D1 53
+#define PIN_SD1_D0 54
+#define PIN_SD1_CMD 55
+#define PIN_SD1_CLK 56
+#define PIN_PWM0_BUCK 58
+#define PIN_ADC1 59
+#define PIN_USB_VBUS_DET 60
+#define PIN_ETH_TXP 62
+#define PIN_ETH_TXM 63
+#define PIN_ETH_RXP 64
+#define PIN_ETH_RXM 65
+#define PIN_GPIO_RTX 67
+#define PIN_MIPIRX4N 72
+#define PIN_MIPIRX4P 73
+#define PIN_MIPIRX3N 74
+#define PIN_MIPIRX3P 75
+#define PIN_MIPIRX2N 76
+#define PIN_MIPIRX2P 77
+#define PIN_MIPIRX1N 78
+#define PIN_MIPIRX1P 79
+#define PIN_MIPIRX0N 80
+#define PIN_MIPIRX0P 81
+#define PIN_MIPI_TXM2 83
+#define PIN_MIPI_TXP2 84
+#define PIN_MIPI_TXM1 85
+#define PIN_MIPI_TXP1 86
+#define PIN_MIPI_TXM0 87
+#define PIN_MIPI_TXP0 88
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2002_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2042.h b/include/dt-bindings/pinctrl/pinctrl-sg2042.h
new file mode 100644
index 000000000000..79d5bb8e04f8
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2042.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2042_H
+#define _DT_BINDINGS_PINCTRL_SG2042_H
+
+#define PINMUX(pin, mux) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16))
+
+#define PIN_LPC_LCLK 0
+#define PIN_LPC_LFRAME 1
+#define PIN_LPC_LAD0 2
+#define PIN_LPC_LAD1 3
+#define PIN_LPC_LAD2 4
+#define PIN_LPC_LAD3 5
+#define PIN_LPC_LDRQ0 6
+#define PIN_LPC_LDRQ1 7
+#define PIN_LPC_SERIRQ 8
+#define PIN_LPC_CLKRUN 9
+#define PIN_LPC_LPME 10
+#define PIN_LPC_LPCPD 11
+#define PIN_LPC_LSMI 12
+#define PIN_PCIE0_L0_RESET 13
+#define PIN_PCIE0_L1_RESET 14
+#define PIN_PCIE0_L0_WAKEUP 15
+#define PIN_PCIE0_L1_WAKEUP 16
+#define PIN_PCIE0_L0_CLKREQ_IN 17
+#define PIN_PCIE0_L1_CLKREQ_IN 18
+#define PIN_PCIE1_L0_RESET 19
+#define PIN_PCIE1_L1_RESET 20
+#define PIN_PCIE1_L0_WAKEUP 21
+#define PIN_PCIE1_L1_WAKEUP 22
+#define PIN_PCIE1_L0_CLKREQ_IN 23
+#define PIN_PCIE1_L1_CLKREQ_IN 24
+#define PIN_SPIF0_CLK_SEL1 25
+#define PIN_SPIF0_CLK_SEL0 26
+#define PIN_SPIF0_WP 27
+#define PIN_SPIF0_HOLD 28
+#define PIN_SPIF0_SDI 29
+#define PIN_SPIF0_CS 30
+#define PIN_SPIF0_SCK 31
+#define PIN_SPIF0_SDO 32
+#define PIN_SPIF1_CLK_SEL1 33
+#define PIN_SPIF1_CLK_SEL0 34
+#define PIN_SPIF1_WP 35
+#define PIN_SPIF1_HOLD 36
+#define PIN_SPIF1_SDI 37
+#define PIN_SPIF1_CS 38
+#define PIN_SPIF1_SCK 39
+#define PIN_SPIF1_SDO 40
+#define PIN_EMMC_WP 41
+#define PIN_EMMC_CD 42
+#define PIN_EMMC_RST 43
+#define PIN_EMMC_PWR_EN 44
+#define PIN_SDIO_CD 45
+#define PIN_SDIO_WP 46
+#define PIN_SDIO_RST 47
+#define PIN_SDIO_PWR_EN 48
+#define PIN_RGMII0_TXD0 49
+#define PIN_RGMII0_TXD1 50
+#define PIN_RGMII0_TXD2 51
+#define PIN_RGMII0_TXD3 52
+#define PIN_RGMII0_TXCTRL 53
+#define PIN_RGMII0_RXD0 54
+#define PIN_RGMII0_RXD1 55
+#define PIN_RGMII0_RXD2 56
+#define PIN_RGMII0_RXD3 57
+#define PIN_RGMII0_RXCTRL 58
+#define PIN_RGMII0_TXC 59
+#define PIN_RGMII0_RXC 60
+#define PIN_RGMII0_REFCLKO 61
+#define PIN_RGMII0_IRQ 62
+#define PIN_RGMII0_MDC 63
+#define PIN_RGMII0_MDIO 64
+#define PIN_PWM0 65
+#define PIN_PWM1 66
+#define PIN_PWM2 67
+#define PIN_PWM3 68
+#define PIN_FAN0 69
+#define PIN_FAN1 70
+#define PIN_FAN2 71
+#define PIN_FAN3 72
+#define PIN_IIC0_SDA 73
+#define PIN_IIC0_SCL 74
+#define PIN_IIC1_SDA 75
+#define PIN_IIC1_SCL 76
+#define PIN_IIC2_SDA 77
+#define PIN_IIC2_SCL 78
+#define PIN_IIC3_SDA 79
+#define PIN_IIC3_SCL 80
+#define PIN_UART0_TX 81
+#define PIN_UART0_RX 82
+#define PIN_UART0_RTS 83
+#define PIN_UART0_CTS 84
+#define PIN_UART1_TX 85
+#define PIN_UART1_RX 86
+#define PIN_UART1_RTS 87
+#define PIN_UART1_CTS 88
+#define PIN_UART2_TX 89
+#define PIN_UART2_RX 90
+#define PIN_UART2_RTS 91
+#define PIN_UART2_CTS 92
+#define PIN_UART3_TX 93
+#define PIN_UART3_RX 94
+#define PIN_UART3_RTS 95
+#define PIN_UART3_CTS 96
+#define PIN_SPI0_CS0 97
+#define PIN_SPI0_CS1 98
+#define PIN_SPI0_SDI 99
+#define PIN_SPI0_SDO 100
+#define PIN_SPI0_SCK 101
+#define PIN_SPI1_CS0 102
+#define PIN_SPI1_CS1 103
+#define PIN_SPI1_SDI 104
+#define PIN_SPI1_SDO 105
+#define PIN_SPI1_SCK 106
+#define PIN_JTAG0_TDO 107
+#define PIN_JTAG0_TCK 108
+#define PIN_JTAG0_TDI 109
+#define PIN_JTAG0_TMS 110
+#define PIN_JTAG0_TRST 111
+#define PIN_JTAG0_SRST 112
+#define PIN_JTAG1_TDO 113
+#define PIN_JTAG1_TCK 114
+#define PIN_JTAG1_TDI 115
+#define PIN_JTAG1_TMS 116
+#define PIN_JTAG1_TRST 117
+#define PIN_JTAG1_SRST 118
+#define PIN_JTAG2_TDO 119
+#define PIN_JTAG2_TCK 120
+#define PIN_JTAG2_TDI 121
+#define PIN_JTAG2_TMS 122
+#define PIN_JTAG2_TRST 123
+#define PIN_JTAG2_SRST 124
+#define PIN_GPIO0 125
+#define PIN_GPIO1 126
+#define PIN_GPIO2 127
+#define PIN_GPIO3 128
+#define PIN_GPIO4 129
+#define PIN_GPIO5 130
+#define PIN_GPIO6 131
+#define PIN_GPIO7 132
+#define PIN_GPIO8 133
+#define PIN_GPIO9 134
+#define PIN_GPIO10 135
+#define PIN_GPIO11 136
+#define PIN_GPIO12 137
+#define PIN_GPIO13 138
+#define PIN_GPIO14 139
+#define PIN_GPIO15 140
+#define PIN_GPIO16 141
+#define PIN_GPIO17 142
+#define PIN_GPIO18 143
+#define PIN_GPIO19 144
+#define PIN_GPIO20 145
+#define PIN_GPIO21 146
+#define PIN_GPIO22 147
+#define PIN_GPIO23 148
+#define PIN_GPIO24 149
+#define PIN_GPIO25 150
+#define PIN_GPIO26 151
+#define PIN_GPIO27 152
+#define PIN_GPIO28 153
+#define PIN_GPIO29 154
+#define PIN_GPIO30 155
+#define PIN_GPIO31 156
+#define PIN_MODE_SEL0 157
+#define PIN_MODE_SEL1 158
+#define PIN_MODE_SEL2 159
+#define PIN_BOOT_SEL0 160
+#define PIN_BOOT_SEL1 161
+#define PIN_BOOT_SEL2 162
+#define PIN_BOOT_SEL3 163
+#define PIN_BOOT_SEL4 164
+#define PIN_BOOT_SEL5 165
+#define PIN_BOOT_SEL6 166
+#define PIN_BOOT_SEL7 167
+#define PIN_MULTI_SCKT 168
+#define PIN_SCKT_ID0 169
+#define PIN_SCKT_ID1 170
+#define PIN_PLL_CLK_IN_MAIN 171
+#define PIN_PLL_CLK_IN_DDR_L 172
+#define PIN_PLL_CLK_IN_DDR_R 173
+#define PIN_XTAL_32K 174
+#define PIN_SYS_RST 175
+#define PIN_PWR_BUTTON 176
+#define PIN_TEST_EN 177
+#define PIN_TEST_MODE_MBIST 178
+#define PIN_TEST_MODE_SCAN 179
+#define PIN_TEST_MODE_BSD 180
+#define PIN_BISR_BYP 181
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2042_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2044.h b/include/dt-bindings/pinctrl/pinctrl-sg2044.h
new file mode 100644
index 000000000000..2a619f681c39
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2044.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2044_H
+#define _DT_BINDINGS_PINCTRL_SG2044_H
+
+#define PINMUX(pin, mux) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16))
+
+#define PIN_IIC0_SMBSUS_IN 0
+#define PIN_IIC0_SMBSUS_OUT 1
+#define PIN_IIC0_SMBALERT 2
+#define PIN_IIC1_SMBSUS_IN 3
+#define PIN_IIC1_SMBSUS_OUT 4
+#define PIN_IIC1_SMBALERT 5
+#define PIN_IIC2_SMBSUS_IN 6
+#define PIN_IIC2_SMBSUS_OUT 7
+#define PIN_IIC2_SMBALERT 8
+#define PIN_IIC3_SMBSUS_IN 9
+#define PIN_IIC3_SMBSUS_OUT 10
+#define PIN_IIC3_SMBALERT 11
+#define PIN_PCIE0_L0_RESET 12
+#define PIN_PCIE0_L1_RESET 13
+#define PIN_PCIE0_L0_WAKEUP 14
+#define PIN_PCIE0_L1_WAKEUP 15
+#define PIN_PCIE0_L0_CLKREQ_IN 16
+#define PIN_PCIE0_L1_CLKREQ_IN 17
+#define PIN_PCIE1_L0_RESET 18
+#define PIN_PCIE1_L1_RESET 19
+#define PIN_PCIE1_L0_WAKEUP 20
+#define PIN_PCIE1_L1_WAKEUP 21
+#define PIN_PCIE1_L0_CLKREQ_IN 22
+#define PIN_PCIE1_L1_CLKREQ_IN 23
+#define PIN_PCIE2_L0_RESET 24
+#define PIN_PCIE2_L1_RESET 25
+#define PIN_PCIE2_L0_WAKEUP 26
+#define PIN_PCIE2_L1_WAKEUP 27
+#define PIN_PCIE2_L0_CLKREQ_IN 28
+#define PIN_PCIE2_L1_CLKREQ_IN 29
+#define PIN_PCIE3_L0_RESET 30
+#define PIN_PCIE3_L1_RESET 31
+#define PIN_PCIE3_L0_WAKEUP 32
+#define PIN_PCIE3_L1_WAKEUP 33
+#define PIN_PCIE3_L0_CLKREQ_IN 34
+#define PIN_PCIE3_L1_CLKREQ_IN 35
+#define PIN_PCIE4_L0_RESET 36
+#define PIN_PCIE4_L1_RESET 37
+#define PIN_PCIE4_L0_WAKEUP 38
+#define PIN_PCIE4_L1_WAKEUP 39
+#define PIN_PCIE4_L0_CLKREQ_IN 40
+#define PIN_PCIE4_L1_CLKREQ_IN 41
+#define PIN_SPIF0_CLK_SEL1 42
+#define PIN_SPIF0_CLK_SEL0 43
+#define PIN_SPIF0_WP 44
+#define PIN_SPIF0_HOLD 45
+#define PIN_SPIF0_SDI 46
+#define PIN_SPIF0_CS 47
+#define PIN_SPIF0_SCK 48
+#define PIN_SPIF0_SDO 49
+#define PIN_SPIF1_CLK_SEL1 50
+#define PIN_SPIF1_CLK_SEL0 51
+#define PIN_SPIF1_WP 52
+#define PIN_SPIF1_HOLD 53
+#define PIN_SPIF1_SDI 54
+#define PIN_SPIF1_CS 55
+#define PIN_SPIF1_SCK 56
+#define PIN_SPIF1_SDO 57
+#define PIN_EMMC_WP 58
+#define PIN_EMMC_CD 59
+#define PIN_EMMC_RST 60
+#define PIN_EMMC_PWR_EN 61
+#define PIN_SDIO_CD 62
+#define PIN_SDIO_WP 63
+#define PIN_SDIO_RST 64
+#define PIN_SDIO_PWR_EN 65
+#define PIN_RGMII0_TXD0 66
+#define PIN_RGMII0_TXD1 67
+#define PIN_RGMII0_TXD2 68
+#define PIN_RGMII0_TXD3 69
+#define PIN_RGMII0_TXCTRL 70
+#define PIN_RGMII0_RXD0 71
+#define PIN_RGMII0_RXD1 72
+#define PIN_RGMII0_RXD2 73
+#define PIN_RGMII0_RXD3 74
+#define PIN_RGMII0_RXCTRL 75
+#define PIN_RGMII0_TXC 76
+#define PIN_RGMII0_RXC 77
+#define PIN_RGMII0_REFCLKO 78
+#define PIN_RGMII0_IRQ 79
+#define PIN_RGMII0_MDC 80
+#define PIN_RGMII0_MDIO 81
+#define PIN_PWM0 82
+#define PIN_PWM1 83
+#define PIN_PWM2 84
+#define PIN_PWM3 85
+#define PIN_FAN0 86
+#define PIN_FAN1 87
+#define PIN_FAN2 88
+#define PIN_FAN3 89
+#define PIN_IIC0_SDA 90
+#define PIN_IIC0_SCL 91
+#define PIN_IIC1_SDA 92
+#define PIN_IIC1_SCL 93
+#define PIN_IIC2_SDA 94
+#define PIN_IIC2_SCL 95
+#define PIN_IIC3_SDA 96
+#define PIN_IIC3_SCL 97
+#define PIN_UART0_TX 98
+#define PIN_UART0_RX 99
+#define PIN_UART0_RTS 100
+#define PIN_UART0_CTS 101
+#define PIN_UART1_TX 102
+#define PIN_UART1_RX 103
+#define PIN_UART1_RTS 104
+#define PIN_UART1_CTS 105
+#define PIN_UART2_TX 106
+#define PIN_UART2_RX 107
+#define PIN_UART2_RTS 108
+#define PIN_UART2_CTS 109
+#define PIN_UART3_TX 110
+#define PIN_UART3_RX 111
+#define PIN_UART3_RTS 112
+#define PIN_UART3_CTS 113
+#define PIN_SPI0_CS0 114
+#define PIN_SPI0_CS1 115
+#define PIN_SPI0_SDI 116
+#define PIN_SPI0_SDO 117
+#define PIN_SPI0_SCK 118
+#define PIN_SPI1_CS0 119
+#define PIN_SPI1_CS1 120
+#define PIN_SPI1_SDI 121
+#define PIN_SPI1_SDO 122
+#define PIN_SPI1_SCK 123
+#define PIN_JTAG0_TDO 124
+#define PIN_JTAG0_TCK 125
+#define PIN_JTAG0_TDI 126
+#define PIN_JTAG0_TMS 127
+#define PIN_JTAG0_TRST 128
+#define PIN_JTAG0_SRST 129
+#define PIN_JTAG1_TDO 130
+#define PIN_JTAG1_TCK 131
+#define PIN_JTAG1_TDI 132
+#define PIN_JTAG1_TMS 133
+#define PIN_JTAG1_TRST 134
+#define PIN_JTAG1_SRST 135
+#define PIN_JTAG2_TDO 136
+#define PIN_JTAG2_TCK 137
+#define PIN_JTAG2_TDI 138
+#define PIN_JTAG2_TMS 139
+#define PIN_JTAG2_TRST 140
+#define PIN_JTAG2_SRST 141
+#define PIN_JTAG3_TDO 142
+#define PIN_JTAG3_TCK 143
+#define PIN_JTAG3_TDI 144
+#define PIN_JTAG3_TMS 145
+#define PIN_JTAG3_TRST 146
+#define PIN_JTAG3_SRST 147
+#define PIN_GPIO0 148
+#define PIN_GPIO1 149
+#define PIN_GPIO2 150
+#define PIN_GPIO3 151
+#define PIN_GPIO4 152
+#define PIN_GPIO5 153
+#define PIN_GPIO6 154
+#define PIN_GPIO7 155
+#define PIN_GPIO8 156
+#define PIN_GPIO9 157
+#define PIN_GPIO10 158
+#define PIN_GPIO11 159
+#define PIN_GPIO12 160
+#define PIN_GPIO13 161
+#define PIN_GPIO14 162
+#define PIN_GPIO15 163
+#define PIN_GPIO16 164
+#define PIN_GPIO17 165
+#define PIN_GPIO18 166
+#define PIN_GPIO19 167
+#define PIN_GPIO20 168
+#define PIN_GPIO21 169
+#define PIN_GPIO22 170
+#define PIN_GPIO23 171
+#define PIN_GPIO24 172
+#define PIN_GPIO25 173
+#define PIN_GPIO26 174
+#define PIN_GPIO27 175
+#define PIN_GPIO28 176
+#define PIN_GPIO29 177
+#define PIN_GPIO30 178
+#define PIN_GPIO31 179
+#define PIN_MODE_SEL0 180
+#define PIN_MODE_SEL1 181
+#define PIN_MODE_SEL2 182
+#define PIN_BOOT_SEL0 183
+#define PIN_BOOT_SEL1 184
+#define PIN_BOOT_SEL2 185
+#define PIN_BOOT_SEL3 186
+#define PIN_BOOT_SEL4 187
+#define PIN_BOOT_SEL5 188
+#define PIN_BOOT_SEL6 189
+#define PIN_BOOT_SEL7 190
+#define PIN_MULTI_SCKT 191
+#define PIN_SCKT_ID0 192
+#define PIN_SCKT_ID1 193
+#define PIN_PLL_CLK_IN_MAIN 194
+#define PIN_PLL_CLK_IN_DDR_0 195
+#define PIN_PLL_CLK_IN_DDR_1 196
+#define PIN_PLL_CLK_IN_DDR_2 197
+#define PIN_PLL_CLK_IN_DDR_3 198
+#define PIN_XTAL_32K 199
+#define PIN_SYS_RST 200
+#define PIN_PWR_BUTTON 201
+#define PIN_TEST_EN 202
+#define PIN_TEST_MODE_MBIST 203
+#define PIN_TEST_MODE_SCAN 204
+#define PIN_TEST_MODE_BSD 205
+#define PIN_BISR_BYP 206
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2044_H */
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
new file mode 100644
index 000000000000..5917096720bd
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G3E family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZG3E_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZG3E_P0 0
+#define RZG3E_P1 1
+#define RZG3E_P2 2
+#define RZG3E_P3 3
+#define RZG3E_P4 4
+#define RZG3E_P5 5
+#define RZG3E_P6 6
+#define RZG3E_P7 7
+#define RZG3E_P8 8
+#define RZG3E_PA 10
+#define RZG3E_PB 11
+#define RZG3E_PC 12
+#define RZG3E_PD 13
+#define RZG3E_PE 14
+#define RZG3E_PF 15
+#define RZG3E_PG 16
+#define RZG3E_PH 17
+#define RZG3E_PJ 19
+#define RZG3E_PK 20
+#define RZG3E_PL 21
+#define RZG3E_PM 22
+#define RZG3E_PS 28
+
+#define RZG3E_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZG3E_P##b, p, f)
+#define RZG3E_GPIO(port, pin) RZG2L_GPIO(RZG3E_P##port, pin)
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ */
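These wrappers rely on token pasting: the bare bank letter is glued onto RZG3E_P to pick the port index above before delegating to the generic RZ/G2L macro. A sketch, assuming RZG2L_GPIO(port, pin) from rzg2l-pinctrl.h computes port * 8 + pin:

	#define RZG2L_PINS_PER_PORT 8	/* assumed, from rzg2l-pinctrl.h */
	#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
	#define RZG3E_PA 10
	#define RZG3E_GPIO(port, pin) RZG2L_GPIO(RZG3E_P##port, pin)

	_Static_assert(RZG3E_GPIO(A, 3) == 83, "port PA (index 10) * 8 + pin 3");
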
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
new file mode 100644
index 000000000000..2e83bf43160b
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2H family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZV2H_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZV2H_P0 0
+#define RZV2H_P1 1
+#define RZV2H_P2 2
+#define RZV2H_P3 3
+#define RZV2H_P4 4
+#define RZV2H_P5 5
+#define RZV2H_P6 6
+#define RZV2H_P7 7
+#define RZV2H_P8 8
+#define RZV2H_P9 9
+#define RZV2H_PA 10
+#define RZV2H_PB 11
+
+#define RZV2H_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZV2H_P##b, p, f)
+#define RZV2H_GPIO(port, pin) RZG2L_GPIO(RZV2H_P##port, pin)
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
new file mode 100644
index 000000000000..f088793f23ee
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/T2H family pinctrl bindings.
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+
+#define RZT2H_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits.
+ */
+#define RZT2H_PORT_PINMUX(b, p, f) ((b) * RZT2H_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZT2H_GPIO(port, pin) ((port) * RZT2H_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__ */
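Worked example of the two macros above (note that `+` binds tighter than `|`, so the function bits never collide with the pin index):

	_Static_assert(RZT2H_GPIO(2, 3) == 19, "bank 2 * 8 + pin 3");
	_Static_assert(RZT2H_PORT_PINMUX(2, 3, 1) == 0x10013,
		       "same pin with alternate function 1 in the upper 16 bits");
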
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index 28ad0235086a..af3fd388329a 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -26,6 +26,7 @@
#define AF14 0xf
#define AF15 0x10
#define ANALOG 0x11
+#define RSVD 0x12
/* define Pins number*/
#define PIN_NO(port, line) (((port) - 'A') * 0x10 + (line))
diff --git a/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h b/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h
new file mode 100644
index 000000000000..6b3d8ea7bb69
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_
+#define _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_
+
+#define PD_VE 0
+#define PD_GPU 1
+#define PD_VI 2
+#define PD_VO0 3
+#define PD_VO1 4
+#define PD_DE 5
+#define PD_NAND 6
+#define PD_PCIE 7
+
+#endif /* _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_ */
diff --git a/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h b/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h
new file mode 100644
index 000000000000..bc9aba73c19a
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_
+#define _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_
+
+#define PD_DSP 0
+#define PD_NPU 1
+#define PD_AUDIO 2
+#define PD_SRAM 3
+#define PD_RISCV 4
+
+#endif /* _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_ */
diff --git a/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h b/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h
new file mode 100644
index 000000000000..b1c18a490613
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN8I_V853_PPU_H_
+#define _DT_BINDINGS_POWER_SUN8I_V853_PPU_H_
+
+#define PD_RISCV 0
+#define PD_NPU 1
+#define PD_VE 2
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,a4-pwrc.h b/include/dt-bindings/power/amlogic,a4-pwrc.h
new file mode 100644
index 000000000000..bd2f9c558d22
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a4-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_A4_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A4_POWER_H
+
+#define PWRC_A4_AUDIO_ID 0
+#define PWRC_A4_SDIOA_ID 1
+#define PWRC_A4_EMMC_ID 2
+#define PWRC_A4_USB_COMB_ID 3
+#define PWRC_A4_ETH_ID 4
+#define PWRC_A4_VOUT_ID 5
+#define PWRC_A4_AUDIO_PDM_ID 6
+#define PWRC_A4_DMC_ID 7
+#define PWRC_A4_SYS_WRAP_ID 8
+#define PWRC_A4_AO_I2C_S_ID 9
+#define PWRC_A4_AO_UART_ID 10
+#define PWRC_A4_AO_IR_ID 11
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,a5-pwrc.h b/include/dt-bindings/power/amlogic,a5-pwrc.h
new file mode 100644
index 000000000000..3a6f53eb959f
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a5-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_A5_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A5_POWER_H
+
+#define PWRC_A5_NNA_ID 0
+#define PWRC_A5_AUDIO_ID 1
+#define PWRC_A5_SDIOA_ID 2
+#define PWRC_A5_EMMC_ID 3
+#define PWRC_A5_USB_COMB_ID 4
+#define PWRC_A5_ETH_ID 5
+#define PWRC_A5_RSA_ID 6
+#define PWRC_A5_AUDIO_PDM_ID 7
+#define PWRC_A5_DMC_ID 8
+#define PWRC_A5_SYS_WRAP_ID 9
+#define PWRC_A5_DSPA_ID 10
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s6-pwrc.h b/include/dt-bindings/power/amlogic,s6-pwrc.h
new file mode 100644
index 000000000000..2c005864ae73
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s6-pwrc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S6_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S6_POWER_H
+
+#define PWRC_S6_DSPA_ID 0
+#define PWRC_S6_DOS_HEVC_ID 1
+#define PWRC_S6_DOS_VDEC_ID 2
+#define PWRC_S6_VPU_HDMI_ID 3
+#define PWRC_S6_U2DRD_ID 4
+#define PWRC_S6_U3DRD_ID 5
+#define PWRC_S6_SD_EMMC_C_ID 6
+#define PWRC_S6_GE2D_ID 7
+#define PWRC_S6_AMFC_ID 8
+#define PWRC_S6_VC9000E_ID 9
+#define PWRC_S6_DEWARP_ID 10
+#define PWRC_S6_VICP_ID 11
+#define PWRC_S6_SD_EMMC_A_ID 12
+#define PWRC_S6_SD_EMMC_B_ID 13
+#define PWRC_S6_ETH_ID 14
+#define PWRC_S6_PCIE_ID 15
+#define PWRC_S6_NNA_4T_ID 16
+#define PWRC_S6_AUDIO_ID 17
+#define PWRC_S6_AUCPU_ID 18
+#define PWRC_S6_ADAPT_ID 19
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s7-pwrc.h b/include/dt-bindings/power/amlogic,s7-pwrc.h
new file mode 100644
index 000000000000..3f21d095f784
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s7-pwrc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S7_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S7_POWER_H
+
+#define PWRC_S7_DOS_HEVC_ID 0
+#define PWRC_S7_DOS_VDEC_ID 1
+#define PWRC_S7_VPU_HDMI_ID 2
+#define PWRC_S7_USB_COMB_ID 3
+#define PWRC_S7_SD_EMMC_C_ID 4
+#define PWRC_S7_GE2D_ID 5
+#define PWRC_S7_SD_EMMC_A_ID 6
+#define PWRC_S7_SD_EMMC_B_ID 7
+#define PWRC_S7_ETH_ID 8
+#define PWRC_S7_AUCPU_ID 9
+#define PWRC_S7_AUDIO_ID 10
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s7d-pwrc.h b/include/dt-bindings/power/amlogic,s7d-pwrc.h
new file mode 100644
index 000000000000..c6998553670a
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s7d-pwrc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S7D_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S7D_POWER_H
+
+#define PWRC_S7D_DOS_HCODEC_ID 0
+#define PWRC_S7D_DOS_HEVC_ID 1
+#define PWRC_S7D_DOS_VDEC_ID 2
+#define PWRC_S7D_VPU_HDMI_ID 3
+#define PWRC_S7D_USB_U2DRD_ID 4
+#define PWRC_S7D_USB_U2H_ID 5
+#define PWRC_S7D_SSD_EMMC_C_ID 6
+#define PWRC_S7D_GE2D_ID 7
+#define PWRC_S7D_AMFC_ID 8
+#define PWRC_S7D_EMMC_A_ID 9
+#define PWRC_S7D_EMMC_B_ID 10
+#define PWRC_S7D_ETH_ID 11
+#define PWRC_S7D_AUCPU_ID 12
+#define PWRC_S7D_AUDIO_ID 13
+#define PWRC_S7D_SRAMA_ID 14
+#define PWRC_S7D_DMC0_ID 15
+#define PWRC_S7D_DMC1_ID 16
+#define PWRC_S7D_DDR_ID 17
+
+#endif
diff --git a/include/dt-bindings/power/marvell,pxa1908-power.h b/include/dt-bindings/power/marvell,pxa1908-power.h
new file mode 100644
index 000000000000..19b088351af1
--- /dev/null
+++ b/include/dt-bindings/power/marvell,pxa1908-power.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Marvell PXA1908 power domains
+ *
+ * Copyright 2025, Duje Mihanović <duje@dujemihanovic.xyz>
+ */
+
+#ifndef __DTS_MARVELL_PXA1908_POWER_H
+#define __DTS_MARVELL_PXA1908_POWER_H
+
+#define PXA1908_POWER_DOMAIN_VPU 0
+#define PXA1908_POWER_DOMAIN_GPU 1
+#define PXA1908_POWER_DOMAIN_GPU2D 2
+#define PXA1908_POWER_DOMAIN_DSI 3
+#define PXA1908_POWER_DOMAIN_ISP 4
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt6735-power-controller.h b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
new file mode 100644
index 000000000000..6957075fcb9e
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+#define _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+
+#define MT6735_POWER_DOMAIN_MD1 0
+#define MT6735_POWER_DOMAIN_CONN 1
+#define MT6735_POWER_DOMAIN_DIS 2
+#define MT6735_POWER_DOMAIN_MFG 3
+#define MT6735_POWER_DOMAIN_ISP 4
+#define MT6735_POWER_DOMAIN_VDE 5
+#define MT6735_POWER_DOMAIN_VEN 6
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt6893-power.h b/include/dt-bindings/power/mediatek,mt6893-power.h
new file mode 100644
index 000000000000..aeab51bb2ad8
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6893-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT6893_POWER_H
+#define _DT_BINDINGS_POWER_MT6893_POWER_H
+
+#define MT6893_POWER_DOMAIN_CONN 0
+#define MT6893_POWER_DOMAIN_MFG0 1
+#define MT6893_POWER_DOMAIN_MFG1 2
+#define MT6893_POWER_DOMAIN_MFG2 3
+#define MT6893_POWER_DOMAIN_MFG3 4
+#define MT6893_POWER_DOMAIN_MFG4 5
+#define MT6893_POWER_DOMAIN_MFG5 6
+#define MT6893_POWER_DOMAIN_MFG6 7
+#define MT6893_POWER_DOMAIN_ISP 8
+#define MT6893_POWER_DOMAIN_ISP2 9
+#define MT6893_POWER_DOMAIN_IPE 10
+#define MT6893_POWER_DOMAIN_VDEC0 11
+#define MT6893_POWER_DOMAIN_VDEC1 12
+#define MT6893_POWER_DOMAIN_VENC0 13
+#define MT6893_POWER_DOMAIN_VENC1 14
+#define MT6893_POWER_DOMAIN_MDP 15
+#define MT6893_POWER_DOMAIN_DISP 16
+#define MT6893_POWER_DOMAIN_AUDIO 17
+#define MT6893_POWER_DOMAIN_ADSP 18
+#define MT6893_POWER_DOMAIN_CAM 19
+#define MT6893_POWER_DOMAIN_CAM_RAWA 20
+#define MT6893_POWER_DOMAIN_CAM_RAWB 21
+#define MT6893_POWER_DOMAIN_CAM_RAWC 22
+#define MT6893_POWER_DOMAIN_DP_TX 23
+
+#endif /* _DT_BINDINGS_POWER_MT6893_POWER_H */
diff --git a/include/dt-bindings/power/mediatek,mt8196-power.h b/include/dt-bindings/power/mediatek,mt8196-power.h
new file mode 100644
index 000000000000..0f622a93c807
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8196-power.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8196_POWER_H
+#define _DT_BINDINGS_POWER_MT8196_POWER_H
+
+/* SCPSYS Secure Power Manager - Direct Control */
+#define MT8196_POWER_DOMAIN_MD 0
+#define MT8196_POWER_DOMAIN_CONN 1
+#define MT8196_POWER_DOMAIN_SSUSB_P0 2
+#define MT8196_POWER_DOMAIN_SSUSB_DP_PHY_P0 3
+#define MT8196_POWER_DOMAIN_SSUSB_P1 4
+#define MT8196_POWER_DOMAIN_SSUSB_P23 5
+#define MT8196_POWER_DOMAIN_SSUSB_PHY_P2 6
+#define MT8196_POWER_DOMAIN_PEXTP_MAC0 7
+#define MT8196_POWER_DOMAIN_PEXTP_MAC1 8
+#define MT8196_POWER_DOMAIN_PEXTP_MAC2 9
+#define MT8196_POWER_DOMAIN_PEXTP_PHY0 10
+#define MT8196_POWER_DOMAIN_PEXTP_PHY1 11
+#define MT8196_POWER_DOMAIN_PEXTP_PHY2 12
+#define MT8196_POWER_DOMAIN_AUDIO 13
+#define MT8196_POWER_DOMAIN_ADSP_TOP_DORMANT 14
+#define MT8196_POWER_DOMAIN_ADSP_INFRA 15
+#define MT8196_POWER_DOMAIN_ADSP_AO 16
+
+/* SCPSYS Secure Power Manager - HW Voter */
+#define MT8196_POWER_DOMAIN_MM_PROC_DORMANT 0
+#define MT8196_POWER_DOMAIN_SSR 1
+
+/* HFRPSYS MultiMedia Power Control (MMPC) - HW Voter */
+#define MT8196_POWER_DOMAIN_VDE0 0
+#define MT8196_POWER_DOMAIN_VDE1 1
+#define MT8196_POWER_DOMAIN_VDE_VCORE0 2
+#define MT8196_POWER_DOMAIN_VEN0 3
+#define MT8196_POWER_DOMAIN_VEN1 4
+#define MT8196_POWER_DOMAIN_VEN2 5
+#define MT8196_POWER_DOMAIN_DISP_VCORE 6
+#define MT8196_POWER_DOMAIN_DIS0_DORMANT 7
+#define MT8196_POWER_DOMAIN_DIS1_DORMANT 8
+#define MT8196_POWER_DOMAIN_OVL0_DORMANT 9
+#define MT8196_POWER_DOMAIN_OVL1_DORMANT 10
+#define MT8196_POWER_DOMAIN_DISP_EDPTX_DORMANT 11
+#define MT8196_POWER_DOMAIN_DISP_DPTX_DORMANT 12
+#define MT8196_POWER_DOMAIN_MML0_SHUTDOWN 13
+#define MT8196_POWER_DOMAIN_MML1_SHUTDOWN 14
+#define MT8196_POWER_DOMAIN_MM_INFRA0 15
+#define MT8196_POWER_DOMAIN_MM_INFRA1 16
+#define MT8196_POWER_DOMAIN_MM_INFRA_AO 17
+#define MT8196_POWER_DOMAIN_CSI_BS_RX 18
+#define MT8196_POWER_DOMAIN_CSI_LS_RX 19
+#define MT8196_POWER_DOMAIN_DSI_PHY0 20
+#define MT8196_POWER_DOMAIN_DSI_PHY1 21
+#define MT8196_POWER_DOMAIN_DSI_PHY2 22
+
+#endif /* _DT_BINDINGS_POWER_MT8196_POWER_H */
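
Note that the three comment-delimited groups above are independent index spaces: the same numeric cell selects a different domain depending on which provider node the phandle references. A hedged sketch with illustrative provider labels and unit addresses (spm for the SCPSYS direct-control domains, mmpc for the HFRPSYS HW-voter domains):

#include <dt-bindings/power/mediatek,mt8196-power.h>

adsp@10b80000 {
	power-domains = <&spm MT8196_POWER_DOMAIN_ADSP_AO>;
};

video-codec@16000000 {
	power-domains = <&mmpc MT8196_POWER_DOMAIN_VDE0>;
};
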
diff --git a/include/dt-bindings/power/nvidia,tegra264-bpmp.h b/include/dt-bindings/power/nvidia,tegra264-bpmp.h
new file mode 100644
index 000000000000..2eef4a2a02b0
--- /dev/null
+++ b/include/dt-bindings/power/nvidia,tegra264-bpmp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H
+#define DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H
+
+#define TEGRA264_POWER_DOMAIN_DISP 1
+#define TEGRA264_POWER_DOMAIN_AUD 2
+/* reserved 3:9 */
+#define TEGRA264_POWER_DOMAIN_XUSB_SS 10
+#define TEGRA264_POWER_DOMAIN_XUSB_DEV 11
+#define TEGRA264_POWER_DOMAIN_XUSB_HOST 12
+#define TEGRA264_POWER_DOMAIN_MGBE0 13
+#define TEGRA264_POWER_DOMAIN_MGBE1 14
+#define TEGRA264_POWER_DOMAIN_MGBE2 15
+#define TEGRA264_POWER_DOMAIN_MGBE3 16
+#define TEGRA264_POWER_DOMAIN_VI 17
+#define TEGRA264_POWER_DOMAIN_VIC 18
+#define TEGRA264_POWER_DOMAIN_ISP0 19
+#define TEGRA264_POWER_DOMAIN_ISP1 20
+#define TEGRA264_POWER_DOMAIN_PVA0 21
+#define TEGRA264_POWER_DOMAIN_GPU 22
+
+#endif /* DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H */
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
index e54ffa361451..50e7c886709d 100644
--- a/include/dt-bindings/power/qcom,rpmhpd.h
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -29,4 +29,240 @@
#define RPMHPD_NSP2 19
#define RPMHPD_GMXC 20
+/* RPMh Power Domain performance levels */
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3 50
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2_1 51
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1_1 54
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L0 76
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L2 96
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_SVS_L2 224
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L0 288
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
+#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
+#define RPMH_REGULATOR_LEVEL_TURBO_L5 456
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
+
+/*
+ * Platform-specific power domain bindings. Don't add new entries here, use
+ * RPMHPD_* above.
+ */
+
+/* SA8775P Power Domain Indexes */
+#define SA8775P_CX 0
+#define SA8775P_CX_AO 1
+#define SA8775P_DDR 2
+#define SA8775P_EBI 3
+#define SA8775P_GFX 4
+#define SA8775P_LCX 5
+#define SA8775P_LMX 6
+#define SA8775P_MMCX 7
+#define SA8775P_MMCX_AO 8
+#define SA8775P_MSS 9
+#define SA8775P_MX 10
+#define SA8775P_MX_AO 11
+#define SA8775P_MXC 12
+#define SA8775P_MXC_AO 13
+#define SA8775P_NSP0 14
+#define SA8775P_NSP1 15
+#define SA8775P_XO 16
+
+/* SDM670 Power Domain Indexes */
+#define SDM670_MX 0
+#define SDM670_MX_AO 1
+#define SDM670_CX 2
+#define SDM670_CX_AO 3
+#define SDM670_LMX 4
+#define SDM670_LCX 5
+#define SDM670_GFX 6
+#define SDM670_MSS 7
+
+/* SDM845 Power Domain Indexes */
+#define SDM845_EBI 0
+#define SDM845_MX 1
+#define SDM845_MX_AO 2
+#define SDM845_CX 3
+#define SDM845_CX_AO 4
+#define SDM845_LMX 5
+#define SDM845_LCX 6
+#define SDM845_GFX 7
+#define SDM845_MSS 8
+
+/* SDX55 Power Domain Indexes */
+#define SDX55_MSS 0
+#define SDX55_MX 1
+#define SDX55_CX 2
+
+/* SDX65 Power Domain Indexes */
+#define SDX65_MSS 0
+#define SDX65_MX 1
+#define SDX65_MX_AO 2
+#define SDX65_CX 3
+#define SDX65_CX_AO 4
+#define SDX65_MXC 5
+
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
+/* SM8150 Power Domain Indexes */
+#define SM8150_MSS 0
+#define SM8150_EBI 1
+#define SM8150_LMX 2
+#define SM8150_LCX 3
+#define SM8150_GFX 4
+#define SM8150_MX 5
+#define SM8150_MX_AO 6
+#define SM8150_CX 7
+#define SM8150_CX_AO 8
+#define SM8150_MMCX 9
+#define SM8150_MMCX_AO 10
+
+/* SA8155P is a special case, kept for backwards compatibility */
+#define SA8155P_CX SM8150_CX
+#define SA8155P_CX_AO SM8150_CX_AO
+#define SA8155P_EBI SM8150_EBI
+#define SA8155P_GFX SM8150_GFX
+#define SA8155P_MSS SM8150_MSS
+#define SA8155P_MX SM8150_MX
+#define SA8155P_MX_AO SM8150_MX_AO
+
+/* SM8250 Power Domain Indexes */
+#define SM8250_CX 0
+#define SM8250_CX_AO 1
+#define SM8250_EBI 2
+#define SM8250_GFX 3
+#define SM8250_LCX 4
+#define SM8250_LMX 5
+#define SM8250_MMCX 6
+#define SM8250_MMCX_AO 7
+#define SM8250_MX 8
+#define SM8250_MX_AO 9
+
+/* SM8350 Power Domain Indexes */
+#define SM8350_CX 0
+#define SM8350_CX_AO 1
+#define SM8350_EBI 2
+#define SM8350_GFX 3
+#define SM8350_LCX 4
+#define SM8350_LMX 5
+#define SM8350_MMCX 6
+#define SM8350_MMCX_AO 7
+#define SM8350_MX 8
+#define SM8350_MX_AO 9
+#define SM8350_MXC 10
+#define SM8350_MXC_AO 11
+#define SM8350_MSS 12
+
+/* SM8450 Power Domain Indexes */
+#define SM8450_CX 0
+#define SM8450_CX_AO 1
+#define SM8450_EBI 2
+#define SM8450_GFX 3
+#define SM8450_LCX 4
+#define SM8450_LMX 5
+#define SM8450_MMCX 6
+#define SM8450_MMCX_AO 7
+#define SM8450_MX 8
+#define SM8450_MX_AO 9
+#define SM8450_MXC 10
+#define SM8450_MXC_AO 11
+#define SM8450_MSS 12
+
+/* SM8550 Power Domain Indexes */
+#define SM8550_CX 0
+#define SM8550_CX_AO 1
+#define SM8550_EBI 2
+#define SM8550_GFX 3
+#define SM8550_LCX 4
+#define SM8550_LMX 5
+#define SM8550_MMCX 6
+#define SM8550_MMCX_AO 7
+#define SM8550_MX 8
+#define SM8550_MX_AO 9
+#define SM8550_MXC 10
+#define SM8550_MXC_AO 11
+#define SM8550_MSS 12
+#define SM8550_NSP 13
+
+/* QDU1000/QRU1000 Power Domain Indexes */
+#define QDU1000_EBI 0
+#define QDU1000_MSS 1
+#define QDU1000_CX 2
+#define QDU1000_MX 3
+
+/* SC7180 Power Domain Indexes */
+#define SC7180_CX 0
+#define SC7180_CX_AO 1
+#define SC7180_GFX 2
+#define SC7180_MX 3
+#define SC7180_MX_AO 4
+#define SC7180_LMX 5
+#define SC7180_LCX 6
+#define SC7180_MSS 7
+
+/* SC7280 Power Domain Indexes */
+#define SC7280_CX 0
+#define SC7280_CX_AO 1
+#define SC7280_EBI 2
+#define SC7280_GFX 3
+#define SC7280_MX 4
+#define SC7280_MX_AO 5
+#define SC7280_LMX 6
+#define SC7280_LCX 7
+#define SC7280_MSS 8
+
+/* SC8180X Power Domain Indexes */
+#define SC8180X_CX 0
+#define SC8180X_CX_AO 1
+#define SC8180X_EBI 2
+#define SC8180X_GFX 3
+#define SC8180X_LCX 4
+#define SC8180X_LMX 5
+#define SC8180X_MMCX 6
+#define SC8180X_MMCX_AO 7
+#define SC8180X_MSS 8
+#define SC8180X_MX 9
+#define SC8180X_MX_AO 10
+
+/* SC8280XP Power Domain Indexes */
+#define SC8280XP_CX 0
+#define SC8280XP_CX_AO 1
+#define SC8280XP_DDR 2
+#define SC8280XP_EBI 3
+#define SC8280XP_GFX 4
+#define SC8280XP_LCX 5
+#define SC8280XP_LMX 6
+#define SC8280XP_MMCX 7
+#define SC8280XP_MMCX_AO 8
+#define SC8280XP_MSS 9
+#define SC8280XP_MX 10
+#define SC8280XP_MXC 12
+#define SC8280XP_MX_AO 11
+#define SC8280XP_NSP 13
+#define SC8280XP_QPHY 14
+#define SC8280XP_XO 15
+
#endif
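
Taken together, the RPMHPD_* indexes name a domain on the RPMh power-domain provider, the RPMH_REGULATOR_LEVEL_* values populate its operating-points table, and consumers vote for a minimum level through required-opps. A minimal sketch, assuming an SM8450-style provider; the node labels and the consumer node are illustrative:

#include <dt-bindings/power/qcom,rpmhpd.h>

rpmhpd: power-controller {
	compatible = "qcom,sm8450-rpmhpd";
	#power-domain-cells = <1>;
	operating-points-v2 = <&rpmhpd_opp_table>;

	rpmhpd_opp_table: opp-table {
		compatible = "operating-points-v2";

		rpmhpd_opp_nom: opp-256 {
			opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
		};
	};
};

video-codec@aa00000 {
	/* hold the CX domain at NOM or above while this device is active */
	power-domains = <&rpmhpd RPMHPD_CX>;
	required-opps = <&rpmhpd_opp_nom>;
};
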
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 608087fb9a3d..4371ac941f29 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,255 +4,39 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
-/* SA8775P Power Domain Indexes */
-#define SA8775P_CX 0
-#define SA8775P_CX_AO 1
-#define SA8775P_DDR 2
-#define SA8775P_EBI 3
-#define SA8775P_GFX 4
-#define SA8775P_LCX 5
-#define SA8775P_LMX 6
-#define SA8775P_MMCX 7
-#define SA8775P_MMCX_AO 8
-#define SA8775P_MSS 9
-#define SA8775P_MX 10
-#define SA8775P_MX_AO 11
-#define SA8775P_MXC 12
-#define SA8775P_MXC_AO 13
-#define SA8775P_NSP0 14
-#define SA8775P_NSP1 15
-#define SA8775P_XO 16
-
-/* SDM670 Power Domain Indexes */
-#define SDM670_MX 0
-#define SDM670_MX_AO 1
-#define SDM670_CX 2
-#define SDM670_CX_AO 3
-#define SDM670_LMX 4
-#define SDM670_LCX 5
-#define SDM670_GFX 6
-#define SDM670_MSS 7
-
-/* SDM845 Power Domain Indexes */
-#define SDM845_EBI 0
-#define SDM845_MX 1
-#define SDM845_MX_AO 2
-#define SDM845_CX 3
-#define SDM845_CX_AO 4
-#define SDM845_LMX 5
-#define SDM845_LCX 6
-#define SDM845_GFX 7
-#define SDM845_MSS 8
-
-/* SDX55 Power Domain Indexes */
-#define SDX55_MSS 0
-#define SDX55_MX 1
-#define SDX55_CX 2
-
-/* SDX65 Power Domain Indexes */
-#define SDX65_MSS 0
-#define SDX65_MX 1
-#define SDX65_MX_AO 2
-#define SDX65_CX 3
-#define SDX65_CX_AO 4
-#define SDX65_MXC 5
-
-/* SM6350 Power Domain Indexes */
-#define SM6350_CX 0
-#define SM6350_GFX 1
-#define SM6350_LCX 2
-#define SM6350_LMX 3
-#define SM6350_MSS 4
-#define SM6350_MX 5
-
-/* SM6350 Power Domain Indexes */
-#define SM6375_VDDCX 0
-#define SM6375_VDDCX_AO 1
-#define SM6375_VDDCX_VFL 2
-#define SM6375_VDDMX 3
-#define SM6375_VDDMX_AO 4
-#define SM6375_VDDMX_VFL 5
-#define SM6375_VDDGX 6
-#define SM6375_VDDGX_AO 7
-#define SM6375_VDD_LPI_CX 8
-#define SM6375_VDD_LPI_MX 9
-
-/* SM8150 Power Domain Indexes */
-#define SM8150_MSS 0
-#define SM8150_EBI 1
-#define SM8150_LMX 2
-#define SM8150_LCX 3
-#define SM8150_GFX 4
-#define SM8150_MX 5
-#define SM8150_MX_AO 6
-#define SM8150_CX 7
-#define SM8150_CX_AO 8
-#define SM8150_MMCX 9
-#define SM8150_MMCX_AO 10
-
-/* SA8155P is a special case, kept for backwards compatibility */
-#define SA8155P_CX SM8150_CX
-#define SA8155P_CX_AO SM8150_CX_AO
-#define SA8155P_EBI SM8150_EBI
-#define SA8155P_GFX SM8150_GFX
-#define SA8155P_MSS SM8150_MSS
-#define SA8155P_MX SM8150_MX
-#define SA8155P_MX_AO SM8150_MX_AO
-
-/* SM8250 Power Domain Indexes */
-#define SM8250_CX 0
-#define SM8250_CX_AO 1
-#define SM8250_EBI 2
-#define SM8250_GFX 3
-#define SM8250_LCX 4
-#define SM8250_LMX 5
-#define SM8250_MMCX 6
-#define SM8250_MMCX_AO 7
-#define SM8250_MX 8
-#define SM8250_MX_AO 9
-
-/* SM8350 Power Domain Indexes */
-#define SM8350_CX 0
-#define SM8350_CX_AO 1
-#define SM8350_EBI 2
-#define SM8350_GFX 3
-#define SM8350_LCX 4
-#define SM8350_LMX 5
-#define SM8350_MMCX 6
-#define SM8350_MMCX_AO 7
-#define SM8350_MX 8
-#define SM8350_MX_AO 9
-#define SM8350_MXC 10
-#define SM8350_MXC_AO 11
-#define SM8350_MSS 12
-
-/* SM8450 Power Domain Indexes */
-#define SM8450_CX 0
-#define SM8450_CX_AO 1
-#define SM8450_EBI 2
-#define SM8450_GFX 3
-#define SM8450_LCX 4
-#define SM8450_LMX 5
-#define SM8450_MMCX 6
-#define SM8450_MMCX_AO 7
-#define SM8450_MX 8
-#define SM8450_MX_AO 9
-#define SM8450_MXC 10
-#define SM8450_MXC_AO 11
-#define SM8450_MSS 12
-
-/* SM8550 Power Domain Indexes */
-#define SM8550_CX 0
-#define SM8550_CX_AO 1
-#define SM8550_EBI 2
-#define SM8550_GFX 3
-#define SM8550_LCX 4
-#define SM8550_LMX 5
-#define SM8550_MMCX 6
-#define SM8550_MMCX_AO 7
-#define SM8550_MX 8
-#define SM8550_MX_AO 9
-#define SM8550_MXC 10
-#define SM8550_MXC_AO 11
-#define SM8550_MSS 12
-#define SM8550_NSP 13
-
-/* QDU1000/QRU1000 Power Domain Indexes */
-#define QDU1000_EBI 0
-#define QDU1000_MSS 1
-#define QDU1000_CX 2
-#define QDU1000_MX 3
-
-/* SC7180 Power Domain Indexes */
-#define SC7180_CX 0
-#define SC7180_CX_AO 1
-#define SC7180_GFX 2
-#define SC7180_MX 3
-#define SC7180_MX_AO 4
-#define SC7180_LMX 5
-#define SC7180_LCX 6
-#define SC7180_MSS 7
-
-/* SC7280 Power Domain Indexes */
-#define SC7280_CX 0
-#define SC7280_CX_AO 1
-#define SC7280_EBI 2
-#define SC7280_GFX 3
-#define SC7280_MX 4
-#define SC7280_MX_AO 5
-#define SC7280_LMX 6
-#define SC7280_LCX 7
-#define SC7280_MSS 8
-
-/* SC8180X Power Domain Indexes */
-#define SC8180X_CX 0
-#define SC8180X_CX_AO 1
-#define SC8180X_EBI 2
-#define SC8180X_GFX 3
-#define SC8180X_LCX 4
-#define SC8180X_LMX 5
-#define SC8180X_MMCX 6
-#define SC8180X_MMCX_AO 7
-#define SC8180X_MSS 8
-#define SC8180X_MX 9
-#define SC8180X_MX_AO 10
-
-/* SC8280XP Power Domain Indexes */
-#define SC8280XP_CX 0
-#define SC8280XP_CX_AO 1
-#define SC8280XP_DDR 2
-#define SC8280XP_EBI 3
-#define SC8280XP_GFX 4
-#define SC8280XP_LCX 5
-#define SC8280XP_LMX 6
-#define SC8280XP_MMCX 7
-#define SC8280XP_MMCX_AO 8
-#define SC8280XP_MSS 9
-#define SC8280XP_MX 10
-#define SC8280XP_MXC 12
-#define SC8280XP_MX_AO 11
-#define SC8280XP_NSP 13
-#define SC8280XP_QPHY 14
-#define SC8280XP_XO 15
-
-/* SDM845 Power Domain performance levels */
-#define RPMH_REGULATOR_LEVEL_RETENTION 16
-#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
-#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_L2 96
-#define RPMH_REGULATOR_LEVEL_SVS 128
-#define RPMH_REGULATOR_LEVEL_SVS_L0 144
-#define RPMH_REGULATOR_LEVEL_SVS_L1 192
-#define RPMH_REGULATOR_LEVEL_SVS_L2 224
-#define RPMH_REGULATOR_LEVEL_NOM 256
-#define RPMH_REGULATOR_LEVEL_NOM_L0 288
-#define RPMH_REGULATOR_LEVEL_NOM_L1 320
-#define RPMH_REGULATOR_LEVEL_NOM_L2 336
-#define RPMH_REGULATOR_LEVEL_TURBO 384
-#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
-#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
-#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
-#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
-#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
-#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
+#include <dt-bindings/power/qcom,rpmhpd.h>
+
+/* Generic RPM Power Domain Indexes */
+#define RPMPD_VDDCX 0
+#define RPMPD_VDDCX_AO 1
+/* VFC and VFL are mutually exclusive and cannot be present on the same platform */
+#define RPMPD_VDDCX_VFC 2
+#define RPMPD_VDDCX_VFL 2
+#define RPMPD_VDDMX 3
+#define RPMPD_VDDMX_AO 4
+#define RPMPD_VDDMX_VFL 5
+#define RPMPD_SSCCX 6
+#define RPMPD_SSCCX_VFL 7
+#define RPMPD_SSCMX 8
+#define RPMPD_SSCMX_VFL 9
+
+/*
+ * Platform-specific power domain bindings. Don't add new entries here, use
+ * RPMPD_* above.
+ */
/* MDM9607 Power Domains */
-#define MDM9607_VDDCX 0
-#define MDM9607_VDDCX_AO 1
-#define MDM9607_VDDCX_VFL 2
-#define MDM9607_VDDMX 3
-#define MDM9607_VDDMX_AO 4
-#define MDM9607_VDDMX_VFL 5
+#define MDM9607_VDDCX RPMPD_VDDCX
+#define MDM9607_VDDCX_AO RPMPD_VDDCX_AO
+#define MDM9607_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MDM9607_VDDMX RPMPD_VDDMX
+#define MDM9607_VDDMX_AO RPMPD_VDDMX_AO
+#define MDM9607_VDDMX_VFL RPMPD_VDDMX_VFL
/* MSM8226 Power Domain Indexes */
-#define MSM8226_VDDCX 0
-#define MSM8226_VDDCX_AO 1
-#define MSM8226_VDDCX_VFC 2
+#define MSM8226_VDDCX RPMPD_VDDCX
+#define MSM8226_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8226_VDDCX_VFC RPMPD_VDDCX_VFC
/* MSM8939 Power Domains */
#define MSM8939_VDDMDCX 0
@@ -265,11 +49,11 @@
#define MSM8939_VDDMX_AO 7
/* MSM8916 Power Domain Indexes */
-#define MSM8916_VDDCX 0
-#define MSM8916_VDDCX_AO 1
-#define MSM8916_VDDCX_VFC 2
-#define MSM8916_VDDMX 3
-#define MSM8916_VDDMX_AO 4
+#define MSM8916_VDDCX RPMPD_VDDCX
+#define MSM8916_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8916_VDDCX_VFC RPMPD_VDDCX_VFC
+#define MSM8916_VDDMX RPMPD_VDDMX
+#define MSM8916_VDDMX_AO RPMPD_VDDMX_AO
/* MSM8909 Power Domain Indexes */
#define MSM8909_VDDCX MSM8916_VDDCX
@@ -279,11 +63,11 @@
#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
/* MSM8917 Power Domain Indexes */
-#define MSM8917_VDDCX 0
-#define MSM8917_VDDCX_AO 1
-#define MSM8917_VDDCX_VFL 2
-#define MSM8917_VDDMX 3
-#define MSM8917_VDDMX_AO 4
+#define MSM8917_VDDCX RPMPD_VDDCX
+#define MSM8917_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8917_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8917_VDDMX RPMPD_VDDMX
+#define MSM8917_VDDMX_AO RPMPD_VDDMX_AO
/* MSM8937 Power Domain Indexes */
#define MSM8937_VDDCX MSM8917_VDDCX
@@ -316,12 +100,12 @@
#define MSM8974_VDDGFX_VFC 4
/* MSM8976 Power Domain Indexes */
-#define MSM8976_VDDCX 0
-#define MSM8976_VDDCX_AO 1
-#define MSM8976_VDDCX_VFL 2
-#define MSM8976_VDDMX 3
-#define MSM8976_VDDMX_AO 4
-#define MSM8976_VDDMX_VFL 5
+#define MSM8976_VDDCX RPMPD_VDDCX
+#define MSM8976_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8976_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8976_VDDMX RPMPD_VDDMX
+#define MSM8976_VDDMX_AO RPMPD_VDDMX_AO
+#define MSM8976_VDDMX_VFL RPMPD_VDDMX_VFL
/* MSM8994 Power Domain Indexes */
#define MSM8994_VDDCX 0
@@ -342,16 +126,26 @@
#define MSM8996_VDDSSCX_VFC 6
/* MSM8998 Power Domain Indexes */
-#define MSM8998_VDDCX 0
-#define MSM8998_VDDCX_AO 1
-#define MSM8998_VDDCX_VFL 2
-#define MSM8998_VDDMX 3
-#define MSM8998_VDDMX_AO 4
-#define MSM8998_VDDMX_VFL 5
-#define MSM8998_SSCCX 6
-#define MSM8998_SSCCX_VFL 7
-#define MSM8998_SSCMX 8
-#define MSM8998_SSCMX_VFL 9
+#define MSM8998_VDDCX RPMPD_VDDCX
+#define MSM8998_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8998_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8998_VDDMX RPMPD_VDDMX
+#define MSM8998_VDDMX_AO RPMPD_VDDMX_AO
+#define MSM8998_VDDMX_VFL RPMPD_VDDMX_VFL
+#define MSM8998_SSCCX RPMPD_SSCCX
+#define MSM8998_SSCCX_VFL RPMPD_SSCCX_VFL
+#define MSM8998_SSCMX RPMPD_SSCMX
+#define MSM8998_SSCMX_VFL RPMPD_SSCMX_VFL
+
+/* QCM2290 Power Domains */
+#define QCM2290_VDDCX 0
+#define QCM2290_VDDCX_AO 1
+#define QCM2290_VDDCX_VFL 2
+#define QCM2290_VDDMX 3
+#define QCM2290_VDDMX_AO 4
+#define QCM2290_VDDMX_VFL 5
+#define QCM2290_VDD_LPI_CX 6
+#define QCM2290_VDD_LPI_MX 7
/* QCS404 Power Domains */
#define QCS404_VDDMX 0
@@ -363,16 +157,16 @@
#define QCS404_LPIMX_VFL 6
/* SDM660 Power Domains */
-#define SDM660_VDDCX 0
-#define SDM660_VDDCX_AO 1
-#define SDM660_VDDCX_VFL 2
-#define SDM660_VDDMX 3
-#define SDM660_VDDMX_AO 4
-#define SDM660_VDDMX_VFL 5
-#define SDM660_SSCCX 6
-#define SDM660_SSCCX_VFL 7
-#define SDM660_SSCMX 8
-#define SDM660_SSCMX_VFL 9
+#define SDM660_VDDCX RPMPD_VDDCX
+#define SDM660_VDDCX_AO RPMPD_VDDCX_AO
+#define SDM660_VDDCX_VFL RPMPD_VDDCX_VFL
+#define SDM660_VDDMX RPMPD_VDDMX
+#define SDM660_VDDMX_AO RPMPD_VDDMX_AO
+#define SDM660_VDDMX_VFL RPMPD_VDDMX_VFL
+#define SDM660_SSCCX RPMPD_SSCCX
+#define SDM660_SSCCX_VFL RPMPD_SSCCX_VFL
+#define SDM660_SSCMX RPMPD_SSCMX
+#define SDM660_SSCMX_VFL RPMPD_SSCMX_VFL
/* SM6115 Power Domains */
#define SM6115_VDDCX 0
@@ -385,22 +179,24 @@
#define SM6115_VDD_LPI_MX 7
/* SM6125 Power Domains */
-#define SM6125_VDDCX 0
-#define SM6125_VDDCX_AO 1
-#define SM6125_VDDCX_VFL 2
-#define SM6125_VDDMX 3
-#define SM6125_VDDMX_AO 4
-#define SM6125_VDDMX_VFL 5
-
-/* QCM2290 Power Domains */
-#define QCM2290_VDDCX 0
-#define QCM2290_VDDCX_AO 1
-#define QCM2290_VDDCX_VFL 2
-#define QCM2290_VDDMX 3
-#define QCM2290_VDDMX_AO 4
-#define QCM2290_VDDMX_VFL 5
-#define QCM2290_VDD_LPI_CX 6
-#define QCM2290_VDD_LPI_MX 7
+#define SM6125_VDDCX RPMPD_VDDCX
+#define SM6125_VDDCX_AO RPMPD_VDDCX_AO
+#define SM6125_VDDCX_VFL RPMPD_VDDCX_VFL
+#define SM6125_VDDMX RPMPD_VDDMX
+#define SM6125_VDDMX_AO RPMPD_VDDMX_AO
+#define SM6125_VDDMX_VFL RPMPD_VDDMX_VFL
+
+/* SM6375 Power Domain Indexes */
+#define SM6375_VDDCX 0
+#define SM6375_VDDCX_AO 1
+#define SM6375_VDDCX_VFL 2
+#define SM6375_VDDMX 3
+#define SM6375_VDDMX_AO 4
+#define SM6375_VDDMX_VFL 5
+#define SM6375_VDDGX 6
+#define SM6375_VDDGX_AO 7
+#define SM6375_VDD_LPI_CX 8
+#define SM6375_VDD_LPI_MX 9
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
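
The practical effect of this rework is that the per-SoC macros become aliases for the shared RPMPD_* indexes, so existing board files compile to the same cells as before. For instance, in an illustrative consumer node, the legacy and generic spellings are now interchangeable:

#include <dt-bindings/power/qcom-rpmpd.h>

display-subsystem@1a00000 {
	/* MSM8916_VDDCX now expands to RPMPD_VDDCX, i.e. index 0 */
	power-domains = <&rpmpd MSM8916_VDDCX>;
};
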
diff --git a/include/dt-bindings/power/rockchip,rk3528-power.h b/include/dt-bindings/power/rockchip,rk3528-power.h
new file mode 100644
index 000000000000..318923cdaaf6
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3528-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3528_POWER_H__
+#define __DT_BINDINGS_POWER_RK3528_POWER_H__
+
+#define RK3528_PD_PMU 0
+#define RK3528_PD_BUS 1
+#define RK3528_PD_DDR 2
+#define RK3528_PD_MSCH 3
+
+/* VD_GPU */
+#define RK3528_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3528_PD_RKVDEC 5
+#define RK3528_PD_RKVENC 6
+#define RK3528_PD_VO 7
+#define RK3528_PD_VPU 8
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rk3562-power.h b/include/dt-bindings/power/rockchip,rk3562-power.h
new file mode 100644
index 000000000000..5182c2427a55
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3562-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2024 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __DT_BINDINGS_POWER_RK3562_POWER_H__
+#define __DT_BINDINGS_POWER_RK3562_POWER_H__
+
+/* VD_CORE */
+#define RK3562_PD_CPU_0 0
+#define RK3562_PD_CPU_1 1
+#define RK3562_PD_CPU_2 2
+#define RK3562_PD_CPU_3 3
+#define RK3562_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3562_PD_PMU 5
+#define RK3562_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RK3562_PD_NPU 7
+
+/* VD_GPU */
+#define RK3562_PD_GPU 8
+
+/* VD_LOGIC */
+#define RK3562_PD_DDR 9
+#define RK3562_PD_VEPU 10
+#define RK3562_PD_VDPU 11
+#define RK3562_PD_VI 12
+#define RK3562_PD_VO 13
+#define RK3562_PD_RGA 14
+#define RK3562_PD_PHP 15
+#define RK3562_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rk3576-power.h b/include/dt-bindings/power/rockchip,rk3576-power.h
new file mode 100644
index 000000000000..324a056aa851
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3576-power.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__
+#define __DT_BINDINGS_POWER_RK3576_POWER_H__
+
+/* VD_NPU */
+#define RK3576_PD_NPU 0
+#define RK3576_PD_NPUTOP 1
+#define RK3576_PD_NPU0 2
+#define RK3576_PD_NPU1 3
+
+/* VD_GPU */
+#define RK3576_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3576_PD_NVM 5
+#define RK3576_PD_SDGMAC 6
+#define RK3576_PD_USB 7
+#define RK3576_PD_PHP 8
+#define RK3576_PD_SUBPHP 9
+#define RK3576_PD_AUDIO 10
+#define RK3576_PD_VEPU0 11
+#define RK3576_PD_VEPU1 12
+#define RK3576_PD_VPU 13
+#define RK3576_PD_VDEC 14
+#define RK3576_PD_VI 15
+#define RK3576_PD_VO0 16
+#define RK3576_PD_VO1 17
+#define RK3576_PD_VOP 18
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rv1126b-power-controller.h b/include/dt-bindings/power/rockchip,rv1126b-power-controller.h
new file mode 100644
index 000000000000..48ea87a4423c
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126b-power-controller.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef __DT_BINDINGS_POWER_RV1126B_POWER_CONTROLLER_H__
+#define __DT_BINDINGS_POWER_RV1126B_POWER_CONTROLLER_H__
+
+/* VD_NPU */
+#define RV1126B_PD_NPU 0
+
+/* VD_LOGIC */
+#define RV1126B_PD_VDO 1
+#define RV1126B_PD_AIISP 2
+
+#endif
diff --git a/include/dt-bindings/power/thead,th1520-power.h b/include/dt-bindings/power/thead,th1520-power.h
new file mode 100644
index 000000000000..8395bd1459f3
--- /dev/null
+++ b/include/dt-bindings/power/thead,th1520-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_POWER_TH1520_H
+#define __DT_BINDINGS_POWER_TH1520_H
+
+#define TH1520_AUDIO_PD 0
+#define TH1520_VDEC_PD 1
+#define TH1520_NPU_PD 2
+#define TH1520_VENC_PD 3
+#define TH1520_GPU_PD 4
+#define TH1520_DSP0_PD 5
+#define TH1520_DSP1_PD 6
+
+#endif
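
Consumers reference these identifiers through the generic power-domain binding. A minimal sketch, assuming the TH1520 AON firmware node as the provider; the label and unit address are illustrative:

#include <dt-bindings/power/thead,th1520-power.h>

gpu@ffef400000 {
	power-domains = <&aon TH1520_GPU_PD>;
};
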
diff --git a/include/dt-bindings/regulator/nxp,pca9450-regulator.h b/include/dt-bindings/regulator/nxp,pca9450-regulator.h
new file mode 100644
index 000000000000..08434caef429
--- /dev/null
+++ b/include/dt-bindings/regulator/nxp,pca9450-regulator.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for the NXP PCA9450A/B/C PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATORS_NXP_PCA9450_H
+#define _DT_BINDINGS_REGULATORS_NXP_PCA9450_H
+
+/*
+ * Buck mode constants which may be used in devicetree properties (e.g.
+ * regulator-initial-mode, regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define PCA9450_BUCK_MODE_AUTO 0
+#define PCA9450_BUCK_MODE_FORCE_PWM 1
+
+#endif
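
As the header comment says, these values plug into the generic regulator mode properties. A hedged sketch of one PCA9450 buck node; the supply name and voltage range are illustrative:

#include <dt-bindings/regulator/nxp,pca9450-regulator.h>

BUCK1 {
	regulator-name = "vdd_soc";
	regulator-min-microvolt = <600000>;
	regulator-max-microvolt = <2187500>;
	regulator-initial-mode = <PCA9450_BUCK_MODE_AUTO>;
	regulator-allowed-modes = <PCA9450_BUCK_MODE_AUTO
				   PCA9450_BUCK_MODE_FORCE_PWM>;
};
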
diff --git a/include/dt-bindings/regulator/st,stm32mp15-regulator.h b/include/dt-bindings/regulator/st,stm32mp15-regulator.h
new file mode 100644
index 000000000000..7052507cb3e5
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp15-regulator.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H
+
+/* SCMI voltage domain identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 3
+#define VOLTD_SCMI_STPMIC1_BUCK2 4
+#define VOLTD_SCMI_STPMIC1_BUCK3 5
+#define VOLTD_SCMI_STPMIC1_BUCK4 6
+#define VOLTD_SCMI_STPMIC1_LDO1 7
+#define VOLTD_SCMI_STPMIC1_LDO2 8
+#define VOLTD_SCMI_STPMIC1_LDO3 9
+#define VOLTD_SCMI_STPMIC1_LDO4 10
+#define VOLTD_SCMI_STPMIC1_LDO5 11
+#define VOLTD_SCMI_STPMIC1_LDO6 12
+#define VOLTD_SCMI_STPMIC1_VREFDDR 13
+#define VOLTD_SCMI_STPMIC1_BOOST 14
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 15
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 16
+#define VOLTD_SCMI_VREFBUF 17
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 18
+#define VOLTD_SCMI_REGU1 19
+#define VOLTD_SCMI_REGU2 20
+#define VOLTD_SCMI_REGU3 21
+#define VOLTD_SCMI_REGU4 22
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H */
diff --git a/include/dt-bindings/regulator/st,stm32mp25-regulator.h b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
new file mode 100644
index 000000000000..3c3d30911dd0
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+
+/* SCMI voltage domains identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_VDDIO1 0
+#define VOLTD_SCMI_VDDIO2 1
+#define VOLTD_SCMI_VDDIO3 2
+#define VOLTD_SCMI_VDDIO4 3
+#define VOLTD_SCMI_VDDIO 4
+#define VOLTD_SCMI_UCPD 5
+#define VOLTD_SCMI_USB33 6
+#define VOLTD_SCMI_ADC 7
+#define VOLTD_SCMI_GPU 8
+#define VOLTD_SCMI_VREFBUF 9
+
+/* STPMIC2 regulators */
+#define VOLTD_SCMI_STPMIC2_BUCK1 10
+#define VOLTD_SCMI_STPMIC2_BUCK2 11
+#define VOLTD_SCMI_STPMIC2_BUCK3 12
+#define VOLTD_SCMI_STPMIC2_BUCK4 13
+#define VOLTD_SCMI_STPMIC2_BUCK5 14
+#define VOLTD_SCMI_STPMIC2_BUCK6 15
+#define VOLTD_SCMI_STPMIC2_BUCK7 16
+#define VOLTD_SCMI_STPMIC2_LDO1 17
+#define VOLTD_SCMI_STPMIC2_LDO2 18
+#define VOLTD_SCMI_STPMIC2_LDO3 19
+#define VOLTD_SCMI_STPMIC2_LDO4 20
+#define VOLTD_SCMI_STPMIC2_LDO5 21
+#define VOLTD_SCMI_STPMIC2_LDO6 22
+#define VOLTD_SCMI_STPMIC2_LDO7 23
+#define VOLTD_SCMI_STPMIC2_LDO8 24
+#define VOLTD_SCMI_STPMIC2_REFDDR 25
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 26
+#define VOLTD_SCMI_REGU1 27
+#define VOLTD_SCMI_REGU2 28
+#define VOLTD_SCMI_REGU3 29
+#define VOLTD_SCMI_REGU4 30
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H */
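
Each identifier is meant to be the reg value of one regulator subnode under the SCMI voltage-domain protocol node, with the unit address matching the index. A minimal sketch; the label and regulator name are illustrative:

#include <dt-bindings/regulator/st,stm32mp25-regulator.h>

regulators {
	#address-cells = <1>;
	#size-cells = <0>;

	scmi_vddio1: regulator@0 {
		reg = <VOLTD_SCMI_VDDIO1>;
		regulator-name = "vddio1";
	};
};
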
diff --git a/include/dt-bindings/reset/airoha,en7523-reset.h b/include/dt-bindings/reset/airoha,en7523-reset.h
new file mode 100644
index 000000000000..211e8a23a21c
--- /dev/null
+++ b/include/dt-bindings/reset/airoha,en7523-reset.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 iopsys Software Solutions AB.
+ * Copyright (C) 2025 Genexis AB.
+ *
+ * Author: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
+ *
+ * based on
+ * include/dt-bindings/reset/airoha,en7581-reset.h
+ * by Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_
+#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_
+
+/* RST_CTRL2 */
+#define EN7523_XPON_PHY_RST 0
+#define EN7523_XSI_MAC_RST 1
+#define EN7523_XSI_PHY_RST 2
+#define EN7523_NPU_RST 3
+#define EN7523_I2S_RST 4
+#define EN7523_TRNG_RST 5
+#define EN7523_TRNG_MSTART_RST 6
+#define EN7523_DUAL_HSI0_RST 7
+#define EN7523_DUAL_HSI1_RST 8
+#define EN7523_HSI_RST 9
+#define EN7523_DUAL_HSI0_MAC_RST 10
+#define EN7523_DUAL_HSI1_MAC_RST 11
+#define EN7523_HSI_MAC_RST 12
+#define EN7523_WDMA_RST 13
+#define EN7523_WOE0_RST 14
+#define EN7523_WOE1_RST 15
+#define EN7523_HSDMA_RST 16
+#define EN7523_I2C2RBUS_RST 17
+#define EN7523_TDMA_RST 18
+/* RST_CTRL1 */
+#define EN7523_PCM1_ZSI_ISI_RST 19
+#define EN7523_FE_PDMA_RST 20
+#define EN7523_FE_QDMA_RST 21
+#define EN7523_PCM_SPIWP_RST 22
+#define EN7523_CRYPTO_RST 23
+#define EN7523_TIMER_RST 24
+#define EN7523_PCM1_RST 25
+#define EN7523_UART_RST 26
+#define EN7523_GPIO_RST 27
+#define EN7523_GDMA_RST 28
+#define EN7523_I2C_MASTER_RST 29
+#define EN7523_PCM2_ZSI_ISI_RST 30
+#define EN7523_SFC_RST 31
+#define EN7523_UART2_RST 32
+#define EN7523_GDMP_RST 33
+#define EN7523_FE_RST 34
+#define EN7523_USB_HOST_P0_RST 35
+#define EN7523_GSW_RST 36
+#define EN7523_SFC2_PCM_RST 37
+#define EN7523_PCIE0_RST 38
+#define EN7523_PCIE1_RST 39
+#define EN7523_PCIE_HB_RST 40
+#define EN7523_XPON_MAC_RST 41
+
+#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_ */
diff --git a/include/dt-bindings/reset/airoha,en7581-reset.h b/include/dt-bindings/reset/airoha,en7581-reset.h
new file mode 100644
index 000000000000..6544a1790b83
--- /dev/null
+++ b/include/dt-bindings/reset/airoha,en7581-reset.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
+#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
+
+/* RST_CTRL2 */
+#define EN7581_XPON_PHY_RST 0
+#define EN7581_CPU_TIMER2_RST 1
+#define EN7581_HSUART_RST 2
+#define EN7581_UART4_RST 3
+#define EN7581_UART5_RST 4
+#define EN7581_I2C2_RST 5
+#define EN7581_XSI_MAC_RST 6
+#define EN7581_XSI_PHY_RST 7
+#define EN7581_NPU_RST 8
+#define EN7581_I2S_RST 9
+#define EN7581_TRNG_RST 10
+#define EN7581_TRNG_MSTART_RST 11
+#define EN7581_DUAL_HSI0_RST 12
+#define EN7581_DUAL_HSI1_RST 13
+#define EN7581_HSI_RST 14
+#define EN7581_DUAL_HSI0_MAC_RST 15
+#define EN7581_DUAL_HSI1_MAC_RST 16
+#define EN7581_HSI_MAC_RST 17
+#define EN7581_WDMA_RST 18
+#define EN7581_WOE0_RST 19
+#define EN7581_WOE1_RST 20
+#define EN7581_HSDMA_RST 21
+#define EN7581_TDMA_RST 22
+#define EN7581_EMMC_RST 23
+#define EN7581_SOE_RST 24
+#define EN7581_PCIE2_RST 25
+#define EN7581_XFP_MAC_RST 26
+#define EN7581_USB_HOST_P1_RST 27
+#define EN7581_USB_HOST_P1_U3_PHY_RST 28
+/* RST_CTRL1 */
+#define EN7581_PCM1_ZSI_ISI_RST 29
+#define EN7581_FE_PDMA_RST 30
+#define EN7581_FE_QDMA_RST 31
+#define EN7581_PCM_SPIWP_RST 32
+#define EN7581_CRYPTO_RST 33
+#define EN7581_TIMER_RST 34
+#define EN7581_PCM1_RST 35
+#define EN7581_UART_RST 36
+#define EN7581_GPIO_RST 37
+#define EN7581_GDMA_RST 38
+#define EN7581_I2C_MASTER_RST 39
+#define EN7581_PCM2_ZSI_ISI_RST 40
+#define EN7581_SFC_RST 41
+#define EN7581_UART2_RST 42
+#define EN7581_GDMP_RST 43
+#define EN7581_FE_RST 44
+#define EN7581_USB_HOST_P0_RST 45
+#define EN7581_GSW_RST 46
+#define EN7581_SFC2_PCM_RST 47
+#define EN7581_PCIE0_RST 48
+#define EN7581_PCIE1_RST 49
+#define EN7581_CPU_TIMER_RST 50
+#define EN7581_PCIE_HB_RST 51
+#define EN7581_XPON_MAC_RST 52
+
+#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_ */
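
These indexes are consumed through the standard reset binding, with the EN7581 clock controller acting as the reset provider. A hedged sketch; the unit address and reset-names strings are illustrative:

#include <dt-bindings/reset/airoha,en7581-reset.h>

pcie@1fc00000 {
	resets = <&scuclk EN7581_PCIE0_RST>,
		 <&scuclk EN7581_PCIE_HB_RST>;
	reset-names = "phy", "hb";
};
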
diff --git a/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
new file mode 100644
index 000000000000..7693552f1507
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2024, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Jan Dakinevich <jan.dakinevich@salutedevices.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+
+#define AUD_RESET_DDRARB 0
+#define AUD_RESET_TDMIN_A 1
+#define AUD_RESET_TDMIN_B 2
+#define AUD_RESET_TDMIN_LB 3
+#define AUD_RESET_LOOPBACK 4
+#define AUD_RESET_TDMOUT_A 5
+#define AUD_RESET_TDMOUT_B 6
+#define AUD_RESET_FRDDR_A 7
+#define AUD_RESET_FRDDR_B 8
+#define AUD_RESET_TODDR_A 9
+#define AUD_RESET_TODDR_B 10
+#define AUD_RESET_SPDIFIN 11
+#define AUD_RESET_RESAMPLE 12
+#define AUD_RESET_EQDRC 13
+#define AUD_RESET_LOCKER 14
+#define AUD_RESET_TOACODEC 30
+#define AUD_RESET_CLKTREE 31
+
+#define AUD_VAD_RESET_DDRARB 0
+#define AUD_VAD_RESET_PDM 1
+#define AUD_VAD_RESET_TDMIN_VAD 2
+#define AUD_VAD_RESET_TODDR_VAD 3
+#define AUD_VAD_RESET_TOVAD 4
+#define AUD_VAD_RESET_CLKTREE 5
+
+#endif /* _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H */
diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h
new file mode 100644
index 000000000000..d53c719b7a66
--- /dev/null
+++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for AST2700 reset controller.
+ *
+ * Copyright (c) 2024 Aspeed Technology Inc.
+ */
+
+#ifndef _MACH_ASPEED_AST2700_RESET_H_
+#define _MACH_ASPEED_AST2700_RESET_H_
+
+/* SOC0 */
+#define SCU0_RESET_SDRAM 0
+#define SCU0_RESET_DDRPHY 1
+#define SCU0_RESET_RSA 2
+#define SCU0_RESET_SHA3 3
+#define SCU0_RESET_HACE 4
+#define SCU0_RESET_SOC 5
+#define SCU0_RESET_VIDEO 6
+#define SCU0_RESET_2D 7
+#define SCU0_RESET_PCIS 8
+#define SCU0_RESET_RVAS0 9
+#define SCU0_RESET_RVAS1 10
+#define SCU0_RESET_SM3 11
+#define SCU0_RESET_SM4 12
+#define SCU0_RESET_CRT0 13
+#define SCU0_RESET_ECC 14
+#define SCU0_RESET_DP_PCI 15
+#define SCU0_RESET_UFS 16
+#define SCU0_RESET_EMMC 17
+#define SCU0_RESET_PCIE1RST 18
+#define SCU0_RESET_PCIE1RSTOE 19
+#define SCU0_RESET_PCIE0RST 20
+#define SCU0_RESET_PCIE0RSTOE 21
+#define SCU0_RESET_JTAG 22
+#define SCU0_RESET_MCTP0 23
+#define SCU0_RESET_MCTP1 24
+#define SCU0_RESET_XDMA0 25
+#define SCU0_RESET_XDMA1 26
+#define SCU0_RESET_H2X1 27
+#define SCU0_RESET_DP 28
+#define SCU0_RESET_DP_MCU 29
+#define SCU0_RESET_SSP 30
+#define SCU0_RESET_H2X0 31
+#define SCU0_RESET_PORTA_VHUB 32
+#define SCU0_RESET_PORTA_PHY3 33
+#define SCU0_RESET_PORTA_XHCI 34
+#define SCU0_RESET_PORTB_VHUB 35
+#define SCU0_RESET_PORTB_PHY3 36
+#define SCU0_RESET_PORTB_XHCI 37
+#define SCU0_RESET_PORTA_VHUB_EHCI 38
+#define SCU0_RESET_PORTB_VHUB_EHCI 39
+#define SCU0_RESET_UHCI 40
+#define SCU0_RESET_TSP 41
+#define SCU0_RESET_E2M0 42
+#define SCU0_RESET_E2M1 43
+#define SCU0_RESET_VLINK 44
+
+/* SOC1 */
+#define SCU1_RESET_LPC0 0
+#define SCU1_RESET_LPC1 1
+#define SCU1_RESET_MII 2
+#define SCU1_RESET_PECI 3
+#define SCU1_RESET_PWM 4
+#define SCU1_RESET_MAC0 5
+#define SCU1_RESET_MAC1 6
+#define SCU1_RESET_MAC2 7
+#define SCU1_RESET_ADC 8
+#define SCU1_RESET_SD 9
+#define SCU1_RESET_ESPI0 10
+#define SCU1_RESET_ESPI1 11
+#define SCU1_RESET_JTAG1 12
+#define SCU1_RESET_SPI0 13
+#define SCU1_RESET_SPI1 14
+#define SCU1_RESET_SPI2 15
+#define SCU1_RESET_I3C0 16
+#define SCU1_RESET_I3C1 17
+#define SCU1_RESET_I3C2 18
+#define SCU1_RESET_I3C3 19
+#define SCU1_RESET_I3C4 20
+#define SCU1_RESET_I3C5 21
+#define SCU1_RESET_I3C6 22
+#define SCU1_RESET_I3C7 23
+#define SCU1_RESET_I3C8 24
+#define SCU1_RESET_I3C9 25
+#define SCU1_RESET_I3C10 26
+#define SCU1_RESET_I3C11 27
+#define SCU1_RESET_I3C12 28
+#define SCU1_RESET_I3C13 29
+#define SCU1_RESET_I3C14 30
+#define SCU1_RESET_I3C15 31
+#define SCU1_RESET_MCU0 32
+#define SCU1_RESET_MCU1 33
+#define SCU1_RESET_H2A_SPI1 34
+#define SCU1_RESET_H2A_SPI2 35
+#define SCU1_RESET_UART0 36
+#define SCU1_RESET_UART1 37
+#define SCU1_RESET_UART2 38
+#define SCU1_RESET_UART3 39
+#define SCU1_RESET_I2C_FILTER 40
+#define SCU1_RESET_CALIPTRA 41
+#define SCU1_RESET_XDMA 42
+#define SCU1_RESET_FSI 43
+#define SCU1_RESET_CAN 44
+#define SCU1_RESET_MCTP 45
+#define SCU1_RESET_I2C 46
+#define SCU1_RESET_UART6 47
+#define SCU1_RESET_UART7 48
+#define SCU1_RESET_UART8 49
+#define SCU1_RESET_UART9 50
+#define SCU1_RESET_LTPI0 51
+#define SCU1_RESET_VGAL 52
+#define SCU1_RESET_LTPI1 53
+#define SCU1_RESET_ACE 54
+#define SCU1_RESET_E2M 55
+#define SCU1_RESET_UHCI 56
+#define SCU1_RESET_PORTC_USB2UART 57
+#define SCU1_RESET_PORTC_VHUB_EHCI 58
+#define SCU1_RESET_PORTD_USB2UART 59
+#define SCU1_RESET_PORTD_VHUB_EHCI 60
+#define SCU1_RESET_H2X 61
+#define SCU1_RESET_I3CDMA 62
+#define SCU1_RESET_PCIE2RST 63
+
+#endif /* _MACH_ASPEED_AST2700_RESET_H_ */
diff --git a/include/dt-bindings/reset/canaan,k230-rst.h b/include/dt-bindings/reset/canaan,k230-rst.h
new file mode 100644
index 000000000000..e4f6612607fe
--- /dev/null
+++ b/include/dt-bindings/reset/canaan,k230-rst.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023-2024 Canaan Bright Sight Co., Ltd
+ * Copyright (C) 2024-2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+#ifndef _DT_BINDINGS_CANAAN_K230_RST_H_
+#define _DT_BINDINGS_CANAAN_K230_RST_H_
+
+#define RST_CPU0 0
+#define RST_CPU1 1
+#define RST_CPU0_FLUSH 2
+#define RST_CPU1_FLUSH 3
+#define RST_AI 4
+#define RST_VPU 5
+#define RST_HISYS 6
+#define RST_HISYS_AHB 7
+#define RST_SDIO0 8
+#define RST_SDIO1 9
+#define RST_SDIO_AXI 10
+#define RST_USB0 11
+#define RST_USB1 12
+#define RST_USB0_AHB 13
+#define RST_USB1_AHB 14
+#define RST_SPI0 15
+#define RST_SPI1 16
+#define RST_SPI2 17
+#define RST_SEC 18
+#define RST_PDMA 19
+#define RST_SDMA 20
+#define RST_DECOMPRESS 21
+#define RST_SRAM 22
+#define RST_SHRM_AXIM 23
+#define RST_SHRM_AXIS 24
+#define RST_NONAI2D 25
+#define RST_MCTL 26
+#define RST_ISP 27
+#define RST_ISP_DW 28
+#define RST_DPU 29
+#define RST_DISP 30
+#define RST_GPU 31
+#define RST_AUDIO 32
+#define RST_TIMER0 33
+#define RST_TIMER1 34
+#define RST_TIMER2 35
+#define RST_TIMER3 36
+#define RST_TIMER4 37
+#define RST_TIMER5 38
+#define RST_TIMER_APB 39
+#define RST_HDI 40
+#define RST_WDT0 41
+#define RST_WDT1 42
+#define RST_WDT0_APB 43
+#define RST_WDT1_APB 44
+#define RST_TS_APB 45
+#define RST_MAILBOX 46
+#define RST_STC 47
+#define RST_PMU 48
+#define RST_LOSYS_APB 49
+#define RST_UART0 50
+#define RST_UART1 51
+#define RST_UART2 52
+#define RST_UART3 53
+#define RST_UART4 54
+#define RST_I2C0 55
+#define RST_I2C1 56
+#define RST_I2C2 57
+#define RST_I2C3 58
+#define RST_I2C4 59
+#define RST_JAMLINK0_APB 60
+#define RST_JAMLINK1_APB 61
+#define RST_JAMLINK2_APB 62
+#define RST_JAMLINK3_APB 63
+#define RST_CODEC_APB 64
+#define RST_GPIO_DB 65
+#define RST_GPIO_APB 66
+#define RST_ADC 67
+#define RST_ADC_APB 68
+#define RST_PWM_APB 69
+#define RST_SHRM_APB 70
+#define RST_CSI0 71
+#define RST_CSI1 72
+#define RST_CSI2 73
+#define RST_CSI_DPHY 74
+#define RST_ISP_AHB 75
+#define RST_M0 76
+#define RST_M1 77
+#define RST_M2 78
+#define RST_SPI2AXI 79
+
+#endif
diff --git a/include/dt-bindings/reset/eswin,eic7700-reset.h b/include/dt-bindings/reset/eswin,eic7700-reset.h
new file mode 100644
index 000000000000..a370c9f74307
--- /dev/null
+++ b/include/dt-bindings/reset/eswin,eic7700-reset.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Device Tree binding constants for EIC7700 reset controller.
+ *
+ * Authors:
+ * Yifeng Huang <huangyifeng@eswincomputing.com>
+ * Xuyang Dong <dongxuyang@eswincomputing.com>
+ */
+
+#ifndef __DT_ESWIN_EIC7700_RESET_H__
+#define __DT_ESWIN_EIC7700_RESET_H__
+
+#define EIC7700_RESET_NOC_NSP 0
+#define EIC7700_RESET_NOC_CFG 1
+#define EIC7700_RESET_RNOC_NSP 2
+#define EIC7700_RESET_SNOC_TCU 3
+#define EIC7700_RESET_SNOC_U84 4
+#define EIC7700_RESET_SNOC_PCIE_XSR 5
+#define EIC7700_RESET_SNOC_PCIE_XMR 6
+#define EIC7700_RESET_SNOC_PCIE_PR 7
+#define EIC7700_RESET_SNOC_NPU 8
+#define EIC7700_RESET_SNOC_JTAG 9
+#define EIC7700_RESET_SNOC_DSP 10
+#define EIC7700_RESET_SNOC_DDRC1_P2 11
+#define EIC7700_RESET_SNOC_DDRC1_P1 12
+#define EIC7700_RESET_SNOC_DDRC0_P2 13
+#define EIC7700_RESET_SNOC_DDRC0_P1 14
+#define EIC7700_RESET_SNOC_D2D 15
+#define EIC7700_RESET_SNOC_AON 16
+#define EIC7700_RESET_GPU_AXI 17
+#define EIC7700_RESET_GPU_CFG 18
+#define EIC7700_RESET_GPU_GRAY 19
+#define EIC7700_RESET_GPU_JONES 20
+#define EIC7700_RESET_GPU_SPU 21
+#define EIC7700_RESET_DSP_AXI 22
+#define EIC7700_RESET_DSP_CFG 23
+#define EIC7700_RESET_DSP_DIV4 24
+#define EIC7700_RESET_DSP_DIV0 25
+#define EIC7700_RESET_DSP_DIV1 26
+#define EIC7700_RESET_DSP_DIV2 27
+#define EIC7700_RESET_DSP_DIV3 28
+#define EIC7700_RESET_D2D_AXI 29
+#define EIC7700_RESET_D2D_CFG 30
+#define EIC7700_RESET_D2D_PRST 31
+#define EIC7700_RESET_D2D_RAW_PCS 32
+#define EIC7700_RESET_D2D_RX 33
+#define EIC7700_RESET_D2D_TX 34
+#define EIC7700_RESET_D2D_CORE 35
+#define EIC7700_RESET_DDR1_ARST 36
+#define EIC7700_RESET_DDR1_TRACE 37
+#define EIC7700_RESET_DDR0_ARST 38
+#define EIC7700_RESET_DDR_CFG 39
+#define EIC7700_RESET_DDR0_TRACE 40
+#define EIC7700_RESET_DDR_CORE 41
+#define EIC7700_RESET_DDR_PRST 42
+#define EIC7700_RESET_TCU_AXI 43
+#define EIC7700_RESET_TCU_CFG 44
+#define EIC7700_RESET_TCU_TBU0 45
+#define EIC7700_RESET_TCU_TBU1 46
+#define EIC7700_RESET_TCU_TBU2 47
+#define EIC7700_RESET_TCU_TBU3 48
+#define EIC7700_RESET_TCU_TBU4 49
+#define EIC7700_RESET_TCU_TBU5 50
+#define EIC7700_RESET_TCU_TBU6 51
+#define EIC7700_RESET_TCU_TBU7 52
+#define EIC7700_RESET_TCU_TBU8 53
+#define EIC7700_RESET_TCU_TBU9 54
+#define EIC7700_RESET_TCU_TBU10 55
+#define EIC7700_RESET_TCU_TBU11 56
+#define EIC7700_RESET_TCU_TBU12 57
+#define EIC7700_RESET_TCU_TBU13 58
+#define EIC7700_RESET_TCU_TBU14 59
+#define EIC7700_RESET_TCU_TBU15 60
+#define EIC7700_RESET_TCU_TBU16 61
+#define EIC7700_RESET_NPU_AXI 62
+#define EIC7700_RESET_NPU_CFG 63
+#define EIC7700_RESET_NPU_CORE 64
+#define EIC7700_RESET_NPU_E31CORE 65
+#define EIC7700_RESET_NPU_E31BUS 66
+#define EIC7700_RESET_NPU_E31DBG 67
+#define EIC7700_RESET_NPU_LLC 68
+#define EIC7700_RESET_HSP_AXI 69
+#define EIC7700_RESET_HSP_CFG 70
+#define EIC7700_RESET_HSP_POR 71
+#define EIC7700_RESET_MSHC0_PHY 72
+#define EIC7700_RESET_MSHC1_PHY 73
+#define EIC7700_RESET_MSHC2_PHY 74
+#define EIC7700_RESET_MSHC0_TXRX 75
+#define EIC7700_RESET_MSHC1_TXRX 76
+#define EIC7700_RESET_MSHC2_TXRX 77
+#define EIC7700_RESET_SATA_ASIC0 78
+#define EIC7700_RESET_SATA_OOB 79
+#define EIC7700_RESET_SATA_PMALIVE 80
+#define EIC7700_RESET_SATA_RBC 81
+#define EIC7700_RESET_DMA0 82
+#define EIC7700_RESET_HSP_DMA 83
+#define EIC7700_RESET_USB0_VAUX 84
+#define EIC7700_RESET_USB1_VAUX 85
+#define EIC7700_RESET_HSP_SD1_PRST 86
+#define EIC7700_RESET_HSP_SD0_PRST 87
+#define EIC7700_RESET_HSP_EMMC_PRST 88
+#define EIC7700_RESET_HSP_DMA_PRST 89
+#define EIC7700_RESET_HSP_SD1_ARST 90
+#define EIC7700_RESET_HSP_SD0_ARST 91
+#define EIC7700_RESET_HSP_EMMC_ARST 92
+#define EIC7700_RESET_HSP_DMA_ARST 93
+#define EIC7700_RESET_HSP_ETH1_ARST 94
+#define EIC7700_RESET_HSP_ETH0_ARST 95
+#define EIC7700_RESET_SATA_ARST 96
+#define EIC7700_RESET_PCIE_CFG 97
+#define EIC7700_RESET_PCIE_POWEUP 98
+#define EIC7700_RESET_PCIE_PERST 99
+#define EIC7700_RESET_I2C0 100
+#define EIC7700_RESET_I2C1 101
+#define EIC7700_RESET_I2C2 102
+#define EIC7700_RESET_I2C3 103
+#define EIC7700_RESET_I2C4 104
+#define EIC7700_RESET_I2C5 105
+#define EIC7700_RESET_I2C6 106
+#define EIC7700_RESET_I2C7 107
+#define EIC7700_RESET_I2C8 108
+#define EIC7700_RESET_I2C9 109
+#define EIC7700_RESET_FAN 110
+#define EIC7700_RESET_PVT0 111
+#define EIC7700_RESET_PVT1 112
+#define EIC7700_RESET_MBOX0 113
+#define EIC7700_RESET_MBOX1 114
+#define EIC7700_RESET_MBOX2 115
+#define EIC7700_RESET_MBOX3 116
+#define EIC7700_RESET_MBOX4 117
+#define EIC7700_RESET_MBOX5 118
+#define EIC7700_RESET_MBOX6 119
+#define EIC7700_RESET_MBOX7 120
+#define EIC7700_RESET_MBOX8 121
+#define EIC7700_RESET_MBOX9 122
+#define EIC7700_RESET_MBOX10 123
+#define EIC7700_RESET_MBOX11 124
+#define EIC7700_RESET_MBOX12 125
+#define EIC7700_RESET_MBOX13 126
+#define EIC7700_RESET_MBOX14 127
+#define EIC7700_RESET_MBOX15 128
+#define EIC7700_RESET_UART0 129
+#define EIC7700_RESET_UART1 130
+#define EIC7700_RESET_UART2 131
+#define EIC7700_RESET_UART3 132
+#define EIC7700_RESET_UART4 133
+#define EIC7700_RESET_GPIO0 134
+#define EIC7700_RESET_GPIO1 135
+#define EIC7700_RESET_TIMER 136
+#define EIC7700_RESET_SSI0 137
+#define EIC7700_RESET_SSI1 138
+#define EIC7700_RESET_WDT0 139
+#define EIC7700_RESET_WDT1 140
+#define EIC7700_RESET_WDT2 141
+#define EIC7700_RESET_WDT3 142
+#define EIC7700_RESET_LSP_CFG 143
+#define EIC7700_RESET_U84_CORE0 144
+#define EIC7700_RESET_U84_CORE1 145
+#define EIC7700_RESET_U84_CORE2 146
+#define EIC7700_RESET_U84_CORE3 147
+#define EIC7700_RESET_U84_BUS 148
+#define EIC7700_RESET_U84_DBG 149
+#define EIC7700_RESET_U84_TRACECOM 150
+#define EIC7700_RESET_U84_TRACE0 151
+#define EIC7700_RESET_U84_TRACE1 152
+#define EIC7700_RESET_U84_TRACE2 153
+#define EIC7700_RESET_U84_TRACE3 154
+#define EIC7700_RESET_SCPU_CORE 155
+#define EIC7700_RESET_SCPU_BUS 156
+#define EIC7700_RESET_SCPU_DBG 157
+#define EIC7700_RESET_LPCPU_CORE 158
+#define EIC7700_RESET_LPCPU_BUS 159
+#define EIC7700_RESET_LPCPU_DBG 160
+#define EIC7700_RESET_VC_CFG 161
+#define EIC7700_RESET_VC_AXI 162
+#define EIC7700_RESET_VC_MONCFG 163
+#define EIC7700_RESET_JD_CFG 164
+#define EIC7700_RESET_JD_AXI 165
+#define EIC7700_RESET_JE_CFG 166
+#define EIC7700_RESET_JE_AXI 167
+#define EIC7700_RESET_VD_CFG 168
+#define EIC7700_RESET_VD_AXI 169
+#define EIC7700_RESET_VE_AXI 170
+#define EIC7700_RESET_VE_CFG 171
+#define EIC7700_RESET_G2D_CORE 172
+#define EIC7700_RESET_G2D_CFG 173
+#define EIC7700_RESET_G2D_AXI 174
+#define EIC7700_RESET_VI_AXI 175
+#define EIC7700_RESET_VI_CFG 176
+#define EIC7700_RESET_VI_DWE 177
+#define EIC7700_RESET_DVP 178
+#define EIC7700_RESET_ISP0 179
+#define EIC7700_RESET_ISP1 180
+#define EIC7700_RESET_SHUTTR0 181
+#define EIC7700_RESET_SHUTTR1 182
+#define EIC7700_RESET_SHUTTR2 183
+#define EIC7700_RESET_SHUTTR3 184
+#define EIC7700_RESET_SHUTTR4 185
+#define EIC7700_RESET_SHUTTR5 186
+#define EIC7700_RESET_VO_MIPI 187
+#define EIC7700_RESET_VO_PRST 188
+#define EIC7700_RESET_VO_HDMI_PRST 189
+#define EIC7700_RESET_VO_HDMI_PHY 190
+#define EIC7700_RESET_VO_HDMI 191
+#define EIC7700_RESET_VO_I2S 192
+#define EIC7700_RESET_VO_I2S_PRST 193
+#define EIC7700_RESET_VO_AXI 194
+#define EIC7700_RESET_VO_CFG 195
+#define EIC7700_RESET_VO_DC 196
+#define EIC7700_RESET_VO_DC_PRST 197
+#define EIC7700_RESET_BOOTSPI_HRST 198
+#define EIC7700_RESET_BOOTSPI 199
+#define EIC7700_RESET_ANO1 200
+#define EIC7700_RESET_ANO0 201
+#define EIC7700_RESET_DMA1_ARST 202
+#define EIC7700_RESET_DMA1_HRST 203
+#define EIC7700_RESET_FPRT 204
+#define EIC7700_RESET_HBLOCK 205
+#define EIC7700_RESET_SECSR 206
+#define EIC7700_RESET_OTP 207
+#define EIC7700_RESET_PKA 208
+#define EIC7700_RESET_SPACC 209
+#define EIC7700_RESET_TRNG 210
+#define EIC7700_RESET_TIMER0_0 211
+#define EIC7700_RESET_TIMER0_1 212
+#define EIC7700_RESET_TIMER0_2 213
+#define EIC7700_RESET_TIMER0_3 214
+#define EIC7700_RESET_TIMER0_4 215
+#define EIC7700_RESET_TIMER0_5 216
+#define EIC7700_RESET_TIMER0_6 217
+#define EIC7700_RESET_TIMER0_7 218
+#define EIC7700_RESET_TIMER0_N 219
+#define EIC7700_RESET_TIMER1_0 220
+#define EIC7700_RESET_TIMER1_1 221
+#define EIC7700_RESET_TIMER1_2 222
+#define EIC7700_RESET_TIMER1_3 223
+#define EIC7700_RESET_TIMER1_4 224
+#define EIC7700_RESET_TIMER1_5 225
+#define EIC7700_RESET_TIMER1_6 226
+#define EIC7700_RESET_TIMER1_7 227
+#define EIC7700_RESET_TIMER1_N 228
+#define EIC7700_RESET_TIMER2_0 229
+#define EIC7700_RESET_TIMER2_1 230
+#define EIC7700_RESET_TIMER2_2 231
+#define EIC7700_RESET_TIMER2_3 232
+#define EIC7700_RESET_TIMER2_4 233
+#define EIC7700_RESET_TIMER2_5 234
+#define EIC7700_RESET_TIMER2_6 235
+#define EIC7700_RESET_TIMER2_7 236
+#define EIC7700_RESET_TIMER2_N 237
+#define EIC7700_RESET_TIMER3_0 238
+#define EIC7700_RESET_TIMER3_1 239
+#define EIC7700_RESET_TIMER3_2 240
+#define EIC7700_RESET_TIMER3_3 241
+#define EIC7700_RESET_TIMER3_4 242
+#define EIC7700_RESET_TIMER3_5 243
+#define EIC7700_RESET_TIMER3_6 244
+#define EIC7700_RESET_TIMER3_7 245
+#define EIC7700_RESET_TIMER3_N 246
+#define EIC7700_RESET_RTC 247
+#define EIC7700_RESET_MNOC_SNOC_NSP 248
+#define EIC7700_RESET_MNOC_VC 249
+#define EIC7700_RESET_MNOC_CFG 250
+#define EIC7700_RESET_MNOC_HSP 251
+#define EIC7700_RESET_MNOC_GPU 252
+#define EIC7700_RESET_MNOC_DDRC1_P3 253
+#define EIC7700_RESET_MNOC_DDRC0_P3 254
+#define EIC7700_RESET_RNOC_VO 255
+#define EIC7700_RESET_RNOC_VI 256
+#define EIC7700_RESET_RNOC_SNOC_NSP 257
+#define EIC7700_RESET_RNOC_CFG 258
+#define EIC7700_RESET_MNOC_DDRC1_P4 259
+#define EIC7700_RESET_MNOC_DDRC0_P4 260
+#define EIC7700_RESET_CNOC_VO_CFG 261
+#define EIC7700_RESET_CNOC_VI_CFG 262
+#define EIC7700_RESET_CNOC_VC_CFG 263
+#define EIC7700_RESET_CNOC_TCU_CFG 264
+#define EIC7700_RESET_CNOC_PCIE_CFG 265
+#define EIC7700_RESET_CNOC_NPU_CFG 266
+#define EIC7700_RESET_CNOC_LSP_CFG 267
+#define EIC7700_RESET_CNOC_HSP_CFG 268
+#define EIC7700_RESET_CNOC_GPU_CFG 269
+#define EIC7700_RESET_CNOC_DSPT_CFG 270
+#define EIC7700_RESET_CNOC_DDRT1_CFG 271
+#define EIC7700_RESET_CNOC_DDRT0_CFG 272
+#define EIC7700_RESET_CNOC_D2D_CFG 273
+#define EIC7700_RESET_CNOC_CFG 274
+#define EIC7700_RESET_CNOC_CLMM_CFG 275
+#define EIC7700_RESET_CNOC_AON_CFG 276
+#define EIC7700_RESET_LNOC_CFG 277
+#define EIC7700_RESET_LNOC_NPU_LLC 278
+#define EIC7700_RESET_LNOC_DDRC1_P0 279
+#define EIC7700_RESET_LNOC_DDRC0_P0 280
+
+#endif /* __DT_ESWIN_EIC7700_RESET_H__ */
diff --git a/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h b/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h
new file mode 100644
index 000000000000..adf95bb26d21
--- /dev/null
+++ b/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H
+#define DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H
+
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_DBG_RST 0
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_RST 1
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_STALL 2
+#define IMX8ULP_SIM_LPAV_DSI_RST_BYTE_N 3
+#define IMX8ULP_SIM_LPAV_DSI_RST_ESC_N 4
+#define IMX8ULP_SIM_LPAV_DSI_RST_DPI_N 5
+
+#endif /* DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H */
diff --git a/include/dt-bindings/reset/imx8mp-reset-audiomix.h b/include/dt-bindings/reset/imx8mp-reset-audiomix.h
new file mode 100644
index 000000000000..746c1337ed99
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mp-reset-audiomix.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MP_AUDIOMIX_H
+#define DT_BINDING_RESET_IMX8MP_AUDIOMIX_H
+
+#define IMX8MP_AUDIOMIX_EARC_RESET 0
+#define IMX8MP_AUDIOMIX_EARC_PHY_RESET 1
+#define IMX8MP_AUDIOMIX_DSP_RUNSTALL 2
+
+#endif /* DT_BINDING_RESET_IMX8MP_AUDIOMIX_H */
diff --git a/include/dt-bindings/reset/mediatek,mt6735-infracfg.h b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h
new file mode 100644
index 000000000000..9df969090377
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_INFRACFG_H
+#define _DT_BINDINGS_RESET_MT6735_INFRACFG_H
+
+#define MT6735_INFRA_RST0_EMI_REG 0
+#define MT6735_INFRA_RST0_DRAMC0_AO 1
+#define MT6735_INFRA_RST0_AP_CIRQ_EINT 2
+#define MT6735_INFRA_RST0_APXGPT 3
+#define MT6735_INFRA_RST0_SCPSYS 4
+#define MT6735_INFRA_RST0_KP 5
+#define MT6735_INFRA_RST0_PMIC_WRAP 6
+#define MT6735_INFRA_RST0_CLDMA_AO_TOP 7
+#define MT6735_INFRA_RST0_USBSIF_TOP 8
+#define MT6735_INFRA_RST0_EMI 9
+#define MT6735_INFRA_RST0_CCIF 10
+#define MT6735_INFRA_RST0_DRAMC0 11
+#define MT6735_INFRA_RST0_EMI_AO_REG 12
+#define MT6735_INFRA_RST0_CCIF_AO 13
+#define MT6735_INFRA_RST0_TRNG 14
+#define MT6735_INFRA_RST0_SYS_CIRQ 15
+#define MT6735_INFRA_RST0_GCE 16
+#define MT6735_INFRA_RST0_M4U 17
+#define MT6735_INFRA_RST0_CCIF1 18
+#define MT6735_INFRA_RST0_CLDMA_TOP_PD 19
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h
new file mode 100644
index 000000000000..c489242b226e
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_MFGCFG_H
+#define _DT_BINDINGS_RESET_MT6735_MFGCFG_H
+
+#define MT6735_MFG_RST0_AXI 0
+#define MT6735_MFG_RST0_G3D 1
+
+#endif /* _DT_BINDINGS_RESET_MT6735_MFGCFG_H */
diff --git a/include/dt-bindings/reset/mediatek,mt6735-pericfg.h b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h
new file mode 100644
index 000000000000..a62bb192835a
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_PERICFG_H
+#define _DT_BINDINGS_RESET_MT6735_PERICFG_H
+
+#define MT6735_PERI_RST0_UART0 0
+#define MT6735_PERI_RST0_UART1 1
+#define MT6735_PERI_RST0_UART2 2
+#define MT6735_PERI_RST0_UART3 3
+#define MT6735_PERI_RST0_UART4 4
+#define MT6735_PERI_RST0_BTIF 5
+#define MT6735_PERI_RST0_DISP_PWM_PERI 6
+#define MT6735_PERI_RST0_PWM 7
+#define MT6735_PERI_RST0_AUXADC 8
+#define MT6735_PERI_RST0_DMA 9
+#define MT6735_PERI_RST0_IRDA 10
+#define MT6735_PERI_RST0_IRTX 11
+#define MT6735_PERI_RST0_THERM 12
+#define MT6735_PERI_RST0_MSDC2 13
+#define MT6735_PERI_RST0_MSDC3 14
+#define MT6735_PERI_RST0_MSDC0 15
+#define MT6735_PERI_RST0_MSDC1 16
+#define MT6735_PERI_RST0_I2C0 17
+#define MT6735_PERI_RST0_I2C1 18
+#define MT6735_PERI_RST0_I2C2 19
+#define MT6735_PERI_RST0_I2C3 20
+#define MT6735_PERI_RST0_USB 21
+
+#define MT6735_PERI_RST1_SPI0 22
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h
new file mode 100644
index 000000000000..b6ae5d249192
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_VDECSYS_H
+#define _DT_BINDINGS_RESET_MT6735_VDECSYS_H
+
+#define MT6735_VDEC_RST0_VDEC 0
+#define MT6735_VDEC_RST1_SMI_LARB1 1
+
+#endif /* _DT_BINDINGS_RESET_MT6735_VDECSYS_H */
diff --git a/include/dt-bindings/reset/mediatek,mt8196-resets.h b/include/dt-bindings/reset/mediatek,mt8196-resets.h
new file mode 100644
index 000000000000..46ced0850d91
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt8196-resets.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8196
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8196
+
+/* PEXTP0 resets */
+#define MT8196_PEXTP0_RST0_PCIE0_MAC 0
+#define MT8196_PEXTP0_RST0_PCIE0_PHY 1
+
+/* PEXTP1 resets */
+#define MT8196_PEXTP1_RST0_PCIE1_MAC 0
+#define MT8196_PEXTP1_RST0_PCIE1_PHY 1
+#define MT8196_PEXTP1_RST0_PCIE2_MAC 2
+#define MT8196_PEXTP1_RST0_PCIE2_PHY 3
+
+/* UFS resets */
+#define MT8196_UFSAO_RST0_UFS_MPHY 0
+#define MT8196_UFSAO_RST1_UFS_UNIPRO 1
+#define MT8196_UFSAO_RST1_UFS_CRYPTO 2
+#define MT8196_UFSAO_RST1_UFSHCI 3
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8196 */
diff --git a/include/dt-bindings/reset/nvidia,tegra114-car.h b/include/dt-bindings/reset/nvidia,tegra114-car.h
new file mode 100644
index 000000000000..9b8c320402db
--- /dev/null
+++ b/include/dt-bindings/reset/nvidia,tegra114-car.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides Tegra114-specific constants for binding
+ * nvidia,tegra114-car.
+ */
+
+#ifndef _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+#define _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+
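+/*
+ * Indices encode a CAR reset register bank and bit as (bank * 32 + bit),
+ * so TEGRA114_RESET(0) below resolves to index 5 * 32 + 0 = 160.
+ */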
+#define TEGRA114_RESET(x) (5 * 32 + (x))
+#define TEGRA114_RST_DFLL_DVCO TEGRA114_RESET(0)
+
+#endif /* _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H */
diff --git a/include/dt-bindings/reset/nvidia,tegra264.h b/include/dt-bindings/reset/nvidia,tegra264.h
new file mode 100644
index 000000000000..a61a56bb232b
--- /dev/null
+++ b/include/dt-bindings/reset/nvidia,tegra264.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_RESET_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_RESET_NVIDIA_TEGRA264_H
+
+#define TEGRA264_RESET_APE_TKE 1
+#define TEGRA264_RESET_CEC 2
+#define TEGRA264_RESET_ADSP_ALL 3
+#define TEGRA264_RESET_RCE_ALL 4
+#define TEGRA264_RESET_UFSHC 5
+#define TEGRA264_RESET_UFSHC_AXI_M 6
+#define TEGRA264_RESET_UFSHC_LP_SEQ 7
+#define TEGRA264_RESET_DPAUX 8
+#define TEGRA264_RESET_EQOS_PCS 9
+#define TEGRA264_RESET_HWPM 10
+#define TEGRA264_RESET_I2C1 11
+#define TEGRA264_RESET_I2C2 12
+#define TEGRA264_RESET_I2C3 13
+#define TEGRA264_RESET_I2C4 14
+#define TEGRA264_RESET_I2C6 15
+#define TEGRA264_RESET_I2C7 16
+#define TEGRA264_RESET_I2C8 17
+#define TEGRA264_RESET_I2C9 18
+#define TEGRA264_RESET_ISP 19
+#define TEGRA264_RESET_LA 20
+#define TEGRA264_RESET_NVCSI 21
+#define TEGRA264_RESET_EQOS_MAC 22
+#define TEGRA264_RESET_PWM10 23
+#define TEGRA264_RESET_PWM2 24
+#define TEGRA264_RESET_PWM3 25
+#define TEGRA264_RESET_PWM4 26
+#define TEGRA264_RESET_PWM5 27
+#define TEGRA264_RESET_PWM9 28
+#define TEGRA264_RESET_QSPI0 29
+#define TEGRA264_RESET_HDA 30
+#define TEGRA264_RESET_HDACODEC 31
+#define TEGRA264_RESET_I2C0 32
+#define TEGRA264_RESET_I2C10 33
+#define TEGRA264_RESET_SDMMC1 34
+#define TEGRA264_RESET_MIPI_CAL 35
+#define TEGRA264_RESET_SPI1 36
+#define TEGRA264_RESET_SPI2 37
+#define TEGRA264_RESET_SPI3 38
+#define TEGRA264_RESET_SPI4 39
+#define TEGRA264_RESET_SPI5 40
+#define TEGRA264_RESET_SPI7 41
+#define TEGRA264_RESET_SPI8 42
+#define TEGRA264_RESET_SPI9 43
+#define TEGRA264_RESET_TACH0 44
+#define TEGRA264_RESET_TSEC 45
+#define TEGRA264_RESET_VI 46
+#define TEGRA264_RESET_VI1 47
+#define TEGRA264_RESET_PVA0_ALL 48
+#define TEGRA264_RESET_VIC 49
+#define TEGRA264_RESET_MPHY_CLK_CTL 50
+#define TEGRA264_RESET_MPHY_L0_RX 51
+#define TEGRA264_RESET_MPHY_L0_TX 52
+#define TEGRA264_RESET_MPHY_L1_RX 53
+#define TEGRA264_RESET_MPHY_L1_TX 54
+#define TEGRA264_RESET_ISP1 55
+#define TEGRA264_RESET_I2C11 56
+#define TEGRA264_RESET_I2C12 57
+#define TEGRA264_RESET_I2C14 58
+#define TEGRA264_RESET_I2C15 59
+#define TEGRA264_RESET_I2C16 60
+#define TEGRA264_RESET_EQOS_MACSEC 61
+#define TEGRA264_RESET_MGBE0_PCS 62
+#define TEGRA264_RESET_MGBE0_MAC 63
+#define TEGRA264_RESET_MGBE0_MACSEC 64
+#define TEGRA264_RESET_MGBE1_PCS 65
+#define TEGRA264_RESET_MGBE1_MAC 66
+#define TEGRA264_RESET_MGBE1_MACSEC 67
+#define TEGRA264_RESET_MGBE2_PCS 68
+#define TEGRA264_RESET_MGBE2_MAC 69
+#define TEGRA264_RESET_MGBE2_MACSEC 70
+#define TEGRA264_RESET_MGBE3_PCS 71
+#define TEGRA264_RESET_MGBE3_MAC 72
+#define TEGRA264_RESET_MGBE3_MACSEC 73
+#define TEGRA264_RESET_ADSP_CORE0 74
+#define TEGRA264_RESET_ADSP_CORE1 75
+#define TEGRA264_RESET_APE 76
+#define TEGRA264_RESET_XUSB1_PADCTL 77
+#define TEGRA264_RESET_AON_CPU_ALL 78
+#define TEGRA264_RESET_AON_HSP 79
+#define TEGRA264_RESET_UART4 80
+#define TEGRA264_RESET_UART5 81
+#define TEGRA264_RESET_UART9 82
+#define TEGRA264_RESET_UART10 83
+#define TEGRA264_RESET_UART8 84
+
+#endif /* DT_BINDINGS_RESET_NVIDIA_TEGRA264_H */
diff --git a/include/dt-bindings/reset/qcom,ipq5424-gcc.h b/include/dt-bindings/reset/qcom,ipq5424-gcc.h
new file mode 100644
index 000000000000..16a72771c79a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq5424-gcc.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H
+
+#define GCC_QUPV3_BCR 0
+#define GCC_QUPV3_I2C0_BCR 1
+#define GCC_QUPV3_UART0_BCR 2
+#define GCC_QUPV3_I2C1_BCR 3
+#define GCC_QUPV3_UART1_BCR 4
+#define GCC_QUPV3_SPI0_BCR 5
+#define GCC_QUPV3_SPI1_BCR 6
+#define GCC_IMEM_BCR 7
+#define GCC_TME_BCR 8
+#define GCC_DDRSS_BCR 9
+#define GCC_PRNG_BCR 10
+#define GCC_BOOT_ROM_BCR 11
+#define GCC_NSS_BCR 12
+#define GCC_MDIO_BCR 13
+#define GCC_UNIPHY0_BCR 14
+#define GCC_UNIPHY1_BCR 15
+#define GCC_UNIPHY2_BCR 16
+#define GCC_WCSS_BCR 17
+#define GCC_SEC_CTRL_BCR 19
+#define GCC_TME_SEC_BUS_BCR 20
+#define GCC_ADSS_BCR 21
+#define GCC_LPASS_BCR 22
+#define GCC_PCIE0_BCR 23
+#define GCC_PCIE0_LINK_DOWN_BCR 24
+#define GCC_PCIE0PHY_PHY_BCR 25
+#define GCC_PCIE0_PHY_BCR 26
+#define GCC_PCIE1_BCR 27
+#define GCC_PCIE1_LINK_DOWN_BCR 28
+#define GCC_PCIE1PHY_PHY_BCR 29
+#define GCC_PCIE1_PHY_BCR 30
+#define GCC_PCIE2_BCR 31
+#define GCC_PCIE2_LINK_DOWN_BCR 32
+#define GCC_PCIE2PHY_PHY_BCR 33
+#define GCC_PCIE2_PHY_BCR 34
+#define GCC_PCIE3_BCR 35
+#define GCC_PCIE3_LINK_DOWN_BCR 36
+#define GCC_PCIE3PHY_PHY_BCR 37
+#define GCC_PCIE3_PHY_BCR 38
+#define GCC_USB_BCR 39
+#define GCC_QUSB2_0_PHY_BCR 40
+#define GCC_USB0_PHY_BCR 41
+#define GCC_USB3PHY_0_PHY_BCR 42
+#define GCC_QDSS_BCR 43
+#define GCC_SNOC_BCR 44
+#define GCC_ANOC_BCR 45
+#define GCC_PCNOC_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 49
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 50
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 51
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 52
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 53
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 54
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 55
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 56
+#define GCC_QPIC_BCR 57
+#define GCC_SDCC_BCR 58
+#define GCC_DCC_BCR 59
+#define GCC_SPDM_BCR 60
+#define GCC_MPM_BCR 61
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 62
+#define GCC_RBCPR_BCR 63
+#define GCC_CMN_BLK_BCR 64
+#define GCC_TCSR_BCR 65
+#define GCC_TLMM_BCR 66
+#define GCC_QUPV3_AHB_MST_ARES 67
+#define GCC_QUPV3_CORE_ARES 68
+#define GCC_QUPV3_2X_CORE_ARES 69
+#define GCC_QUPV3_SLEEP_ARES 70
+#define GCC_QUPV3_AHB_SLV_ARES 71
+#define GCC_QUPV3_I2C0_ARES 72
+#define GCC_QUPV3_UART0_ARES 73
+#define GCC_QUPV3_I2C1_ARES 74
+#define GCC_QUPV3_UART1_ARES 75
+#define GCC_QUPV3_SPI0_ARES 76
+#define GCC_QUPV3_SPI1_ARES 77
+#define GCC_DEBUG_ARES 78
+#define GCC_GP1_ARES 79
+#define GCC_GP2_ARES 80
+#define GCC_GP3_ARES 81
+#define GCC_IMEM_AXI_ARES 82
+#define GCC_IMEM_CFG_AHB_ARES 83
+#define GCC_TME_ARES 84
+#define GCC_TME_TS_ARES 85
+#define GCC_TME_SLOW_ARES 86
+#define GCC_TME_RTC_TOGGLE_ARES 87
+#define GCC_TIC_ARES 88
+#define GCC_PRNG_AHB_ARES 89
+#define GCC_BOOT_ROM_AHB_ARES 90
+#define GCC_NSSNOC_ATB_ARES 91
+#define GCC_NSS_TS_ARES 92
+#define GCC_NSSNOC_QOSGEN_REF_ARES 93
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 94
+#define GCC_NSSNOC_MEMNOC_ARES 95
+#define GCC_NSSNOC_SNOC_ARES 96
+#define GCC_NSSCFG_ARES 97
+#define GCC_NSSNOC_NSSCC_ARES 98
+#define GCC_NSSCC_ARES 99
+#define GCC_MDIO_AHB_ARES 100
+#define GCC_UNIPHY0_SYS_ARES 101
+#define GCC_UNIPHY0_AHB_ARES 102
+#define GCC_UNIPHY1_SYS_ARES 103
+#define GCC_UNIPHY1_AHB_ARES 104
+#define GCC_UNIPHY2_SYS_ARES 105
+#define GCC_UNIPHY2_AHB_ARES 106
+#define GCC_NSSNOC_XO_DCD_ARES 107
+#define GCC_NSSNOC_SNOC_1_ARES 108
+#define GCC_NSSNOC_PCNOC_1_ARES 109
+#define GCC_NSSNOC_MEMNOC_1_ARES 110
+#define GCC_DDRSS_ATB_ARES 111
+#define GCC_DDRSS_AHB_ARES 112
+#define GCC_GEMNOC_AHB_ARES 113
+#define GCC_GEMNOC_Q6_AXI_ARES 114
+#define GCC_GEMNOC_NSSNOC_ARES 115
+#define GCC_GEMNOC_SNOC_ARES 116
+#define GCC_GEMNOC_APSS_ARES 117
+#define GCC_GEMNOC_QOSGEN_EXTREF_ARES 118
+#define GCC_GEMNOC_TS_ARES 119
+#define GCC_DDRSS_SMS_SLOW_ARES 120
+#define GCC_GEMNOC_CNOC_ARES 121
+#define GCC_GEMNOC_XO_DBG_ARES 122
+#define GCC_GEMNOC_ANOC_ARES 123
+#define GCC_DDRSS_LLCC_ATB_ARES 124
+#define GCC_LLCC_TPDM_CFG_ARES 125
+#define GCC_TME_BUS_ARES 126
+#define GCC_SEC_CTRL_ACC_ARES 127
+#define GCC_SEC_CTRL_ARES 128
+#define GCC_SEC_CTRL_SENSE_ARES 129
+#define GCC_SEC_CTRL_AHB_ARES 130
+#define GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES 131
+#define GCC_ADSS_PWM_ARES 132
+#define GCC_TME_ATB_ARES 133
+#define GCC_TME_DBGAPB_ARES 134
+#define GCC_TME_DEBUG_ARES 135
+#define GCC_TME_AT_ARES 136
+#define GCC_TME_APB_ARES 137
+#define GCC_TME_DMI_DBG_HS_ARES 138
+#define GCC_APSS_AHB_ARES 139
+#define GCC_APSS_AXI_ARES 140
+#define GCC_CPUSS_TRIG_ARES 141
+#define GCC_APSS_DBG_ARES 142
+#define GCC_APSS_TS_ARES 143
+#define GCC_APSS_ATB_ARES 144
+#define GCC_Q6_AXIM_ARES 145
+#define GCC_Q6_AXIS_ARES 146
+#define GCC_Q6_AHB_ARES 147
+#define GCC_Q6_AHB_S_ARES 148
+#define GCC_Q6SS_ATBM_ARES 149
+#define GCC_Q6_TSCTR_1TO2_ARES 150
+#define GCC_Q6SS_PCLKDBG_ARES 151
+#define GCC_Q6SS_TRIG_ARES 152
+#define GCC_Q6SS_BOOT_CBCR_ARES 153
+#define GCC_WCSS_DBG_IFC_APB_ARES 154
+#define GCC_WCSS_DBG_IFC_ATB_ARES 155
+#define GCC_WCSS_DBG_IFC_NTS_ARES 156
+#define GCC_WCSS_DBG_IFC_DAPBUS_ARES 157
+#define GCC_WCSS_DBG_IFC_APB_BDG_ARES 158
+#define GCC_WCSS_DBG_IFC_NTS_BDG_ARES 159
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES 160
+#define GCC_WCSS_ECAHB_ARES 161
+#define GCC_WCSS_ACMT_ARES 162
+#define GCC_WCSS_AHB_S_ARES 163
+#define GCC_WCSS_AXI_M_ARES 164
+#define GCC_PCNOC_WAPSS_ARES 165
+#define GCC_SNOC_WAPSS_ARES 166
+#define GCC_LPASS_SWAY_ARES 167
+#define GCC_LPASS_CORE_AXIM_ARES 168
+#define GCC_PCIE0_AHB_ARES 169
+#define GCC_PCIE0_AXI_M_ARES 170
+#define GCC_PCIE0_AXI_S_ARES 171
+#define GCC_PCIE0_AXI_S_BRIDGE_ARES 172
+#define GCC_PCIE0_PIPE_ARES 173
+#define GCC_PCIE0_AUX_ARES 174
+#define GCC_PCIE1_AHB_ARES 175
+#define GCC_PCIE1_AXI_M_ARES 176
+#define GCC_PCIE1_AXI_S_ARES 177
+#define GCC_PCIE1_AXI_S_BRIDGE_ARES 178
+#define GCC_PCIE1_PIPE_ARES 179
+#define GCC_PCIE1_AUX_ARES 180
+#define GCC_PCIE2_AHB_ARES 181
+#define GCC_PCIE2_AXI_M_ARES 182
+#define GCC_PCIE2_AXI_S_ARES 183
+#define GCC_PCIE2_AXI_S_BRIDGE_ARES 184
+#define GCC_PCIE2_PIPE_ARES 185
+#define GCC_PCIE2_AUX_ARES 186
+#define GCC_PCIE3_AHB_ARES 187
+#define GCC_PCIE3_AXI_M_ARES 188
+#define GCC_PCIE3_AXI_S_ARES 189
+#define GCC_PCIE3_AXI_S_BRIDGE_ARES 190
+#define GCC_PCIE3_PIPE_ARES 191
+#define GCC_PCIE3_AUX_ARES 192
+#define GCC_USB0_MASTER_ARES 193
+#define GCC_USB0_AUX_ARES 194
+#define GCC_USB0_MOCK_UTMI_ARES 195
+#define GCC_USB0_PIPE_ARES 196
+#define GCC_USB0_SLEEP_ARES 197
+#define GCC_USB0_PHY_CFG_AHB_ARES 198
+#define GCC_QDSS_AT_ARES 199
+#define GCC_QDSS_STM_ARES 200
+#define GCC_QDSS_TRACECLKIN_ARES 201
+#define GCC_QDSS_TSCTR_DIV2_ARES 202
+#define GCC_QDSS_TSCTR_DIV3_ARES 203
+#define GCC_QDSS_TSCTR_DIV4_ARES 204
+#define GCC_QDSS_TSCTR_DIV8_ARES 205
+#define GCC_QDSS_TSCTR_DIV16_ARES 206
+#define GCC_QDSS_DAP_ARES 207
+#define GCC_QDSS_APB2JTAG_ARES 208
+#define GCC_QDSS_ETR_USB_ARES 209
+#define GCC_QDSS_DAP_AHB_ARES 210
+#define GCC_QDSS_CFG_AHB_ARES 211
+#define GCC_QDSS_EUD_AT_ARES 212
+#define GCC_QDSS_TS_ARES 213
+#define GCC_QDSS_USB_ARES 214
+#define GCC_SYS_NOC_AXI_ARES 215
+#define GCC_SNOC_QOSGEN_EXTREF_ARES 216
+#define GCC_CNOC_LPASS_CFG_ARES 217
+#define GCC_SYS_NOC_AT_ARES 218
+#define GCC_SNOC_PCNOC_AHB_ARES 219
+#define GCC_SNOC_TME_ARES 220
+#define GCC_SNOC_XO_DCD_ARES 221
+#define GCC_SNOC_TS_ARES 222
+#define GCC_ANOC0_AXI_ARES 223
+#define GCC_ANOC_PCIE0_1LANE_M_ARES 224
+#define GCC_ANOC_PCIE2_2LANE_M_ARES 225
+#define GCC_ANOC_PCIE1_1LANE_M_ARES 226
+#define GCC_ANOC_PCIE3_2LANE_M_ARES 227
+#define GCC_ANOC_PCNOC_AHB_ARES 228
+#define GCC_ANOC_QOSGEN_EXTREF_ARES 229
+#define GCC_ANOC_XO_DCD_ARES 230
+#define GCC_SNOC_XO_DBG_ARES 231
+#define GCC_AGGRNOC_ATB_ARES 232
+#define GCC_AGGRNOC_TS_ARES 233
+#define GCC_USB0_EUD_AT_ARES 234
+#define GCC_PCNOC_TIC_ARES 235
+#define GCC_PCNOC_AHB_ARES 236
+#define GCC_PCNOC_XO_DBG_ARES 237
+#define GCC_SNOC_LPASS_ARES 238
+#define GCC_PCNOC_AT_ARES 239
+#define GCC_PCNOC_XO_DCD_ARES 240
+#define GCC_PCNOC_TS_ARES 241
+#define GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES 242
+#define GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES 243
+#define GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES 244
+#define GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES 245
+#define GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES 246
+#define GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES 247
+#define GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES 248
+#define GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES 249
+#define GCC_Q6_AXIM_RESET 250
+#define GCC_Q6_AXIS_RESET 251
+#define GCC_Q6_AHB_S_RESET 252
+#define GCC_Q6_AHB_RESET 253
+#define GCC_Q6SS_DBG_RESET 254
+#define GCC_WCSS_ECAHB_RESET 255
+#define GCC_WCSS_DBG_BDG_RESET 256
+#define GCC_WCSS_DBG_RESET 257
+#define GCC_WCSS_AXI_M_RESET 258
+#define GCC_WCSS_AHB_S_RESET 259
+#define GCC_WCSS_ACMT_RESET 260
+#define GCC_WCSSAON_RESET 261
+#define GCC_PCIE0_PIPE_RESET 262
+#define GCC_PCIE0_CORE_STICKY_RESET 263
+#define GCC_PCIE0_AXI_S_STICKY_RESET 264
+#define GCC_PCIE0_AXI_S_RESET 265
+#define GCC_PCIE0_AXI_M_STICKY_RESET 266
+#define GCC_PCIE0_AXI_M_RESET 267
+#define GCC_PCIE0_AUX_RESET 268
+#define GCC_PCIE0_AHB_RESET 269
+#define GCC_PCIE1_PIPE_RESET 270
+#define GCC_PCIE1_CORE_STICKY_RESET 271
+#define GCC_PCIE1_AXI_S_STICKY_RESET 272
+#define GCC_PCIE1_AXI_S_RESET 273
+#define GCC_PCIE1_AXI_M_STICKY_RESET 274
+#define GCC_PCIE1_AXI_M_RESET 275
+#define GCC_PCIE1_AUX_RESET 276
+#define GCC_PCIE1_AHB_RESET 277
+#define GCC_PCIE2_PIPE_RESET 278
+#define GCC_PCIE2_CORE_STICKY_RESET 279
+#define GCC_PCIE2_AXI_S_STICKY_RESET 280
+#define GCC_PCIE2_AXI_S_RESET 281
+#define GCC_PCIE2_AXI_M_STICKY_RESET 282
+#define GCC_PCIE2_AXI_M_RESET 283
+#define GCC_PCIE2_AUX_RESET 284
+#define GCC_PCIE2_AHB_RESET 285
+#define GCC_PCIE3_PIPE_RESET 286
+#define GCC_PCIE3_CORE_STICKY_RESET 287
+#define GCC_PCIE3_AXI_S_STICKY_RESET 288
+#define GCC_PCIE3_AXI_S_RESET 289
+#define GCC_PCIE3_AXI_M_STICKY_RESET 290
+#define GCC_PCIE3_AXI_M_RESET 291
+#define GCC_PCIE3_AUX_RESET 292
+#define GCC_PCIE3_AHB_RESET 293
+#define GCC_NSS_PARTIAL_RESET 294
+#define GCC_UNIPHY0_XPCS_ARES 295
+#define GCC_UNIPHY1_XPCS_ARES 296
+#define GCC_UNIPHY2_XPCS_ARES 297
+#define GCC_USB1_BCR 298
+#define GCC_QUSB2_1_PHY_BCR 299
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq5424-nsscc.h b/include/dt-bindings/reset/qcom,ipq5424-nsscc.h
new file mode 100644
index 000000000000..9627e3b0ad30
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq5424-nsscc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_IPQ5424_NSSCC_H
+#define _DT_BINDINGS_RESET_QCOM_IPQ5424_NSSCC_H
+
+#define NSS_CC_CE_APB_CLK_ARES 0
+#define NSS_CC_CE_AXI_CLK_ARES 1
+#define NSS_CC_DEBUG_CLK_ARES 2
+#define NSS_CC_EIP_CLK_ARES 3
+#define NSS_CC_NSS_CSR_CLK_ARES 4
+#define NSS_CC_NSSNOC_CE_APB_CLK_ARES 5
+#define NSS_CC_NSSNOC_CE_AXI_CLK_ARES 6
+#define NSS_CC_NSSNOC_EIP_CLK_ARES 7
+#define NSS_CC_NSSNOC_NSS_CSR_CLK_ARES 8
+#define NSS_CC_NSSNOC_PPE_CLK_ARES 9
+#define NSS_CC_NSSNOC_PPE_CFG_CLK_ARES 10
+#define NSS_CC_PORT1_MAC_CLK_ARES 11
+#define NSS_CC_PORT1_RX_CLK_ARES 12
+#define NSS_CC_PORT1_TX_CLK_ARES 13
+#define NSS_CC_PORT2_MAC_CLK_ARES 14
+#define NSS_CC_PORT2_RX_CLK_ARES 15
+#define NSS_CC_PORT2_TX_CLK_ARES 16
+#define NSS_CC_PORT3_MAC_CLK_ARES 17
+#define NSS_CC_PORT3_RX_CLK_ARES 18
+#define NSS_CC_PORT3_TX_CLK_ARES 19
+#define NSS_CC_PPE_BCR 20
+#define NSS_CC_PPE_EDMA_CLK_ARES 21
+#define NSS_CC_PPE_EDMA_CFG_CLK_ARES 22
+#define NSS_CC_PPE_SWITCH_BTQ_CLK_ARES 23
+#define NSS_CC_PPE_SWITCH_CLK_ARES 24
+#define NSS_CC_PPE_SWITCH_CFG_CLK_ARES 25
+#define NSS_CC_PPE_SWITCH_IPE_CLK_ARES 26
+#define NSS_CC_UNIPHY_PORT1_RX_CLK_ARES 27
+#define NSS_CC_UNIPHY_PORT1_TX_CLK_ARES 28
+#define NSS_CC_UNIPHY_PORT2_RX_CLK_ARES 29
+#define NSS_CC_UNIPHY_PORT2_TX_CLK_ARES 30
+#define NSS_CC_UNIPHY_PORT3_RX_CLK_ARES 31
+#define NSS_CC_UNIPHY_PORT3_TX_CLK_ARES 32
+#define NSS_CC_XGMAC0_PTP_REF_CLK_ARES 33
+#define NSS_CC_XGMAC1_PTP_REF_CLK_ARES 34
+#define NSS_CC_XGMAC2_PTP_REF_CLK_ARES 35
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq9574-nsscc.h b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
new file mode 100644
index 000000000000..7f152e98b99c
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, 2025 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
+#define _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
+
+#define EDMA_HW_RESET 0
+#define NSS_CC_CE_BCR 1
+#define NSS_CC_CLC_BCR 2
+#define NSS_CC_EIP197_BCR 3
+#define NSS_CC_HAQ_BCR 4
+#define NSS_CC_IMEM_BCR 5
+#define NSS_CC_MAC_BCR 6
+#define NSS_CC_PPE_BCR 7
+#define NSS_CC_UBI_BCR 8
+#define NSS_CC_UNIPHY_BCR 9
+#define UBI3_CLKRST_CLAMP_ENABLE 10
+#define UBI3_CORE_CLAMP_ENABLE 11
+#define UBI2_CLKRST_CLAMP_ENABLE 12
+#define UBI2_CORE_CLAMP_ENABLE 13
+#define UBI1_CLKRST_CLAMP_ENABLE 14
+#define UBI1_CORE_CLAMP_ENABLE 15
+#define UBI0_CLKRST_CLAMP_ENABLE 16
+#define UBI0_CORE_CLAMP_ENABLE 17
+#define NSSNOC_NSS_CSR_ARES 18
+#define NSS_CSR_ARES 19
+#define PPE_BTQ_ARES 20
+#define PPE_IPE_ARES 21
+#define PPE_ARES 22
+#define PPE_CFG_ARES 23
+#define PPE_EDMA_ARES 24
+#define PPE_EDMA_CFG_ARES 25
+#define CRY_PPE_ARES 26
+#define NSSNOC_PPE_ARES 27
+#define NSSNOC_PPE_CFG_ARES 28
+#define PORT1_MAC_ARES 29
+#define PORT2_MAC_ARES 30
+#define PORT3_MAC_ARES 31
+#define PORT4_MAC_ARES 32
+#define PORT5_MAC_ARES 33
+#define PORT6_MAC_ARES 34
+#define XGMAC0_PTP_REF_ARES 35
+#define XGMAC1_PTP_REF_ARES 36
+#define XGMAC2_PTP_REF_ARES 37
+#define XGMAC3_PTP_REF_ARES 38
+#define XGMAC4_PTP_REF_ARES 39
+#define XGMAC5_PTP_REF_ARES 40
+#define HAQ_AHB_ARES 41
+#define HAQ_AXI_ARES 42
+#define NSSNOC_HAQ_AHB_ARES 43
+#define NSSNOC_HAQ_AXI_ARES 44
+#define CE_APB_ARES 45
+#define CE_AXI_ARES 46
+#define NSSNOC_CE_APB_ARES 47
+#define NSSNOC_CE_AXI_ARES 48
+#define CRYPTO_ARES 49
+#define NSSNOC_CRYPTO_ARES 50
+#define NSSNOC_NC_AXI0_1_ARES 51
+#define UBI0_CORE_ARES 52
+#define UBI1_CORE_ARES 53
+#define UBI2_CORE_ARES 54
+#define UBI3_CORE_ARES 55
+#define NC_AXI0_ARES 56
+#define UTCM0_ARES 57
+#define NC_AXI1_ARES 58
+#define UTCM1_ARES 59
+#define NC_AXI2_ARES 60
+#define UTCM2_ARES 61
+#define NC_AXI3_ARES 62
+#define UTCM3_ARES 63
+#define NSSNOC_NC_AXI0_ARES 64
+#define AHB0_ARES 65
+#define INTR0_AHB_ARES 66
+#define AHB1_ARES 67
+#define INTR1_AHB_ARES 68
+#define AHB2_ARES 69
+#define INTR2_AHB_ARES 70
+#define AHB3_ARES 71
+#define INTR3_AHB_ARES 72
+#define NSSNOC_AHB0_ARES 73
+#define NSSNOC_INT0_AHB_ARES 74
+#define AXI0_ARES 75
+#define AXI1_ARES 76
+#define AXI2_ARES 77
+#define AXI3_ARES 78
+#define NSSNOC_AXI0_ARES 79
+#define IMEM_QSB_ARES 80
+#define NSSNOC_IMEM_QSB_ARES 81
+#define IMEM_AHB_ARES 82
+#define NSSNOC_IMEM_AHB_ARES 83
+#define UNIPHY_PORT1_RX_ARES 84
+#define UNIPHY_PORT1_TX_ARES 85
+#define UNIPHY_PORT2_RX_ARES 86
+#define UNIPHY_PORT2_TX_ARES 87
+#define UNIPHY_PORT3_RX_ARES 88
+#define UNIPHY_PORT3_TX_ARES 89
+#define UNIPHY_PORT4_RX_ARES 90
+#define UNIPHY_PORT4_TX_ARES 91
+#define UNIPHY_PORT5_RX_ARES 92
+#define UNIPHY_PORT5_TX_ARES 93
+#define UNIPHY_PORT6_RX_ARES 94
+#define UNIPHY_PORT6_TX_ARES 95
+#define PORT1_RX_ARES 96
+#define PORT1_TX_ARES 97
+#define PORT2_RX_ARES 98
+#define PORT2_TX_ARES 99
+#define PORT3_RX_ARES 100
+#define PORT3_TX_ARES 101
+#define PORT4_RX_ARES 102
+#define PORT4_TX_ARES 103
+#define PORT5_RX_ARES 104
+#define PORT5_TX_ARES 105
+#define PORT6_RX_ARES 106
+#define PORT6_TX_ARES 107
+#define PPE_FULL_RESET 108
+#define UNIPHY0_SOFT_RESET 109
+#define UNIPHY1_SOFT_RESET 110
+#define UNIPHY2_SOFT_RESET 111
+#define UNIPHY_PORT1_ARES 112
+#define UNIPHY_PORT2_ARES 113
+#define UNIPHY_PORT3_ARES 114
+#define UNIPHY_PORT4_ARES 115
+#define UNIPHY_PORT5_ARES 116
+#define UNIPHY_PORT6_ARES 117
+#define NSSPORT1_RESET 118
+#define NSSPORT2_RESET 119
+#define NSSPORT3_RESET 120
+#define NSSPORT4_RESET 121
+#define NSSPORT5_RESET 122
+#define NSSPORT6_RESET 123
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,qca8k-nsscc.h b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
new file mode 100644
index 000000000000..c71167a3bd41
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
+#define _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
+
+#define NSS_CC_SWITCH_CORE_ARES 1
+#define NSS_CC_APB_BRIDGE_ARES 2
+#define NSS_CC_MAC0_TX_ARES 3
+#define NSS_CC_MAC0_TX_SRDS1_ARES 4
+#define NSS_CC_MAC0_RX_ARES 5
+#define NSS_CC_MAC0_RX_SRDS1_ARES 6
+#define NSS_CC_MAC1_SRDS1_CH0_RX_ARES 7
+#define NSS_CC_MAC1_TX_ARES 8
+#define NSS_CC_MAC1_GEPHY0_TX_ARES 9
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES 10
+#define NSS_CC_MAC1_SRDS1_CH0_TX_ARES 11
+#define NSS_CC_MAC1_RX_ARES 12
+#define NSS_CC_MAC1_GEPHY0_RX_ARES 13
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES 14
+#define NSS_CC_MAC2_SRDS1_CH1_RX_ARES 15
+#define NSS_CC_MAC2_TX_ARES 16
+#define NSS_CC_MAC2_GEPHY1_TX_ARES 17
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES 18
+#define NSS_CC_MAC2_SRDS1_CH1_TX_ARES 19
+#define NSS_CC_MAC2_RX_ARES 20
+#define NSS_CC_MAC2_GEPHY1_RX_ARES 21
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES 22
+#define NSS_CC_MAC3_SRDS1_CH2_RX_ARES 23
+#define NSS_CC_MAC3_TX_ARES 24
+#define NSS_CC_MAC3_GEPHY2_TX_ARES 25
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES 26
+#define NSS_CC_MAC3_SRDS1_CH2_TX_ARES 27
+#define NSS_CC_MAC3_RX_ARES 28
+#define NSS_CC_MAC3_GEPHY2_RX_ARES 29
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES 30
+#define NSS_CC_MAC4_SRDS1_CH3_RX_ARES 31
+#define NSS_CC_MAC4_TX_ARES 32
+#define NSS_CC_MAC4_GEPHY3_TX_ARES 33
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES 34
+#define NSS_CC_MAC4_SRDS1_CH3_TX_ARES 35
+#define NSS_CC_MAC4_RX_ARES 36
+#define NSS_CC_MAC4_GEPHY3_RX_ARES 37
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES 38
+#define NSS_CC_MAC5_TX_ARES 39
+#define NSS_CC_MAC5_TX_SRDS0_ARES 40
+#define NSS_CC_MAC5_RX_ARES 41
+#define NSS_CC_MAC5_RX_SRDS0_ARES 42
+#define NSS_CC_AHB_ARES 43
+#define NSS_CC_SEC_CTRL_AHB_ARES 44
+#define NSS_CC_TLMM_ARES 45
+#define NSS_CC_TLMM_AHB_ARES 46
+#define NSS_CC_CNOC_AHB_ARES 47
+#define NSS_CC_MDIO_AHB_ARES 48
+#define NSS_CC_MDIO_MASTER_AHB_ARES 49
+#define NSS_CC_SRDS0_SYS_ARES 50
+#define NSS_CC_SRDS1_SYS_ARES 51
+#define NSS_CC_GEPHY0_SYS_ARES 52
+#define NSS_CC_GEPHY1_SYS_ARES 53
+#define NSS_CC_GEPHY2_SYS_ARES 54
+#define NSS_CC_GEPHY3_SYS_ARES 55
+#define NSS_CC_SEC_CTRL_ARES 56
+#define NSS_CC_SEC_CTRL_SENSE_ARES 57
+#define NSS_CC_SLEEP_ARES 58
+#define NSS_CC_DEBUG_ARES 59
+#define NSS_CC_GEPHY0_ARES 60
+#define NSS_CC_GEPHY1_ARES 61
+#define NSS_CC_GEPHY2_ARES 62
+#define NSS_CC_GEPHY3_ARES 63
+#define NSS_CC_DSP_ARES 64
+#define NSS_CC_GEPHY_FULL_ARES 65
+#define NSS_CC_GLOBAL_ARES 66
+#define NSS_CC_XPCS_ARES 67
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sar2130p-gpucc.h b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h
new file mode 100644
index 000000000000..99ba5f092e2a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H
+
+#define GPUCC_GPU_CC_GX_BCR 0
+#define GPUCC_GPU_CC_ACD_BCR 1
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 2
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3506-cru.h b/include/dt-bindings/reset/rockchip,rk3506-cru.h
new file mode 100644
index 000000000000..31c0d4aa410f
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3506-cru.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3506_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3506_H
+
+/* CRU-->SOFTRST_CON00 */
+#define SRST_NCOREPORESET0_AC 0
+#define SRST_NCOREPORESET1_AC 1
+#define SRST_NCOREPORESET2_AC 2
+#define SRST_NCORESET0_AC 3
+#define SRST_NCORESET1_AC 4
+#define SRST_NCORESET2_AC 5
+#define SRST_NL2RESET_AC 6
+#define SRST_A_CORE_BIU_AC 7
+#define SRST_H_M0_AC 8
+
+/* CRU-->SOFTRST_CON02 */
+#define SRST_NDBGRESET 9
+#define SRST_P_CORE_BIU 10
+#define SRST_PMU 11
+
+/* CRU-->SOFTRST_CON03 */
+#define SRST_P_DBG 12
+#define SRST_POT_DBG 13
+#define SRST_P_CORE_GRF 14
+#define SRST_CORE_EMA_DETECT 15
+#define SRST_REF_PVTPLL_CORE 16
+#define SRST_P_GPIO1 17
+#define SRST_DB_GPIO1 18
+
+/* CRU-->SOFTRST_CON04 */
+#define SRST_A_CORE_PERI_BIU 19
+#define SRST_A_DSMC 20
+#define SRST_P_DSMC 21
+#define SRST_FLEXBUS 22
+#define SRST_A_FLEXBUS 23
+#define SRST_H_FLEXBUS 24
+#define SRST_A_DSMC_SLV 25
+#define SRST_H_DSMC_SLV 26
+#define SRST_DSMC_SLV 27
+
+/* CRU-->SOFTRST_CON05 */
+#define SRST_A_BUS_BIU 28
+#define SRST_H_BUS_BIU 29
+#define SRST_P_BUS_BIU 30
+#define SRST_A_SYSRAM 31
+#define SRST_H_SYSRAM 32
+#define SRST_A_DMAC0 33
+#define SRST_A_DMAC1 34
+#define SRST_H_M0 35
+#define SRST_M0_JTAG 36
+#define SRST_H_CRYPTO 37
+
+/* CRU-->SOFTRST_CON06 */
+#define SRST_H_RNG 38
+#define SRST_P_BUS_GRF 39
+#define SRST_P_TIMER0 40
+#define SRST_TIMER0_CH0 41
+#define SRST_TIMER0_CH1 42
+#define SRST_TIMER0_CH2 43
+#define SRST_TIMER0_CH3 44
+#define SRST_TIMER0_CH4 45
+#define SRST_TIMER0_CH5 46
+#define SRST_P_WDT0 47
+#define SRST_T_WDT0 48
+#define SRST_P_WDT1 49
+#define SRST_T_WDT1 50
+#define SRST_P_MAILBOX 51
+#define SRST_P_INTMUX 52
+#define SRST_P_SPINLOCK 53
+
+/* CRU-->SOFTRST_CON07 */
+#define SRST_P_DDRC 54
+#define SRST_H_DDRPHY 55
+#define SRST_P_DDRMON 56
+#define SRST_DDRMON_OSC 57
+#define SRST_P_DDR_LPC 58
+#define SRST_H_USBOTG0 59
+#define SRST_USBOTG0_ADP 60
+#define SRST_H_USBOTG1 61
+#define SRST_USBOTG1_ADP 62
+#define SRST_P_USBPHY 63
+#define SRST_USBPHY_POR 64
+#define SRST_USBPHY_OTG0 65
+#define SRST_USBPHY_OTG1 66
+
+/* CRU-->SOFTRST_CON08 */
+#define SRST_A_DMA2DDR 67
+#define SRST_P_DMA2DDR 68
+
+/* CRU-->SOFTRST_CON09 */
+#define SRST_USBOTG0_UTMI 69
+#define SRST_USBOTG1_UTMI 70
+
+/* CRU-->SOFTRST_CON10 */
+#define SRST_A_DDRC_0 71
+#define SRST_A_DDRC_1 72
+#define SRST_A_DDR_BIU 73
+#define SRST_DDRC 74
+#define SRST_DDRMON 75
+
+/* CRU-->SOFTRST_CON11 */
+#define SRST_H_LSPERI_BIU 76
+#define SRST_P_UART0 77
+#define SRST_P_UART1 78
+#define SRST_P_UART2 79
+#define SRST_P_UART3 80
+#define SRST_P_UART4 81
+#define SRST_UART0 82
+#define SRST_UART1 83
+#define SRST_UART2 84
+#define SRST_UART3 85
+#define SRST_UART4 86
+#define SRST_P_I2C0 87
+#define SRST_I2C0 88
+
+/* CRU-->SOFTRST_CON12 */
+#define SRST_P_I2C1 89
+#define SRST_I2C1 90
+#define SRST_P_I2C2 91
+#define SRST_I2C2 92
+#define SRST_P_PWM1 93
+#define SRST_PWM1 94
+#define SRST_P_SPI0 95
+#define SRST_SPI0 96
+#define SRST_P_SPI1 97
+#define SRST_SPI1 98
+#define SRST_P_GPIO2 99
+#define SRST_DB_GPIO2 100
+
+/* CRU-->SOFTRST_CON13 */
+#define SRST_P_GPIO3 101
+#define SRST_DB_GPIO3 102
+#define SRST_P_GPIO4 103
+#define SRST_DB_GPIO4 104
+#define SRST_H_CAN0 105
+#define SRST_CAN0 106
+#define SRST_H_CAN1 107
+#define SRST_CAN1 108
+#define SRST_H_PDM 109
+#define SRST_M_PDM 110
+#define SRST_PDM 111
+#define SRST_SPDIFTX 112
+#define SRST_H_SPDIFTX 113
+#define SRST_H_SPDIFRX 114
+#define SRST_SPDIFRX 115
+#define SRST_M_SAI0 116
+
+/* CRU-->SOFTRST_CON14 */
+#define SRST_H_SAI0 117
+#define SRST_M_SAI1 118
+#define SRST_H_SAI1 119
+#define SRST_H_ASRC0 120
+#define SRST_ASRC0 121
+#define SRST_H_ASRC1 122
+#define SRST_ASRC1 123
+
+/* CRU-->SOFTRST_CON17 */
+#define SRST_H_HSPERI_BIU 124
+#define SRST_H_SDMMC 125
+#define SRST_H_FSPI 126
+#define SRST_S_FSPI 127
+#define SRST_P_SPI2 128
+#define SRST_A_MAC0 129
+#define SRST_A_MAC1 130
+
+/* CRU-->SOFTRST_CON18 */
+#define SRST_M_SAI2 131
+#define SRST_H_SAI2 132
+#define SRST_H_SAI3 133
+#define SRST_M_SAI3 134
+#define SRST_H_SAI4 135
+#define SRST_M_SAI4 136
+#define SRST_H_DSM 137
+#define SRST_M_DSM 138
+#define SRST_P_AUDIO_ADC 139
+#define SRST_M_AUDIO_ADC 140
+
+/* CRU-->SOFTRST_CON19 */
+#define SRST_P_SARADC 141
+#define SRST_SARADC 142
+#define SRST_SARADC_PHY 143
+#define SRST_P_OTPC_NS 144
+#define SRST_SBPI_OTPC_NS 145
+#define SRST_USER_OTPC_NS 146
+#define SRST_P_UART5 147
+#define SRST_UART5 148
+#define SRST_P_GPIO234_IOC 149
+
+/* CRU-->SOFTRST_CON21 */
+#define SRST_A_VIO_BIU 150
+#define SRST_H_VIO_BIU 151
+#define SRST_H_RGA 152
+#define SRST_A_RGA 153
+#define SRST_CORE_RGA 154
+#define SRST_A_VOP 155
+#define SRST_H_VOP 156
+#define SRST_VOP 157
+#define SRST_P_DPHY 158
+#define SRST_P_DSI_HOST 159
+#define SRST_P_TSADC 160
+#define SRST_TSADC 161
+
+/* CRU-->SOFTRST_CON22 */
+#define SRST_P_GPIO1_IOC 162
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3528-cru.h b/include/dt-bindings/reset/rockchip,rk3528-cru.h
new file mode 100644
index 000000000000..6b024c5f2e1c
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3528-cru.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H
+
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_NL2 8
+#define SRST_CORE_BIU 9
+#define SRST_CORE_CRYPTO 10
+#define SRST_P_DBG 11
+#define SRST_POT_DBG 12
+#define SRST_NT_DBG 13
+#define SRST_P_CORE_GRF 14
+#define SRST_P_DAPLITE_BIU 15
+#define SRST_P_CPU_BIU 16
+#define SRST_REF_PVTPLL_CORE 17
+#define SRST_A_BUS_VOPGL_BIU 18
+#define SRST_A_BUS_H_BIU 19
+#define SRST_A_SYSMEM_BIU 20
+#define SRST_A_BUS_BIU 21
+#define SRST_H_BUS_BIU 22
+#define SRST_P_BUS_BIU 23
+#define SRST_P_DFT2APB 24
+#define SRST_P_BUS_GRF 25
+#define SRST_A_BUS_M_BIU 26
+#define SRST_A_GIC 27
+#define SRST_A_SPINLOCK 28
+#define SRST_A_DMAC 29
+#define SRST_P_TIMER 30
+#define SRST_TIMER0 31
+#define SRST_TIMER1 32
+#define SRST_TIMER2 33
+#define SRST_TIMER3 34
+#define SRST_TIMER4 35
+#define SRST_TIMER5 36
+#define SRST_P_JDBCK_DAP 37
+#define SRST_JDBCK_DAP 38
+#define SRST_P_WDT_NS 39
+#define SRST_T_WDT_NS 40
+#define SRST_H_TRNG_NS 41
+#define SRST_P_UART0 42
+#define SRST_S_UART0 43
+#define SRST_PKA_CRYPTO 44
+#define SRST_A_CRYPTO 45
+#define SRST_H_CRYPTO 46
+#define SRST_P_DMA2DDR 47
+#define SRST_A_DMA2DDR 48
+#define SRST_P_PWM0 49
+#define SRST_PWM0 50
+#define SRST_P_PWM1 51
+#define SRST_PWM1 52
+#define SRST_P_SCR 53
+#define SRST_A_DCF 54
+#define SRST_P_INTMUX 55
+#define SRST_A_VPU_BIU 56
+#define SRST_H_VPU_BIU 57
+#define SRST_P_VPU_BIU 58
+#define SRST_A_VPU 59
+#define SRST_H_VPU 60
+#define SRST_P_CRU_PCIE 61
+#define SRST_P_VPU_GRF 62
+#define SRST_H_SFC 63
+#define SRST_S_SFC 64
+#define SRST_C_EMMC 65
+#define SRST_H_EMMC 66
+#define SRST_A_EMMC 67
+#define SRST_B_EMMC 68
+#define SRST_T_EMMC 69
+#define SRST_P_GPIO1 70
+#define SRST_DB_GPIO1 71
+#define SRST_A_VPU_L_BIU 72
+#define SRST_P_VPU_IOC 73
+#define SRST_H_SAI_I2S0 74
+#define SRST_M_SAI_I2S0 75
+#define SRST_H_SAI_I2S2 76
+#define SRST_M_SAI_I2S2 77
+#define SRST_P_ACODEC 78
+#define SRST_P_GPIO3 79
+#define SRST_DB_GPIO3 80
+#define SRST_P_SPI1 81
+#define SRST_SPI1 82
+#define SRST_P_UART2 83
+#define SRST_S_UART2 84
+#define SRST_P_UART5 85
+#define SRST_S_UART5 86
+#define SRST_P_UART6 87
+#define SRST_S_UART6 88
+#define SRST_P_UART7 89
+#define SRST_S_UART7 90
+#define SRST_P_I2C3 91
+#define SRST_I2C3 92
+#define SRST_P_I2C5 93
+#define SRST_I2C5 94
+#define SRST_P_I2C6 95
+#define SRST_I2C6 96
+#define SRST_A_MAC 97
+#define SRST_P_PCIE 98
+#define SRST_PCIE_PIPE_PHY 99
+#define SRST_PCIE_POWER_UP 100
+#define SRST_P_PCIE_PHY 101
+#define SRST_P_PIPE_GRF 102
+#define SRST_H_SDIO0 103
+#define SRST_H_SDIO1 104
+#define SRST_TS_0 105
+#define SRST_TS_1 106
+#define SRST_P_CAN2 107
+#define SRST_CAN2 108
+#define SRST_P_CAN3 109
+#define SRST_CAN3 110
+#define SRST_P_SARADC 111
+#define SRST_SARADC 112
+#define SRST_SARADC_PHY 113
+#define SRST_P_TSADC 114
+#define SRST_TSADC 115
+#define SRST_A_USB3OTG 116
+#define SRST_A_GPU_BIU 117
+#define SRST_P_GPU_BIU 118
+#define SRST_A_GPU 119
+#define SRST_REF_PVTPLL_GPU 120
+#define SRST_H_RKVENC_BIU 121
+#define SRST_A_RKVENC_BIU 122
+#define SRST_P_RKVENC_BIU 123
+#define SRST_H_RKVENC 124
+#define SRST_A_RKVENC 125
+#define SRST_CORE_RKVENC 126
+#define SRST_H_SAI_I2S1 127
+#define SRST_M_SAI_I2S1 128
+#define SRST_P_I2C1 129
+#define SRST_I2C1 130
+#define SRST_P_I2C0 131
+#define SRST_I2C0 132
+#define SRST_P_SPI0 133
+#define SRST_SPI0 134
+#define SRST_P_GPIO4 135
+#define SRST_DB_GPIO4 136
+#define SRST_P_RKVENC_IOC 137
+#define SRST_H_SPDIF 138
+#define SRST_M_SPDIF 139
+#define SRST_H_PDM 140
+#define SRST_M_PDM 141
+#define SRST_P_UART1 142
+#define SRST_S_UART1 143
+#define SRST_P_UART3 144
+#define SRST_S_UART3 145
+#define SRST_P_RKVENC_GRF 146
+#define SRST_P_CAN0 147
+#define SRST_CAN0 148
+#define SRST_P_CAN1 149
+#define SRST_CAN1 150
+#define SRST_A_VO_BIU 151
+#define SRST_H_VO_BIU 152
+#define SRST_P_VO_BIU 153
+#define SRST_H_RGA2E 154
+#define SRST_A_RGA2E 155
+#define SRST_CORE_RGA2E 156
+#define SRST_H_VDPP 157
+#define SRST_A_VDPP 158
+#define SRST_CORE_VDPP 159
+#define SRST_P_VO_GRF 160
+#define SRST_P_CRU 161
+#define SRST_A_VOP_BIU 162
+#define SRST_H_VOP 163
+#define SRST_D_VOP0 164
+#define SRST_D_VOP1 165
+#define SRST_A_VOP 166
+#define SRST_P_HDMI 167
+#define SRST_HDMI 168
+#define SRST_P_HDMIPHY 169
+#define SRST_H_HDCP_KEY 170
+#define SRST_A_HDCP 171
+#define SRST_H_HDCP 172
+#define SRST_P_HDCP 173
+#define SRST_H_CVBS 174
+#define SRST_D_CVBS_VOP 175
+#define SRST_D_4X_CVBS_VOP 176
+#define SRST_A_JPEG_DECODER 177
+#define SRST_H_JPEG_DECODER 178
+#define SRST_A_VO_L_BIU 179
+#define SRST_A_MAC_VO 180
+#define SRST_A_JPEG_BIU 181
+#define SRST_H_SAI_I2S3 182
+#define SRST_M_SAI_I2S3 183
+#define SRST_MACPHY 184
+#define SRST_P_VCDCPHY 185
+#define SRST_P_GPIO2 186
+#define SRST_DB_GPIO2 187
+#define SRST_P_VO_IOC 188
+#define SRST_H_SDMMC0 189
+#define SRST_P_OTPC_NS 190
+#define SRST_SBPI_OTPC_NS 191
+#define SRST_USER_OTPC_NS 192
+#define SRST_HDMIHDP0 193
+#define SRST_H_USBHOST 194
+#define SRST_H_USBHOST_ARB 195
+#define SRST_HOST_UTMI 196
+#define SRST_P_UART4 197
+#define SRST_S_UART4 198
+#define SRST_P_I2C4 199
+#define SRST_I2C4 200
+#define SRST_P_I2C7 201
+#define SRST_I2C7 202
+#define SRST_P_USBPHY 203
+#define SRST_USBPHY_POR 204
+#define SRST_USBPHY_OTG 205
+#define SRST_USBPHY_HOST 206
+#define SRST_P_DDRPHY_CRU 207
+#define SRST_H_RKVDEC_BIU 208
+#define SRST_A_RKVDEC_BIU 209
+#define SRST_A_RKVDEC 210
+#define SRST_H_RKVDEC 211
+#define SRST_HEVC_CA_RKVDEC 212
+#define SRST_REF_PVTPLL_RKVDEC 213
+#define SRST_P_DDR_BIU 214
+#define SRST_P_DDRC 215
+#define SRST_P_DDRMON 216
+#define SRST_TIMER_DDRMON 217
+#define SRST_P_MSCH_BIU 218
+#define SRST_P_DDR_GRF 219
+#define SRST_P_DDR_HWLP 220
+#define SRST_P_DDRPHY 221
+#define SRST_MSCH_BIU 222
+#define SRST_A_DDR_UPCTL 223
+#define SRST_DDR_UPCTL 224
+#define SRST_DDRMON 225
+#define SRST_A_DDR_SCRAMBLE 226
+#define SRST_A_SPLIT 227
+#define SRST_DDR_PHY 228
+
+#endif /* _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H */
diff --git a/include/dt-bindings/reset/rockchip,rk3562-cru.h b/include/dt-bindings/reset/rockchip,rk3562-cru.h
new file mode 100644
index 000000000000..8df95113056e
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3562-cru.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024-2025 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3562_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3562_H
+
+/********Name=SOFTRST_CON01,Offset=0x404********/
+#define SRST_A_TOP_BIU 0
+#define SRST_A_TOP_VIO_BIU 1
+#define SRST_REF_PVTPLL_LOGIC 2
+/********Name=SOFTRST_CON03,Offset=0x40C********/
+#define SRST_NCOREPORESET0 3
+#define SRST_NCOREPORESET1 4
+#define SRST_NCOREPORESET2 5
+#define SRST_NCOREPORESET3 6
+#define SRST_NCORESET0 7
+#define SRST_NCORESET1 8
+#define SRST_NCORESET2 9
+#define SRST_NCORESET3 10
+#define SRST_NL2RESET 11
+/********Name=SOFTRST_CON04,Offset=0x410********/
+#define SRST_DAP 12
+#define SRST_P_DBG_DAPLITE 13
+#define SRST_REF_PVTPLL_CORE 14
+/********Name=SOFTRST_CON05,Offset=0x414********/
+#define SRST_A_CORE_BIU 15
+#define SRST_P_CORE_BIU 16
+#define SRST_H_CORE_BIU 17
+/********Name=SOFTRST_CON06,Offset=0x418********/
+#define SRST_A_NPU_BIU 18
+#define SRST_H_NPU_BIU 19
+#define SRST_A_RKNN 20
+#define SRST_H_RKNN 21
+#define SRST_REF_PVTPLL_NPU 22
+/********Name=SOFTRST_CON08,Offset=0x420********/
+#define SRST_A_GPU_BIU 23
+#define SRST_GPU 24
+#define SRST_REF_PVTPLL_GPU 25
+#define SRST_GPU_BRG_BIU 26
+/********Name=SOFTRST_CON09,Offset=0x424********/
+#define SRST_RKVENC_CORE 27
+#define SRST_A_VEPU_BIU 28
+#define SRST_H_VEPU_BIU 29
+#define SRST_A_RKVENC 30
+#define SRST_H_RKVENC 31
+/********Name=SOFTRST_CON10,Offset=0x428********/
+#define SRST_RKVDEC_HEVC_CA 32
+#define SRST_A_VDPU_BIU 33
+#define SRST_H_VDPU_BIU 34
+#define SRST_A_RKVDEC 35
+#define SRST_H_RKVDEC 36
+/********Name=SOFTRST_CON11,Offset=0x42C********/
+#define SRST_A_VI_BIU 37
+#define SRST_H_VI_BIU 38
+#define SRST_P_VI_BIU 39
+#define SRST_ISP 40
+#define SRST_A_VICAP 41
+#define SRST_H_VICAP 42
+#define SRST_D_VICAP 43
+#define SRST_I0_VICAP 44
+#define SRST_I1_VICAP 45
+#define SRST_I2_VICAP 46
+#define SRST_I3_VICAP 47
+/********Name=SOFTRST_CON12,Offset=0x430********/
+#define SRST_P_CSIHOST0 48
+#define SRST_P_CSIHOST1 49
+#define SRST_P_CSIHOST2 50
+#define SRST_P_CSIHOST3 51
+#define SRST_P_CSIPHY0 52
+#define SRST_P_CSIPHY1 53
+/********Name=SOFTRST_CON13,Offset=0x434********/
+#define SRST_A_VO_BIU 54
+#define SRST_H_VO_BIU 55
+#define SRST_A_VOP 56
+#define SRST_H_VOP 57
+#define SRST_D_VOP 58
+#define SRST_D_VOP1 59
+/********Name=SOFTRST_CON14,Offset=0x438********/
+#define SRST_A_RGA_BIU 60
+#define SRST_H_RGA_BIU 61
+#define SRST_A_RGA 62
+#define SRST_H_RGA 63
+#define SRST_RGA_CORE 64
+#define SRST_A_JDEC 65
+#define SRST_H_JDEC 66
+/********Name=SOFTRST_CON15,Offset=0x43C********/
+#define SRST_B_EBK_BIU 67
+#define SRST_P_EBK_BIU 68
+#define SRST_AHB2AXI_EBC 69
+#define SRST_H_EBC 70
+#define SRST_D_EBC 71
+#define SRST_H_EINK 72
+#define SRST_P_EINK 73
+/********Name=SOFTRST_CON16,Offset=0x440********/
+#define SRST_P_PHP_BIU 74
+#define SRST_A_PHP_BIU 75
+#define SRST_P_PCIE20 76
+#define SRST_PCIE20_POWERUP 77
+#define SRST_USB3OTG 78
+/********Name=SOFTRST_CON17,Offset=0x444********/
+#define SRST_PIPEPHY 79
+/********Name=SOFTRST_CON18,Offset=0x448********/
+#define SRST_A_BUS_BIU 80
+#define SRST_H_BUS_BIU 81
+#define SRST_P_BUS_BIU 82
+/********Name=SOFTRST_CON19,Offset=0x44C********/
+#define SRST_P_I2C1 83
+#define SRST_P_I2C2 84
+#define SRST_P_I2C3 85
+#define SRST_P_I2C4 86
+#define SRST_P_I2C5 87
+#define SRST_I2C1 88
+#define SRST_I2C2 89
+#define SRST_I2C3 90
+#define SRST_I2C4 91
+#define SRST_I2C5 92
+/********Name=SOFTRST_CON20,Offset=0x450********/
+#define SRST_BUS_GPIO3 93
+#define SRST_BUS_GPIO4 94
+/********Name=SOFTRST_CON21,Offset=0x454********/
+#define SRST_P_TIMER 95
+#define SRST_TIMER0 96
+#define SRST_TIMER1 97
+#define SRST_TIMER2 98
+#define SRST_TIMER3 99
+#define SRST_TIMER4 100
+#define SRST_TIMER5 101
+#define SRST_P_STIMER 102
+#define SRST_STIMER0 103
+#define SRST_STIMER1 104
+/********Name=SOFTRST_CON22,Offset=0x458********/
+#define SRST_P_WDTNS 105
+#define SRST_WDTNS 106
+#define SRST_P_GRF 107
+#define SRST_P_SGRF 108
+#define SRST_P_MAILBOX 109
+#define SRST_P_INTC 110
+#define SRST_A_BUS_GIC400 111
+#define SRST_A_BUS_GIC400_DEBUG 112
+/********Name=SOFTRST_CON23,Offset=0x45C********/
+#define SRST_A_BUS_SPINLOCK 113
+#define SRST_A_DCF 114
+#define SRST_P_DCF 115
+#define SRST_F_BUS_CM0_CORE 116
+#define SRST_T_BUS_CM0_JTAG 117
+#define SRST_H_ICACHE 118
+#define SRST_H_DCACHE 119
+/********Name=SOFTRST_CON24,Offset=0x460********/
+#define SRST_P_TSADC 120
+#define SRST_TSADC 121
+#define SRST_TSADCPHY 122
+#define SRST_P_DFT2APB 123
+/********Name=SOFTRST_CON25,Offset=0x464********/
+#define SRST_A_GMAC 124
+#define SRST_P_APB2ASB_VCCIO156 125
+#define SRST_P_DSIPHY 126
+#define SRST_P_DSITX 127
+#define SRST_P_CPU_EMA_DET 128
+#define SRST_P_HASH 129
+#define SRST_P_TOPCRU 130
+/********Name=SOFTRST_CON26,Offset=0x468********/
+#define SRST_P_ASB2APB_VCCIO156 131
+#define SRST_P_IOC_VCCIO156 132
+#define SRST_P_GPIO3_VCCIO156 133
+#define SRST_P_GPIO4_VCCIO156 134
+#define SRST_P_SARADC_VCCIO156 135
+#define SRST_SARADC_VCCIO156 136
+#define SRST_SARADC_VCCIO156_PHY 137
+/********Name=SOFTRST_CON27,Offset=0x46C********/
+#define SRST_A_MAC100 138
+
+/********Name=PMU0SOFTRST_CON00,Offset=0x10200********/
+#define SRST_P_PMU0_CRU 139
+#define SRST_P_PMU0_PMU 140
+#define SRST_PMU0_PMU 141
+#define SRST_P_PMU0_HP_TIMER 142
+#define SRST_PMU0_HP_TIMER 143
+#define SRST_PMU0_32K_HP_TIMER 144
+#define SRST_P_PMU0_PVTM 145
+#define SRST_PMU0_PVTM 146
+#define SRST_P_IOC_PMUIO 147
+#define SRST_P_PMU0_GPIO0 148
+#define SRST_PMU0_GPIO0 149
+#define SRST_P_PMU0_GRF 150
+#define SRST_P_PMU0_SGRF 151
+/********Name=PMU0SOFTRST_CON01,Offset=0x10204********/
+#define SRST_DDR_FAIL_SAFE 152
+#define SRST_P_PMU0_SCRKEYGEN 153
+/********Name=PMU0SOFTRST_CON02,Offset=0x10208********/
+#define SRST_P_PMU0_I2C0 154
+#define SRST_PMU0_I2C0 155
+
+/********Name=PMU1SOFTRST_CON00,Offset=0x18200********/
+#define SRST_P_PMU1_CRU 156
+#define SRST_H_PMU1_MEM 157
+#define SRST_H_PMU1_BIU 158
+#define SRST_P_PMU1_BIU 159
+#define SRST_P_PMU1_UART0 160
+#define SRST_S_PMU1_UART0 161
+/********Name=PMU1SOFTRST_CON01,Offset=0x18204********/
+#define SRST_P_PMU1_SPI0 162
+#define SRST_PMU1_SPI0 163
+#define SRST_P_PMU1_PWM0 164
+#define SRST_PMU1_PWM0 165
+/********Name=PMU1SOFTRST_CON02,Offset=0x18208********/
+#define SRST_F_PMU1_CM0_CORE 166
+#define SRST_T_PMU1_CM0_JTAG 167
+#define SRST_P_PMU1_WDTNS 168
+#define SRST_PMU1_WDTNS 169
+#define SRST_PMU1_MAILBOX 170
+
+/********Name=DDRSOFTRST_CON00,Offset=0x20200********/
+#define SRST_MSCH_BRG_BIU 171
+#define SRST_P_MSCH_BIU 172
+#define SRST_P_DDR_HWLP 173
+#define SRST_P_DDR_PHY 290
+#define SRST_P_DDR_DFICTL 174
+#define SRST_P_DDR_DMA2DDR 175
+/********Name=DDRSOFTRST_CON01,Offset=0x20204********/
+#define SRST_P_DDR_MON 176
+#define SRST_TM_DDR_MON 177
+#define SRST_P_DDR_GRF 178
+#define SRST_P_DDR_CRU 179
+#define SRST_P_SUBDDR_CRU 180
+
+/********Name=SUBDDRSOFTRST_CON00,Offset=0x28200********/
+#define SRST_MSCH_BIU 181
+#define SRST_DDR_PHY 182
+#define SRST_DDR_DFICTL 183
+#define SRST_DDR_SCRAMBLE 184
+#define SRST_DDR_MON 185
+#define SRST_A_DDR_SPLIT 186
+#define SRST_DDR_DMA2DDR 187
+
+/********Name=PERISOFTRST_CON01,Offset=0x30404********/
+#define SRST_A_PERI_BIU 188
+#define SRST_H_PERI_BIU 189
+#define SRST_P_PERI_BIU 190
+#define SRST_P_PERICRU 191
+/********Name=PERISOFTRST_CON02,Offset=0x30408********/
+#define SRST_H_SAI0_8CH 192
+#define SRST_M_SAI0_8CH 193
+#define SRST_H_SAI1_8CH 194
+#define SRST_M_SAI1_8CH 195
+#define SRST_H_SAI2_2CH 196
+#define SRST_M_SAI2_2CH 197
+/********Name=PERISOFTRST_CON03,Offset=0x3040C********/
+#define SRST_H_DSM 198
+#define SRST_DSM 199
+#define SRST_H_PDM 200
+#define SRST_M_PDM 201
+#define SRST_H_SPDIF 202
+#define SRST_M_SPDIF 203
+/********Name=PERISOFTRST_CON04,Offset=0x30410********/
+#define SRST_H_SDMMC0 204
+#define SRST_H_SDMMC1 205
+#define SRST_H_EMMC 206
+#define SRST_A_EMMC 207
+#define SRST_C_EMMC 208
+#define SRST_B_EMMC 209
+#define SRST_T_EMMC 210
+#define SRST_S_SFC 211
+#define SRST_H_SFC 212
+/********Name=PERISOFTRST_CON05,Offset=0x30414********/
+#define SRST_H_USB2HOST 213
+#define SRST_H_USB2HOST_ARB 214
+#define SRST_USB2HOST_UTMI 215
+/********Name=PERISOFTRST_CON06,Offset=0x30418********/
+#define SRST_P_SPI1 216
+#define SRST_SPI1 217
+#define SRST_P_SPI2 218
+#define SRST_SPI2 219
+/********Name=PERISOFTRST_CON07,Offset=0x3041C********/
+#define SRST_P_UART1 220
+#define SRST_P_UART2 221
+#define SRST_P_UART3 222
+#define SRST_P_UART4 223
+#define SRST_P_UART5 224
+#define SRST_P_UART6 225
+#define SRST_P_UART7 226
+#define SRST_P_UART8 227
+#define SRST_P_UART9 228
+#define SRST_S_UART1 229
+#define SRST_S_UART2 230
+/********Name=PERISOFTRST_CON08,Offset=0x30420********/
+#define SRST_S_UART3 231
+#define SRST_S_UART4 232
+#define SRST_S_UART5 233
+#define SRST_S_UART6 234
+#define SRST_S_UART7 235
+/********Name=PERISOFTRST_CON09,Offset=0x30424********/
+#define SRST_S_UART8 236
+#define SRST_S_UART9 237
+/********Name=PERISOFTRST_CON10,Offset=0x30428********/
+#define SRST_P_PWM1_PERI 238
+#define SRST_PWM1_PERI 239
+#define SRST_P_PWM2_PERI 240
+#define SRST_PWM2_PERI 241
+#define SRST_P_PWM3_PERI 242
+#define SRST_PWM3_PERI 243
+/********Name=PERISOFTRST_CON11,Offset=0x3042C********/
+#define SRST_P_CAN0 244
+#define SRST_CAN0 245
+#define SRST_P_CAN1 246
+#define SRST_CAN1 247
+/********Name=PERISOFTRST_CON12,Offset=0x30430********/
+#define SRST_A_CRYPTO 248
+#define SRST_H_CRYPTO 249
+#define SRST_P_CRYPTO 250
+#define SRST_CORE_CRYPTO 251
+#define SRST_PKA_CRYPTO 252
+#define SRST_H_KLAD 253
+#define SRST_P_KEY_READER 254
+#define SRST_H_RK_RNG_NS 255
+#define SRST_H_RK_RNG_S 256
+#define SRST_H_TRNG_NS 257
+#define SRST_H_TRNG_S 258
+#define SRST_H_CRYPTO_S 259
+/********Name=PERISOFTRST_CON13,Offset=0x30434********/
+#define SRST_P_PERI_WDT 260
+#define SRST_T_PERI_WDT 261
+#define SRST_A_SYSMEM 262
+#define SRST_H_BOOTROM 263
+#define SRST_P_PERI_GRF 264
+#define SRST_A_DMAC 265
+#define SRST_A_RKDMAC 267
+/********Name=PERISOFTRST_CON14,Offset=0x30438********/
+#define SRST_P_OTPC_NS 268
+#define SRST_SBPI_OTPC_NS 269
+#define SRST_USER_OTPC_NS 270
+#define SRST_P_OTPC_S 271
+#define SRST_SBPI_OTPC_S 272
+#define SRST_USER_OTPC_S 273
+#define SRST_OTPC_ARB 274
+#define SRST_P_OTPPHY 275
+#define SRST_OTP_NPOR 276
+/********Name=PERISOFTRST_CON15,Offset=0x3043C********/
+#define SRST_P_USB2PHY 277
+#define SRST_USB2PHY_POR 278
+#define SRST_USB2PHY_OTG 279
+#define SRST_USB2PHY_HOST 280
+#define SRST_P_PIPEPHY 281
+/********Name=PERISOFTRST_CON16,Offset=0x30440********/
+#define SRST_P_SARADC 282
+#define SRST_SARADC 283
+#define SRST_SARADC_PHY 284
+#define SRST_P_IOC_VCCIO234 285
+/********Name=PERISOFTRST_CON17,Offset=0x30444********/
+#define SRST_P_PERI_GPIO1 286
+#define SRST_P_PERI_GPIO2 287
+#define SRST_PERI_GPIO1 288
+#define SRST_PERI_GPIO2 289
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3576-cru.h b/include/dt-bindings/reset/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ae856906f3a3
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3576-cru.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_A_TOP_MID_BIU 2
+#define SRST_A_SECURE_HIGH_BIU 3
+#define SRST_H_TOP_BIU 4
+
+#define SRST_H_VO0VOP_CHANNEL_BIU 5
+#define SRST_A_VO0VOP_CHANNEL_BIU 6
+
+#define SRST_BISRINTF 7
+
+#define SRST_H_AUDIO_BIU 8
+#define SRST_H_ASRC_2CH_0 9
+#define SRST_H_ASRC_2CH_1 10
+#define SRST_H_ASRC_4CH_0 11
+#define SRST_H_ASRC_4CH_1 12
+#define SRST_ASRC_2CH_0 13
+#define SRST_ASRC_2CH_1 14
+#define SRST_ASRC_4CH_0 15
+#define SRST_ASRC_4CH_1 16
+#define SRST_M_SAI0_8CH 17
+#define SRST_H_SAI0_8CH 18
+#define SRST_H_SPDIF_RX0 19
+#define SRST_M_SPDIF_RX0 20
+
+#define SRST_H_SPDIF_RX1 21
+#define SRST_M_SPDIF_RX1 22
+#define SRST_M_SAI1_8CH 23
+#define SRST_H_SAI1_8CH 24
+#define SRST_M_SAI2_2CH 25
+#define SRST_H_SAI2_2CH 26
+#define SRST_M_SAI3_2CH 27
+#define SRST_H_SAI3_2CH 28
+
+#define SRST_M_SAI4_2CH 29
+#define SRST_H_SAI4_2CH 30
+#define SRST_H_ACDCDIG_DSM 31
+#define SRST_M_ACDCDIG_DSM 32
+#define SRST_PDM1 33
+#define SRST_H_PDM1 34
+#define SRST_M_PDM1 35
+#define SRST_H_SPDIF_TX0 36
+#define SRST_M_SPDIF_TX0 37
+#define SRST_H_SPDIF_TX1 38
+#define SRST_M_SPDIF_TX1 39
+
+#define SRST_A_BUS_BIU 40
+#define SRST_P_BUS_BIU 41
+#define SRST_P_CRU 42
+#define SRST_H_CAN0 43
+#define SRST_CAN0 44
+#define SRST_H_CAN1 45
+#define SRST_CAN1 46
+#define SRST_P_INTMUX2BUS 47
+#define SRST_P_VCCIO_IOC 48
+#define SRST_H_BUS_BIU 49
+#define SRST_KEY_SHIFT 50
+
+#define SRST_P_I2C1 51
+#define SRST_P_I2C2 52
+#define SRST_P_I2C3 53
+#define SRST_P_I2C4 54
+#define SRST_P_I2C5 55
+#define SRST_P_I2C6 56
+#define SRST_P_I2C7 57
+#define SRST_P_I2C8 58
+#define SRST_P_I2C9 59
+#define SRST_P_WDT_BUSMCU 60
+#define SRST_T_WDT_BUSMCU 61
+#define SRST_A_GIC 62
+#define SRST_I2C1 63
+#define SRST_I2C2 64
+#define SRST_I2C3 65
+#define SRST_I2C4 66
+
+#define SRST_I2C5 67
+#define SRST_I2C6 68
+#define SRST_I2C7 69
+#define SRST_I2C8 70
+#define SRST_I2C9 71
+#define SRST_P_SARADC 72
+#define SRST_SARADC 73
+#define SRST_P_TSADC 74
+#define SRST_TSADC 75
+#define SRST_P_UART0 76
+#define SRST_P_UART2 77
+#define SRST_P_UART3 78
+#define SRST_P_UART4 79
+#define SRST_P_UART5 80
+#define SRST_P_UART6 81
+
+#define SRST_P_UART7 82
+#define SRST_P_UART8 83
+#define SRST_P_UART9 84
+#define SRST_P_UART10 85
+#define SRST_P_UART11 86
+#define SRST_S_UART0 87
+#define SRST_S_UART2 88
+#define SRST_S_UART3 89
+#define SRST_S_UART4 90
+#define SRST_S_UART5 91
+
+#define SRST_S_UART6 92
+#define SRST_S_UART7 93
+#define SRST_S_UART8 94
+#define SRST_S_UART9 95
+#define SRST_S_UART10 96
+#define SRST_S_UART11 97
+#define SRST_P_SPI0 98
+#define SRST_P_SPI1 99
+#define SRST_P_SPI2 100
+
+#define SRST_P_SPI3 101
+#define SRST_P_SPI4 102
+#define SRST_SPI0 103
+#define SRST_SPI1 104
+#define SRST_SPI2 105
+#define SRST_SPI3 106
+#define SRST_SPI4 107
+#define SRST_P_WDT0 108
+#define SRST_T_WDT0 109
+#define SRST_P_SYS_GRF 110
+#define SRST_P_PWM1 111
+#define SRST_PWM1 112
+
+#define SRST_P_BUSTIMER0 113
+#define SRST_P_BUSTIMER1 114
+#define SRST_TIMER0 115
+#define SRST_TIMER1 116
+#define SRST_TIMER2 117
+#define SRST_TIMER3 118
+#define SRST_TIMER4 119
+#define SRST_TIMER5 120
+#define SRST_P_BUSIOC 121
+#define SRST_P_MAILBOX0 122
+#define SRST_P_GPIO1 123
+
+#define SRST_GPIO1 124
+#define SRST_P_GPIO2 125
+#define SRST_GPIO2 126
+#define SRST_P_GPIO3 127
+#define SRST_GPIO3 128
+#define SRST_P_GPIO4 129
+#define SRST_GPIO4 130
+#define SRST_A_DECOM 131
+#define SRST_P_DECOM 132
+#define SRST_D_DECOM 133
+#define SRST_TIMER6 134
+#define SRST_TIMER7 135
+#define SRST_TIMER8 136
+#define SRST_TIMER9 137
+#define SRST_TIMER10 138
+
+#define SRST_TIMER11 139
+#define SRST_A_DMAC0 140
+#define SRST_A_DMAC1 141
+#define SRST_A_DMAC2 142
+#define SRST_A_SPINLOCK 143
+#define SRST_REF_PVTPLL_BUS 144
+#define SRST_H_I3C0 145
+#define SRST_H_I3C1 146
+#define SRST_H_BUS_CM0_BIU 147
+#define SRST_F_BUS_CM0_CORE 148
+#define SRST_T_BUS_CM0_JTAG 149
+
+#define SRST_P_INTMUX2PMU 150
+#define SRST_P_INTMUX2DDR 151
+#define SRST_P_PVTPLL_BUS 152
+#define SRST_P_PWM2 153
+#define SRST_PWM2 154
+#define SRST_FREQ_PWM1 155
+#define SRST_COUNTER_PWM1 156
+#define SRST_I3C0 157
+#define SRST_I3C1 158
+
+#define SRST_P_DDR_MON_CH0 159
+#define SRST_P_DDR_BIU 160
+#define SRST_P_DDR_UPCTL_CH0 161
+#define SRST_TM_DDR_MON_CH0 162
+#define SRST_A_DDR_BIU 163
+#define SRST_DFI_CH0 164
+#define SRST_DDR_MON_CH0 165
+#define SRST_P_DDR_HWLP_CH0 166
+#define SRST_P_DDR_MON_CH1 167
+#define SRST_P_DDR_HWLP_CH1 168
+
+#define SRST_P_DDR_UPCTL_CH1 169
+#define SRST_TM_DDR_MON_CH1 170
+#define SRST_DFI_CH1 171
+#define SRST_A_DDR01_MSCH0 172
+#define SRST_A_DDR01_MSCH1 173
+#define SRST_DDR_MON_CH1 174
+#define SRST_DDR_SCRAMBLE_CH0 175
+#define SRST_DDR_SCRAMBLE_CH1 176
+#define SRST_P_AHB2APB 177
+#define SRST_H_AHB2APB 178
+#define SRST_H_DDR_BIU 179
+#define SRST_F_DDR_CM0_CORE 180
+
+#define SRST_P_DDR01_MSCH0 181
+#define SRST_P_DDR01_MSCH1 182
+#define SRST_DDR_TIMER0 183
+#define SRST_DDR_TIMER1 184
+#define SRST_T_WDT_DDR 185
+#define SRST_P_WDT 186
+#define SRST_P_TIMER 187
+#define SRST_T_DDR_CM0_JTAG 188
+#define SRST_P_DDR_GRF 189
+
+#define SRST_DDR_UPCTL_CH0 190
+#define SRST_A_DDR_UPCTL_0_CH0 191
+#define SRST_A_DDR_UPCTL_1_CH0 192
+#define SRST_A_DDR_UPCTL_2_CH0 193
+#define SRST_A_DDR_UPCTL_3_CH0 194
+#define SRST_A_DDR_UPCTL_4_CH0 195
+
+#define SRST_DDR_UPCTL_CH1 196
+#define SRST_A_DDR_UPCTL_0_CH1 197
+#define SRST_A_DDR_UPCTL_1_CH1 198
+#define SRST_A_DDR_UPCTL_2_CH1 199
+#define SRST_A_DDR_UPCTL_3_CH1 200
+#define SRST_A_DDR_UPCTL_4_CH1 201
+
+#define SRST_REF_PVTPLL_DDR 202
+#define SRST_P_PVTPLL_DDR 203
+
+#define SRST_A_RKNN0 204
+#define SRST_A_RKNN0_BIU 205
+#define SRST_L_RKNN0_BIU 206
+
+#define SRST_A_RKNN1 207
+#define SRST_A_RKNN1_BIU 208
+#define SRST_L_RKNN1_BIU 209
+
+#define SRST_NPU_DAP 210
+#define SRST_L_NPUSUBSYS_BIU 211
+#define SRST_P_NPUTOP_BIU 212
+#define SRST_P_NPU_TIMER 213
+#define SRST_NPUTIMER0 214
+#define SRST_NPUTIMER1 215
+#define SRST_P_NPU_WDT 216
+#define SRST_T_NPU_WDT 217
+
+#define SRST_A_RKNN_CBUF 218
+#define SRST_A_RVCORE0 219
+#define SRST_P_NPU_GRF 220
+#define SRST_P_PVTPLL_NPU 221
+#define SRST_NPU_PVTPLL 222
+#define SRST_H_NPU_CM0_BIU 223
+#define SRST_F_NPU_CM0_CORE 224
+#define SRST_T_NPU_CM0_JTAG 225
+#define SRST_A_RKNNTOP_BIU 226
+#define SRST_H_RKNN_CBUF 227
+#define SRST_H_RKNNTOP_BIU 228
+
+#define SRST_H_NVM_BIU 229
+#define SRST_A_NVM_BIU 230
+#define SRST_S_FSPI 231
+#define SRST_H_FSPI 232
+#define SRST_C_EMMC 233
+#define SRST_H_EMMC 234
+#define SRST_A_EMMC 235
+#define SRST_B_EMMC 236
+#define SRST_T_EMMC 237
+
+#define SRST_P_GRF 238
+#define SRST_P_PHP_BIU 239
+#define SRST_A_PHP_BIU 240
+#define SRST_P_PCIE0 241
+#define SRST_PCIE0_POWER_UP 242
+
+#define SRST_A_USB3OTG1 243
+#define SRST_A_MMU0 244
+#define SRST_A_SLV_MMU0 245
+#define SRST_A_MMU1 246
+
+#define SRST_A_SLV_MMU1 247
+#define SRST_P_PCIE1 248
+#define SRST_PCIE1_POWER_UP 249
+
+#define SRST_RXOOB0 250
+#define SRST_RXOOB1 251
+#define SRST_PMALIVE0 252
+#define SRST_PMALIVE1 253
+#define SRST_A_SATA0 254
+#define SRST_A_SATA1 255
+#define SRST_ASIC1 256
+#define SRST_ASIC0 257
+
+#define SRST_P_CSIDPHY1 258
+#define SRST_SCAN_CSIDPHY1 259
+
+#define SRST_P_SDGMAC_GRF 260
+#define SRST_P_SDGMAC_BIU 261
+#define SRST_A_SDGMAC_BIU 262
+#define SRST_H_SDGMAC_BIU 263
+#define SRST_A_GMAC0 264
+#define SRST_A_GMAC1 265
+#define SRST_P_GMAC0 266
+#define SRST_P_GMAC1 267
+#define SRST_H_SDIO 268
+
+#define SRST_H_SDMMC0 269
+#define SRST_S_FSPI1 270
+#define SRST_H_FSPI1 271
+#define SRST_A_DSMC_BIU 272
+#define SRST_A_DSMC 273
+#define SRST_P_DSMC 274
+#define SRST_H_HSGPIO 275
+#define SRST_HSGPIO 276
+#define SRST_A_HSGPIO 277
+
+#define SRST_H_RKVDEC 278
+#define SRST_H_RKVDEC_BIU 279
+#define SRST_A_RKVDEC_BIU 280
+#define SRST_RKVDEC_HEVC_CA 281
+#define SRST_RKVDEC_CORE 282
+
+#define SRST_A_USB_BIU 283
+#define SRST_P_USBUFS_BIU 284
+#define SRST_A_USB3OTG0 285
+#define SRST_A_UFS_BIU 286
+#define SRST_A_MMU2 287
+#define SRST_A_SLV_MMU2 288
+#define SRST_A_UFS_SYS 289
+
+#define SRST_A_UFS 290
+#define SRST_P_USBUFS_GRF 291
+#define SRST_P_UFS_GRF 292
+
+#define SRST_H_VPU_BIU 293
+#define SRST_A_JPEG_BIU 294
+#define SRST_A_RGA_BIU 295
+#define SRST_A_VDPP_BIU 296
+#define SRST_A_EBC_BIU 297
+#define SRST_H_RGA2E_0 298
+#define SRST_A_RGA2E_0 299
+#define SRST_CORE_RGA2E_0 300
+
+#define SRST_A_JPEG 301
+#define SRST_H_JPEG 302
+#define SRST_H_VDPP 303
+#define SRST_A_VDPP 304
+#define SRST_CORE_VDPP 305
+#define SRST_H_RGA2E_1 306
+#define SRST_A_RGA2E_1 307
+#define SRST_CORE_RGA2E_1 308
+#define SRST_H_EBC 309
+#define SRST_A_EBC 310
+#define SRST_D_EBC 311
+
+#define SRST_H_VEPU0_BIU 312
+#define SRST_A_VEPU0_BIU 313
+#define SRST_H_VEPU0 314
+#define SRST_A_VEPU0 315
+#define SRST_VEPU0_CORE 316
+
+#define SRST_A_VI_BIU 317
+#define SRST_H_VI_BIU 318
+#define SRST_P_VI_BIU 319
+#define SRST_D_VICAP 320
+#define SRST_A_VICAP 321
+#define SRST_H_VICAP 322
+#define SRST_ISP0 323
+#define SRST_ISP0_VICAP 324
+
+#define SRST_CORE_VPSS 325
+#define SRST_P_CSI_HOST_0 326
+#define SRST_P_CSI_HOST_1 327
+#define SRST_P_CSI_HOST_2 328
+#define SRST_P_CSI_HOST_3 329
+#define SRST_P_CSI_HOST_4 330
+
+#define SRST_CIFIN 331
+#define SRST_VICAP_I0CLK 332
+#define SRST_VICAP_I1CLK 333
+#define SRST_VICAP_I2CLK 334
+#define SRST_VICAP_I3CLK 335
+#define SRST_VICAP_I4CLK 336
+
+#define SRST_A_VOP_BIU 337
+#define SRST_A_VOP2_BIU 338
+#define SRST_H_VOP_BIU 339
+#define SRST_P_VOP_BIU 340
+#define SRST_H_VOP 341
+#define SRST_A_VOP 342
+#define SRST_D_VP0 343
+
+#define SRST_D_VP1 344
+#define SRST_D_VP2 345
+#define SRST_P_VOP2_BIU 346
+#define SRST_P_VOPGRF 347
+
+#define SRST_H_VO0_BIU 348
+#define SRST_P_VO0_BIU 349
+#define SRST_A_HDCP0_BIU 350
+#define SRST_P_VO0_GRF 351
+#define SRST_A_HDCP0 352
+#define SRST_H_HDCP0 353
+#define SRST_HDCP0 354
+
+#define SRST_P_DSIHOST0 355
+#define SRST_DSIHOST0 356
+#define SRST_P_HDMITX0 357
+#define SRST_HDMITX0_REF 358
+#define SRST_P_EDP0 359
+#define SRST_EDP0_24M 360
+
+#define SRST_M_SAI5_8CH 361
+#define SRST_H_SAI5_8CH 362
+#define SRST_M_SAI6_8CH 363
+#define SRST_H_SAI6_8CH 364
+#define SRST_H_SPDIF_TX2 365
+#define SRST_M_SPDIF_TX2 366
+#define SRST_H_SPDIF_RX2 367
+#define SRST_M_SPDIF_RX2 368
+
+#define SRST_H_SAI8_8CH 369
+#define SRST_M_SAI8_8CH 370
+
+#define SRST_H_VO1_BIU 371
+#define SRST_P_VO1_BIU 372
+#define SRST_M_SAI7_8CH 373
+#define SRST_H_SAI7_8CH 374
+#define SRST_H_SPDIF_TX3 375
+#define SRST_H_SPDIF_TX4 376
+#define SRST_H_SPDIF_TX5 377
+#define SRST_M_SPDIF_TX3 378
+
+#define SRST_DP0 379
+#define SRST_P_VO1_GRF 380
+#define SRST_A_HDCP1_BIU 381
+#define SRST_A_HDCP1 382
+#define SRST_H_HDCP1 383
+#define SRST_HDCP1 384
+#define SRST_H_SAI9_8CH 385
+#define SRST_M_SAI9_8CH 386
+#define SRST_M_SPDIF_TX4 387
+#define SRST_M_SPDIF_TX5 388
+
+#define SRST_GPU 389
+#define SRST_A_S_GPU_BIU 390
+#define SRST_A_M0_GPU_BIU 391
+#define SRST_P_GPU_BIU 392
+#define SRST_P_GPU_GRF 393
+#define SRST_GPU_PVTPLL 394
+#define SRST_P_PVTPLL_GPU 395
+
+#define SRST_A_CENTER_BIU 396
+#define SRST_A_DMA2DDR 397
+#define SRST_A_DDR_SHAREMEM 398
+#define SRST_A_DDR_SHAREMEM_BIU 399
+#define SRST_H_CENTER_BIU 400
+#define SRST_P_CENTER_GRF 401
+#define SRST_P_DMA2DDR 402
+#define SRST_P_SHAREMEM 403
+#define SRST_P_CENTER_BIU 404
+
+#define SRST_LINKSYM_HDMITXPHY0 405
+
+#define SRST_DP0_PIXELCLK 406
+#define SRST_PHY_DP0_TX 407
+#define SRST_DP1_PIXELCLK 408
+#define SRST_DP2_PIXELCLK 409
+
+#define SRST_H_VEPU1_BIU 410
+#define SRST_A_VEPU1_BIU 411
+#define SRST_H_VEPU1 412
+#define SRST_A_VEPU1 413
+#define SRST_VEPU1_CORE 414
+
+#define SRST_P_PHPPHY_CRU 415
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 416
+#define SRST_P_PCIE2_COMBOPHY0 417
+#define SRST_P_PCIE2_COMBOPHY0_GRF 418
+#define SRST_P_PCIE2_COMBOPHY1 419
+#define SRST_P_PCIE2_COMBOPHY1_GRF 420
+
+#define SRST_PCIE0_PIPE_PHY 421
+#define SRST_PCIE1_PIPE_PHY 422
+
+#define SRST_H_CRYPTO_NS 423
+#define SRST_H_TRNG_NS 424
+#define SRST_P_OTPC_NS 425
+#define SRST_OTPC_NS 426
+
+#define SRST_P_HDPTX_GRF 427
+#define SRST_P_HDPTX_APB 428
+#define SRST_P_MIPI_DCPHY 429
+#define SRST_P_DCPHY_GRF 430
+#define SRST_P_BOT0_APB2ASB 431
+#define SRST_P_BOT1_APB2ASB 432
+#define SRST_USB2DEBUG 433
+#define SRST_P_CSIPHY_GRF 434
+#define SRST_P_CSIPHY 435
+#define SRST_P_USBPHY_GRF_0 436
+#define SRST_P_USBPHY_GRF_1 437
+#define SRST_P_USBDP_GRF 438
+#define SRST_P_USBDPPHY 439
+#define SRST_USBDP_COMBO_PHY_INIT 440
+
+#define SRST_USBDP_COMBO_PHY_CMN 441
+#define SRST_USBDP_COMBO_PHY_LANE 442
+#define SRST_USBDP_COMBO_PHY_PCS 443
+#define SRST_M_MIPI_DCPHY 444
+#define SRST_S_MIPI_DCPHY 445
+#define SRST_SCAN_CSIPHY 446
+#define SRST_P_VCCIO6_IOC 447
+#define SRST_OTGPHY_0 448
+#define SRST_OTGPHY_1 449
+#define SRST_HDPTX_INIT 450
+#define SRST_HDPTX_CMN 451
+#define SRST_HDPTX_LANE 452
+#define SRST_HDMITXHDP 453
+
+#define SRST_MPHY_INIT 454
+#define SRST_P_MPHY_GRF 455
+#define SRST_P_VCCIO7_IOC 456
+
+#define SRST_H_PMU1_BIU 457
+#define SRST_P_PMU1_NIU 458
+#define SRST_H_PMU_CM0_BIU 459
+#define SRST_PMU_CM0_CORE 460
+#define SRST_PMU_CM0_JTAG 461
+
+#define SRST_P_CRU_PMU1 462
+#define SRST_P_PMU1_GRF 463
+#define SRST_P_PMU1_IOC 464
+#define SRST_P_PMU1WDT 465
+#define SRST_T_PMU1WDT 466
+#define SRST_P_PMUTIMER 467
+#define SRST_PMUTIMER0 468
+#define SRST_PMUTIMER1 469
+#define SRST_P_PMU1PWM 470
+#define SRST_PMU1PWM 471
+
+#define SRST_P_I2C0 472
+#define SRST_I2C0 473
+#define SRST_S_UART1 474
+#define SRST_P_UART1 475
+#define SRST_PDM0 476
+#define SRST_H_PDM0 477
+
+#define SRST_M_PDM0 478
+#define SRST_H_VAD 479
+
+#define SRST_P_PMU0GRF 480
+#define SRST_P_PMU0IOC 481
+#define SRST_P_GPIO0 482
+#define SRST_DB_GPIO0 483
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h
index e2fe4bd5f7f0..878beae6dc3b 100644
--- a/include/dt-bindings/reset/rockchip,rk3588-cru.h
+++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
- * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2021, 2024 Rockchip Electronics Co. Ltd.
* Copyright (c) 2022 Collabora Ltd.
*
* Author: Elaine Zhang <zhangqing@rock-chips.com>
@@ -753,4 +753,43 @@
#define SRST_A_HDMIRX_BIU 660
+/* SCMI Secure Resets */
+
+/* Name=SECURE_SOFTRST_CON00,Offset=0xA00 */
+#define SCMI_SRST_A_SECURE_NS_BIU 10
+#define SCMI_SRST_H_SECURE_NS_BIU 11
+#define SCMI_SRST_A_SECURE_S_BIU 12
+#define SCMI_SRST_H_SECURE_S_BIU 13
+#define SCMI_SRST_P_SECURE_S_BIU 14
+#define SCMI_SRST_CRYPTO_CORE 15
+/* Name=SECURE_SOFTRST_CON01,Offset=0xA04 */
+#define SCMI_SRST_CRYPTO_PKA 16
+#define SCMI_SRST_CRYPTO_RNG 17
+#define SCMI_SRST_A_CRYPTO 18
+#define SCMI_SRST_H_CRYPTO 19
+#define SCMI_SRST_KEYLADDER_CORE 25
+#define SCMI_SRST_KEYLADDER_RNG 26
+#define SCMI_SRST_A_KEYLADDER 27
+#define SCMI_SRST_H_KEYLADDER 28
+#define SCMI_SRST_P_OTPC_S 29
+#define SCMI_SRST_OTPC_S 30
+#define SCMI_SRST_WDT_S 31
+/* Name=SECURE_SOFTRST_CON02,Offset=0xA08 */
+#define SCMI_SRST_T_WDT_S 32
+#define SCMI_SRST_H_BOOTROM 33
+#define SCMI_SRST_A_DCF 34
+#define SCMI_SRST_P_DCF 35
+#define SCMI_SRST_H_BOOTROM_NS 37
+#define SCMI_SRST_P_KEYLADDER 46
+#define SCMI_SRST_H_TRNG_S 47
+/* Name=SECURE_SOFTRST_CON03,Offset=0xA0C */
+#define SCMI_SRST_H_TRNG_NS 48
+#define SCMI_SRST_D_SDMMC_BUFFER 49
+#define SCMI_SRST_H_SDMMC 50
+#define SCMI_SRST_H_SDMMC_BUFFER 51
+#define SCMI_SRST_SDMMC 52
+#define SCMI_SRST_P_TRNG_CHK 53
+#define SCMI_SRST_TRNG_S 54
+
+
#endif
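The SCMI_SRST_* IDs added above are indices into the SCMI reset-domain protocol exposed by the secure firmware, not into the CRU itself, so they are referenced through an SCMI protocol node. A minimal sketch, assuming the standard ARM SCMI binding (the reset protocol is 0x16); the labels and the consumer node are illustrative:

        firmware {
                scmi {
                        scmi_reset: protocol@16 {
                                reg = <0x16>;
                                #reset-cells = <1>;
                        };
                };
        };

        /* Consumer: the cell is one of the SCMI_SRST_* values above. */
        mmc@fe2c0000 {
                resets = <&scmi_reset SCMI_SRST_H_SDMMC>;
        };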
diff --git a/include/dt-bindings/reset/rockchip,rv1126b-cru.h b/include/dt-bindings/reset/rockchip,rv1126b-cru.h
new file mode 100644
index 000000000000..a7712db319d0
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rv1126b-cru.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RV1126B_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RV1126B_H
+
+/* ========================= List of all reset field IDs ========================= */
+/* TOPCRU-->SOFTRST_CON00 */
+
+/* TOPCRU-->SOFTRST_CON15 */
+#define SRST_P_CRU 0
+#define SRST_P_CRU_BIU 1
+
+/* BUSCRU-->SOFTRST_CON00 */
+#define SRST_A_TOP_BIU 2
+#define SRST_A_RKCE_BIU 3
+#define SRST_A_BUS_BIU 4
+#define SRST_H_BUS_BIU 5
+#define SRST_P_BUS_BIU 6
+#define SRST_P_CRU_BUS 7
+#define SRST_P_SYS_GRF 8
+#define SRST_H_BOOTROM 9
+#define SRST_A_GIC400 10
+#define SRST_A_SPINLOCK 11
+#define SRST_P_WDT_NS 12
+#define SRST_T_WDT_NS 13
+
+/* BUSCRU-->SOFTRST_CON01 */
+#define SRST_P_WDT_HPMCU 14
+#define SRST_T_WDT_HPMCU 15
+#define SRST_H_CACHE 16
+#define SRST_P_HPMCU_MAILBOX 17
+#define SRST_P_HPMCU_INTMUX 18
+#define SRST_HPMCU_FULL_CLUSTER 19
+#define SRST_HPMCU_PWUP 20
+#define SRST_HPMCU_ONLY_CORE 21
+#define SRST_T_HPMCU_JTAG 22
+#define SRST_P_RKDMA 23
+#define SRST_A_RKDMA 24
+
+/* BUSCRU-->SOFTRST_CON02 */
+#define SRST_P_DCF 25
+#define SRST_A_DCF 26
+#define SRST_H_RGA 27
+#define SRST_A_RGA 28
+#define SRST_CORE_RGA 29
+#define SRST_P_TIMER 30
+#define SRST_TIMER0 31
+#define SRST_TIMER1 32
+#define SRST_TIMER2 33
+#define SRST_TIMER3 34
+#define SRST_TIMER4 35
+#define SRST_TIMER5 36
+#define SRST_A_RKCE 37
+#define SRST_PKA_RKCE 38
+#define SRST_H_RKRNG_S 39
+#define SRST_H_RKRNG_NS 40
+
+/* BUSCRU-->SOFTRST_CON03 */
+#define SRST_P_I2C0 41
+#define SRST_I2C0 42
+#define SRST_P_I2C1 43
+#define SRST_I2C1 44
+#define SRST_P_I2C3 45
+#define SRST_I2C3 46
+#define SRST_P_I2C4 47
+#define SRST_I2C4 48
+#define SRST_P_I2C5 49
+#define SRST_I2C5 50
+#define SRST_P_SPI0 51
+#define SRST_SPI0 52
+#define SRST_P_SPI1 53
+#define SRST_SPI1 54
+
+/* BUSCRU-->SOFTRST_CON04 */
+#define SRST_P_PWM0 55
+#define SRST_PWM0 56
+#define SRST_P_PWM2 57
+#define SRST_PWM2 58
+#define SRST_P_PWM3 59
+#define SRST_PWM3 60
+
+/* BUSCRU-->SOFTRST_CON05 */
+#define SRST_P_UART1 61
+#define SRST_S_UART1 62
+#define SRST_P_UART2 63
+#define SRST_S_UART2 64
+#define SRST_P_UART3 65
+#define SRST_S_UART3 66
+#define SRST_P_UART4 67
+#define SRST_S_UART4 68
+#define SRST_P_UART5 69
+#define SRST_S_UART5 70
+#define SRST_P_UART6 71
+#define SRST_S_UART6 72
+#define SRST_P_UART7 73
+#define SRST_S_UART7 74
+
+/* BUSCRU-->SOFTRST_CON06 */
+#define SRST_P_TSADC 75
+#define SRST_TSADC 76
+#define SRST_H_SAI0 77
+#define SRST_M_SAI0 78
+#define SRST_H_SAI1 79
+#define SRST_M_SAI1 80
+#define SRST_H_SAI2 81
+#define SRST_M_SAI2 82
+#define SRST_H_RKDSM 83
+#define SRST_M_RKDSM 84
+#define SRST_H_PDM 85
+#define SRST_M_PDM 86
+#define SRST_PDM 87
+
+/* BUSCRU-->SOFTRST_CON07 */
+#define SRST_H_ASRC0 88
+#define SRST_ASRC0 89
+#define SRST_H_ASRC1 90
+#define SRST_ASRC1 91
+#define SRST_P_AUDIO_ADC_BUS 92
+#define SRST_M_AUDIO_ADC_BUS 93
+#define SRST_P_RKCE 94
+#define SRST_H_NS_RKCE 95
+#define SRST_P_OTPC_NS 96
+#define SRST_SBPI_OTPC_NS 97
+#define SRST_USER_OTPC_NS 98
+#define SRST_OTPC_ARB 99
+#define SRST_P_OTP_MASK 100
+
+/* PERICRU-->SOFTRST_CON00 */
+#define SRST_A_PERI_BIU 101
+#define SRST_P_PERI_BIU 102
+#define SRST_P_RTC_BIU 103
+#define SRST_P_CRU_PERI 104
+#define SRST_P_PERI_GRF 105
+#define SRST_P_GPIO1 106
+#define SRST_DB_GPIO1 107
+#define SRST_P_IOC_VCCIO1 108
+#define SRST_A_USB3OTG 109
+#define SRST_H_USB2HOST 110
+#define SRST_H_ARB_USB2HOST 111
+#define SRST_P_RTC_TEST 112
+
+/* PERICRU-->SOFTRST_CON01 */
+#define SRST_H_EMMC 113
+#define SRST_H_FSPI0 114
+#define SRST_H_XIP_FSPI0 115
+#define SRST_S_2X_FSPI0 116
+#define SRST_UTMI_USB2HOST 117
+#define SRST_REF_PIPEPHY 118
+#define SRST_P_PIPEPHY 119
+#define SRST_P_PIPEPHY_GRF 120
+#define SRST_P_USB2PHY 121
+#define SRST_POR_USB2PHY 122
+#define SRST_OTG_USB2PHY 123
+#define SRST_HOST_USB2PHY 124
+
+/* CORECRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_CORE 125
+#define SRST_NCOREPORESET0 126
+#define SRST_NCORESET0 127
+#define SRST_NCOREPORESET1 128
+#define SRST_NCORESET1 129
+#define SRST_NCOREPORESET2 130
+#define SRST_NCORESET2 131
+#define SRST_NCOREPORESET3 132
+#define SRST_NCORESET3 133
+#define SRST_NDBGRESET 134
+#define SRST_NL2RESET 135
+
+/* CORECRU-->SOFTRST_CON01 */
+#define SRST_A_CORE_BIU 136
+#define SRST_P_CORE_BIU 137
+#define SRST_H_CORE_BIU 138
+#define SRST_P_DBG 139
+#define SRST_POT_DBG 140
+#define SRST_NT_DBG 141
+#define SRST_P_CORE_PVTPLL 142
+#define SRST_P_CRU_CORE 143
+#define SRST_P_CORE_GRF 144
+#define SRST_P_DFT2APB 145
+
+/* PMUCRU-->SOFTRST_CON00 */
+#define SRST_H_PMU_BIU 146
+#define SRST_P_PMU_GPIO0 147
+#define SRST_DB_PMU_GPIO0 148
+#define SRST_P_PMU_HP_TIMER 149
+#define SRST_PMU_HP_TIMER 150
+#define SRST_PMU_32K_HP_TIMER 151
+
+/* PMUCRU-->SOFTRST_CON01 */
+#define SRST_P_PWM1 152
+#define SRST_PWM1 153
+#define SRST_P_I2C2 154
+#define SRST_I2C2 155
+#define SRST_P_UART0 156
+#define SRST_S_UART0 157
+
+/* PMUCRU-->SOFTRST_CON02 */
+#define SRST_P_RCOSC_CTRL 158
+#define SRST_REF_RCOSC_CTRL 159
+#define SRST_P_IOC_PMUIO0 160
+#define SRST_P_CRU_PMU 161
+#define SRST_P_PMU_GRF 162
+#define SRST_PREROLL 163
+#define SRST_PREROLL_32K 164
+#define SRST_H_PMU_SRAM 165
+
+/* PMUCRU-->SOFTRST_CON03 */
+#define SRST_P_WDT_LPMCU 166
+#define SRST_T_WDT_LPMCU 167
+#define SRST_LPMCU_FULL_CLUSTER 168
+#define SRST_LPMCU_PWUP 169
+#define SRST_LPMCU_ONLY_CORE 170
+#define SRST_T_LPMCU_JTAG 171
+#define SRST_P_LPMCU_MAILBOX 172
+
+/* PMU1CRU-->SOFTRST_CON00 */
+#define SRST_P_SPI2AHB 173
+#define SRST_H_SPI2AHB 174
+#define SRST_H_FSPI1 175
+#define SRST_H_XIP_FSPI1 176
+#define SRST_S_1X_FSPI1 177
+#define SRST_P_IOC_PMUIO1 178
+#define SRST_P_CRU_PMU1 179
+#define SRST_P_AUDIO_ADC_PMU 180
+#define SRST_M_AUDIO_ADC_PMU 181
+#define SRST_H_PMU1_BIU 182
+
+/* PMU1CRU-->SOFTRST_CON01 */
+#define SRST_P_LPDMA 183
+#define SRST_A_LPDMA 184
+#define SRST_H_LPSAI 185
+#define SRST_M_LPSAI 186
+#define SRST_P_AOA_TDD 187
+#define SRST_P_AOA_FE 188
+#define SRST_P_AOA_AAD 189
+#define SRST_P_AOA_APB 190
+#define SRST_P_AOA_SRAM 191
+
+/* DDRCRU-->SOFTRST_CON00 */
+#define SRST_P_DDR_BIU 192
+#define SRST_P_DDRC 193
+#define SRST_P_DDRMON 194
+#define SRST_TIMER_DDRMON 195
+#define SRST_P_DFICTRL 196
+#define SRST_P_DDR_GRF 197
+#define SRST_P_CRU_DDR 198
+#define SRST_P_DDRPHY 199
+#define SRST_P_DMA2DDR 200
+
+/* SUBDDRCRU-->SOFTRST_CON00 */
+#define SRST_A_SYSMEM_BIU 201
+#define SRST_A_SYSMEM 202
+#define SRST_A_DDR_BIU 203
+#define SRST_A_DDRSCH0_CPU 204
+#define SRST_A_DDRSCH1_NPU 205
+#define SRST_A_DDRSCH2_POE 206
+#define SRST_A_DDRSCH3_VI 207
+#define SRST_CORE_DDRC 208
+#define SRST_DDRMON 209
+#define SRST_DFICTRL 210
+#define SRST_RS 211
+#define SRST_A_DMA2DDR 212
+#define SRST_DDRPHY 213
+
+/* VICRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_ISP 214
+#define SRST_A_GMAC_BIU 215
+#define SRST_A_VI_BIU 216
+#define SRST_H_VI_BIU 217
+#define SRST_P_VI_BIU 218
+#define SRST_P_CRU_VI 219
+#define SRST_P_VI_GRF 220
+#define SRST_P_VI_PVTPLL 221
+#define SRST_P_DSMC 222
+#define SRST_A_DSMC 223
+#define SRST_H_CAN0 224
+#define SRST_CAN0 225
+#define SRST_H_CAN1 226
+#define SRST_CAN1 227
+
+/* VICRU-->SOFTRST_CON01 */
+#define SRST_P_GPIO2 228
+#define SRST_DB_GPIO2 229
+#define SRST_P_GPIO4 230
+#define SRST_DB_GPIO4 231
+#define SRST_P_GPIO5 232
+#define SRST_DB_GPIO5 233
+#define SRST_P_GPIO6 234
+#define SRST_DB_GPIO6 235
+#define SRST_P_GPIO7 236
+#define SRST_DB_GPIO7 237
+#define SRST_P_IOC_VCCIO2 238
+#define SRST_P_IOC_VCCIO4 239
+#define SRST_P_IOC_VCCIO5 240
+#define SRST_P_IOC_VCCIO6 241
+#define SRST_P_IOC_VCCIO7 242
+
+/* VICRU-->SOFTRST_CON02 */
+#define SRST_CORE_ISP 243
+#define SRST_H_VICAP 244
+#define SRST_A_VICAP 245
+#define SRST_D_VICAP 246
+#define SRST_ISP0_VICAP 247
+#define SRST_CORE_VPSS 248
+#define SRST_CORE_VPSL 249
+#define SRST_P_CSI2HOST0 250
+#define SRST_P_CSI2HOST1 251
+#define SRST_P_CSI2HOST2 252
+#define SRST_P_CSI2HOST3 253
+#define SRST_H_SDMMC0 254
+#define SRST_A_GMAC 255
+#define SRST_P_CSIPHY0 256
+#define SRST_P_CSIPHY1 257
+
+/* VICRU-->SOFTRST_CON03 */
+#define SRST_P_MACPHY 258
+#define SRST_MACPHY 259
+#define SRST_P_SARADC1 260
+#define SRST_SARADC1 261
+#define SRST_P_SARADC2 262
+#define SRST_SARADC2 263
+
+/* VEPUCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_VEPU 264
+#define SRST_A_VEPU_BIU 265
+#define SRST_H_VEPU_BIU 266
+#define SRST_P_VEPU_BIU 267
+#define SRST_P_CRU_VEPU 268
+#define SRST_P_VEPU_GRF 269
+#define SRST_P_GPIO3 270
+#define SRST_DB_GPIO3 271
+#define SRST_P_IOC_VCCIO3 272
+#define SRST_P_SARADC0 273
+#define SRST_SARADC0 274
+#define SRST_H_SDMMC1 275
+
+/* VEPUCRU-->SOFTRST_CON01 */
+#define SRST_P_VEPU_PVTPLL 276
+#define SRST_H_VEPU 277
+#define SRST_A_VEPU 278
+#define SRST_CORE_VEPU 279
+
+/* NPUCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_NPU 280
+#define SRST_A_NPU_BIU 281
+#define SRST_H_NPU_BIU 282
+#define SRST_P_NPU_BIU 283
+#define SRST_P_CRU_NPU 284
+#define SRST_P_NPU_GRF 285
+#define SRST_P_NPU_PVTPLL 286
+#define SRST_H_RKNN 287
+#define SRST_A_RKNN 288
+
+/* VDOCRU-->SOFTRST_CON00 */
+#define SRST_A_RKVDEC_BIU 289
+#define SRST_A_VDO_BIU 290
+#define SRST_H_VDO_BIU 291
+#define SRST_P_VDO_BIU 292
+#define SRST_P_CRU_VDO 293
+#define SRST_P_VDO_GRF 294
+#define SRST_A_RKVDEC 295
+#define SRST_H_RKVDEC 296
+#define SRST_HEVC_CA_RKVDEC 297
+#define SRST_A_VOP 298
+#define SRST_H_VOP 299
+#define SRST_D_VOP 300
+#define SRST_A_OOC 301
+#define SRST_H_OOC 302
+#define SRST_D_OOC 303
+
+/* VDOCRU-->SOFTRST_CON01 */
+#define SRST_H_RKJPEG 304
+#define SRST_A_RKJPEG 305
+#define SRST_A_RKMMU_DECOM 306
+#define SRST_H_RKMMU_DECOM 307
+#define SRST_D_DECOM 308
+#define SRST_A_DECOM 309
+#define SRST_P_DECOM 310
+#define SRST_P_MIPI_DSI 311
+#define SRST_P_DSIPHY 312
+
+/* VCPCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_VCP 313
+#define SRST_A_VCP_BIU 314
+#define SRST_H_VCP_BIU 315
+#define SRST_P_VCP_BIU 316
+#define SRST_P_CRU_VCP 317
+#define SRST_P_VCP_GRF 318
+#define SRST_P_VCP_PVTPLL 319
+#define SRST_A_AISP_BIU 320
+#define SRST_H_AISP_BIU 321
+#define SRST_CORE_AISP 322
+
+/* VCPCRU-->SOFTRST_CON01 */
+#define SRST_H_FEC 323
+#define SRST_A_FEC 324
+#define SRST_CORE_FEC 325
+#define SRST_H_AVSP 326
+#define SRST_A_AVSP 327
+
+#endif
diff --git a/include/dt-bindings/reset/st,stm32mp21-rcc.h b/include/dt-bindings/reset/st,stm32mp21-rcc.h
new file mode 100644
index 000000000000..6463bd73d025
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp21-rcc.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP21_RESET_H_
+#define _DT_BINDINGS_STM32MP21_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define LPTIM1_R 16
+#define LPTIM2_R 17
+#define LPTIM3_R 18
+#define LPTIM4_R 19
+#define LPTIM5_R 20
+#define SPI1_R 21
+#define SPI2_R 22
+#define SPI3_R 23
+#define SPI4_R 24
+#define SPI5_R 25
+#define SPI6_R 26
+#define SPDIFRX_R 27
+#define USART1_R 28
+#define USART2_R 29
+#define USART3_R 30
+#define UART4_R 31
+#define UART5_R 32
+#define USART6_R 33
+#define UART7_R 34
+#define LPUART1_R 35
+#define I2C1_R 36
+#define I2C2_R 37
+#define I2C3_R 38
+#define SAI1_R 39
+#define SAI2_R 40
+#define SAI3_R 41
+#define SAI4_R 42
+#define MDF1_R 43
+#define FDCAN_R 44
+#define HDP_R 45
+#define ADC1_R 46
+#define ADC2_R 47
+#define ETH1_R 48
+#define ETH2_R 49
+#define USBH_R 50
+#define USB2PHY1_R 51
+#define USB2PHY2_R 52
+#define SDMMC1_R 53
+#define SDMMC1DLL_R 54
+#define SDMMC2_R 55
+#define SDMMC2DLL_R 56
+#define SDMMC3_R 57
+#define SDMMC3DLL_R 58
+#define LTDC_R 59
+#define CSI_R 60
+#define DCMIPP_R 61
+#define DCMIPSSI_R 62
+#define WWDG1_R 63
+#define VREF_R 64
+#define DTS_R 65
+#define CRC_R 66
+#define SERC_R 67
+#define I3C1_R 68
+#define I3C2_R 69
+#define I3C3_R 70
+#define IWDG2_KER_R 71
+#define IWDG4_KER_R 72
+#define RNG1_R 73
+#define RNG2_R 74
+#define PKA_R 75
+#define SAES_R 76
+#define HASH1_R 77
+#define HASH2_R 78
+#define CRYP1_R 79
+#define CRYP2_R 80
+#define OSPI1_R 81
+#define OSPI1DLL_R 82
+#define OTG_R 83
+#define FMC_R 84
+#define DBG_R 85
+#define GPIOA_R 86
+#define GPIOB_R 87
+#define GPIOC_R 88
+#define GPIOD_R 89
+#define GPIOE_R 90
+#define GPIOF_R 91
+#define GPIOG_R 92
+#define GPIOH_R 93
+#define GPIOI_R 94
+#define GPIOZ_R 95
+#define HPDMA1_R 96
+#define HPDMA2_R 97
+#define HPDMA3_R 98
+#define IPCC1_R 99
+#define C2_HOLDBOOT_R 100
+#define C1_HOLDBOOT_R 101
+#define C1_R 102
+#define C1P1POR_R 103
+#define C1P1_R 104
+#define C2_R 105
+#define SYS_R 106
+#define VSW_R 107
+#define C1MS_R 108
+#define DDRCP_R 109
+#define DDRCAPB_R 110
+#define DDRPHYCAPB_R 111
+#define DDRCFG_R 112
+#define DDR_R 113
+#define DDRPERFM_R 114
+#define IWDG1_SYS_R 116
+#define IWDG2_SYS_R 117
+#define IWDG3_SYS_R 118
+#define IWDG4_SYS_R 119
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+
+#endif /* _DT_BINDINGS_STM32MP21_RESET_H_ */
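Note that this header carries two independent index spaces: the *_R IDs address the RCC reset controller directly, while the RST_SCMI_* IDs address reset domains proxied through SCMI firmware. A minimal sketch of one consumer of each; the node names, addresses, and the &scmi_reset label are illustrative:

        serial@40330000 {
                resets = <&rcc USART1_R>;
        };

        spi@40430000 {
                resets = <&scmi_reset RST_SCMI_OSPI1>;
        };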
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
index 1bd8bb0a11be..ba626f7015b5 100644
--- a/include/dt-bindings/reset/sun50i-h616-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -66,5 +66,9 @@
#define RST_BUS_TVE0 57
#define RST_BUS_HDCP 58
#define RST_BUS_KEYADC 59
+#define RST_BUS_GPADC 60
+#define RST_BUS_TCON_LCD0 61
+#define RST_BUS_TCON_LCD1 62
+#define RST_BUS_LVDS 63
#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-ccu.h b/include/dt-bindings/reset/sun55i-a523-ccu.h
new file mode 100644
index 000000000000..70df503f34fe
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-ccu.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_NSI 1
+#define RST_BUS_DE 2
+#define RST_BUS_DI 3
+#define RST_BUS_G2D 4
+#define RST_BUS_SYS 5
+#define RST_BUS_GPU 6
+#define RST_BUS_CE 7
+#define RST_BUS_SYS_CE 8
+#define RST_BUS_VE 9
+#define RST_BUS_DMA 10
+#define RST_BUS_MSGBOX 11
+#define RST_BUS_SPINLOCK 12
+#define RST_BUS_CPUXTIMER 13
+#define RST_BUS_DBG 14
+#define RST_BUS_PWM0 15
+#define RST_BUS_PWM1 16
+#define RST_BUS_DRAM 17
+#define RST_BUS_NAND 18
+#define RST_BUS_MMC0 19
+#define RST_BUS_MMC1 20
+#define RST_BUS_MMC2 21
+#define RST_BUS_SYSDAP 22
+#define RST_BUS_UART0 23
+#define RST_BUS_UART1 24
+#define RST_BUS_UART2 25
+#define RST_BUS_UART3 26
+#define RST_BUS_UART4 27
+#define RST_BUS_UART5 28
+#define RST_BUS_UART6 29
+#define RST_BUS_UART7 30
+#define RST_BUS_I2C0 31
+#define RST_BUS_I2C1 32
+#define RST_BUS_I2C2 33
+#define RST_BUS_I2C3 34
+#define RST_BUS_I2C4 35
+#define RST_BUS_I2C5 36
+#define RST_BUS_CAN 37
+#define RST_BUS_SPI0 38
+#define RST_BUS_SPI1 39
+#define RST_BUS_SPI2 40
+#define RST_BUS_SPIFC 41
+#define RST_BUS_EMAC0 42
+#define RST_BUS_EMAC1 43
+#define RST_BUS_IR_RX 44
+#define RST_BUS_IR_TX 45
+#define RST_BUS_GPADC0 46
+#define RST_BUS_GPADC1 47
+#define RST_BUS_THS 48
+#define RST_USB_PHY0 49
+#define RST_USB_PHY1 50
+#define RST_BUS_OHCI0 51
+#define RST_BUS_OHCI1 52
+#define RST_BUS_EHCI0 53
+#define RST_BUS_EHCI1 54
+#define RST_BUS_OTG 55
+#define RST_BUS_3 56
+#define RST_BUS_LRADC 57
+#define RST_BUS_PCIE_USB3 58
+#define RST_BUS_DISPLAY0_TOP 59
+#define RST_BUS_DISPLAY1_TOP 60
+#define RST_BUS_HDMI_MAIN 61
+#define RST_BUS_HDMI_SUB 62
+#define RST_BUS_MIPI_DSI0 63
+#define RST_BUS_MIPI_DSI1 64
+#define RST_BUS_TCON_LCD0 65
+#define RST_BUS_TCON_LCD1 66
+#define RST_BUS_TCON_LCD2 67
+#define RST_BUS_TCON_TV0 68
+#define RST_BUS_TCON_TV1 69
+#define RST_BUS_LVDS0 70
+#define RST_BUS_LVDS1 71
+#define RST_BUS_EDP 72
+#define RST_BUS_VIDEO_OUT0 73
+#define RST_BUS_VIDEO_OUT1 74
+#define RST_BUS_LEDC 75
+#define RST_BUS_CSI 76
+#define RST_BUS_ISP 77
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..a89a0b44f08b
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+
+#define RST_BUS_MCU_I2S0 0
+#define RST_BUS_MCU_I2S1 1
+#define RST_BUS_MCU_I2S2 2
+#define RST_BUS_MCU_I2S3 3
+#define RST_BUS_MCU_SPDIF 4
+#define RST_BUS_MCU_DMIC 5
+#define RST_BUS_MCU_AUDIO_CODEC 6
+#define RST_BUS_MCU_DSP_MSGBOX 7
+#define RST_BUS_MCU_DSP_CFG 8
+#define RST_BUS_MCU_NPU 9
+#define RST_BUS_MCU_TIMER 10
+#define RST_BUS_MCU_DSP_DEBUG 11
+#define RST_BUS_MCU_DSP 12
+#define RST_BUS_MCU_DMA 13
+#define RST_BUS_MCU_PUBSRAM 14
+#define RST_BUS_MCU_RISCV_CFG 15
+#define RST_BUS_MCU_RISCV_DEBUG 16
+#define RST_BUS_MCU_RISCV_CORE 17
+#define RST_BUS_MCU_RISCV_MSGBOX 18
+#define RST_BUS_MCU_PWM0 19
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-r-ccu.h b/include/dt-bindings/reset/sun55i-a523-r-ccu.h
new file mode 100644
index 000000000000..eb31ae9958d6
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-r-ccu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PWMCTRL 2
+#define RST_BUS_R_SPI 3
+#define RST_BUS_R_SPINLOCK 4
+#define RST_BUS_R_MSGBOX 5
+#define RST_BUS_R_UART0 6
+#define RST_BUS_R_UART1 7
+#define RST_BUS_R_I2C0 8
+#define RST_BUS_R_I2C1 9
+#define RST_BUS_R_I2C2 10
+#define RST_BUS_R_PPU1 11
+#define RST_BUS_R_IR_RX 12
+#define RST_BUS_R_RTC 13
+#define RST_BUS_R_CPUCFG 14
+#define RST_BUS_R_PPU0 15
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/thead,th1520-reset.h b/include/dt-bindings/reset/thead,th1520-reset.h
new file mode 100644
index 000000000000..ba6805b6b12a
--- /dev/null
+++ b/include/dt-bindings/reset/thead,th1520-reset.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#ifndef _DT_BINDINGS_TH1520_RESET_H
+#define _DT_BINDINGS_TH1520_RESET_H
+
+/* AO Subsystem */
+#define TH1520_RESET_ID_SYSTEM 0
+#define TH1520_RESET_ID_RTC_APB 1
+#define TH1520_RESET_ID_RTC_REF 2
+#define TH1520_RESET_ID_AOGPIO_DB 3
+#define TH1520_RESET_ID_AOGPIO_APB 4
+#define TH1520_RESET_ID_AOI2C_APB 5
+#define TH1520_RESET_ID_PVT_APB 6
+#define TH1520_RESET_ID_E902_CORE 7
+#define TH1520_RESET_ID_E902_HAD 8
+#define TH1520_RESET_ID_AOTIMER_APB 9
+#define TH1520_RESET_ID_AOTIMER_CORE 10
+#define TH1520_RESET_ID_AOWDT_APB 11
+#define TH1520_RESET_ID_APSYS 12
+#define TH1520_RESET_ID_NPUSYS 13
+#define TH1520_RESET_ID_DDRSYS 14
+#define TH1520_RESET_ID_AXI_AP2CP 15
+#define TH1520_RESET_ID_AXI_CP2AP 16
+#define TH1520_RESET_ID_AXI_CP2SRAM 17
+#define TH1520_RESET_ID_AUDSYS_CORE 18
+#define TH1520_RESET_ID_AUDSYS_IOPMP 19
+#define TH1520_RESET_ID_AUDSYS 20
+#define TH1520_RESET_ID_DSP0 21
+#define TH1520_RESET_ID_DSP1 22
+#define TH1520_RESET_ID_GPU_MODULE 23
+#define TH1520_RESET_ID_VDEC 24
+#define TH1520_RESET_ID_VENC 25
+#define TH1520_RESET_ID_ADC_APB 26
+#define TH1520_RESET_ID_AUDGPIO_DB 27
+#define TH1520_RESET_ID_AUDGPIO_APB 28
+#define TH1520_RESET_ID_AOUART_IF 29
+#define TH1520_RESET_ID_AOUART_APB 30
+#define TH1520_RESET_ID_SRAM_AXI_P0 31
+#define TH1520_RESET_ID_SRAM_AXI_P1 32
+#define TH1520_RESET_ID_SRAM_AXI_P2 33
+#define TH1520_RESET_ID_SRAM_AXI_P3 34
+#define TH1520_RESET_ID_SRAM_AXI_P4 35
+#define TH1520_RESET_ID_SRAM_AXI_CORE 36
+#define TH1520_RESET_ID_SE 37
+
+/* AP Subsystem */
+#define TH1520_RESET_ID_BROM 0
+#define TH1520_RESET_ID_C910_TOP 1
+#define TH1520_RESET_ID_NPU 2
+#define TH1520_RESET_ID_WDT0 3
+#define TH1520_RESET_ID_WDT1 4
+#define TH1520_RESET_ID_C910_C0 5
+#define TH1520_RESET_ID_C910_C1 6
+#define TH1520_RESET_ID_C910_C2 7
+#define TH1520_RESET_ID_C910_C3 8
+#define TH1520_RESET_ID_CHIP_DBG_CORE 9
+#define TH1520_RESET_ID_CHIP_DBG_AXI 10
+#define TH1520_RESET_ID_AXI4_CPUSYS2_AXI 11
+#define TH1520_RESET_ID_AXI4_CPUSYS2_APB 12
+#define TH1520_RESET_ID_X2H_CPUSYS 13
+#define TH1520_RESET_ID_AHB2_CPUSYS 14
+#define TH1520_RESET_ID_APB3_CPUSYS 15
+#define TH1520_RESET_ID_MBOX0_APB 16
+#define TH1520_RESET_ID_MBOX1_APB 17
+#define TH1520_RESET_ID_MBOX2_APB 18
+#define TH1520_RESET_ID_MBOX3_APB 19
+#define TH1520_RESET_ID_TIMER0_APB 20
+#define TH1520_RESET_ID_TIMER0_CORE 21
+#define TH1520_RESET_ID_TIMER1_APB 22
+#define TH1520_RESET_ID_TIMER1_CORE 23
+#define TH1520_RESET_ID_PERISYS_AHB 24
+#define TH1520_RESET_ID_PERISYS_APB1 25
+#define TH1520_RESET_ID_PERISYS_APB2 26
+#define TH1520_RESET_ID_GMAC0_APB 27
+#define TH1520_RESET_ID_GMAC0_AHB 28
+#define TH1520_RESET_ID_GMAC0_CLKGEN 29
+#define TH1520_RESET_ID_GMAC0_AXI 30
+#define TH1520_RESET_ID_UART0_APB 31
+#define TH1520_RESET_ID_UART0_IF 32
+#define TH1520_RESET_ID_UART1_APB 33
+#define TH1520_RESET_ID_UART1_IF 34
+#define TH1520_RESET_ID_UART2_APB 35
+#define TH1520_RESET_ID_UART2_IF 36
+#define TH1520_RESET_ID_UART3_APB 37
+#define TH1520_RESET_ID_UART3_IF 38
+#define TH1520_RESET_ID_UART4_APB 39
+#define TH1520_RESET_ID_UART4_IF 40
+#define TH1520_RESET_ID_UART5_APB 41
+#define TH1520_RESET_ID_UART5_IF 42
+#define TH1520_RESET_ID_QSPI0_IF 43
+#define TH1520_RESET_ID_QSPI0_APB 44
+#define TH1520_RESET_ID_QSPI1_IF 45
+#define TH1520_RESET_ID_QSPI1_APB 46
+#define TH1520_RESET_ID_SPI_IF 47
+#define TH1520_RESET_ID_SPI_APB 48
+#define TH1520_RESET_ID_I2C0_APB 49
+#define TH1520_RESET_ID_I2C0_CORE 50
+#define TH1520_RESET_ID_I2C1_APB 51
+#define TH1520_RESET_ID_I2C1_CORE 52
+#define TH1520_RESET_ID_I2C2_APB 53
+#define TH1520_RESET_ID_I2C2_CORE 54
+#define TH1520_RESET_ID_I2C3_APB 55
+#define TH1520_RESET_ID_I2C3_CORE 56
+#define TH1520_RESET_ID_I2C4_APB 57
+#define TH1520_RESET_ID_I2C4_CORE 58
+#define TH1520_RESET_ID_I2C5_APB 59
+#define TH1520_RESET_ID_I2C5_CORE 60
+#define TH1520_RESET_ID_GPIO0_DB 61
+#define TH1520_RESET_ID_GPIO0_APB 62
+#define TH1520_RESET_ID_GPIO1_DB 63
+#define TH1520_RESET_ID_GPIO1_APB 64
+#define TH1520_RESET_ID_GPIO2_DB 65
+#define TH1520_RESET_ID_GPIO2_APB 66
+#define TH1520_RESET_ID_PWM_COUNTER 67
+#define TH1520_RESET_ID_PWM_APB 68
+#define TH1520_RESET_ID_PADCTRL0_APB 69
+#define TH1520_RESET_ID_CPU2PERI_X2H 70
+#define TH1520_RESET_ID_CPU2AON_X2H 71
+#define TH1520_RESET_ID_AON2CPU_A2X 72
+#define TH1520_RESET_ID_NPUSYS_AXI 73
+#define TH1520_RESET_ID_NPUSYS_AXI_APB 74
+#define TH1520_RESET_ID_CPU2VP_X2P 75
+#define TH1520_RESET_ID_CPU2VI_X2H 76
+#define TH1520_RESET_ID_BMU_AXI 77
+#define TH1520_RESET_ID_BMU_APB 78
+#define TH1520_RESET_ID_DMAC_CPUSYS_AXI 79
+#define TH1520_RESET_ID_DMAC_CPUSYS_AHB 80
+#define TH1520_RESET_ID_SPINLOCK 81
+#define TH1520_RESET_ID_CFG2TEE 82
+#define TH1520_RESET_ID_DSMART 83
+#define TH1520_RESET_ID_GPIO3_DB 84
+#define TH1520_RESET_ID_GPIO3_APB 85
+#define TH1520_RESET_ID_PERI_I2S 86
+#define TH1520_RESET_ID_PERI_APB3 87
+#define TH1520_RESET_ID_PERI2PERI1_APB 88
+#define TH1520_RESET_ID_VPSYS_APB 89
+#define TH1520_RESET_ID_PERISYS_APB4 90
+#define TH1520_RESET_ID_GMAC1_APB 91
+#define TH1520_RESET_ID_GMAC1_AHB 92
+#define TH1520_RESET_ID_GMAC1_CLKGEN 93
+#define TH1520_RESET_ID_GMAC1_AXI 94
+#define TH1520_RESET_ID_GMAC_AXI 95
+#define TH1520_RESET_ID_GMAC_AXI_APB 96
+#define TH1520_RESET_ID_PADCTRL1_APB 97
+#define TH1520_RESET_ID_VOSYS_AXI 98
+#define TH1520_RESET_ID_VOSYS_AXI_APB 99
+#define TH1520_RESET_ID_VOSYS_AXI_X2X 100
+#define TH1520_RESET_ID_MISC2VP_X2X 101
+#define TH1520_RESET_ID_DSPSYS 102
+#define TH1520_RESET_ID_VISYS 103
+#define TH1520_RESET_ID_VOSYS 104
+#define TH1520_RESET_ID_VPSYS 105
+
+/* DSP Subsystem */
+#define TH1520_RESET_ID_X2X_DSP1 0
+#define TH1520_RESET_ID_X2X_DSP0 1
+#define TH1520_RESET_ID_X2X_SLAVE_DSP1 2
+#define TH1520_RESET_ID_X2X_SLAVE_DSP0 3
+#define TH1520_RESET_ID_DSP0_CORE 4
+#define TH1520_RESET_ID_DSP0_DEBUG 5
+#define TH1520_RESET_ID_DSP0_APB 6
+#define TH1520_RESET_ID_DSP1_CORE 7
+#define TH1520_RESET_ID_DSP1_DEBUG 8
+#define TH1520_RESET_ID_DSP1_APB 9
+#define TH1520_RESET_ID_DSPSYS_APB 10
+#define TH1520_RESET_ID_AXI4_DSPSYS_SLV 11
+#define TH1520_RESET_ID_AXI4_DSPSYS 12
+#define TH1520_RESET_ID_AXI4_DSP_RS 13
+
+/* MISC Subsystem */
+#define TH1520_RESET_ID_EMMC_SDIO_CLKGEN 0
+#define TH1520_RESET_ID_EMMC 1
+#define TH1520_RESET_ID_MISCSYS_AXI 2
+#define TH1520_RESET_ID_MISCSYS_AXI_APB 3
+#define TH1520_RESET_ID_SDIO0 4
+#define TH1520_RESET_ID_SDIO1 5
+#define TH1520_RESET_ID_USB3_APB 6
+#define TH1520_RESET_ID_USB3_PHY 7
+#define TH1520_RESET_ID_USB3_VCC 8
+
+/* VI Subsystem */
+#define TH1520_RESET_ID_ISP0 0
+#define TH1520_RESET_ID_ISP1 1
+#define TH1520_RESET_ID_CSI0_APB 2
+#define TH1520_RESET_ID_CSI1_APB 3
+#define TH1520_RESET_ID_CSI2_APB 4
+#define TH1520_RESET_ID_MIPI_FIFO 5
+#define TH1520_RESET_ID_ISP_VENC_APB 6
+#define TH1520_RESET_ID_VIPRE_APB 7
+#define TH1520_RESET_ID_VIPRE_AXI 8
+#define TH1520_RESET_ID_DW200_APB 9
+#define TH1520_RESET_ID_VISYS3_AXI 10
+#define TH1520_RESET_ID_VISYS2_AXI 11
+#define TH1520_RESET_ID_VISYS1_AXI 12
+#define TH1520_RESET_ID_VISYS_AXI 13
+#define TH1520_RESET_ID_VISYS_APB 14
+#define TH1520_RESET_ID_ISP_VENC_AXI 15
+
+/* VO Subsystem */
+#define TH1520_RESET_ID_GPU 0
+#define TH1520_RESET_ID_GPU_CLKGEN 1
+#define TH1520_RESET_ID_DPU_AHB 5
+#define TH1520_RESET_ID_DPU_AXI 6
+#define TH1520_RESET_ID_DPU_CORE 7
+#define TH1520_RESET_ID_DSI0_APB 8
+#define TH1520_RESET_ID_DSI1_APB 9
+#define TH1520_RESET_ID_HDMI 10
+#define TH1520_RESET_ID_HDMI_APB 11
+#define TH1520_RESET_ID_VOAXI 12
+#define TH1520_RESET_ID_VOAXI_APB 13
+#define TH1520_RESET_ID_X2H_DPU_AXI 14
+#define TH1520_RESET_ID_X2H_DPU_AHB 15
+#define TH1520_RESET_ID_X2H_DPU1_AXI 16
+#define TH1520_RESET_ID_X2H_DPU1_AHB 17
+
+/* VP Subsystem */
+#define TH1520_RESET_ID_VPSYS_AXI_APB 0
+#define TH1520_RESET_ID_VPSYS_AXI 1
+#define TH1520_RESET_ID_FCE_APB 2
+#define TH1520_RESET_ID_FCE_CORE 3
+#define TH1520_RESET_ID_FCE_X2X_MASTER 4
+#define TH1520_RESET_ID_FCE_X2X_SLAVE 5
+#define TH1520_RESET_ID_G2D_APB 6
+#define TH1520_RESET_ID_G2D_ACLK 7
+#define TH1520_RESET_ID_G2D_CORE 8
+#define TH1520_RESET_ID_VDEC_APB 9
+#define TH1520_RESET_ID_VDEC_ACLK 10
+#define TH1520_RESET_ID_VDEC_CORE 11
+#define TH1520_RESET_ID_VENC_APB 12
+#define TH1520_RESET_ID_VENC_CORE 13
+
+#endif /* _DT_BINDINGS_TH1520_RESET_H */
diff --git a/include/dt-bindings/reset/toshiba,tmpv770x.h b/include/dt-bindings/reset/toshiba,tmpv770x.h
index c1007acb1941..9452bef31425 100644
--- a/include/dt-bindings/reset/toshiba,tmpv770x.h
+++ b/include/dt-bindings/reset/toshiba,tmpv770x.h
@@ -36,6 +36,13 @@
#define TMPV770X_RESET_PIPCMIF 29
#define TMPV770X_RESET_PICKMON 30
#define TMPV770X_RESET_SBUSCLK 31
-#define TMPV770X_NR_RESET 32
+#define TMPV770X_RESET_VIIFBS0 32
+#define TMPV770X_RESET_VIIFBS0_APB 33
+#define TMPV770X_RESET_VIIFBS0_L2ISP 34
+#define TMPV770X_RESET_VIIFBS0_L1ISP 35
+#define TMPV770X_RESET_VIIFBS1 36
+#define TMPV770X_RESET_VIIFBS1_APB 37
+#define TMPV770X_RESET_VIIFBS1_L2ISP 38
+#define TMPV770X_RESET_VIIFBS1_L1ISP 39
#endif /*_DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/soc/qe-fsl,tsa.h b/include/dt-bindings/soc/qe-fsl,tsa.h
new file mode 100644
index 000000000000..3cf3df9c0968
--- /dev/null
+++ b/include/dt-bindings/soc/qe-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_QE_TSA_H
+#define __DT_BINDINGS_SOC_FSL_QE_TSA_H
+
+#define FSL_QE_TSA_NU 0
+#define FSL_QE_TSA_UCC1 1
+#define FSL_QE_TSA_UCC2 2
+#define FSL_QE_TSA_UCC3 3
+#define FSL_QE_TSA_UCC4 4
+#define FSL_QE_TSA_UCC5 5
+
+#endif
diff --git a/include/dt-bindings/soc/samsung,exynos-usi.h b/include/dt-bindings/soc/samsung,exynos-usi.h
index a01af169d249..b46de214dd09 100644
--- a/include/dt-bindings/soc/samsung,exynos-usi.h
+++ b/include/dt-bindings/soc/samsung,exynos-usi.h
@@ -9,9 +9,18 @@
#ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
#define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
-#define USI_V2_NONE 0
-#define USI_V2_UART 1
-#define USI_V2_SPI 2
-#define USI_V2_I2C 3
+#define USI_MODE_NONE 0
+#define USI_MODE_UART 1
+#define USI_MODE_SPI 2
+#define USI_MODE_I2C 3
+#define USI_MODE_I2C1 4
+#define USI_MODE_I2C0_1 5
+#define USI_MODE_UART_I2C1 6
+
+/* Deprecated */
+#define USI_V2_NONE USI_MODE_NONE
+#define USI_V2_UART USI_MODE_UART
+#define USI_V2_SPI USI_MODE_SPI
+#define USI_V2_I2C USI_MODE_I2C
#endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */
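The USI_MODE_* values select which protocol a USI block exposes, via its samsung,mode property; the USI_V2_* names are kept as aliases so existing device trees keep building. A minimal sketch, with the node name, address, and compatible illustrative (a real node needs the binding's other required properties):

        usi@138200c0 {
                compatible = "samsung,exynos850-usi";
                samsung,mode = <USI_MODE_I2C>;
        };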
diff --git a/include/dt-bindings/sound/audio-graph.h b/include/dt-bindings/sound/audio-graph.h
new file mode 100644
index 000000000000..bdb70c6b7332
--- /dev/null
+++ b/include/dt-bindings/sound/audio-graph.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * audio-graph.h
+ *
+ * Copyright (c) 2024 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __AUDIO_GRAPH_H
+#define __AUDIO_GRAPH_H
+
+/*
+ * used in
+ * link-trigger-order
+ * link-trigger-order-start
+ * link-trigger-order-stop
+ *
+ * default is
+ * link-trigger-order = <SND_SOC_TRIGGER_LINK
+ * SND_SOC_TRIGGER_COMPONENT
+ * SND_SOC_TRIGGER_DAI>;
+ */
+#define SND_SOC_TRIGGER_LINK 0
+#define SND_SOC_TRIGGER_COMPONENT 1
+#define SND_SOC_TRIGGER_DAI 2
+#define SND_SOC_TRIGGER_SIZE 3 /* should be last */
+
+#endif /* __AUDIO_GRAPH_H */
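As the header comment states, these constants populate the link-trigger-order family of properties, and the default order is LINK, COMPONENT, DAI. A minimal sketch overriding that order on a sound card node; everything except the property and the constants is illustrative:

        sound {
                compatible = "audio-graph-card2";
                link-trigger-order = <SND_SOC_TRIGGER_COMPONENT
                                      SND_SOC_TRIGGER_LINK
                                      SND_SOC_TRIGGER_DAI>;
        };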
diff --git a/include/dt-bindings/sound/cs48l32.h b/include/dt-bindings/sound/cs48l32.h
new file mode 100644
index 000000000000..4e82260fff67
--- /dev/null
+++ b/include/dt-bindings/sound/cs48l32.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Device Tree defines for CS48L32 DSP.
+ *
+ * Copyright (C) 2016-2018, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef DT_BINDINGS_SOUND_CS48L32_H
+#define DT_BINDINGS_SOUND_CS48L32_H
+
+/* Values for cirrus,in-type */
+#define CS48L32_IN_TYPE_DIFF 0
+#define CS48L32_IN_TYPE_SE 1
+
+/* Values for cirrus,pdm-sup */
+#define CS48L32_PDM_SUP_VOUT_MIC 0
+#define CS48L32_PDM_SUP_MICBIAS1 1
+
+#endif
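A minimal sketch of the two properties in a codec node; the node name, address, and the number of entries per property are assumptions here — the binding schema is authoritative:

        codec@0 {
                compatible = "cirrus,cs48l32";
                cirrus,in-type = <CS48L32_IN_TYPE_SE>;
                cirrus,pdm-sup = <CS48L32_PDM_SUP_MICBIAS1>;
        };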
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
index 39f203256c4f..6d1ce7f5da51 100644
--- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -139,6 +139,7 @@
#define DISPLAY_PORT_RX_5 133
#define DISPLAY_PORT_RX_6 134
#define DISPLAY_PORT_RX_7 135
+#define USB_RX 136
#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
index f5e9f1db091e..4fc68aeb9e04 100644
--- a/include/dt-bindings/sound/qcom,wcd9335.h
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -10,6 +10,5 @@
#define AIF3_PB 4
#define AIF3_CAP 5
#define AIF4_PB 6
-#define NUM_CODEC_DAIS 7
#endif
diff --git a/include/dt-bindings/sound/qcom,wcd934x.h b/include/dt-bindings/sound/qcom,wcd934x.h
new file mode 100644
index 000000000000..8b30d34fcc87
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd934x.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD934x_H
+#define __DT_SOUND_QCOM_WCD934x_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define AIF4_VIFEED 7
+#define AIF4_MAD_TX 8
+
+#endif
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
index bf95309d2525..ddc7302a510a 100644
--- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -24,7 +24,7 @@
#define MT8186_BIG_CPU1 5
#define MT8186_NNA 6
#define MT8186_ADSP 7
-#define MT8186_MFG 8
+#define MT8186_GPU 8
#define MT8188_MCU_LITTLE_CPU0 0
#define MT8188_MCU_LITTLE_CPU1 1
@@ -34,11 +34,11 @@
#define MT8188_MCU_BIG_CPU1 5
#define MT8188_AP_APU 0
-#define MT8188_AP_GPU1 1
-#define MT8188_AP_GPU2 2
-#define MT8188_AP_SOC1 3
-#define MT8188_AP_SOC2 4
-#define MT8188_AP_SOC3 5
+#define MT8188_AP_GPU0 1
+#define MT8188_AP_GPU1 2
+#define MT8188_AP_ADSP 3
+#define MT8188_AP_VDO 4
+#define MT8188_AP_INFRA 5
#define MT8188_AP_CAM1 6
#define MT8188_AP_CAM2 7
diff --git a/include/dt-bindings/thermal/tegra114-soctherm.h b/include/dt-bindings/thermal/tegra114-soctherm.h
new file mode 100644
index 000000000000..b766a61cd1ce
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra114-soctherm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for binding nvidia,tegra114-soctherm.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+
+#define TEGRA114_SOCTHERM_SENSOR_CPU 0
+#define TEGRA114_SOCTHERM_SENSOR_MEM 1
+#define TEGRA114_SOCTHERM_SENSOR_GPU 2
+#define TEGRA114_SOCTHERM_SENSOR_PLLX 3
+
+#define TEGRA114_SOCTHERM_THROT_LEVEL_NONE 0
+#define TEGRA114_SOCTHERM_THROT_LEVEL_LOW 1
+#define TEGRA114_SOCTHERM_THROT_LEVEL_MED 2
+#define TEGRA114_SOCTHERM_THROT_LEVEL_HIGH 3
+
+#endif
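The sensor IDs are the cell value in a thermal zone's phandle to the SOC_THERM node, following the convention of the other Tegra soctherm bindings. A minimal sketch, with the zone node and the &soctherm label illustrative:

        cpu-thermal {
                thermal-sensors = <&soctherm TEGRA114_SOCTHERM_SENSOR_CPU>;
        };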
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
index 7ae6d84b2bd9..89fa31ffce2d 100644
--- a/include/dt-bindings/watchdog/aspeed-wdt.h
+++ b/include/dt-bindings/watchdog/aspeed-wdt.h
@@ -89,4 +89,142 @@
#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+#define AST2700_WDT_RESET1_CPU (1 << 0)
+#define AST2700_WDT_RESET1_DRAM (1 << 1)
+#define AST2700_WDT_RESET1_SLI0 (1 << 2)
+#define AST2700_WDT_RESET1_EHCI (1 << 3)
+#define AST2700_WDT_RESET1_HACE (1 << 4)
+#define AST2700_WDT_RESET1_SOC_MISC0 (1 << 5)
+#define AST2700_WDT_RESET1_VIDEO (1 << 6)
+#define AST2700_WDT_RESET1_2D_GRAPHIC (1 << 7)
+#define AST2700_WDT_RESET1_RAVS0 (1 << 8)
+#define AST2700_WDT_RESET1_RAVS1 (1 << 9)
+#define AST2700_WDT_RESET1_GPIO0 (1 << 10)
+#define AST2700_WDT_RESET1_SSP (1 << 11)
+#define AST2700_WDT_RESET1_TSP (1 << 12)
+#define AST2700_WDT_RESET1_CRT (1 << 13)
+#define AST2700_WDT_RESET1_USB20_HOST (1 << 14)
+#define AST2700_WDT_RESET1_USB11_HOST (1 << 15)
+#define AST2700_WDT_RESET1_UFS (1 << 16)
+#define AST2700_WDT_RESET1_EMMC (1 << 17)
+#define AST2700_WDT_RESET1_AHB_TO_PCIE1 (1 << 18)
+#define AST2700_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2700_WDT_RESET1_MCTP1 (1 << 23)
+#define AST2700_WDT_RESET1_MCTP0 (1 << 24)
+#define AST2700_WDT_RESET1_JTAG0 (1 << 25)
+#define AST2700_WDT_RESET1_ECC (1 << 26)
+#define AST2700_WDT_RESET1_XDMA1 (1 << 27)
+#define AST2700_WDT_RESET1_DP (1 << 28)
+#define AST2700_WDT_RESET1_DP_MCU (1 << 29)
+#define AST2700_WDT_RESET1_AHB_TO_PCIE0 (1 << 31)
+
+#define AST2700_WDT_RESET1_DEFAULT 0x8207ff71
+
+#define AST2700_WDT_RESET2_USB3_A_HOST (1 << 0)
+#define AST2700_WDT_RESET2_USB3_A_VHUB3 (1 << 1)
+#define AST2700_WDT_RESET2_USB3_A_VHUB2 (1 << 2)
+#define AST2700_WDT_RESET2_USB3_B_HOST (1 << 3)
+#define AST2700_WDT_RESET2_USB3_B_VHUB3 (1 << 4)
+#define AST2700_WDT_RESET2_USB3_B_VHUB2 (1 << 5)
+#define AST2700_WDT_RESET2_SM3 (1 << 6)
+#define AST2700_WDT_RESET2_SM4 (1 << 7)
+#define AST2700_WDT_RESET2_SHA3 (1 << 8)
+#define AST2700_WDT_RESET2_RSA (1 << 9)
+
+#define AST2700_WDT_RESET2_DEFAULT 0x000003f6
+
+#define AST2700_WDT_RESET3_LPC0 (1 << 0)
+#define AST2700_WDT_RESET3_LPC1 (1 << 1)
+#define AST2700_WDT_RESET3_MDIO (1 << 2)
+#define AST2700_WDT_RESET3_PECI (1 << 3)
+#define AST2700_WDT_RESET3_PWM (1 << 4)
+#define AST2700_WDT_RESET3_MAC0 (1 << 5)
+#define AST2700_WDT_RESET3_MAC1 (1 << 6)
+#define AST2700_WDT_RESET3_MAC2 (1 << 7)
+#define AST2700_WDT_RESET3_ADC (1 << 8)
+#define AST2700_WDT_RESET3_SDC (1 << 9)
+#define AST2700_WDT_RESET3_ESPI0 (1 << 10)
+#define AST2700_WDT_RESET3_ESPI1 (1 << 11)
+#define AST2700_WDT_RESET3_JTAG1 (1 << 12)
+#define AST2700_WDT_RESET3_SPI0 (1 << 13)
+#define AST2700_WDT_RESET3_SPI1 (1 << 14)
+#define AST2700_WDT_RESET3_SPI2 (1 << 15)
+#define AST2700_WDT_RESET3_I3C0 (1 << 16)
+#define AST2700_WDT_RESET3_I3C1 (1 << 17)
+#define AST2700_WDT_RESET3_I3C2 (1 << 18)
+#define AST2700_WDT_RESET3_I3C3 (1 << 19)
+#define AST2700_WDT_RESET3_I3C4 (1 << 20)
+#define AST2700_WDT_RESET3_I3C5 (1 << 21)
+#define AST2700_WDT_RESET3_I3C6 (1 << 22)
+#define AST2700_WDT_RESET3_I3C7 (1 << 23)
+#define AST2700_WDT_RESET3_I3C8 (1 << 24)
+#define AST2700_WDT_RESET3_I3C9 (1 << 25)
+#define AST2700_WDT_RESET3_I3C10 (1 << 26)
+#define AST2700_WDT_RESET3_I3C11 (1 << 27)
+#define AST2700_WDT_RESET3_I3C12 (1 << 28)
+#define AST2700_WDT_RESET3_I3C13 (1 << 29)
+#define AST2700_WDT_RESET3_I3C14 (1 << 30)
+#define AST2700_WDT_RESET3_I3C15 (1 << 31)
+
+#define AST2700_WDT_RESET3_DEFAULT 0x000093ec
+
+#define AST2700_WDT_RESET4_FMC (1 << 0)
+#define AST2700_WDT_RESET4_SOC_MISC1 (1 << 1)
+#define AST2700_WDT_RESET4_AHB (1 << 2)
+#define AST2700_WDT_RESET4_SLI1 (1 << 3)
+#define AST2700_WDT_RESET4_UART0 (1 << 4)
+#define AST2700_WDT_RESET4_UART1 (1 << 5)
+#define AST2700_WDT_RESET4_UART2 (1 << 6)
+#define AST2700_WDT_RESET4_UART3 (1 << 7)
+#define AST2700_WDT_RESET4_I2C_MONITOR (1 << 8)
+#define AST2700_WDT_RESET4_HOST_TO_SPI1 (1 << 9)
+#define AST2700_WDT_RESET4_HOST_TO_SPI2 (1 << 10)
+#define AST2700_WDT_RESET4_GPIO1 (1 << 11)
+#define AST2700_WDT_RESET4_FSI (1 << 12)
+#define AST2700_WDT_RESET4_CANBUS (1 << 13)
+#define AST2700_WDT_RESET4_MCTP (1 << 14)
+#define AST2700_WDT_RESET4_XDMA (1 << 15)
+#define AST2700_WDT_RESET4_UART5 (1 << 16)
+#define AST2700_WDT_RESET4_UART6 (1 << 17)
+#define AST2700_WDT_RESET4_UART7 (1 << 18)
+#define AST2700_WDT_RESET4_UART8 (1 << 19)
+#define AST2700_WDT_RESET4_BOOT_MCU (1 << 20)
+#define AST2700_WDT_RESET4_IO_MCU (1 << 21)
+#define AST2700_WDT_RESET4_LTPI0 (1 << 22)
+#define AST2700_WDT_RESET4_VGA_LINK (1 << 23)
+#define AST2700_WDT_RESET4_LTPI1 (1 << 24)
+#define AST2700_WDT_RESET4_LTPI_PHY (1 << 25)
+#define AST2700_WDT_RESET4_ACE (1 << 26)
+#define AST2700_WDT_RESET4_LTPI_GPIO0 (1 << 28)
+#define AST2700_WDT_RESET4_LTPI_GPIO1 (1 << 29)
+#define AST2700_WDT_RESET4_AHB_TO_PCIE1 (1 << 30)
+#define AST2700_WDT_RESET4_I3C_DMA (1 << 31)
+
+#define AST2700_WDT_RESET4_DEFAULT 0x40303803
+
+#define AST2700_WDT_RESET5_I2C_GLOBAL (1 << 0)
+#define AST2700_WDT_RESET5_I2C0 (1 << 1)
+#define AST2700_WDT_RESET5_I2C1 (1 << 2)
+#define AST2700_WDT_RESET5_I2C2 (1 << 3)
+#define AST2700_WDT_RESET5_I2C3 (1 << 4)
+#define AST2700_WDT_RESET5_I2C4 (1 << 5)
+#define AST2700_WDT_RESET5_I2C5 (1 << 6)
+#define AST2700_WDT_RESET5_I2C6 (1 << 7)
+#define AST2700_WDT_RESET5_I2C7 (1 << 8)
+#define AST2700_WDT_RESET5_I2C8 (1 << 9)
+#define AST2700_WDT_RESET5_I2C9 (1 << 10)
+#define AST2700_WDT_RESET5_I2C10 (1 << 11)
+#define AST2700_WDT_RESET5_I2C11 (1 << 12)
+#define AST2700_WDT_RESET5_I2C12 (1 << 13)
+#define AST2700_WDT_RESET5_I2C13 (1 << 14)
+#define AST2700_WDT_RESET5_I2C14 (1 << 15)
+#define AST2700_WDT_RESET5_I2C15 (1 << 16)
+#define AST2700_WDT_RESET5_UHCI (1 << 17)
+#define AST2700_WDT_RESET5_USB2_C_UART (1 << 18)
+#define AST2700_WDT_RESET5_USB2_C (1 << 19)
+#define AST2700_WDT_RESET5_USB2_D_UART (1 << 20)
+#define AST2700_WDT_RESET5_USB2_D (1 << 21)
+
+#define AST2700_WDT_RESET5_DEFAULT 0x00320000
+
#endif
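The AST2700 bit masks and *_DEFAULT values above are meant for the watchdog's aspeed,reset-mask property. A minimal sketch, assuming the AST2700 binding takes one cell per SCU reset register, extending the AST2600 pattern of one cell per RESET register; the node and address are illustrative:

        watchdog@12c10000 {
                aspeed,reset-mask = <AST2700_WDT_RESET1_DEFAULT
                                     AST2700_WDT_RESET2_DEFAULT
                                     AST2700_WDT_RESET3_DEFAULT
                                     AST2700_WDT_RESET4_DEFAULT
                                     AST2700_WDT_RESET5_DEFAULT>;
        };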
diff --git a/include/hyperv/hvgdk.h b/include/hyperv/hvgdk.h
new file mode 100644
index 000000000000..dd6d4939ea29
--- /dev/null
+++ b/include/hyperv/hvgdk.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_H
+#define _HV_HVGDK_H
+
+#include "hvgdk_mini.h"
+#include "hvgdk_ext.h"
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64-bit entity whose structure is specified in
+ * the Hyper-V TLFS.
+ *
+ * While the current guidelines do not specify how Linux guest IDs
+ * are to be generated, the plan is to publish guidelines for Linux
+ * and other guest operating systems currently hosted on Hyper-V.
+ * The implementation here conforms to these as-yet unpublished
+ * guidelines.
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
+/* HV_VMX_ENLIGHTENED_VMCS */
+struct hv_enlightened_vmcs {
+ u32 revision_id;
+ u32 abort;
+
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+
+ u16 padding16_1;
+
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+
+ u64 host_cr0;
+ u64 host_cr3;
+ u64 host_cr4;
+
+ u64 host_ia32_sysenter_esp;
+ u64 host_ia32_sysenter_eip;
+ u64 host_rip;
+ u32 host_ia32_sysenter_cs;
+
+ u32 pin_based_vm_exec_control;
+ u32 vm_exit_controls;
+ u32 secondary_vm_exec_control;
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+
+ u64 guest_es_base;
+ u64 guest_cs_base;
+ u64 guest_ss_base;
+ u64 guest_ds_base;
+ u64 guest_fs_base;
+ u64 guest_gs_base;
+ u64 guest_ldtr_base;
+ u64 guest_tr_base;
+ u64 guest_gdtr_base;
+ u64 guest_idtr_base;
+
+ u64 padding64_1[3];
+
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+
+ u64 cr3_target_value0;
+ u64 cr3_target_value1;
+ u64 cr3_target_value2;
+ u64 cr3_target_value3;
+
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+
+ u32 cr3_target_count;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_msr_load_count;
+
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 vmcs_link_pointer;
+
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+
+ u64 guest_pending_dbg_exceptions;
+ u64 guest_sysenter_esp;
+ u64 guest_sysenter_eip;
+
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+
+ u64 cr0_guest_host_mask;
+ u64 cr4_guest_host_mask;
+ u64 cr0_read_shadow;
+ u64 cr4_read_shadow;
+ u64 guest_cr0;
+ u64 guest_cr3;
+ u64 guest_cr4;
+ u64 guest_dr7;
+
+ u64 host_fs_base;
+ u64 host_gs_base;
+ u64 host_tr_base;
+ u64 host_gdtr_base;
+ u64 host_idtr_base;
+ u64 host_rsp;
+
+ u64 ept_pointer;
+
+ u16 virtual_processor_id;
+ u16 padding16_2[3];
+
+ u64 padding64_2[5];
+ u64 guest_physical_address;
+
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+
+ u64 exit_qualification;
+ u64 exit_io_instruction_ecx;
+ u64 exit_io_instruction_esi;
+ u64 exit_io_instruction_edi;
+ u64 exit_io_instruction_eip;
+
+ u64 guest_linear_address;
+ u64 guest_rsp;
+ u64 guest_rflags;
+
+ u32 guest_interruptibility_info;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 vm_entry_controls;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+
+ u64 guest_rip;
+
+ u32 hv_clean_fields;
+ u32 padding32_1;
+ u32 hv_synthetic_controls;
+ struct {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 reserved:30;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u32 padding32_2;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 padding64_4[4];
+ u64 guest_bndcfgs;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_ia32_s_cet;
+ u64 guest_ssp;
+ u64 guest_ia32_int_ssp_table_addr;
+ u64 guest_ia32_lbr_ctl;
+ u64 padding64_5[2];
+ u64 xss_exit_bitmap;
+ u64 encls_exiting_bitmap;
+ u64 host_ia32_perf_global_ctrl;
+ u64 tsc_multiplier;
+ u64 host_ia32_s_cet;
+ u64 host_ssp;
+ u64 host_ia32_int_ssp_table_addr;
+ u64 padding64_6;
+} __packed;
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
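+
+/*
+ * Usage sketch (assumption, not from this header): after software modifies
+ * a group of fields in an enlightened VMCS, the corresponding clean bit
+ * must be cleared so the hypervisor reloads that group, e.g.:
+ *
+ *	evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+ */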
+
+/*
+ * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
+ * pairing it with architecturally impossible exit reasons. Bit 28 is set only
+ * on SMI exits to an SMI transfer monitor (STM), and if and only if an MTF
+ * VM-Exit is pending. That is, it will never be set by hardware for non-SMI
+ * exits (there are only three), nor will it ever be set unless the VMM is
+ * an STM.
+ */
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
+/*
+ * Hyper-V uses the 32 software-reserved bytes in the VMCB control area to
+ * expose SVM enlightenments to guests, as documented in the TLFS. The TLFS
+ * name for this structure is SVM_NESTED_ENLIGHTENED_VMCB_FIELDS.
+ */
+struct hv_vmcb_enlightenments {
+ struct hv_enlightenments_control {
+ u32 nested_flush_hypercall : 1;
+ u32 msr_bitmap : 1;
+ u32 enlightened_npt_tlb : 1;
+ u32 reserved : 29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB.
+ */
+#define HV_VMCB_NESTED_ENLIGHTENMENTS 31
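+
+/*
+ * Usage sketch (field names as in KVM's struct vmcb, assumed here): when
+ * the hv_vmcb_enlightenments contents change, clear the software-reserved
+ * clean bit so the hypervisor re-reads them:
+ *
+ *	vmcb->control.clean &= ~BIT(HV_VMCB_NESTED_ENLIGHTENMENTS);
+ */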
+
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL 0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
+
+/* VM_PARTITION_ASSIST_PAGE */
+struct hv_partition_assist_pg {
+ u32 tlb_lock_count;
+};
+
+/* Define connection identifier type. */
+union hv_connection_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+struct hv_input_unmap_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 unmap_flags;
+ u32 padding;
+} __packed;
+
+#endif /* #ifndef _HV_HVGDK_H */
diff --git a/include/hyperv/hvgdk_ext.h b/include/hyperv/hvgdk_ext.h
new file mode 100644
index 000000000000..641b591ee61f
--- /dev/null
+++ b/include/hyperv/hvgdk_ext.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_EXT_H
+#define _HV_HVGDK_EXT_H
+
+#include "hvgdk_mini.h"
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+/* The same extended hypercall codes, as an enum */
+enum { /* HV_EXT_CALL */
+ HV_EXTCALL_QUERY_CAPABILITIES = 0x8001,
+ HV_EXTCALL_MEMORY_HEAT_HINT = 0x8003,
+};
+
+/* HV_EXT_OUTPUT_QUERY_CAPABILITIES */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum { /* HV_EXT_MEMORY_HEAT_HINT_TYPE */
+ HV_EXTMEM_HEAT_HINT_COLD = 0,
+ HV_EXTMEM_HEAT_HINT_HOT = 1,
+ HV_EXTMEM_HEAT_HINT_COLD_DISCARD = 2,
+ HV_EXTMEM_HEAT_HINT_MAX
+};
+
+/*
+ * The whole argument must fit in a single page so that it can be passed to
+ * the hypervisor in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint { /* HV_EXT_INPUT_MEMORY_HEAT_HINT */
+ u64 heat_type : 2; /* HV_EXTMEM_HEAT_HINT_* */
+ u64 reserved : 62;
+ union hv_gpa_page_range ranges[];
+} __packed;
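+
+/*
+ * Sizing sketch (illustrative helper, not part of the Microsoft
+ * definitions): the input size for a heat hint covering 'count' page
+ * ranges; 'count' must not exceed HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES so
+ * the whole input fits in one HV_HYP_PAGE_SIZE page.
+ */
+static inline size_t hv_example_memory_hint_size(unsigned int count)
+{
+ return sizeof(struct hv_memory_hint) +
+        count * sizeof(union hv_gpa_page_range);
+}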
+
+#endif /* _HV_HVGDK_EXT_H */
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
new file mode 100644
index 000000000000..04b18d0e37af
--- /dev/null
+++ b/include/hyperv/hvgdk_mini.h
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVGDK_MINI_H
+#define _HV_HVGDK_MINI_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct hv_u128 {
+ u64 low_part;
+ u64 high_part;
+} __packed;
+
+/* NOTE: when adding below, update hv_result_to_string() */
+#define HV_STATUS_SUCCESS 0x0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 0x2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 0x3
+#define HV_STATUS_INVALID_ALIGNMENT 0x4
+#define HV_STATUS_INVALID_PARAMETER 0x5
+#define HV_STATUS_ACCESS_DENIED 0x6
+#define HV_STATUS_INVALID_PARTITION_STATE 0x7
+#define HV_STATUS_OPERATION_DENIED 0x8
+#define HV_STATUS_UNKNOWN_PROPERTY 0x9
+#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE 0xA
+#define HV_STATUS_INSUFFICIENT_MEMORY 0xB
+#define HV_STATUS_INVALID_PARTITION_ID 0xD
+#define HV_STATUS_INVALID_VP_INDEX 0xE
+#define HV_STATUS_NOT_FOUND 0x10
+#define HV_STATUS_INVALID_PORT_ID 0x11
+#define HV_STATUS_INVALID_CONNECTION_ID 0x12
+#define HV_STATUS_INSUFFICIENT_BUFFERS 0x13
+#define HV_STATUS_NOT_ACKNOWLEDGED 0x14
+#define HV_STATUS_INVALID_VP_STATE 0x15
+#define HV_STATUS_NO_RESOURCES 0x1D
+#define HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED 0x20
+#define HV_STATUS_INVALID_LP_INDEX 0x41
+#define HV_STATUS_INVALID_REGISTER_VALUE 0x50
+#define HV_STATUS_OPERATION_FAILED 0x71
+#define HV_STATUS_TIME_OUT 0x78
+#define HV_STATUS_CALL_PENDING 0x79
+#define HV_STATUS_VTL_ALREADY_ENABLED 0x86
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC page provide a guest VM
+ * clock with a 100ns tick rate, i.e. 10 MHz.
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC / 100)
+
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+#define HV_HYP_LARGE_PAGE_SHIFT 21
+
+#define HV_PARTITION_ID_INVALID ((u64)0)
+#define HV_PARTITION_ID_SELF ((u64)-1)
+
+/* Hyper-V specific model specific registers (MSRs) */
+
+#if defined(CONFIG_X86)
+/* HV_X64_SYNTHETIC_MSR */
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+#define HV_X64_MSR_HYPERCALL 0x40000001
+#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_X64_MSR_RESET 0x40000003
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+
+/* Define the virtual APIC registers */
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+
+/* Define synthetic interrupt controller model specific registers. */
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SIRBP 0x40000085
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+
+/* Define synthetic interrupt controller model specific registers for nested hypervisor */
+#define HV_X64_MSR_NESTED_SCONTROL 0x40001080
+#define HV_X64_MSR_NESTED_SVERSION 0x40001081
+#define HV_X64_MSR_NESTED_SIEFP 0x40001082
+#define HV_X64_MSR_NESTED_SIMP 0x40001083
+#define HV_X64_MSR_NESTED_EOM 0x40001084
+#define HV_X64_MSR_NESTED_SINT0 0x40001090
+
+/*
+ * Synthetic timer MSRs. There are four timers per vCPU.
+ */
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+
+/* Hyper-V guest idle MSR */
+#define HV_X64_MSR_GUEST_IDLE 0x400000F0
+
+/* Hyper-V guest crash notification MSRs */
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+
+#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
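+
+/*
+ * Usage sketch (illustrative): enabling the hypercall page at a
+ * page-aligned guest physical address 'gpa':
+ *
+ *	msr_val = (gpa & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK) |
+ *		  HV_X64_MSR_HYPERCALL_ENABLE;
+ */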
+
+#define HV_X64_MSR_CRASH_PARAMS \
+ (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
+#define HV_IPI_LOW_VECTOR 0x10
+#define HV_IPI_HIGH_VECTOR 0xff
+
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Hyper-V Enlightened VMCS version mask in nested features CPUID */
+#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff
+
+#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
+#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
+
+struct hv_reenlightenment_control {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 enabled : 1;
+ u64 reserved2 : 15;
+ u64 target_vp : 32;
+} __packed;
+
+struct hv_tsc_emulation_status { /* HV_TSC_EMULATION_STATUS */
+ u64 inprogress : 1;
+ u64 reserved : 63;
+} __packed;
+
+struct hv_tsc_emulation_control { /* HV_TSC_INVARIANT_CONTROL */
+ u64 enabled : 1;
+ u64 reserved : 63;
+} __packed;
+
+/* TSC emulation after migration */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0)
+
+#endif /* CONFIG_X86 */
+
+struct hv_output_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HV_CRASH_CTL_REG_CONTENTS */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
+#define HV_MAX_SPARSE_VCPU_BANKS (64)
+/* The number of vCPUs in one sparse bank */
+#define HV_VCPUS_PER_SPARSE_BANK (64)
+
+/*
+ * Some Hyper-V structs do not use hv_vpset even where Linux uses it.
+ *
+ * struct hv_vpset is usually used as part of hypercall input. The portion
+ * that counts as "fixed size input header" vs. "variable size input header"
+ * varies per hypercall. See the comments at the relevant hypercall call
+ * sites for how the "valid_bank_mask" field should be accounted for.
+ */
+struct hv_vpset { /* HV_VP_SET */
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
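+
+/*
+ * Usage sketch (illustrative): the number of entries in bank_contents[]
+ * equals the number of bits set in valid_bank_mask, e.g.:
+ *
+ *	nbanks = hweight64(vpset->valid_bank_mask);
+ */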
+
+/*
+ * Version info reported by the hypervisor.
+ * Defined as a union for convenient access to the raw CPUID leaf values.
+ */
+union hv_hypervisor_version_info {
+ struct {
+ u32 build_number;
+
+ u32 minor_version : 16;
+ u32 major_version : 16;
+
+ u32 service_pack;
+
+ u32 service_number : 24;
+ u32 service_branch : 8;
+ };
+ struct {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ };
+};
+
+/* HV_CPUID_FUNCTION */
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HYPERV_CPUID_INTERFACE 0x40000001
+#define HYPERV_CPUID_VERSION 0x40000002
+#define HYPERV_CPUID_FEATURES 0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
+#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C
+
+#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
+#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
+
+#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
+/* Support for the extended IOAPIC RTE format */
+#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
+#define HYPERV_VS_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE BIT(3)
+
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EAX), or
+ * HV_PARTITION_PRIVILEGE_MASK [31-0]
+ */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EBX), or
+ * HV_PARTITION_PRIVILEGE_MASK [63-32]
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+#if defined(CONFIG_X86)
+/* HV_X64_HYPERVISOR_FEATURES (EDX) */
+#define HV_X64_MWAIT_AVAILABLE BIT(0)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
+#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
+#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
+/* stimer Direct Mode is available */
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
+
+/*
+ * Implementation recommendations. Indicates which behaviors the hypervisor
+ * recommends the OS implement for optimal performance.
+ * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits.
+ */
+/* HV_X64_ENLIGHTENMENT_INFORMATION */
+#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
+#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
+#define HV_X64_HYPERV_NESTED BIT(12)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
+#define HV_X64_USE_MMIO_HYPERCALLS BIT(21)
+
+/*
+ * CPU management features identification.
+ * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
+ */
+#define HV_X64_START_LOGICAL_PROCESSOR BIT(0)
+#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1)
+#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2)
+#define HV_X64_RESERVED_IDENTITY_BIT BIT(31)
+
+/*
+ * Virtual processor will never share a physical core with another virtual
+ * processor, except for virtual processors that are reported as sibling SMT
+ * threads.
+ */
+#define HV_X64_NO_NONARCH_CORESHARING BIT(18)
+
+/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
+#define HV_X64_NESTED_DIRECT_FLUSH BIT(17)
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
+#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+
+/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0)
+
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If the guest opts in to this feature, ASID invalidations
+ * flush only gva -> hpa mapping entries. To flush the TLB entries derived
+ * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace
+ * or HvFlushGuestPhysicalAddressList).
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
+#define HV_PARAVISOR_PRESENT BIT(0)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */
+#define HV_ISOLATION_TYPE GENMASK(3, 0)
+#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5)
+#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6)
+
+/* HYPERV_CPUID_FEATURES.ECX bits. */
+#define HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE BIT(9)
+#define HV_VP_GHCB_ROOT_MAPPING_AVAILABLE BIT(10)
+
+enum hv_isolation_type {
+ HV_ISOLATION_TYPE_NONE = 0, /* HV_PARTITION_ISOLATION_TYPE_NONE */
+ HV_ISOLATION_TYPE_VBS = 1,
+ HV_ISOLATION_TYPE_SNP = 2,
+ HV_ISOLATION_TYPE_TDX = 3
+};
+
+union hv_x64_msr_hypercall_contents {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 guest_physical_address : 52;
+ } __packed;
+};
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARM64)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
+#endif /* CONFIG_ARM64 */
+
+#if defined(CONFIG_X86)
+#define HV_MAXIMUM_PROCESSORS 2048
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+#define HV_MAXIMUM_PROCESSORS 320
+#endif /* CONFIG_ARM64 */
+
+#define HV_MAX_VP_INDEX (HV_MAXIMUM_PROCESSORS - 1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+#define HV_ANY_VP ((u32)-1)
+
+union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* Declare the various hypercall operations. */
+/* HV_CALL_CODE */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_ENABLE_VP_VTL 0x000f
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_CREATE_PARTITION 0x0040
+#define HVCALL_INITIALIZE_PARTITION 0x0041
+#define HVCALL_FINALIZE_PARTITION 0x0042
+#define HVCALL_DELETE_PARTITION 0x0043
+#define HVCALL_GET_PARTITION_PROPERTY 0x0044
+#define HVCALL_SET_PARTITION_PROPERTY 0x0045
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_WITHDRAW_MEMORY 0x0049
+#define HVCALL_MAP_GPA_PAGES 0x004b
+#define HVCALL_UNMAP_GPA_PAGES 0x004c
+#define HVCALL_INSTALL_INTERCEPT 0x004d
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_DELETE_VP 0x004f
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_TRANSLATE_VIRTUAL_ADDRESS 0x0052
+#define HVCALL_CLEAR_VIRTUAL_INTERRUPT 0x0056
+#define HVCALL_DELETE_PORT 0x0058
+#define HVCALL_DISCONNECT_PORT 0x005b
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_MAP_STATS_PAGE 0x006c
+#define HVCALL_UNMAP_STATS_PAGE 0x006d
+#define HVCALL_SET_SYSTEM_PROPERTY 0x006f
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_GET_SYSTEM_PROPERTY 0x007b
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_ENTER_SLEEP_STATE 0x0084
+#define HVCALL_NOTIFY_PARTITION_EVENT 0x0087
+#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b
+#define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091
+#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094
+#define HVCALL_CREATE_PORT 0x0095
+#define HVCALL_CONNECT_PORT 0x0096
+#define HVCALL_START_VP 0x0099
+#define HVCALL_GET_VP_INDEX_FROM_APIC_ID 0x009a
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_SIGNAL_EVENT_DIRECT 0x00c0
+#define HVCALL_POST_MESSAGE_DIRECT 0x00c1
+#define HVCALL_DISPATCH_VP 0x00c2
+#define HVCALL_GET_GPA_PAGES_ACCESS_STATES 0x00c9
+#define HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS 0x00d7
+#define HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS 0x00d8
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+#define HVCALL_MAP_VP_STATE_PAGE 0x00e1
+#define HVCALL_UNMAP_VP_STATE_PAGE 0x00e2
+#define HVCALL_GET_VP_STATE 0x00e3
+#define HVCALL_SET_VP_STATE 0x00e4
+#define HVCALL_GET_VP_CPUID_VALUES 0x00f4
+#define HVCALL_GET_PARTITION_PROPERTY_EX 0x0101
+#define HVCALL_MMIO_READ 0x0106
+#define HVCALL_MMIO_WRITE 0x0107
+#define HVCALL_DISABLE_HYP_EX 0x010f
+#define HVCALL_MAP_STATS_PAGE2 0x0131
+
+/* HV_HYPERCALL_INPUT */
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_NESTED BIT_ULL(31)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
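+
+/*
+ * Composition sketch (illustrative helper, not part of the Microsoft
+ * definitions): building the input control word for a rep hypercall with
+ * 'varhead' variable-header qwords and 'rep_count' repetitions.
+ */
+static inline u64 hv_example_build_hypercall_control(u64 code, u64 varhead,
+                                                     u64 rep_count)
+{
+ return code | (varhead << HV_HYPERCALL_VARHEAD_OFFSET) |
+        (rep_count << HV_HYPERCALL_REP_COMP_OFFSET);
+}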
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited to 2048 by
+ * the 11-bit width of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages : 11;
+ u64 largepage : 1;
+ u64 basepfn : 52;
+ } page;
+ struct {
+ u64 reserved : 12;
+ u64 page_size : 1;
+ u64 reserved1 : 8;
+ u64 base_large_pfn : 43;
+ };
+};
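+
+/*
+ * Encoding sketch (illustrative): describing 'pages' contiguous 4K guest
+ * pages starting at 'pfn' (pages <= HV_MAX_FLUSH_PAGES) in one entry:
+ *
+ *	entry.page.largepage = false;
+ *	entry.page.basepfn = pfn;
+ *	entry.page.additional_pages = pages - 1;
+ */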
+
+/*
+ * All input flush parameters must fit in a single page. The maximum flush
+ * count equals the number of union hv_gpa_page_range entries that fit in
+ * the input parameter page after the two u64 header fields.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
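+
+/*
+ * Reading sketch: per the TLFS, the reference time derived from the TSC
+ * page is (sequence-lock handling omitted):
+ *
+ *	time = ((tsc * tsc_scale) >> 64) + tsc_offset
+ */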
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+/* Hyper-V defined statically assigned SINTs */
+#define HV_SYNIC_INTERCEPTION_SINT_INDEX 0x00000000
+#define HV_SYNIC_IOMMU_FAULT_SINT_INDEX 0x00000001
+#define HV_SYNIC_VMBUS_SINT_INDEX 0x00000002
+#define HV_SYNIC_FIRST_UNUSED_SINT_INDEX 0x00000005
+
+/* mshv assigned SINT for doorbell */
+#define HV_SYNIC_DOORBELL_SINT_INDEX HV_SYNIC_FIRST_UNUSED_SINT_INDEX
+
+enum hv_interrupt_type {
+ HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
+ HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
+ HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
+ HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
+ HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
+ HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
+ HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
+ HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
+ HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
+ HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
+ HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 masked : 1;
+ u64 auto_eoi : 1;
+ u64 polling : 1;
+ u64 as_intercept : 1;
+ u64 proxy : 1;
+ u64 reserved2 : 43;
+ } __packed;
+};
+
+union hv_x64_xsave_xfem_register {
+ u64 as_uint64;
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ } __packed;
+ struct {
+ u64 legacy_x87 : 1;
+ u64 legacy_sse : 1;
+ u64 avx : 1;
+ u64 mpx_bndreg : 1;
+ u64 mpx_bndcsr : 1;
+ u64 avx_512_op_mask : 1;
+ u64 avx_512_zmmhi : 1;
+ u64 avx_512_zmm16_31 : 1;
+ u64 rsvd8_9 : 2;
+ u64 pasid : 1;
+ u64 cet_u : 1;
+ u64 cet_s : 1;
+ u64 rsvd13_16 : 4;
+ u64 xtile_cfg : 1;
+ u64 xtile_data : 1;
+ u64 rsvd19_63 : 45;
+ } __packed;
+};
+
+/* Synthetic timer configuration */
+union hv_stimer_config { /* HV_X64_MSR_STIMER_CONFIG_CONTENTS */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 periodic : 1;
+ u64 lazy : 1;
+ u64 auto_enable : 1;
+ u64 apic_vector : 8;
+ u64 direct_mode : 1;
+ u64 reserved_z0 : 3;
+ u64 sintx : 4;
+ u64 reserved_z1 : 44;
+ } __packed;
+};
+
+/* Define the number of synthetic timers */
+#define HV_SYNIC_STIMER_COUNT (4)
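+
+/*
+ * Configuration sketch (illustrative): arming a one-shot, direct-mode
+ * timer that delivers 'vector'; the expiration is programmed separately
+ * via the corresponding COUNT register in 100ns units:
+ *
+ *	union hv_stimer_config cfg = { .as_uint64 = 0 };
+ *
+ *	cfg.enable = 1;
+ *	cfg.auto_enable = 1;
+ *	cfg.direct_mode = 1;
+ *	cfg.apic_vector = vector;
+ */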
+
+/* Define port identifier type. */
+union hv_port_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/* Define hypervisor message types. */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /*
+ * Opaque intercept message. The original intercept message is only
+ * accessible from the mapped intercept message page.
+ */
+ HVMSG_OPAQUE_INTERCEPT = 0x8000003F,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Hypercall intercept */
+ HVMSG_HYPERCALL_INTERCEPT = 0x80000050,
+
+ /* SynIC intercepts */
+ HVMSG_SYNIC_EVENT_INTERCEPT = 0x80000060,
+ HVMSG_SYNIC_SINT_INTERCEPT = 0x80000061,
+ HVMSG_SYNIC_SINT_DELIVERABLE = 0x80000062,
+
+ /* Async call completion intercept */
+ HVMSG_ASYNC_CALL_COMPLETION = 0x80000070,
+
+ /* Root scheduler messages */
+ HVMSG_SCHEDULER_VP_SIGNAL_BITSET = 0x80000100,
+ HVMSG_SCHEDULER_VP_SIGNAL_PAIR = 0x80000101,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IO_PORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005,
+ HVMSG_X64_IOMMU_PRQ = 0x80010006,
+ HVMSG_X64_HALT = 0x80010007,
+ HVMSG_X64_INTERRUPTION_DELIVERABLE = 0x80010008,
+ HVMSG_X64_SIPI_INTERCEPT = 0x80010009,
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_simp_gpa : 52;
+ } __packed;
+};
+
+union hv_message_flags {
+ u8 asu8;
+ struct {
+ u8 msg_pending : 1;
+ u8 reserved : 7;
+ } __packed;
+};
+
+struct hv_message_header {
+ u32 message_type;
+ u8 payload_size;
+ union hv_message_flags message_flags;
+ u8 reserved[2];
+ union {
+ u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/*
+ * Message format for notifications delivered via
+ * intercept message (as_intercept = 1)
+ */
+struct hv_notification_message_payload {
+ u32 sint_index;
+} __packed;
+
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ u32 timer_index;
+ u32 reserved;
+ u64 expiration_time; /* When the timer expired */
+ u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+struct hv_x64_segment_register {
+ u64 base;
+ u32 limit;
+ u16 selector;
+ union {
+ struct {
+ u16 segment_type : 4;
+ u16 non_system_segment : 1;
+ u16 descriptor_privilege_level : 2;
+ u16 present : 1;
+ u16 reserved : 4;
+ u16 available : 1;
+ u16 _long : 1;
+ u16 _default : 1;
+ u16 granularity : 1;
+ } __packed;
+ u16 attributes;
+ };
+} __packed;
+
+struct hv_x64_table_register {
+ u16 pad[3];
+ u16 limit;
+ u64 base;
+} __packed;
+
+#define HV_NORMAL_VTL 0
+
+union hv_input_vtl {
+ u8 as_uint8;
+ struct {
+ u8 target_vtl : 4;
+ u8 use_target_vtl : 1;
+ u8 reserved_z : 3;
+ };
+} __packed;
+
+struct hv_init_vp_context {
+ u64 rip;
+ u64 rsp;
+ u64 rflags;
+
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register tr;
+ struct hv_x64_segment_register ldtr;
+
+ struct hv_x64_table_register idtr;
+ struct hv_x64_table_register gdtr;
+
+ u64 efer;
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 msr_cr_pat;
+} __packed;
+
+struct hv_enable_vp_vtl {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl target_vtl;
+ u8 mbz0;
+ u16 mbz1;
+ struct hv_init_vp_context vp_context;
+} __packed;
+
+struct hv_get_vp_from_apic_id_in {
+ u64 partition_id;
+ union hv_input_vtl target_vtl;
+ u8 res[7];
+ u32 apic_ids[];
+} __packed;
+
+union hv_register_vsm_partition_config {
+ u64 as_uint64;
+ struct {
+ u64 enable_vtl_protection : 1;
+ u64 default_vtl_protection_mask : 4;
+ u64 zero_memory_on_reset : 1;
+ u64 deny_lower_vtl_startup : 1;
+ u64 intercept_acceptance : 1;
+ u64 intercept_enable_vtl_protection : 1;
+ u64 intercept_vp_startup : 1;
+ u64 intercept_cpuid_unimplemented : 1;
+ u64 intercept_unrecoverable_exception : 1;
+ u64 intercept_page : 1;
+ u64 mbz : 51;
+ } __packed;
+};
+
+union hv_register_vsm_capabilities {
+ u64 as_uint64;
+ struct {
+ u64 dr6_shared: 1;
+ u64 mbec_vtl_mask: 16;
+ u64 deny_lower_vtl_startup: 1;
+ u64 supervisor_shadow_stack: 1;
+ u64 hardware_hvpt_available: 1;
+ u64 software_hvpt_available: 1;
+ u64 hardware_hvpt_range_bits: 6;
+ u64 intercept_page_available: 1;
+ u64 return_action_available: 1;
+ u64 reserved: 35;
+ } __packed;
+};
+
+union hv_register_vsm_page_offsets {
+ struct {
+ u64 vtl_call_offset : 12;
+ u64 vtl_return_offset : 12;
+ u64 reserved_mbz : 40;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_nested_enlightenments_control {
+ struct {
+ u32 directhypercall : 1;
+ u32 reserved : 31;
+ } __packed features;
+ struct {
+ u32 inter_partition_comm : 1;
+ u32 reserved : 31;
+ } __packed hypercall_controls;
+} __packed;
+
+/* Define virtual processor assist page structure. */
+struct hv_vp_assist_page {
+ u32 apic_assist;
+ u32 reserved1;
+ u32 vtl_entry_reason;
+ u32 vtl_reserved;
+ u64 vtl_ret_x64rax;
+ u64 vtl_ret_x64rcx;
+ struct hv_nested_enlightenments_control nested_control;
+ u8 enlighten_vmentry;
+ u8 reserved2[7];
+ u64 current_nested_vmcs;
+ u8 synthetic_time_unhalted_timer_expired;
+ u8 reserved3[7];
+ u8 virtualization_fault_information[40];
+ u8 reserved4[8];
+ u8 intercept_message[256];
+ u8 vtl_ret_actions[256];
+} __packed;
+
+enum hv_register_name {
+ /* Suspend Registers */
+ HV_REGISTER_EXPLICIT_SUSPEND = 0x00000000,
+ HV_REGISTER_INTERCEPT_SUSPEND = 0x00000001,
+ HV_REGISTER_DISPATCH_SUSPEND = 0x00000003,
+
+ /* Version - 128-bit result same as CPUID 0x40000002 */
+ HV_REGISTER_HYPERVISOR_VERSION = 0x00000100,
+
+ /* Feature Access (registers are 128 bits) - same as CPUID 0x40000003 - 0x4000000B */
+ HV_REGISTER_PRIVILEGES_AND_FEATURES_INFO = 0x00000200,
+ HV_REGISTER_FEATURES_INFO = 0x00000201,
+ HV_REGISTER_IMPLEMENTATION_LIMITS_INFO = 0x00000202,
+ HV_REGISTER_HARDWARE_FEATURES_INFO = 0x00000203,
+ HV_REGISTER_CPU_MANAGEMENT_FEATURES_INFO = 0x00000204,
+ HV_REGISTER_SVM_FEATURES_INFO = 0x00000205,
+ HV_REGISTER_SKIP_LEVEL_FEATURES_INFO = 0x00000206,
+ HV_REGISTER_NESTED_VIRT_FEATURES_INFO = 0x00000207,
+ HV_REGISTER_IPT_FEATURES_INFO = 0x00000208,
+
+ /* Guest Crash Registers */
+ HV_REGISTER_GUEST_CRASH_P0 = 0x00000210,
+ HV_REGISTER_GUEST_CRASH_P1 = 0x00000211,
+ HV_REGISTER_GUEST_CRASH_P2 = 0x00000212,
+ HV_REGISTER_GUEST_CRASH_P3 = 0x00000213,
+ HV_REGISTER_GUEST_CRASH_P4 = 0x00000214,
+ HV_REGISTER_GUEST_CRASH_CTL = 0x00000215,
+
+ /* Misc */
+ HV_REGISTER_VP_RUNTIME = 0x00090000,
+ HV_REGISTER_GUEST_OS_ID = 0x00090002,
+ HV_REGISTER_VP_INDEX = 0x00090003,
+ HV_REGISTER_TIME_REF_COUNT = 0x00090004,
+ HV_REGISTER_CPU_MANAGEMENT_VERSION = 0x00090007,
+ HV_REGISTER_VP_ASSIST_PAGE = 0x00090013,
+ HV_REGISTER_VP_ROOT_SIGNAL_COUNT = 0x00090014,
+ HV_REGISTER_REFERENCE_TSC = 0x00090017,
+
+ /* Hypervisor-defined Registers (Synic) */
+ HV_REGISTER_SINT0 = 0x000A0000,
+ HV_REGISTER_SINT1 = 0x000A0001,
+ HV_REGISTER_SINT2 = 0x000A0002,
+ HV_REGISTER_SINT3 = 0x000A0003,
+ HV_REGISTER_SINT4 = 0x000A0004,
+ HV_REGISTER_SINT5 = 0x000A0005,
+ HV_REGISTER_SINT6 = 0x000A0006,
+ HV_REGISTER_SINT7 = 0x000A0007,
+ HV_REGISTER_SINT8 = 0x000A0008,
+ HV_REGISTER_SINT9 = 0x000A0009,
+ HV_REGISTER_SINT10 = 0x000A000A,
+ HV_REGISTER_SINT11 = 0x000A000B,
+ HV_REGISTER_SINT12 = 0x000A000C,
+ HV_REGISTER_SINT13 = 0x000A000D,
+ HV_REGISTER_SINT14 = 0x000A000E,
+ HV_REGISTER_SINT15 = 0x000A000F,
+ HV_REGISTER_SCONTROL = 0x000A0010,
+ HV_REGISTER_SVERSION = 0x000A0011,
+ HV_REGISTER_SIEFP = 0x000A0012,
+ HV_REGISTER_SIMP = 0x000A0013,
+ HV_REGISTER_EOM = 0x000A0014,
+ HV_REGISTER_SIRBP = 0x000A0015,
+
+ HV_REGISTER_NESTED_SINT0 = 0x000A1000,
+ HV_REGISTER_NESTED_SINT1 = 0x000A1001,
+ HV_REGISTER_NESTED_SINT2 = 0x000A1002,
+ HV_REGISTER_NESTED_SINT3 = 0x000A1003,
+ HV_REGISTER_NESTED_SINT4 = 0x000A1004,
+ HV_REGISTER_NESTED_SINT5 = 0x000A1005,
+ HV_REGISTER_NESTED_SINT6 = 0x000A1006,
+ HV_REGISTER_NESTED_SINT7 = 0x000A1007,
+ HV_REGISTER_NESTED_SINT8 = 0x000A1008,
+ HV_REGISTER_NESTED_SINT9 = 0x000A1009,
+ HV_REGISTER_NESTED_SINT10 = 0x000A100A,
+ HV_REGISTER_NESTED_SINT11 = 0x000A100B,
+ HV_REGISTER_NESTED_SINT12 = 0x000A100C,
+ HV_REGISTER_NESTED_SINT13 = 0x000A100D,
+ HV_REGISTER_NESTED_SINT14 = 0x000A100E,
+ HV_REGISTER_NESTED_SINT15 = 0x000A100F,
+ HV_REGISTER_NESTED_SCONTROL = 0x000A1010,
+ HV_REGISTER_NESTED_SVERSION = 0x000A1011,
+ HV_REGISTER_NESTED_SIFP = 0x000A1012,
+ HV_REGISTER_NESTED_SIPP = 0x000A1013,
+ HV_REGISTER_NESTED_EOM = 0x000A1014,
+ HV_REGISTER_NESTED_SIRBP = 0x000A1015,
+
+ /* Hypervisor-defined Registers (Synthetic Timers) */
+ HV_REGISTER_STIMER0_CONFIG = 0x000B0000,
+ HV_REGISTER_STIMER0_COUNT = 0x000B0001,
+
+ /* VSM */
+ HV_REGISTER_VSM_VP_STATUS = 0x000D0003,
+
+ /* Synthetic VSM registers */
+ HV_REGISTER_VSM_CODE_PAGE_OFFSETS = 0x000D0002,
+ HV_REGISTER_VSM_CAPABILITIES = 0x000D0006,
+ HV_REGISTER_VSM_PARTITION_CONFIG = 0x000D0007,
+
+#if defined(CONFIG_X86)
+ /* X64 Debug Registers */
+ HV_X64_REGISTER_DR0 = 0x00050000,
+ HV_X64_REGISTER_DR1 = 0x00050001,
+ HV_X64_REGISTER_DR2 = 0x00050002,
+ HV_X64_REGISTER_DR3 = 0x00050003,
+ HV_X64_REGISTER_DR6 = 0x00050004,
+ HV_X64_REGISTER_DR7 = 0x00050005,
+
+ /* X64 Cache control MSRs */
+ HV_X64_REGISTER_MSR_MTRR_CAP = 0x0008000D,
+ HV_X64_REGISTER_MSR_MTRR_DEF_TYPE = 0x0008000E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0 = 0x00080010,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1 = 0x00080011,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2 = 0x00080012,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3 = 0x00080013,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4 = 0x00080014,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5 = 0x00080015,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6 = 0x00080016,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7 = 0x00080017,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE8 = 0x00080018,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE9 = 0x00080019,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEA = 0x0008001A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEB = 0x0008001B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEC = 0x0008001C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASED = 0x0008001D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEE = 0x0008001E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEF = 0x0008001F,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0 = 0x00080040,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1 = 0x00080041,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2 = 0x00080042,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3 = 0x00080043,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4 = 0x00080044,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5 = 0x00080045,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6 = 0x00080046,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7 = 0x00080047,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK8 = 0x00080048,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK9 = 0x00080049,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKA = 0x0008004A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKB = 0x0008004B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKC = 0x0008004C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKD = 0x0008004D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKE = 0x0008004E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKF = 0x0008004F,
+ HV_X64_REGISTER_MSR_MTRR_FIX64K00000 = 0x00080070,
+ HV_X64_REGISTER_MSR_MTRR_FIX16K80000 = 0x00080071,
+ HV_X64_REGISTER_MSR_MTRR_FIX16KA0000 = 0x00080072,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC0000 = 0x00080073,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC8000 = 0x00080074,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD0000 = 0x00080075,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD8000 = 0x00080076,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE0000 = 0x00080077,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE8000 = 0x00080078,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF0000 = 0x00080079,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A,
+
+ HV_X64_REGISTER_REG_PAGE = 0x0009001C,
+#endif
+};
+
+/*
+ * Arch compatibility regs for use with hv_set/get_register
+ */
+#if defined(CONFIG_X86)
+
+/*
+ * To support arch-generic code calling hv_set/get_register:
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrq
+ * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
+ */
+#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_X64_MSR_SINT0)
+#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION)
+#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL)
+#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP)
+#define HV_MSR_SIMP (HV_X64_MSR_SIMP)
+#define HV_MSR_EOM (HV_X64_MSR_EOM)
+#define HV_MSR_SIRBP (HV_X64_MSR_SIRBP)
+
+#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL)
+#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION)
+#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP)
+#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP)
+#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM)
+#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0)
+
+#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT)
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
+#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
+#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
+#define HV_MSR_SIMP (HV_REGISTER_SIMP)
+#define HV_MSR_EOM (HV_REGISTER_EOM)
+#define HV_MSR_SIRBP (HV_REGISTER_SIRBP)
+
+#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
+
+#endif /* CONFIG_ARM64 */
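+
+/*
+ * Usage sketch (helper name assumed, not defined here): arch-generic code
+ * can then use, e.g.:
+ *
+ *	hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
+ *
+ * which is an MSR write on x86 and a SetVpRegisters hypercall on arm64.
+ */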
+
+union hv_explicit_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_intercept_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_dispatch_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u64 interruption_pending : 1;
+ u64 interruption_type: 1;
+ u64 reserved : 30;
+ u64 error_code : 32;
+ } __packed;
+};
+
+union hv_arm64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_synthetic_exception_event {
+ u64 as_uint64[2];
+ struct {
+ u8 event_pending : 1;
+ u8 event_type : 3;
+ u8 reserved : 4;
+ u8 rsvd[3];
+ u32 exception_type;
+ u64 context;
+ } __packed;
+};
+
+union hv_x64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 nmi_masked : 1;
+ u64 reserved : 62;
+ } __packed;
+};
+
+union hv_x64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u32 interruption_pending : 1;
+ u32 interruption_type : 3;
+ u32 deliver_error_code : 1;
+ u32 instruction_length : 4;
+ u32 nested_event : 1;
+ u32 reserved : 6;
+ u32 interruption_vector : 16;
+ u32 error_code;
+ } __packed;
+};
+
+union hv_register_value {
+ struct hv_u128 reg128;
+ u64 reg64;
+ u32 reg32;
+ u16 reg16;
+ u8 reg8;
+
+ struct hv_x64_segment_register segment;
+ struct hv_x64_table_register table;
+ union hv_explicit_suspend_register explicit_suspend;
+ union hv_intercept_suspend_register intercept_suspend;
+ union hv_dispatch_suspend_register dispatch_suspend;
+#ifdef CONFIG_ARM64
+ union hv_arm64_interrupt_state_register interrupt_state;
+ union hv_arm64_pending_interruption_register pending_interruption;
+#endif
+#ifdef CONFIG_X86
+ union hv_x64_interrupt_state_register interrupt_state;
+ union hv_x64_pending_interruption_register pending_interruption;
+#endif
+ union hv_arm64_pending_synthetic_exception_event pending_synthetic_exception_event;
+};
+
+/* NOTE: Linux helper struct - NOT from Hyper-V code. */
+struct hv_output_get_vp_registers {
+ DECLARE_FLEX_ARRAY(union hv_register_value, values);
+};
+
+#if defined(CONFIG_ARM64)
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+#endif /* CONFIG_ARM64 */
+
+struct hv_register_assoc {
+ u32 name; /* enum hv_register_name */
+ u32 reserved1;
+ u64 reserved2;
+ union hv_register_value value;
+} __packed;
+
+struct hv_input_get_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ u32 names[];
+} __packed;
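+
+/*
+ * Usage sketch (illustrative): the number of entries in names[] is given
+ * by the rep count in the hypercall control word; the output is one
+ * union hv_register_value per requested name, e.g.:
+ *
+ *	input->names[0] = HV_REGISTER_VP_INDEX;
+ *	status = hv_do_rep_hypercall(HVCALL_GET_VP_REGISTERS, 1, 0,
+ *				     input, output);
+ */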
+
+struct hv_input_set_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ struct hv_register_assoc elements[];
+} __packed;
+
+#define HV_UNMAP_GPA_LARGE_PAGE 0x2
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI */
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+#define HV_VTL_MASK GENMASK(3, 0)
+
+/* Hyper-V memory host visibility */
+enum hv_mem_host_visibility {
+ VMBUS_PAGE_NOT_VISIBLE = 0,
+ VMBUS_PAGE_VISIBLE_READ_ONLY = 1,
+ VMBUS_PAGE_VISIBLE_READ_WRITE = 3
+};
+
+/* HvCallModifySparseGpaPageHostVisibility hypercall */
+#define HV_MAX_MODIFY_GPA_REP_COUNT ((HV_HYP_PAGE_SIZE / sizeof(u64)) - 2)
+struct hv_gpa_range_for_visibility {
+ u64 partition_id;
+ u32 host_visibility : 2;
+ u32 reserved0 : 30;
+ u32 reserved1;
+ u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
+} __packed;
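+
+/*
+ * Usage sketch (illustrative): making 'n' contiguous pages host-visible
+ * for read/write, with n <= HV_MAX_MODIFY_GPA_REP_COUNT:
+ *
+ *	input->partition_id = HV_PARTITION_ID_SELF;
+ *	input->host_visibility = VMBUS_PAGE_VISIBLE_READ_WRITE;
+ *	for (i = 0; i < n; i++)
+ *		input->gpa_page_list[i] = pfn + i;
+ */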
+
+#if defined(CONFIG_X86)
+union hv_msi_address_register { /* HV_MSI_ADDRESS */
+ u32 as_uint32;
+ struct {
+ u32 reserved1 : 2;
+ u32 destination_mode : 1;
+ u32 redirection_hint : 1;
+ u32 reserved2 : 8;
+ u32 destination_id : 8;
+ u32 msi_base : 12;
+ };
+} __packed;
+
+union hv_msi_data_register { /* HV_MSI_ENTRY.Data */
+ u32 as_uint32;
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 reserved1 : 3;
+ u32 level_assert : 1;
+ u32 trigger_mode : 1;
+ u32 reserved2 : 16;
+ };
+} __packed;
+
+union hv_msi_entry { /* HV_MSI_ENTRY */
+
+ u64 as_uint64;
+ struct {
+ union hv_msi_address_register address;
+ union hv_msi_data_register data;
+ } __packed;
+};
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+union hv_msi_entry {
+ u64 as_uint64[2];
+ struct {
+ u64 address;
+ u32 data;
+ u32 reserved;
+ } __packed;
+};
+#endif /* CONFIG_ARM64 */
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 destination_mode : 1;
+ u32 delivery_status : 1;
+ u32 interrupt_polarity : 1;
+ u32 remote_irr : 1;
+ u32 trigger_mode : 1;
+ u32 interrupt_mask : 1;
+ u32 reserved1 : 15;
+
+ u32 reserved2 : 24;
+ u32 destination_id : 8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+enum hv_interrupt_source { /* HV_INTERRUPT_SOURCE */
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+struct hv_interrupt_entry { /* HV_INTERRUPT_ENTRY */
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target { /* HV_DEVICE_INTERRUPT_TARGET */
+ u32 vector;
+ u32 flags; /* HV_DEVICE_INTERRUPT_TARGET_* above */
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt { /* HV_INPUT_RETARGET_DEVICE_INTERRUPT */
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+enum hv_intercept_type {
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_IO_PORT = 0x00000000,
+ HV_INTERCEPT_TYPE_X64_MSR = 0x00000001,
+ HV_INTERCEPT_TYPE_X64_CPUID = 0x00000002,
+#endif
+ HV_INTERCEPT_TYPE_EXCEPTION = 0x00000003,
+ /* Used to be HV_INTERCEPT_TYPE_REGISTER */
+ HV_INTERCEPT_TYPE_RESERVED0 = 0x00000004,
+ HV_INTERCEPT_TYPE_MMIO = 0x00000005,
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_GLOBAL_CPUID = 0x00000006,
+ HV_INTERCEPT_TYPE_X64_APIC_SMI = 0x00000007,
+#endif
+ HV_INTERCEPT_TYPE_HYPERCALL = 0x00000008,
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_APIC_INIT_SIPI = 0x00000009,
+ HV_INTERCEPT_MC_UPDATE_PATCH_LEVEL_MSR_READ = 0x0000000A,
+ HV_INTERCEPT_TYPE_X64_APIC_WRITE = 0x0000000B,
+ HV_INTERCEPT_TYPE_X64_MSR_INDEX = 0x0000000C,
+#endif
+ HV_INTERCEPT_TYPE_MAX,
+ HV_INTERCEPT_TYPE_INVALID = 0xFFFFFFFF,
+};
+
+union hv_intercept_parameters {
+ /* HV_INTERCEPT_PARAMETERS is defined to be an 8-byte field. */
+ u64 as_uint64;
+#if defined(CONFIG_X86)
+ /* HV_INTERCEPT_TYPE_X64_IO_PORT */
+ u16 io_port;
+ /* HV_INTERCEPT_TYPE_X64_CPUID */
+ u32 cpuid_index;
+ /* HV_INTERCEPT_TYPE_X64_APIC_WRITE */
+ u32 apic_write_mask;
+ /* HV_INTERCEPT_TYPE_EXCEPTION */
+ u16 exception_vector;
+ /* HV_INTERCEPT_TYPE_X64_MSR_INDEX */
+ u32 msr_index;
+#endif
+ /* N.B. Other intercept types do not have any parameters. */
+};
+
+/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
+#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
+
+struct hv_mmio_read_input { /* HV_INPUT_MEMORY_MAPPED_IO_READ */
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+} __packed;
+
+struct hv_mmio_read_output {
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+struct hv_mmio_write_input {
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+#endif /* _HV_HVGDK_MINI_H */
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
new file mode 100644
index 000000000000..469186df7826
--- /dev/null
+++ b/include/hyperv/hvhdk.h
@@ -0,0 +1,899 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVHDK_H
+#define _HV_HVHDK_H
+
+#include <linux/build_bug.h>
+
+#include "hvhdk_mini.h"
+#include "hvgdk.h"
+
+/* Bits for dirty mask of hv_vp_register_page */
+#define HV_X64_REGISTER_CLASS_GENERAL 0
+#define HV_X64_REGISTER_CLASS_IP 1
+#define HV_X64_REGISTER_CLASS_XMM 2
+#define HV_X64_REGISTER_CLASS_SEGMENT 3
+#define HV_X64_REGISTER_CLASS_FLAGS 4
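+
+/*
+ * Usage sketch (illustrative): before dispatching a VP, mark the classes
+ * of registers that were modified through the page, e.g. after changing
+ * rip:
+ *
+ *	page->dirty |= BIT(HV_X64_REGISTER_CLASS_IP);
+ */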
+
+#define HV_VP_REGISTER_PAGE_VERSION_1 1u
+
+#define HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT 7
+
+union hv_vp_register_page_interrupt_vectors {
+ u64 as_uint64;
+ struct {
+ u8 vector_count;
+ u8 vector[HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT];
+ } __packed;
+};
+
+struct hv_vp_register_page {
+ u16 version;
+ u8 isvalid;
+ u8 rsvdz;
+ u32 dirty;
+
+#if IS_ENABLED(CONFIG_X86)
+
+ union {
+ struct {
+ /*
+ * General purpose registers
+ * (HV_X64_REGISTER_CLASS_GENERAL)
+ */
+ union {
+ struct {
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u64 rsp;
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ } __packed;
+
+ u64 gp_registers[16];
+ };
+ /* Instruction pointer (HV_X64_REGISTER_CLASS_IP) */
+ u64 rip;
+ /* Flags (HV_X64_REGISTER_CLASS_FLAGS) */
+ u64 rflags;
+ } __packed;
+
+ u64 registers[18];
+ };
+ /* Volatile XMM registers (HV_X64_REGISTER_CLASS_XMM) */
+ union {
+ struct {
+ struct hv_u128 xmm0;
+ struct hv_u128 xmm1;
+ struct hv_u128 xmm2;
+ struct hv_u128 xmm3;
+ struct hv_u128 xmm4;
+ struct hv_u128 xmm5;
+ } __packed;
+
+ struct hv_u128 xmm_registers[6];
+ };
+ /* Segment registers (HV_X64_REGISTER_CLASS_SEGMENT) */
+ union {
+ struct {
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ } __packed;
+
+ struct hv_x64_segment_register segment_registers[6];
+ };
+ /* Misc. control registers (cannot be set via this interface) */
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 cr8;
+ u64 efer;
+ u64 dr7;
+ union hv_x64_pending_interruption_register pending_interruption;
+ union hv_x64_interrupt_state_register interrupt_state;
+ u64 instruction_emulation_hints;
+ u64 xfem;
+
+ /*
+ * Fields from this point are not included in the register page save chunk.
+ * The reserved field is intended to maintain alignment for unsaved fields.
+ */
+ u8 reserved1[0x100];
+
+ /*
+ * Interrupts injected as part of HvCallDispatchVp.
+ */
+ union hv_vp_register_page_interrupt_vectors interrupt_vectors;
+
+#elif IS_ENABLED(CONFIG_ARM64)
+ /* Not yet supported on arm64 */
+#endif
+} __packed;
+
+#define HV_PARTITION_PROCESSOR_FEATURES_BANKS 2
+
+union hv_partition_processor_features {
+ u64 as_uint64[HV_PARTITION_PROCESSOR_FEATURES_BANKS];
+ struct {
+ u64 sse3_support : 1;
+ u64 lahf_sahf_support : 1;
+ u64 ssse3_support : 1;
+ u64 sse4_1_support : 1;
+ u64 sse4_2_support : 1;
+ u64 sse4a_support : 1;
+ u64 xop_support : 1;
+ u64 pop_cnt_support : 1;
+ u64 cmpxchg16b_support : 1;
+ u64 altmovcr8_support : 1;
+ u64 lzcnt_support : 1;
+ u64 mis_align_sse_support : 1;
+ u64 mmx_ext_support : 1;
+ u64 amd3dnow_support : 1;
+ u64 extended_amd3dnow_support : 1;
+ u64 page_1gb_support : 1;
+ u64 aes_support : 1;
+ u64 pclmulqdq_support : 1;
+ u64 pcid_support : 1;
+ u64 fma4_support : 1;
+ u64 f16c_support : 1;
+ u64 rd_rand_support : 1;
+ u64 rd_wr_fs_gs_support : 1;
+ u64 smep_support : 1;
+ u64 enhanced_fast_string_support : 1;
+ u64 bmi1_support : 1;
+ u64 bmi2_support : 1;
+ u64 hle_support_deprecated : 1;
+ u64 rtm_support_deprecated : 1;
+ u64 movbe_support : 1;
+ u64 npiep1_support : 1;
+ u64 dep_x87_fpu_save_support : 1;
+ u64 rd_seed_support : 1;
+ u64 adx_support : 1;
+ u64 intel_prefetch_support : 1;
+ u64 smap_support : 1;
+ u64 hle_support : 1;
+ u64 rtm_support : 1;
+ u64 rdtscp_support : 1;
+ u64 clflushopt_support : 1;
+ u64 clwb_support : 1;
+ u64 sha_support : 1;
+ u64 x87_pointers_saved_support : 1;
+ u64 invpcid_support : 1;
+ u64 ibrs_support : 1;
+ u64 stibp_support : 1;
+ u64 ibpb_support: 1;
+ u64 unrestricted_guest_support : 1;
+ u64 mdd_support : 1;
+ u64 fast_short_rep_mov_support : 1;
+ u64 l1dcache_flush_support : 1;
+ u64 rdcl_no_support : 1;
+ u64 ibrs_all_support : 1;
+ u64 skip_l1df_support : 1;
+ u64 ssb_no_support : 1;
+ u64 rsb_a_no_support : 1;
+ u64 virt_spec_ctrl_support : 1;
+ u64 rd_pid_support : 1;
+ u64 umip_support : 1;
+ u64 mbs_no_support : 1;
+ u64 mb_clear_support : 1;
+ u64 taa_no_support : 1;
+ u64 tsx_ctrl_support : 1;
+ /*
+ * N.B. The final processor feature bit in bank 0 is reserved to
+ * simplify potential downlevel backports.
+ */
+ u64 reserved_bank0 : 1;
+
+ /* N.B. Begin bank 1 processor features. */
+ u64 acount_mcount_support : 1;
+ u64 tsc_invariant_support : 1;
+ u64 cl_zero_support : 1;
+ u64 rdpru_support : 1;
+ u64 la57_support : 1;
+ u64 mbec_support : 1;
+ u64 nested_virt_support : 1;
+ u64 psfd_support : 1;
+ u64 cet_ss_support : 1;
+ u64 cet_ibt_support : 1;
+ u64 vmx_exception_inject_support : 1;
+ u64 enqcmd_support : 1;
+ u64 umwait_tpause_support : 1;
+ u64 movdiri_support : 1;
+ u64 movdir64b_support : 1;
+ u64 cldemote_support : 1;
+ u64 serialize_support : 1;
+ u64 tsc_deadline_tmr_support : 1;
+ u64 tsc_adjust_support : 1;
+ u64 fzlrep_movsb : 1;
+ u64 fsrep_stosb : 1;
+ u64 fsrep_cmpsb : 1;
+ u64 reserved_bank1 : 42;
+ } __packed;
+};
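+
+/*
+ * Illustrative sketch (not part of the header itself): because the
+ * bitfield struct overlays as_uint64[], a caller can build a mask of
+ * features to disable by name and then consume the raw banks. The
+ * helper name is hypothetical.
+ */
+static inline void hv_example_disable_tsx(
+		union hv_partition_processor_features *feat)
+{
+	memset(feat, 0, sizeof(*feat));
+	feat->hle_support = 1;	/* bit 36 of bank 0 */
+	feat->rtm_support = 1;	/* bit 37 of bank 0 */
+}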
+
+union hv_partition_processor_xsave_features {
+ struct {
+ u64 xsave_support : 1;
+ u64 xsaveopt_support : 1;
+ u64 avx_support : 1;
+ u64 reserved1 : 61;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_partition_creation_properties {
+ union hv_partition_processor_features disabled_processor_features;
+ union hv_partition_processor_xsave_features
+ disabled_processor_xsave_features;
+} __packed;
+
+#define HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS 1
+
+union hv_partition_synthetic_processor_features {
+ u64 as_uint64[HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS];
+
+ struct {
+ u64 hypervisor_present : 1;
+		/* Support for HV#1 (CPUID leaves 0x40000000 - 0x40000006) */
+ u64 hv1 : 1;
+ u64 access_vp_run_time_reg : 1; /* HV_X64_MSR_VP_RUNTIME */
+ u64 access_partition_reference_counter : 1; /* HV_X64_MSR_TIME_REF_COUNT */
+ u64 access_synic_regs : 1; /* SINT-related registers */
+ /*
+ * Access to HV_X64_MSR_STIMER0_CONFIG through
+ * HV_X64_MSR_STIMER3_COUNT.
+ */
+ u64 access_synthetic_timer_regs : 1;
+		u64 access_intr_ctrl_regs : 1; /* APIC MSRs and VP assist page */
+ /* HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL */
+ u64 access_hypercall_regs : 1;
+ u64 access_vp_index : 1;
+ u64 access_partition_reference_tsc : 1;
+ u64 access_guest_idle_reg : 1;
+ u64 access_frequency_regs : 1;
+ u64 reserved_z12 : 1;
+ u64 reserved_z13 : 1;
+ u64 reserved_z14 : 1;
+ u64 enable_extended_gva_ranges_for_flush_virtual_address_list : 1;
+ u64 reserved_z16 : 1;
+ u64 reserved_z17 : 1;
+ /* Use fast hypercall output. Corresponds to privilege. */
+ u64 fast_hypercall_output : 1;
+ u64 reserved_z19 : 1;
+ u64 start_virtual_processor : 1; /* Can start VPs */
+ u64 reserved_z21 : 1;
+ /* Synthetic timers in direct mode. */
+ u64 direct_synthetic_timers : 1;
+ u64 reserved_z23 : 1;
+ u64 extended_processor_masks : 1;
+
+ /* Enable various hypercalls */
+ u64 tb_flush_hypercalls : 1;
+ u64 synthetic_cluster_ipi : 1;
+ u64 notify_long_spin_wait : 1;
+ u64 query_numa_distance : 1;
+ u64 signal_events : 1;
+ u64 retarget_device_interrupt : 1;
+ u64 restore_time : 1;
+
+ /* EnlightenedVmcs nested enlightenment is supported. */
+ u64 enlightened_vmcs : 1;
+ u64 reserved : 31;
+ } __packed;
+};
+
+#define HV_MAKE_COMPATIBILITY_VERSION(major_, minor_) \
+ ((u32)((major_) << 8 | (minor_)))
+
+#define HV_COMPATIBILITY_21_H2		HV_MAKE_COMPATIBILITY_VERSION(0x6, 0x9)
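+/*
+ * For example, the 21H2 version above packs major 0x6 and minor 0x9
+ * into a single u32: (0x6 << 8) | 0x9 == 0x0609.
+ */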
+
+union hv_partition_isolation_properties {
+ u64 as_uint64;
+ struct {
+ u64 isolation_type: 5;
+ u64 isolation_host_type : 2;
+ u64 rsvd_z: 5;
+ u64 shared_gpa_boundary_page_number: 52;
+ } __packed;
+};
+
+/*
+ * Various isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_TYPE_NONE 0
+#define HV_PARTITION_ISOLATION_TYPE_SNP 2
+#define HV_PARTITION_ISOLATION_TYPE_TDX 3
+
+/*
+ * Various host isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_HOST_TYPE_NONE 0x0
+#define HV_PARTITION_ISOLATION_HOST_TYPE_HARDWARE 0x1
+#define HV_PARTITION_ISOLATION_HOST_TYPE_RESERVED 0x2
+
+/* Note: Exo partition is enabled by default */
+#define HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED BIT(4)
+#define HV_PARTITION_CREATION_FLAG_EXO_PARTITION BIT(8)
+#define HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED BIT(13)
+#define HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED BIT(19)
+#define HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE BIT(22)
+
+struct hv_input_create_partition {
+ u64 flags;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u32 compatibility_version;
+ u32 padding;
+ struct hv_partition_creation_properties partition_creation_properties;
+ union hv_partition_isolation_properties isolation_properties;
+} __packed;
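+
+/*
+ * Illustrative sketch (not part of the header itself): filling in the
+ * input for partition creation. The helper name is hypothetical, and
+ * how the hypercall input page is obtained and the call issued is
+ * left to the caller.
+ */
+static inline void hv_example_fill_create_partition(
+		struct hv_input_create_partition *in)
+{
+	memset(in, 0, sizeof(*in));
+	in->flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
+		    HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
+	in->compatibility_version = HV_COMPATIBILITY_21_H2;
+	in->isolation_properties.isolation_type =
+		HV_PARTITION_ISOLATION_TYPE_NONE;
+}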
+
+struct hv_output_create_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_initialize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_finalize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_delete_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_get_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+} __packed;
+
+struct hv_output_get_partition_property {
+ u64 property_value;
+} __packed;
+
+struct hv_input_set_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ u64 property_value;
+} __packed;
+
+union hv_partition_property_arg {
+ u64 as_uint64;
+ struct {
+ union {
+ u32 arg;
+ u32 vp_index;
+ };
+ u16 reserved0;
+ u8 reserved1;
+ u8 object_type;
+ } __packed;
+};
+
+struct hv_input_get_partition_property_ex {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ union {
+ union hv_partition_property_arg arg_data;
+ u64 arg;
+ };
+} __packed;
+
+/*
+ * NOTE: This size should be computed from hv_input_set_partition_property_ex_header,
+ * but hv_input_get_partition_property_ex is identical, so it suffices.
+ */
+#define HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE \
+ (HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_partition_property_ex))
+
+union hv_partition_property_ex {
+ u8 buffer[HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE];
+ struct hv_partition_property_vmm_capabilities vmm_capabilities;
+ /* More fields to be filled in when needed */
+};
+
+struct hv_output_get_partition_property_ex {
+ union hv_partition_property_ex property_value;
+} __packed;
+
+enum hv_vp_state_page_type {
+ HV_VP_STATE_PAGE_REGISTERS = 0,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE = 1,
+ HV_VP_STATE_PAGE_GHCB = 2,
+ HV_VP_STATE_PAGE_COUNT
+};
+
+struct hv_input_map_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u16 type; /* enum hv_vp_state_page_type */
+ union hv_input_vtl input_vtl;
+ union {
+ u8 as_uint8;
+ struct {
+ u8 map_location_provided : 1;
+ u8 reserved : 7;
+ };
+ } flags;
+ u64 requested_map_location;
+} __packed;
+
+struct hv_output_map_vp_state_page {
+ u64 map_location; /* GPA page number */
+} __packed;
+
+struct hv_input_unmap_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u16 type; /* enum hv_vp_state_page_type */
+ union hv_input_vtl input_vtl;
+ u8 reserved0;
+} __packed;
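+
+/*
+ * Illustrative sketch (not part of the header itself): requesting a
+ * mapping of the VP register page without a caller-provided location;
+ * the hypervisor returns the GPA page number in
+ * hv_output_map_vp_state_page. The helper name is hypothetical.
+ */
+static inline void hv_example_fill_map_register_page(
+		struct hv_input_map_vp_state_page *in,
+		u64 partition_id, u32 vp_index)
+{
+	memset(in, 0, sizeof(*in));
+	in->partition_id = partition_id;
+	in->vp_index = vp_index;
+	in->type = HV_VP_STATE_PAGE_REGISTERS;
+	in->flags.as_uint8 = 0;	/* no map_location_provided */
+}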
+
+struct hv_x64_apic_eoi_message {
+ u32 vp_index;
+ u32 interrupt_vector;
+} __packed;
+
+struct hv_opaque_intercept_message {
+ u32 vp_index;
+} __packed;
+
+enum hv_port_type {
+ HV_PORT_TYPE_MESSAGE = 1,
+ HV_PORT_TYPE_EVENT = 2,
+ HV_PORT_TYPE_MONITOR = 3,
+ HV_PORT_TYPE_DOORBELL = 4 /* Root Partition only */
+};
+
+struct hv_port_info {
+ u32 port_type; /* enum hv_port_type */
+ u32 padding;
+ union {
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } message_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u16 base_flag_number;
+ u16 flag_count;
+ u32 rsvdz;
+ } event_port_info;
+ struct {
+ u64 monitor_address;
+ u64 rsvdz;
+ } monitor_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } doorbell_port_info;
+ };
+} __packed;
+
+struct hv_connection_info {
+ u32 port_type;
+ u32 padding;
+ union {
+ struct {
+ u64 rsvdz;
+ } message_connection_info;
+ struct {
+ u64 rsvdz;
+ } event_connection_info;
+ struct {
+ u64 monitor_address;
+ } monitor_connection_info;
+ struct {
+ u64 gpa;
+ u64 trigger_value;
+ u64 flags;
+ } doorbell_connection_info;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_BYTE_COUNT (256)
+#define HV_EVENT_FLAGS32_COUNT (256 / sizeof(u32))
+
+/* On the Linux side, a long-based view of the flags is kept so that long bit ops can be used on them */
+#define HV_EVENT_FLAGS_UL_COUNT (256 / sizeof(ulong))
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned char flags8[HV_EVENT_FLAGS_BYTE_COUNT];
+ u32 flags32[HV_EVENT_FLAGS32_COUNT];
+	ulong flags[HV_EVENT_FLAGS_UL_COUNT]; /* Linux only */
+};
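+
+/*
+ * Illustrative sketch (not part of the header itself): the ulong view
+ * exists so event flags can be driven with the kernel's long-based
+ * atomic bit ops, e.g. consuming one flag for a SINT. The helper name
+ * is hypothetical; sync_test_and_clear_bit() is the x86 variant.
+ */
+static inline bool hv_example_consume_event_flag(
+		union hv_synic_event_flags *ev, unsigned int flag)
+{
+	return sync_test_and_clear_bit(flag, ev->flags);
+}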
+
+struct hv_synic_event_flags_page {
+ volatile union hv_synic_event_flags event_flags[HV_SYNIC_SINT_COUNT];
+};
+
+#define HV_SYNIC_EVENT_RING_MESSAGE_COUNT 63
+
+struct hv_synic_event_ring {
+ u8 signal_masked;
+ u8 ring_full;
+ u16 reserved_z;
+ u32 data[HV_SYNIC_EVENT_RING_MESSAGE_COUNT];
+} __packed;
+
+struct hv_synic_event_ring_page {
+ struct hv_synic_event_ring sint_event_ring[HV_SYNIC_SINT_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_siefp_gpa : 52;
+ } __packed;
+};
+
+union hv_synic_sirbp {
+ u64 as_uint64;
+ struct {
+ u64 sirbp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_sirbp_gpa : 52;
+ } __packed;
+};
+
+union hv_interrupt_control {
+ u64 as_uint64;
+ struct {
+ u32 interrupt_type; /* enum hv_interrupt_type */
+#if IS_ENABLED(CONFIG_X86)
+ u32 level_triggered : 1;
+ u32 logical_dest_mode : 1;
+ u32 rsvd : 30;
+#elif IS_ENABLED(CONFIG_ARM64)
+ u32 rsvd1 : 2;
+ u32 asserted : 1;
+ u32 rsvd2 : 29;
+#endif
+ } __packed;
+};
+
+struct hv_stimer_state {
+ struct {
+ u32 undelivered_msg_pending : 1;
+ u32 reserved : 31;
+ } __packed flags;
+ u32 resvd;
+ u64 config;
+ u64 count;
+ u64 adjustment;
+ u64 undelivered_exp_time;
+} __packed;
+
+struct hv_synthetic_timers_state {
+ struct hv_stimer_state timers[HV_SYNIC_STIMER_COUNT];
+ u64 reserved[5];
+} __packed;
+
+struct hv_async_completion_message_payload {
+ u64 partition_id;
+ u32 status;
+ u32 completion_count;
+ u64 sub_status;
+} __packed;
+
+union hv_input_delete_vp {
+ u64 as_uint64[2];
+ struct {
+ u64 partition_id;
+ u32 vp_index;
+ u8 reserved[4];
+ } __packed;
+} __packed;
+
+struct hv_input_assert_virtual_interrupt {
+ u64 partition_id;
+ union hv_interrupt_control control;
+	u64 dest_addr; /* CPU's APIC ID */
+ u32 vector;
+ u8 target_vtl;
+ u8 rsvd_z0;
+ u16 rsvd_z1;
+} __packed;
+
+struct hv_input_create_port {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u8 port_vtl;
+ u8 min_connection_vtl;
+ u16 padding;
+ u64 connection_partition_id;
+ struct hv_port_info port_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_delete_port {
+ u64 as_uint64[2];
+ struct {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_input_connect_port {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u8 connection_vtl;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved2;
+ struct hv_connection_info connection_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_disconnect_port {
+ u64 as_uint64[2];
+ struct {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u32 is_doorbell: 1;
+ u32 reserved: 31;
+ } __packed;
+} __packed;
+
+union hv_input_notify_port_ring_empty {
+ u64 as_uint64;
+ struct {
+ u32 sint_index;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_vp_state_data_xsave {
+ u64 flags;
+ union hv_x64_xsave_xfem_register states;
+} __packed;
+
+/*
+ * For getting and setting VP state, there are two options based on the state type:
+ *
+ * 1.) Data that is accessed by PFNs in the input hypercall page. This is used
+ * for state which may not fit into the hypercall pages.
+ * 2.) Data that is accessed directly in the input/output hypercall pages.
+ * This is used for state that will always fit into the hypercall pages.
+ *
+ * In the future this could be dynamic based on the size if needed.
+ *
+ * Note: these hypercalls have an 8-byte aligned variable header size, as per the TLFS.
+ */
+
+#define HV_GET_SET_VP_STATE_TYPE_PFN BIT(31)
+
+enum hv_get_set_vp_state_type {
+ /* HvGetSetVpStateLocalInterruptControllerState - APIC/GIC state */
+ HV_GET_SET_VP_STATE_LAPIC_STATE = 0 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_XSAVE = 1 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIM_PAGE = 2 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIEF_PAGE = 3 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS = 4,
+};
+
+struct hv_vp_state_data {
+ u32 type;
+ u32 rsvd;
+ struct hv_vp_state_data_xsave xsave;
+} __packed;
+
+struct hv_input_get_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ u64 output_data_pfns[];
+} __packed;
+
+union hv_output_get_vp_state {
+ struct hv_synthetic_timers_state synthetic_timers_state;
+} __packed;
+
+union hv_input_set_vp_state_data {
+ u64 pfns;
+ u8 bytes;
+} __packed;
+
+struct hv_input_set_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ union hv_input_set_vp_state_data data[];
+} __packed;
+
+union hv_x64_vp_execution_state {
+ u16 as_uint16;
+ struct {
+ u16 cpl:2;
+ u16 cr0_pe:1;
+ u16 cr0_am:1;
+ u16 efer_lma:1;
+ u16 debug_active:1;
+ u16 interruption_pending:1;
+ u16 vtl:4;
+ u16 enclave_mode:1;
+ u16 interrupt_shadow:1;
+ u16 virtualization_fault_active:1;
+ u16 reserved:2;
+ } __packed;
+};
+
+struct hv_x64_intercept_message_header {
+ u32 vp_index;
+ u8 instruction_length:4;
+ u8 cr8:4; /* Only set for exo partitions */
+ u8 intercept_access_type;
+ union hv_x64_vp_execution_state execution_state;
+ struct hv_x64_segment_register cs_segment;
+ u64 rip;
+ u64 rflags;
+} __packed;
+
+union hv_x64_memory_access_info {
+ u8 as_uint8;
+ struct {
+ u8 gva_valid:1;
+ u8 gva_gpa_valid:1;
+ u8 hypercall_output_pending:1;
+ u8 tlb_locked_no_overlay:1;
+ u8 reserved:4;
+ } __packed;
+};
+
+struct hv_x64_memory_intercept_message {
+ struct hv_x64_intercept_message_header header;
+ u32 cache_type; /* enum hv_cache_type */
+ u8 instruction_byte_count;
+ union hv_x64_memory_access_info memory_access_info;
+ u8 tpr_priority;
+ u8 reserved1;
+ u64 guest_virtual_address;
+ u64 guest_physical_address;
+ u8 instruction_bytes[16];
+} __packed;
+
+/*
+ * Dispatch state for the VP communicated by the hypervisor to the
+ * VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_state {
+ HV_VP_DISPATCH_STATE_INVALID = 0,
+ HV_VP_DISPATCH_STATE_BLOCKED = 1,
+ HV_VP_DISPATCH_STATE_READY = 2,
+};
+
+/*
+ * Dispatch event that caused the current dispatch state on return from
+ * HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_event {
+ HV_VP_DISPATCH_EVENT_INVALID = 0x00000000,
+ HV_VP_DISPATCH_EVENT_SUSPEND = 0x00000001,
+ HV_VP_DISPATCH_EVENT_INTERCEPT = 0x00000002,
+};
+
+#define HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION 1024
+/* The maximum array size of HV_GENERIC_SET (vp_set) buffer */
+#define HV_GENERIC_SET_QWORD_COUNT(max) (((((max) - 1) >> 6) + 1) + 2)
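+
+/*
+ * The count is ceil(max / 64) bitmap qwords plus two leading qwords
+ * for the hv_vpset header (format and valid_bank_mask). For the
+ * 1024-VP maximum above: ((1023 >> 6) + 1) + 2 = 18 qwords.
+ */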
+
+struct hv_vp_signal_bitset_scheduler_message {
+ u64 partition_id;
+ u32 overflow_count;
+ u16 vp_count;
+ u16 reserved;
+
+#define BITSET_BUFFER_SIZE \
+ HV_GENERIC_SET_QWORD_COUNT(HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION)
+ union {
+ struct hv_vpset bitset;
+ u64 bitset_buffer[BITSET_BUFFER_SIZE];
+ } vp_bitset;
+#undef BITSET_BUFFER_SIZE
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_bitset_scheduler_message) <=
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+#define HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT \
+ (((sizeof(struct hv_message) - sizeof(struct hv_message_header)) / \
+ (sizeof(u64 /* partition id */) + sizeof(u32 /* vp index */))) - 1)
+
+struct hv_vp_signal_pair_scheduler_message {
+ u32 overflow_count;
+ u8 vp_count;
+ u8 reserved1[3];
+
+ u64 partition_ids[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+ u32 vp_indexes[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+
+ u8 reserved2[4];
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_pair_scheduler_message) ==
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+/* Input and output structures for HVCALL_DISPATCH_VP */
+#define HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND 0x1
+#define HV_DISPATCH_VP_FLAG_ENABLE_CALLER_INTERRUPTS 0x2
+#define HV_DISPATCH_VP_FLAG_SET_CALLER_SPEC_CTRL 0x4
+#define HV_DISPATCH_VP_FLAG_SKIP_VP_SPEC_FLUSH 0x8
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_SPEC_FLUSH 0x10
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_USER_SPEC_FLUSH 0x20
+#define HV_DISPATCH_VP_FLAG_SCAN_INTERRUPT_INJECTION 0x40
+
+struct hv_input_dispatch_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u32 flags;
+	u64 time_slice; /* in 100ns units */
+ u64 spec_ctrl;
+} __packed;
+
+struct hv_output_dispatch_vp {
+ u32 dispatch_state; /* enum hv_vp_dispatch_state */
+ u32 dispatch_event; /* enum hv_vp_dispatch_event */
+} __packed;
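+
+/*
+ * Illustrative sketch (not part of the header itself): dispatching a
+ * VP for a 10ms slice. time_slice is in 100ns units, so 10ms is
+ * 100,000 units. The helper name is hypothetical.
+ */
+static inline void hv_example_fill_dispatch_vp(
+		struct hv_input_dispatch_vp *in, u64 partition_id,
+		u32 vp_index)
+{
+	memset(in, 0, sizeof(*in));
+	in->partition_id = partition_id;
+	in->vp_index = vp_index;
+	in->flags = HV_DISPATCH_VP_FLAG_ENABLE_CALLER_INTERRUPTS;
+	in->time_slice = 10 * NSEC_PER_MSEC / 100;	/* 10ms */
+}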
+
+struct hv_input_modify_sparse_spa_page_host_access {
+ u32 host_access : 2;
+ u32 reserved : 30;
+ u32 flags;
+ u64 partition_id;
+ u64 spa_page_list[];
+} __packed;
+
+/* hv_input_modify_sparse_spa_page_host_access flags */
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE 0x1
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED 0x2
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE 0x4
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_HUGE_PAGE 0x8
+
+#endif /* _HV_HVHDK_H */
diff --git a/include/hyperv/hvhdk_mini.h b/include/hyperv/hvhdk_mini.h
new file mode 100644
index 000000000000..41a29bf8ec14
--- /dev/null
+++ b/include/hyperv/hvhdk_mini.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVHDK_MINI_H
+#define _HV_HVHDK_MINI_H
+
+#include "hvgdk_mini.h"
+
+/*
+ * Doorbell connection_info flags.
+ */
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_MASK 0x00000007
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY 0x00000000
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE 0x00000001
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD 0x00000002
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD 0x00000003
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD 0x00000004
+#define HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE 0x80000000
+
+/* Each generic set contains 64 elements */
+#define HV_GENERIC_SET_SHIFT (6)
+#define HV_GENERIC_SET_MASK (63)
+
+enum hv_generic_set_format {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+#define HV_GENERIC_SET_FORMAT hv_generic_set_format
+
+enum hv_scheduler_type {
+ HV_SCHEDULER_TYPE_LP = 1, /* Classic scheduler w/o SMT */
+ HV_SCHEDULER_TYPE_LP_SMT = 2, /* Classic scheduler w/ SMT */
+ HV_SCHEDULER_TYPE_CORE_SMT = 3, /* Core scheduler */
+ HV_SCHEDULER_TYPE_ROOT = 4, /* Root / integrated scheduler */
+ HV_SCHEDULER_TYPE_MAX
+};
+
+/* HV_STATS_AREA_TYPE */
+enum hv_stats_area_type {
+ HV_STATS_AREA_SELF = 0,
+ HV_STATS_AREA_PARENT = 1,
+ HV_STATS_AREA_INTERNAL = 2,
+ HV_STATS_AREA_COUNT
+};
+
+enum hv_stats_object_type {
+ HV_STATS_OBJECT_HYPERVISOR = 0x00000001,
+ HV_STATS_OBJECT_LOGICAL_PROCESSOR = 0x00000002,
+ HV_STATS_OBJECT_PARTITION = 0x00010001,
+ HV_STATS_OBJECT_VP = 0x00010002
+};
+
+union hv_stats_object_identity {
+ /* hv_stats_hypervisor */
+ struct {
+ u8 reserved[15];
+ u8 stats_area_type;
+ } __packed hv;
+
+ /* hv_stats_logical_processor */
+ struct {
+ u32 lp_index;
+ u8 reserved[11];
+ u8 stats_area_type;
+ } __packed lp;
+
+ /* hv_stats_partition */
+ struct {
+ u64 partition_id;
+ u8 reserved[7];
+ u8 stats_area_type;
+ } __packed partition;
+
+ /* hv_stats_vp */
+ struct {
+ u64 partition_id;
+ u32 vp_index;
+ u16 flags;
+ u8 reserved;
+ u8 stats_area_type;
+ } __packed vp;
+};
+
+enum hv_partition_property_code {
+ /* Privilege properties */
+ HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS = 0x00010000,
+ HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES = 0x00010001,
+
+ /* Resource properties */
+ HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING = 0x00050005,
+ HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION = 0x00050017,
+
+ /* Compatibility properties */
+ HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES = 0x00060002,
+ HV_PARTITION_PROPERTY_XSAVE_STATES = 0x00060007,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008,
+ HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009,
+
+ /* Extended properties with larger property values */
+ HV_PARTITION_PROPERTY_VMM_CAPABILITIES = 0x00090007,
+};
+
+#define HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT 1
+#define HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT 59
+
+struct hv_partition_property_vmm_capabilities {
+ u16 bank_count;
+ u16 reserved[3];
+ union {
+ u64 as_uint64[HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT];
+ struct {
+ u64 map_gpa_preserve_adjustable: 1;
+ u64 vmm_can_provide_overlay_gpfn: 1;
+ u64 vp_affinity_property: 1;
+#if IS_ENABLED(CONFIG_ARM64)
+ u64 vmm_can_provide_gic_overlay_locations: 1;
+#else
+ u64 reservedbit3: 1;
+#endif
+ u64 assignable_synthetic_proc_features: 1;
+ u64 reserved0: HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT;
+ } __packed;
+ };
+} __packed;
+
+enum hv_snp_status {
+ HV_SNP_STATUS_NONE = 0,
+ HV_SNP_STATUS_AVAILABLE = 1,
+ HV_SNP_STATUS_INCOMPATIBLE = 2,
+ HV_SNP_STATUS_PSP_UNAVAILABLE = 3,
+ HV_SNP_STATUS_PSP_INIT_FAILED = 4,
+ HV_SNP_STATUS_PSP_BAD_FW_VERSION = 5,
+ HV_SNP_STATUS_BAD_CONFIGURATION = 6,
+ HV_SNP_STATUS_PSP_FW_UPDATE_IN_PROGRESS = 7,
+ HV_SNP_STATUS_PSP_RB_INIT_FAILED = 8,
+ HV_SNP_STATUS_PSP_PLATFORM_STATUS_FAILED = 9,
+ HV_SNP_STATUS_PSP_INIT_LATE_FAILED = 10,
+};
+
+enum hv_system_property {
+ /* Add more values when needed */
+ HV_SYSTEM_PROPERTY_SLEEP_STATE = 3,
+ HV_SYSTEM_PROPERTY_SCHEDULER_TYPE = 15,
+ HV_DYNAMIC_PROCESSOR_FEATURE_PROPERTY = 21,
+ HV_SYSTEM_PROPERTY_CRASHDUMPAREA = 47,
+};
+
+#define HV_PFN_RANGE_PGBITS 24 /* HV_SPA_PAGE_RANGE_ADDITIONAL_PAGES_BITS */
+union hv_pfn_range { /* HV_SPA_PAGE_RANGE */
+ u64 as_uint64;
+ struct {
+ /* 39:0: base pfn. 63:40: additional pages */
+ u64 base_pfn : 64 - HV_PFN_RANGE_PGBITS;
+ u64 add_pfns : HV_PFN_RANGE_PGBITS;
+ } __packed;
+};
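+
+/*
+ * Illustrative sketch (not part of the header itself): packing an
+ * n-page range (n >= 1) starting at base_pfn. Treating add_pfns as
+ * "pages beyond the base page" follows the field comment above and is
+ * an assumption here; the helper name is hypothetical.
+ */
+static inline u64 hv_example_pack_pfn_range(u64 base_pfn, u64 n_pages)
+{
+	union hv_pfn_range r = {
+		.base_pfn = base_pfn,
+		.add_pfns = n_pages - 1,
+	};
+
+	return r.as_uint64;
+}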
+
+enum hv_sleep_state {
+ HV_SLEEP_STATE_S1 = 1,
+ HV_SLEEP_STATE_S2 = 2,
+ HV_SLEEP_STATE_S3 = 3,
+ HV_SLEEP_STATE_S4 = 4,
+ HV_SLEEP_STATE_S5 = 5,
+ /*
+	 * After the hypervisor has received this, any follow-up sleep
+	 * state registration requests will be rejected.
+ */
+ HV_SLEEP_STATE_LOCK = 6
+};
+
+enum hv_dynamic_processor_feature_property {
+ /* Add more values when needed */
+ HV_X64_DYNAMIC_PROCESSOR_FEATURE_MAX_ENCRYPTED_PARTITIONS = 13,
+ HV_X64_DYNAMIC_PROCESSOR_FEATURE_SNP_STATUS = 16,
+};
+
+struct hv_input_get_system_property {
+ u32 property_id; /* enum hv_system_property */
+ union {
+ u32 as_uint32;
+#if IS_ENABLED(CONFIG_X86)
+ /* enum hv_dynamic_processor_feature_property */
+ u32 hv_processor_feature;
+#endif
+ /* More fields to be filled in when needed */
+ };
+} __packed;
+
+struct hv_output_get_system_property {
+ union {
+ u32 scheduler_type; /* enum hv_scheduler_type */
+#if IS_ENABLED(CONFIG_X86)
+ u64 hv_processor_feature_value;
+#endif
+ union hv_pfn_range hv_cda_info; /* CrashdumpAreaAddress */
+ u64 hv_tramp_pa; /* CrashdumpTrampolineAddress */
+ };
+} __packed;
+
+struct hv_sleep_state_info {
+ u32 sleep_state; /* enum hv_sleep_state */
+ u8 pm1a_slp_typ;
+ u8 pm1b_slp_typ;
+} __packed;
+
+struct hv_input_set_system_property {
+ u32 property_id; /* enum hv_system_property */
+ u32 reserved;
+ union {
+ /* More fields to be filled in when needed */
+ struct hv_sleep_state_info set_sleep_state_info;
+
+ /*
+ * Add a reserved field to ensure the union is 8-byte aligned as
+ * existing members may not be. This is a temporary measure
+ * until all remaining members are added.
+ */
+ u64 reserved0[8];
+ };
+} __packed;
+
+struct hv_input_enter_sleep_state { /* HV_INPUT_ENTER_SLEEP_STATE */
+ u32 sleep_state; /* enum hv_sleep_state */
+} __packed;
+
+struct hv_input_map_stats_page {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+} __packed;
+
+struct hv_input_map_stats_page2 {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+ u64 map_location;
+} __packed;
+
+struct hv_output_map_stats_page {
+ u64 map_location;
+} __packed;
+
+struct hv_input_unmap_stats_page {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+struct hv_proximity_domain_info {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory { /* HV_INPUT_DEPOSIT_MEMORY */
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_input_withdraw_memory {
+ u64 partition_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_withdraw_memory {
+ DECLARE_FLEX_ARRAY(u64, gpa_page_list);
+} __packed;
+
+/* HV Map GPA (Guest Physical Address) Flags */
+#define HV_MAP_GPA_PERMISSIONS_NONE 0x0
+#define HV_MAP_GPA_READABLE 0x1
+#define HV_MAP_GPA_WRITABLE 0x2
+#define HV_MAP_GPA_KERNEL_EXECUTABLE 0x4
+#define HV_MAP_GPA_USER_EXECUTABLE 0x8
+#define HV_MAP_GPA_EXECUTABLE 0xC
+#define HV_MAP_GPA_PERMISSIONS_MASK 0xF
+#define HV_MAP_GPA_ADJUSTABLE 0x8000
+#define HV_MAP_GPA_NO_ACCESS 0x10000
+#define HV_MAP_GPA_NOT_CACHED 0x200000
+#define HV_MAP_GPA_LARGE_PAGE 0x80000000
+
+struct hv_input_map_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 map_flags;
+ u32 padding;
+ u64 source_gpa_page_list[];
+} __packed;
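+
+/*
+ * For example, a fully accessible kernel mapping would pass
+ * map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
+ * HV_MAP_GPA_KERNEL_EXECUTABLE (i.e. 0x7).
+ */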
+
+union hv_gpa_page_access_state_flags {
+ struct {
+ u64 clear_accessed : 1;
+ u64 set_accessed : 1;
+ u64 clear_dirty : 1;
+ u64 set_dirty : 1;
+ u64 reserved : 60;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_input_get_gpa_pages_access_state {
+ u64 partition_id;
+ union hv_gpa_page_access_state_flags flags;
+ u64 hv_gpa_page_number;
+} __packed;
+
+union hv_gpa_page_access_state {
+ struct {
+ u8 accessed : 1;
+ u8 dirty : 1;
+ u8 reserved: 6;
+ };
+ u8 as_uint8;
+} __packed;
+
+enum hv_crashdump_action {
+ HV_CRASHDUMP_NONE = 0,
+ HV_CRASHDUMP_SUSPEND_ALL_VPS,
+ HV_CRASHDUMP_PREPARE_FOR_STATE_SAVE,
+ HV_CRASHDUMP_STATE_SAVED,
+ HV_CRASHDUMP_ENTRY,
+};
+
+struct hv_partition_event_root_crashdump_input {
+ u32 crashdump_action; /* enum hv_crashdump_action */
+} __packed;
+
+struct hv_input_disable_hyp_ex { /* HV_X64_INPUT_DISABLE_HYPERVISOR_EX */
+ u64 rip;
+ u64 arg;
+} __packed;
+
+struct hv_crashdump_area { /* HV_CRASHDUMP_AREA */
+ u32 version;
+ union {
+ u32 flags_as_uint32;
+ struct {
+ u32 cda_valid : 1;
+ u32 cda_unused : 31;
+ } __packed;
+ };
+ /* more unused fields */
+} __packed;
+
+union hv_partition_event_input {
+ struct hv_partition_event_root_crashdump_input crashdump_input;
+};
+
+enum hv_partition_event {
+ HV_PARTITION_EVENT_ROOT_CRASHDUMP = 2,
+};
+
+struct hv_input_notify_partition_event {
+ u32 event; /* enum hv_partition_event */
+ union hv_partition_event_input input;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+struct hv_input_add_logical_processor {
+ u32 lp_index;
+ u32 apic_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_add_logical_processor {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum { /* HV_SUBNODE_TYPE */
+ HV_SUBNODE_ANY = 0,
+ HV_SUBNODE_SOCKET,
+ HV_SUBNODE_CLUSTER,
+ HV_SUBNODE_L3,
+ HV_SUBNODE_COUNT,
+ HV_SUBNODE_INVALID = -1
+};
+
+struct hv_create_vp { /* HV_INPUT_CREATE_VP */
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+/* HV_INTERRUPT_TRIGGER_MODE */
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+/* HV_DEVICE_INTERRUPT_DESCRIPTOR */
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+/* HV_INPUT_MAP_DEVICE_INTERRUPT */
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u32 flags;
+ u32 base_irt_idx;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+/* HV_OUTPUT_MAP_DEVICE_INTERRUPT */
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+ u64 ext_status_deprecated[5];
+} __packed;
+
+/* HV_INPUT_UNMAP_DEVICE_INTERRUPT */
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+ u32 flags;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+struct hv_send_ipi_ex { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI_EX */
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+typedef u16 hv_pci_rid; /* HV_PCI_RID */
+typedef u16 hv_pci_segment; /* HV_PCI_SEGMENT */
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf { /* HV_PCI_BDF */
+ u16 as_uint16;
+
+ struct {
+ u8 function : 3;
+ u8 device : 5;
+ u8 bus;
+ };
+} __packed;
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+enum hv_device_type { /* HV_DEVICE_TYPE */
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+union hv_device_id { /* HV_DEVICE_ID */
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0 : 62;
+ u64 device_type : 2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id : 62;
+ u64 device_type : 2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits : 2;
+ u16 source_shadow : 1;
+
+ u16 rsvdz0 : 11;
+ u16 device_type : 2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3 : 14;
+ u16 device_type : 2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count : 30;
+ u32 device_type : 2;
+ } acpi;
+} __packed;
+
+#endif /* _HV_HVHDK_MINI_H */
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index 69a13e1e5b2e..1b91c8f98688 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -49,7 +49,7 @@ enum asymmetric_payload_bits {
*/
struct asymmetric_key_id {
unsigned short len;
- unsigned char data[];
+ unsigned char data[] __counted_by(len);
};
struct asymmetric_key_ids {
diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h
index 218ca22fb056..1b89088a2837 100644
--- a/include/keys/dns_resolver-type.h
+++ b/include/keys/dns_resolver-type.h
@@ -12,8 +12,4 @@
extern struct key_type key_type_dns_resolver;
-extern int request_dns_resolver_key(const char *description,
- const char *callout_info,
- char **data);
-
#endif /* _KEYS_DNS_RESOLVER_TYPE_H */
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 333c0f49a9cd..0ddbe197a261 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -9,6 +9,7 @@
#define _KEYS_RXRPC_TYPE_H
#include <linux/key.h>
+#include <crypto/krb5.h>
/*
* key type for AF_RXRPC keys
@@ -32,6 +33,21 @@ struct rxkad_key {
};
/*
+ * RxRPC key for YFS-RxGK (type-6 security)
+ */
+struct rxgk_key {
+ s64 begintime; /* Time at which the ticket starts */
+ s64 endtime; /* Time at which the ticket ends */
+ u64 lifetime; /* Maximum lifespan of a connection (seconds) */
+ u64 bytelife; /* Maximum number of bytes on a connection */
+ unsigned int enctype; /* Encoding type */
+ s8 level; /* Negotiated security RXRPC_SECURITY_PLAIN/AUTH/ENCRYPT */
+ struct krb5_buffer key; /* Master key, K0 */
+ struct krb5_buffer ticket; /* Ticket to be passed to server */
+ u8 _key[]; /* Key storage */
+};
+
+/*
* list of tokens attached to an rxrpc key
*/
struct rxrpc_key_token {
@@ -40,6 +56,7 @@ struct rxrpc_key_token {
struct rxrpc_key_token *next; /* the next token in the list */
union {
struct rxkad_key *kad;
+ struct rxgk_key *rxgk;
};
};
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 8365adf842ef..a6c2897bcc63 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -73,7 +73,6 @@ static inline void __init set_machine_trusted_keys(struct key *keyring)
}
#endif
-extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
enum blacklist_hash_type hash_type);
@@ -93,6 +92,7 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
}
#endif
+struct pkcs7_message;
#ifdef CONFIG_SYSTEM_REVOCATION_LIST
extern int add_key_to_revocation_list(const char *data, size_t size);
extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h
index a088b33fd0e3..0fadc6a4f166 100644
--- a/include/keys/trusted_tpm.h
+++ b/include/keys/trusted_tpm.h
@@ -5,41 +5,8 @@
#include <keys/trusted-type.h>
#include <linux/tpm_command.h>
-/* implementation specific TPM constants */
-#define TPM_SIZE_OFFSET 2
-#define TPM_RETURN_OFFSET 6
-#define TPM_DATA_OFFSET 10
-
-#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
-#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
-#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-
extern struct trusted_key_ops trusted_key_tpm_ops;
-struct osapsess {
- uint32_t handle;
- unsigned char secret[SHA1_DIGEST_SIZE];
- unsigned char enonce[TPM_NONCE_SIZE];
-};
-
-/* discrete values, but have to store in uint16_t for TPM use */
-enum {
- SEAL_keytype = 1,
- SRK_keytype = 4
-};
-
-int TSS_authhmac(unsigned char *digest, const unsigned char *key,
- unsigned int keylen, unsigned char *h1,
- unsigned char *h2, unsigned int h3, ...);
-int TSS_checkhmac1(unsigned char *buffer,
- const uint32_t command,
- const unsigned char *ononce,
- const unsigned char *key,
- unsigned int keylen, ...);
-
-int trusted_tpm_send(unsigned char *cmd, size_t buflen);
-int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
-
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
@@ -47,50 +14,4 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-#define TPM_DEBUG 0
-
-#if TPM_DEBUG
-static inline void dump_options(struct trusted_key_options *o)
-{
- pr_info("sealing key type %d\n", o->keytype);
- pr_info("sealing key handle %0X\n", o->keyhandle);
- pr_info("pcrlock %d\n", o->pcrlock);
- pr_info("pcrinfo %d\n", o->pcrinfo_len);
- print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
- 16, 1, o->pcrinfo, o->pcrinfo_len, 0);
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
- print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
- 16, 1, &s->handle, 4, 0);
- pr_info("secret:\n");
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
- 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
- pr_info("trusted-key: enonce:\n");
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
- 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
- int len;
-
- pr_info("\ntpm buffer\n");
- len = LOAD32(buf, TPM_SIZE_OFFSET);
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
-}
-#else
-static inline void dump_options(struct trusted_key_options *o)
-{
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
-}
-#endif
#endif
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index 24c2b9fa61e8..bb879389f11d 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -60,7 +60,7 @@ void kunit_assert_prologue(const struct kunit_loc *loc,
* struct kunit_fail_assert - Represents a plain fail expectation/assertion.
* @assert: The parent of this type.
*
- * Represents a simple KUNIT_FAIL/KUNIT_ASSERT_FAILURE that always fails.
+ * Represents a simple KUNIT_FAIL/KUNIT_FAIL_AND_ABORT that always fails.
*/
struct kunit_fail_assert {
struct kunit_assert assert;
@@ -218,4 +218,15 @@ void kunit_mem_assert_format(const struct kunit_assert *assert,
const struct va_format *message,
struct string_stream *stream);
+#if IS_ENABLED(CONFIG_KUNIT)
+void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream);
+bool is_literal(const char *text, long long value);
+bool is_str_literal(const char *text, const char *value);
+void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len);
+#endif
+
#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/clk.h b/include/kunit/clk.h
new file mode 100644
index 000000000000..f226044cc78d
--- /dev/null
+++ b/include/kunit/clk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_KUNIT_H
+#define _CLK_KUNIT_H
+
+struct clk;
+struct clk_hw;
+struct device;
+struct device_node;
+struct of_phandle_args;
+struct kunit;
+
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id);
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index);
+
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id);
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id);
+
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk);
+
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw);
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
+ struct clk_hw *hw);
+
+int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
+ void *data);
+
+#endif
diff --git a/include/kunit/of.h b/include/kunit/of.h
new file mode 100644
index 000000000000..75a760a4e2a5
--- /dev/null
+++ b/include/kunit/of.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_OF_H
+#define _KUNIT_OF_H
+
+#include <kunit/test.h>
+
+struct device_node;
+
+#ifdef CONFIG_OF
+
+void of_node_put_kunit(struct kunit *test, struct device_node *node);
+
+#else
+
+static inline
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ kunit_skip(test, "requires CONFIG_OF");
+}
+
+#endif /* !CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id);
+#else
+
+static inline int
+of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ kunit_skip(test, "requires CONFIG_OF and CONFIG_OF_OVERLAY and CONFIG_OF_EARLY_FLATTREE for root node");
+ return -EINVAL;
+}
+
+#endif
+
+/**
+ * __of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() variant
+ * @test: test context
+ * @overlay_begin: start address of overlay to apply
+ * @overlay_end: end address of overlay to apply
+ *
+ * This is mostly internal API. See of_overlay_apply_kunit() for the wrapper
+ * that makes this easier to use.
+ *
+ * Similar to of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static inline int __of_overlay_apply_kunit(struct kunit *test,
+ u8 *overlay_begin,
+ const u8 *overlay_end)
+{
+ int unused;
+
+ return of_overlay_fdt_apply_kunit(test, overlay_begin,
+ overlay_end - overlay_begin,
+ &unused);
+}
+
+#define of_overlay_begin(overlay_name) __dtbo_##overlay_name##_begin
+#define of_overlay_end(overlay_name) __dtbo_##overlay_name##_end
+
+#define OF_OVERLAY_DECLARE(overlay_name) \
+ extern uint8_t of_overlay_begin(overlay_name)[]; \
+ extern uint8_t of_overlay_end(overlay_name)[] \
+
+/**
+ * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays
+ * @test: test context
+ * @overlay_name: name of overlay to apply
+ *
+ * This macro is used to apply a device tree overlay built with the
+ * cmd_dt_S_dtbo rule in scripts/Makefile.lib that has been compiled into the
+ * kernel image or KUnit test module. The overlay is automatically removed when
+ * the test is finished.
+ *
+ * Unit tests that need device tree nodes should compile an overlay file with
+ * @overlay_name\.dtbo.o in their Makefile along with their unit test and then
+ * load the overlay during their test. The @overlay_name matches the filename
+ * of the overlay without the dtbo filename extension. If CONFIG_OF_OVERLAY is
+ * not enabled, the @test will be skipped.
+ *
+ * In the Makefile
+ *
+ * .. code-block:: none
+ *
+ * obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay_test.o kunit_overlay_test.dtbo.o
+ *
+ * In the test
+ *
+ * .. code-block:: c
+ *
+ * static void of_overlay_kunit_of_overlay_apply(struct kunit *test)
+ * {
+ * struct device_node *np;
+ *
+ * KUNIT_ASSERT_EQ(test, 0,
+ * of_overlay_apply_kunit(test, kunit_overlay_test));
+ *
+ * np = of_find_node_by_name(NULL, "test-kunit");
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ * of_node_put(np);
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+#define of_overlay_apply_kunit(test, overlay_name) \
+({ \
+ OF_OVERLAY_DECLARE(overlay_name); \
+ \
+ __of_overlay_apply_kunit((test), \
+ of_overlay_begin(overlay_name), \
+ of_overlay_end(overlay_name)); \
+})
+
+#endif
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
new file mode 100644
index 000000000000..f8236a8536f7
--- /dev/null
+++ b/include/kunit/platform_device.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_PLATFORM_DRIVER_H
+#define _KUNIT_PLATFORM_DRIVER_H
+
+struct completion;
+struct kunit;
+struct platform_device;
+struct platform_driver;
+
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id);
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev);
+
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x);
+
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv);
+
+#endif
diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h
new file mode 100644
index 000000000000..108e96433ea4
--- /dev/null
+++ b/include/kunit/run-in-irq-context.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper function for testing code in interrupt contexts
+ *
+ * Copyright 2025 Google LLC
+ */
+#ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
+#define _KUNIT_RUN_IN_IRQ_CONTEXT_H
+
+#include <kunit/test.h>
+#include <linux/timekeeping.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
+
+struct kunit_irq_test_state {
+ bool (*func)(void *test_specific_state);
+ void *test_specific_state;
+ bool task_func_reported_failure;
+ bool hardirq_func_reported_failure;
+ bool softirq_func_reported_failure;
+ unsigned long hardirq_func_calls;
+ unsigned long softirq_func_calls;
+ struct hrtimer timer;
+ struct work_struct bh_work;
+};
+
+static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
+{
+ struct kunit_irq_test_state *state =
+ container_of(timer, typeof(*state), timer);
+
+ WARN_ON_ONCE(!in_hardirq());
+ state->hardirq_func_calls++;
+
+ if (!state->func(state->test_specific_state))
+ state->hardirq_func_reported_failure = true;
+
+ hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+ queue_work(system_bh_wq, &state->bh_work);
+ return HRTIMER_RESTART;
+}
+
+static void kunit_irq_test_bh_work_func(struct work_struct *work)
+{
+ struct kunit_irq_test_state *state =
+ container_of(work, typeof(*state), bh_work);
+
+ WARN_ON_ONCE(!in_serving_softirq());
+ state->softirq_func_calls++;
+
+ if (!state->func(state->test_specific_state))
+ state->softirq_func_reported_failure = true;
+}
+
+/*
+ * Helper function which repeatedly runs the given @func in task, softirq, and
+ * hardirq context concurrently, and reports a failure to KUnit if any
+ * invocation of @func in any context returns false. @func is passed
+ * @test_specific_state as its argument. At most 3 invocations of @func will
+ * run concurrently: one in each of task, softirq, and hardirq context.
+ *
+ * The main purpose of this interrupt context testing is to validate fallback
+ * code paths that run in contexts where the normal code path cannot be used,
+ * typically due to the FPU or vector registers already being in-use in kernel
+ * mode. These code paths aren't covered when the test code is executed only by
+ * the KUnit test runner thread in task context. The reason for the concurrency
+ * is because merely using hardirq context is not sufficient to reach a fallback
+ * code path on some architectures; the hardirq actually has to occur while the
+ * FPU or vector unit was already in-use in kernel mode.
+ *
+ * Another purpose of this testing is to detect issues with the architecture's
+ * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
+ * especially in softirq context when the softirq may have interrupted a task
+ * already using kernel-mode FPU or vector (if the arch didn't prevent that).
+ * Crypto functions are often executed in softirqs, so this is important.
+ */
+static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
+ int max_iterations,
+ void *test_specific_state)
+{
+ struct kunit_irq_test_state state = {
+ .func = func,
+ .test_specific_state = test_specific_state,
+ };
+ unsigned long end_jiffies;
+
+ /*
+ * Set up a hrtimer (the way we access hardirq context) and a work
+ * struct for the BH workqueue (the way we access softirq context).
+ */
+ hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);
+
+ /* Run for up to max_iterations or 1 second, whichever comes first. */
+ end_jiffies = jiffies + HZ;
+ hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
+ HRTIMER_MODE_REL_HARD);
+ for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
+ i++) {
+ if (!func(test_specific_state))
+ state.task_func_reported_failure = true;
+ }
+
+ /* Cancel the timer and work. */
+ hrtimer_cancel(&state.timer);
+ flush_work(&state.bh_work);
+
+ /* Sanity check: the timer and BH functions should have been run. */
+ KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
+ "Timer function was not called");
+ KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
+ "BH work function was not called");
+
+	/* Check for failures reported from any context. */
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.task_func_reported_failure,
+		"Failure reported from task context");
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.hardirq_func_reported_failure,
+		"Failure reported from hardirq context");
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.softirq_func_reported_failure,
+		"Failure reported from softirq context");
+}
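+
+/*
+ * Illustrative usage (not part of the header itself; all names below
+ * are hypothetical):
+ *
+ *	static bool my_checks(void *unused)
+ *	{
+ *		return my_fallback_path_ok();
+ *	}
+ *
+ *	static void my_irq_context_test(struct kunit *test)
+ *	{
+ *		kunit_run_irq_test(test, my_checks, 10000, NULL);
+ *	}
+ */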
+
+#endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */
diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
index 44d12370939a..07784694357c 100644
--- a/include/kunit/skbuff.h
+++ b/include/kunit/skbuff.h
@@ -20,8 +20,9 @@ static void kunit_action_kfree_skb(void *p)
* kunit_zalloc_skb() - Allocate and initialize a resource managed skb.
* @test: The test case to which the skb belongs
* @len: size to allocate
+ * @gfp: allocation flags
*
- * Allocate a new struct sk_buff with GFP_KERNEL, zero fill the give length
+ * Allocate a new struct sk_buff with gfp flags, zero fill the given length
* and add it as a resource to the kunit test for automatic cleanup.
*
* Returns: newly allocated SKB, or %NULL on error
@@ -29,7 +30,7 @@ static void kunit_action_kfree_skb(void *p)
static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
gfp_t gfp)
{
- struct sk_buff *res = alloc_skb(len, GFP_KERNEL);
+ struct sk_buff *res = alloc_skb(len, gfp);
if (!res || skb_pad(res, len))
return NULL;
diff --git a/include/kunit/test.h b/include/kunit/test.h
index e32b4cb7afa2..5ec5182b5e57 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <asm/rwonce.h>
+#include <asm/sections.h>
/* Static key: true if any KUnit tests are currently running */
DECLARE_STATIC_KEY_FALSE(kunit_running);
@@ -66,7 +67,7 @@ enum kunit_status {
/*
* Speed Attribute is stored as an enum and separated into categories of
- * speed: very_slowm, slow, and normal. These speeds are relative to
+ * speed: very_slow, slow, and normal. These speeds are relative to
* other KUnit tests.
*
* Note: unset speed attribute acts as default of KUNIT_SPEED_NORMAL.
@@ -91,6 +92,8 @@ struct kunit_attributes {
* @name: the name of the test case.
* @generate_params: the generator function for parameterized tests.
* @attr: the attributes associated with the test
+ * @param_init: The init function to run before a parameterized test.
+ * @param_exit: The exit function to run after a parameterized test.
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -125,8 +128,11 @@ struct kunit_attributes {
struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
- const void* (*generate_params)(const void *prev, char *desc);
+ const void* (*generate_params)(struct kunit *test,
+ const void *prev, char *desc);
struct kunit_attributes attr;
+ int (*param_init)(struct kunit *test);
+ void (*param_exit)(struct kunit *test);
/* private: internal use only. */
enum kunit_status status;
@@ -218,6 +224,31 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
.attr = attributes, .module_name = KBUILD_MODNAME}
/**
+ * KUNIT_CASE_PARAM_WITH_INIT - Define a parameterized KUnit test case with custom
+ * param_init() and param_exit() functions.
+ * @test_name: The function implementing the test case.
+ * @gen_params: The function to generate parameters for the test case.
+ * @init: A reference to the param_init() function to run before a parameterized test.
+ * @exit: A reference to the param_exit() function to run after a parameterized test.
+ *
+ * Provides the option to register param_init() and param_exit() functions.
+ * param_init/exit will be passed the parameterized test context and run once
+ * before and once after the parameterized test. The init function can be used
+ * to add resources shared between parameter runs, to pass parameter arrays,
+ * and to perform any other setup logic. The exit function can be used to clean
+ * up resources not managed by the parameterized test and to perform any other
+ * teardown logic.
+ *
+ * Note: If you are registering a parameter array in param_init() with
+ * kunit_register_param_array() then you need to pass kunit_array_gen_params()
+ * to this as the generator function.
+ */
+#define KUNIT_CASE_PARAM_WITH_INIT(test_name, gen_params, init, exit) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .param_init = init, .param_exit = exit, \
+ .module_name = KBUILD_MODNAME}
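+
+/*
+ * Example pairing (test and callback names hypothetical), using the
+ * array generator required when the parameter array is registered in
+ * param_init():
+ *
+ *	KUNIT_CASE_PARAM_WITH_INIT(my_test, kunit_array_gen_params,
+ *				   my_param_init, my_param_exit)
+ */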
+
+/**
* struct kunit_suite - describes a related collection of &struct kunit_case
*
* @name: the name of the test. Purely informational.
@@ -262,19 +293,39 @@ struct kunit_suite_set {
struct kunit_suite * const *end;
};
+/* Stores the pointer to the parameter array and its metadata. */
+struct kunit_params {
+ /*
+ * Reference to the parameter array for a parameterized test. This
+ * is NULL if a parameter array wasn't directly passed to the
+ * parameterized test context struct kunit via kunit_register_params_array().
+ */
+ const void *params;
+ /* Reference to a function that gets the description of a parameter. */
+ void (*get_description)(struct kunit *test, const void *param, char *desc);
+ size_t num_params;
+ size_t elem_size;
+};
+
/**
* struct kunit - represents a running instance of a test.
*
* @priv: for user to store arbitrary data. Commonly used to pass data
* created in the init function (see &struct kunit_suite).
+ * @parent: reference to the parent context of type struct kunit that can
+ * be used for storing shared resources.
+ * @params_array: for storing the parameter array.
*
* Used to store information about the current context under which the test
* is running. Most of this data is private and should only be accessed
- * indirectly via public functions; the one exception is @priv which can be
- * used by the test writer to store arbitrary data.
+ * indirectly via public functions; the exceptions are @priv, @parent, and
+ * @params_array, which can be used by the test writer to store arbitrary data,
+ * to access the parent context, and to store the parameter array, respectively.
*/
struct kunit {
void *priv;
+ struct kunit *parent;
+ struct kunit_params params_array;
/* private: internal use only. */
const char *name; /* Read only after initialization! */
@@ -311,6 +362,7 @@ static inline void kunit_set_failure(struct kunit *test)
}
bool kunit_enabled(void);
+bool kunit_autorun(void);
const char *kunit_action(void);
const char *kunit_filter_glob(void);
char *kunit_filter(void);
@@ -333,7 +385,8 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set,
int *err);
void kunit_free_suite_set(struct kunit_suite_set suite_set);
-int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites,
+ bool run_tests);
void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
@@ -343,6 +396,8 @@ void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
struct kunit_suite_set suite_set);
+const void *kunit_array_gen_params(struct kunit *test, const void *prev, char *desc);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
@@ -480,14 +535,91 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp
return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
+
+/**
+ * kunit_kfree_const() - conditionally free test managed memory
+ * @test: The test context object.
+ * @x: pointer to the memory
+ *
+ * Calls kunit_kfree() only if @x is not in .rodata section.
+ * See kunit_kstrdup_const() for more information.
+ */
+void kunit_kfree_const(struct kunit *test, const void *x);
+
+/**
+ * kunit_kstrdup() - Duplicates a string into a test managed allocation.
+ *
+ * @test: The test context object.
+ * @str: The NULL-terminated string to duplicate.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kstrdup() and kunit_kmalloc_array() for more information.
+ */
+static inline char *kunit_kstrdup(struct kunit *test, const char *str, gfp_t gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str) + 1;
+ buf = kunit_kmalloc(test, len, gfp);
+ if (buf)
+ memcpy(buf, str, len);
+ return buf;
+}
+
+/**
+ * kunit_kstrdup_const() - Conditionally duplicates a string into a test managed allocation.
+ *
+ * @test: The test context object.
+ * @str: The NULL-terminated string to duplicate.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * Calls kunit_kstrdup() only if @str is not in the rodata section. Must be freed with
+ * kunit_kfree_const() -- not kunit_kfree().
+ * See kstrdup_const() and kunit_kmalloc_array() for more information.
+ */
+const char *kunit_kstrdup_const(struct kunit *test, const char *str, gfp_t gfp);
+
+/**
+ * kunit_attach_mm() - Create and attach a new mm if it doesn't already exist.
+ *
+ * Allocates a &struct mm_struct and attaches it to @current. In most cases,
+ * calling kunit_vm_mmap() is sufficient and kunit_attach_mm() need not be
+ * called directly; it is only necessary when the code under test accesses the
+ * mm before the mmap is executed (e.g., to perform additional initialization
+ * beforehand).
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int kunit_attach_mm(void);
+
+/**
+ * kunit_vm_mmap() - Allocate KUnit-tracked vm_mmap() area
+ * @test: The test context object.
+ * @file: struct file pointer to map from, if any
+ * @addr: desired address, if any
+ * @len: how many bytes to allocate
+ * @prot: mmap PROT_* bits
+ * @flag: mmap flags
+ * @offset: offset into @file to start mapping from.
+ *
+ * See vm_mmap() for more information.
+ */
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flag,
+ unsigned long offset);
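
A rough sketch of intended use (hypothetical test body; the error check via
IS_ERR_VALUE() is an assumption, mirroring vm_mmap() semantics):

	static void example_mmap_test(struct kunit *test)
	{
		unsigned long addr;

		/* Anonymous KUnit-tracked mapping, released when the test ends. */
		addr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
				     PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, 0);
		KUNIT_ASSERT_FALSE(test, IS_ERR_VALUE(addr));
	}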
+
void kunit_cleanup(struct kunit *test);
void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
/**
- * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ * kunit_mark_skipped() - Marks @test as skipped
*
- * @test_or_suite: The test context object.
+ * @test: The test context object.
* @fmt: A printk() style format string.
*
* Marks the test as skipped. @fmt is given output as the test status
@@ -495,18 +627,18 @@ void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt,
*
* Test execution continues after kunit_mark_skipped() is called.
*/
-#define kunit_mark_skipped(test_or_suite, fmt, ...) \
+#define kunit_mark_skipped(test, fmt, ...) \
do { \
- WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
- scnprintf((test_or_suite)->status_comment, \
+ WRITE_ONCE((test)->status, KUNIT_SKIPPED); \
+ scnprintf((test)->status_comment, \
KUNIT_STATUS_COMMENT_SIZE, \
fmt, ##__VA_ARGS__); \
} while (0)
/**
- * kunit_skip() - Marks @test_or_suite as skipped
+ * kunit_skip() - Marks @test as skipped
*
- * @test_or_suite: The test context object.
+ * @test: The test context object.
* @fmt: A printk() style format string.
*
* Skips the test. @fmt is given output as the test status
@@ -514,10 +646,10 @@ void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt,
*
* Test execution is halted after kunit_skip() is called.
*/
-#define kunit_skip(test_or_suite, fmt, ...) \
+#define kunit_skip(test, fmt, ...) \
do { \
- kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
- kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
+ kunit_mark_skipped((test), fmt, ##__VA_ARGS__); \
+ kunit_try_catch_throw(&((test)->try_catch)); \
} while (0)
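
A typical use, sketched (the config gate is illustrative):

	if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
		kunit_skip(test, "CONFIG_EXAMPLE_FEATURE is not enabled");
	/* Not reached when skipped: kunit_skip() halts the test here. */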
/*
@@ -1211,7 +1343,18 @@ do { \
fmt, \
##__VA_ARGS__)
-#define KUNIT_ASSERT_FAILURE(test, fmt, ...) \
+/**
+ * KUNIT_FAIL_AND_ABORT() - Always causes a test to fail and abort when evaluated.
+ * @test: The test context object.
+ * @fmt: an informational message to be printed when the assertion is made.
+ * @...: string format arguments.
+ *
+ * The opposite of KUNIT_SUCCEED(): an assertion that always fails and,
+ * consequently, always causes the test case to fail and abort when evaluated.
+ * See KUNIT_ASSERT_TRUE() for more information.
+ */
+#define KUNIT_FAIL_AND_ABORT(test, fmt, ...) \
KUNIT_FAIL_ASSERTION(test, KUNIT_ASSERTION, fmt, ##__VA_ARGS__)
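
For instance, as a guard on a branch that should be unreachable (the state
names are made up):

	switch (state) {
	case EXAMPLE_STATE_READY:
		break;
	default:
		KUNIT_FAIL_AND_ABORT(test, "unexpected state %d", state);
	}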
/**
@@ -1438,12 +1581,12 @@ do { \
##__VA_ARGS__)
/**
- * KUNIT_ASSERT_STRNEQ() - Expects that strings @left and @right are not equal.
+ * KUNIT_ASSERT_STRNEQ() - An assertion that strings @left and @right are not equal.
* @test: The test context object.
* @left: an arbitrary expression that evaluates to a null terminated string.
* @right: an arbitrary expression that evaluates to a null terminated string.
*
- * Sets an expectation that the values that @left and @right evaluate to are
+ * Sets an assertion that the values that @left and @right evaluate to are
* not equal. This is semantically equivalent to
* KUNIT_ASSERT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_ASSERT_TRUE()
* for more information.
@@ -1459,6 +1602,60 @@ do { \
##__VA_ARGS__)
/**
+ * KUNIT_ASSERT_MEMEQ() - Asserts that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a pointer to at least
+ * @size bytes of memory.
+ * @right: An arbitrary expression that evaluates to a pointer to at least
+ * @size bytes of memory.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_MEMNEQ() - Asserts that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to a pointer to at least
+ * @size bytes of memory.
+ * @right: An arbitrary expression that evaluates to a pointer to at least
+ * @size bytes of memory.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMNEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
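
Illustrative usage inside a test body (fill_buffer() is a hypothetical helper
under test):

	static const u8 expected[] = { 0xde, 0xad, 0xbe, 0xef };
	u8 buf[sizeof(expected)];

	fill_buffer(buf, sizeof(buf));
	KUNIT_ASSERT_MEMEQ(test, buf, expected, sizeof(expected));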
+
+/**
 * KUNIT_ASSERT_NULL() - Asserts that the pointer @ptr is null.
* @test: The test context object.
* @ptr: an arbitrary pointer.
@@ -1529,9 +1726,12 @@ do { \
* Define function @name_gen_params which uses @array to generate parameters.
*/
#define KUNIT_ARRAY_PARAM(name, array, get_desc) \
- static const void *name##_gen_params(const void *prev, char *desc) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
{ \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
if (__next - (array) < ARRAY_SIZE((array))) { \
void (*__get_desc)(typeof(__next), char *) = get_desc; \
if (__get_desc) \
@@ -1550,9 +1750,12 @@ do { \
* Define function @name_gen_params which uses @array to generate parameters.
*/
#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
- static const void *name##_gen_params(const void *prev, char *desc) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
{ \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
if (__next - (array) < ARRAY_SIZE((array))) { \
strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
return __next; \
@@ -1560,6 +1763,33 @@ do { \
return NULL; \
}
+/**
+ * kunit_register_params_array() - Register parameter array for a KUnit test.
+ * @test: The KUnit test structure to which parameters will be added.
+ * @array: An array of test parameters.
+ * @param_count: Number of parameters.
+ * @get_desc: Function that generates a string description for a given parameter
+ * element.
+ *
+ * This macro initializes @test's parameter data, storing the parameter array,
+ * its count, the element size, and the parameter description function in
+ * `test->params_array`.
+ *
+ * Note: If using this macro in param_init(), kunit_array_gen_params() must
+ * then be passed as the parameter generator function to
+ * KUNIT_CASE_PARAM_WITH_INIT(). kunit_array_gen_params() is a KUnit-provided
+ * generator that uses the registered array to produce parameters; see the
+ * sketch below.
+ */
+#define kunit_register_params_array(test, array, param_count, get_desc) \
+ do { \
+ struct kunit *_test = (test); \
+ const typeof((array)[0]) * _params_ptr = &(array)[0]; \
+ _test->params_array.params = _params_ptr; \
+ _test->params_array.num_params = (param_count); \
+ _test->params_array.elem_size = sizeof(*_params_ptr); \
+ _test->params_array.get_description = (get_desc); \
+ } while (0)
+
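
A rough sketch of the param_init() flow described in the note above (the
parameter array, the names, and the exact KUNIT_CASE_PARAM_WITH_INIT() hook
signature are illustrative):

	static const struct example_param example_params[] = { /* ... */ };

	static int example_param_init(struct kunit *test)
	{
		kunit_register_params_array(test, example_params,
					    ARRAY_SIZE(example_params), NULL);
		return 0;
	}

	/* Then pass the KUnit-provided generator explicitly:
	 * KUNIT_CASE_PARAM_WITH_INIT(example_case, kunit_array_gen_params,
	 *			      example_param_init)
	 */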
// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
// include resource.h themselves if they need it.
#include <kunit/resource.h>
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
index 7c966a1adbd3..d4e1a5b98ed6 100644
--- a/include/kunit/try-catch.h
+++ b/include/kunit/try-catch.h
@@ -47,6 +47,7 @@ struct kunit_try_catch {
int try_result;
kunit_try_catch_func_t try;
kunit_try_catch_func_t catch;
+ unsigned long timeout;
void *context;
};
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
index 0dfe35feeec6..7c34c8ffcf3b 100644
--- a/include/kunit/visibility.h
+++ b/include/kunit/visibility.h
@@ -20,11 +20,11 @@
/**
* EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports symbol into
* EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
- * enabled. Must use MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING)
+ * enabled. Must use MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING")
 * in the test file in order to use the symbols.
+ * @symbol: the symbol identifier to export
*/
- #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, \
- EXPORTED_FOR_KUNIT_TESTING)
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, "EXPORTED_FOR_KUNIT_TESTING")
#else
#define VISIBLE_IF_KUNIT static
#define EXPORT_SYMBOL_IF_KUNIT(symbol)
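
The intended pairing, sketched (my_helper is a hypothetical symbol):

	/* In the code under test (a .c file): */
	VISIBLE_IF_KUNIT int my_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_IF_KUNIT(my_helper);

	/* In the test module, to gain access to the exported symbol: */
	MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");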
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index c819c5d16613..7310841f4512 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -51,8 +51,6 @@ struct arch_timer_vm_data {
};
struct arch_timer_context {
- struct kvm_vcpu *vcpu;
-
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
u64 ns_frac;
@@ -71,6 +69,9 @@ struct arch_timer_context {
bool level;
} irq;
+ /* Who am I? */
+ enum kvm_arch_timers timer_id;
+
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
};
@@ -98,6 +99,7 @@ int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
@@ -105,9 +107,6 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
void kvm_timer_init_vm(struct kvm *kvm);
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
-int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
-
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
@@ -126,9 +125,9 @@ void kvm_timer_init_vhe(void);
#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
-#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
-
-#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data)
+#define arch_timer_ctx_index(ctx) ((ctx)->timer_id)
+#define timer_context_to_vcpu(ctx) container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
+#define timer_vm_data(ctx) (&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
@@ -147,9 +146,44 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
+/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
+#define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0))
+
+DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);
+
+static inline bool has_broken_cntvoff(void)
+{
+ return static_branch_unlikely(&broken_cntvoff_key);
+}
+
static inline bool has_cntpoff(void)
{
return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
}
+static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+ u64 offset = 0;
+
+ if (!ctxt)
+ return 0;
+
+ if (ctxt->offset.vm_offset)
+ offset += *ctxt->offset.vm_offset;
+ if (ctxt->offset.vcpu_offset)
+ offset += *ctxt->offset.vcpu_offset;
+
+ return offset;
+}
+
+static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+ if (!ctxt->offset.vm_offset) {
+ WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
+ return;
+ }
+
+ WRITE_ONCE(*ctxt->offset.vm_offset, offset);
+}
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 35d4ca4f6122..96754b51b411 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -10,7 +10,7 @@
#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>
-#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+#define KVM_ARMV8_PMU_MAX_COUNTERS 32
#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
@@ -19,14 +19,14 @@ struct kvm_pmc {
};
struct kvm_pmu_events {
- u32 events_host;
- u32 events_guest;
+ u64 events_host;
+ u64 events_guest;
};
struct kvm_pmu {
struct irq_work overflow_work;
struct kvm_pmu_events events;
- struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
int irq_num;
bool created;
bool irq_level;
@@ -37,23 +37,17 @@ struct arm_pmu_entry {
struct arm_pmu *arm_pmu;
};
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
- return static_branch_likely(&kvm_arm_pmu_available);
-}
-
+bool kvm_supports_guest_pmuv3(void);
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -86,7 +80,7 @@ void kvm_vcpu_pmu_resync_el0(void);
*/
#define kvm_pmu_update_vcpu_events(vcpu) \
do { \
- if (!has_vhe() && kvm_arm_support_pmu_v3()) \
+ if (!has_vhe() && system_supports_pmuv3()) \
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
@@ -96,11 +90,13 @@ int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
-static inline bool kvm_arm_support_pmu_v3(void)
+static inline bool kvm_supports_guest_pmuv3(void)
{
return false;
}
@@ -113,15 +109,19 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
+ u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
@@ -187,6 +187,13 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
return 0;
}
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+ return false;
+}
+
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
+
#endif
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index e8fb624013d1..cbaec804eb83 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -14,8 +14,10 @@
#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1)
+#define KVM_ARM_PSCI_1_2 PSCI_VERSION(1, 2)
+#define KVM_ARM_PSCI_1_3 PSCI_VERSION(1, 3)
-#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_3
static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
{
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index f5172549f9ba..b261fb3968d0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -8,8 +8,8 @@
#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
-#include <linux/kref.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -26,7 +26,6 @@
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
#define VGIC_MAX_SPI 1019
#define VGIC_MAX_RESERVED 1023
#define VGIC_MIN_LPI 8192
@@ -39,6 +38,7 @@
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
+ VGIC_V5, /* Newer, fancier GICv5 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
@@ -59,6 +59,9 @@ struct vgic_global {
/* virtual control interface mapping, HYP VA */
void __iomem *vctrl_hyp;
+ /* Physical CPU interface, kernel VA */
+ void __iomem *gicc_base;
+
/* Number of implemented list registers */
int nr_lr;
@@ -78,9 +81,12 @@ struct vgic_global {
/* Pseudo GICv3 from outer space */
bool no_hw_deactivation;
- /* GIC system register CPU interface */
+ /* GICv3 system register CPU interface */
struct static_key_false gicv3_cpuif;
+ /* GICv3 compat mode on a GICv5 host */
+ bool has_gcie_v3_compat;
+
u32 ich_vtr_el2;
};
@@ -117,6 +123,7 @@ struct irq_ops {
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
+ u32 intid; /* Guest visible INTID */
struct rcu_head rcu;
struct list_head ap_list;
@@ -131,15 +138,19 @@ struct vgic_irq {
* affinity reg (v3).
*/
- u32 intid; /* Guest visible INTID */
- bool line_level; /* Level only */
- bool pending_latch; /* The pending latch state used to calculate
+ bool pending_release:1; /* Used for LPIs only, unreferenced IRQ
+ * pending a release */
+
+ bool pending_latch:1; /* The pending latch state used to calculate
* the pending state for both level
* and edge triggered IRQs. */
- bool active; /* not used for LPIs */
- bool enabled;
- bool hw; /* Tied to HW IRQ */
- struct kref refcount; /* Used for LPIs */
+ enum vgic_irq_config config:1; /* Level or edge */
+ bool line_level:1; /* Level only */
+ bool enabled:1;
+ bool active:1;
+ bool hw:1; /* Tied to HW IRQ */
+ bool on_lr:1; /* Present in a CPU LR */
+ refcount_t refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
unsigned int host_irq; /* linux irq corresponding to hwintid */
union {
@@ -150,7 +161,6 @@ struct vgic_irq {
u8 active_source; /* GICv2 SGIs only */
u8 priority;
u8 group; /* 0 == group 0, 1 == group 1 */
- enum vgic_irq_config config; /* Level or edge */
struct irq_ops *ops;
@@ -250,6 +260,12 @@ struct vgic_dist {
int nr_spis;
+ /* The GIC maintenance IRQ for nested hypervisors. */
+ u32 mi_intid;
+
+ /* Track the number of in-flight active SPIs */
+ atomic_t active_spis;
+
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {
@@ -262,12 +278,16 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Supports SGIs without active state */
+ bool nassgicap;
+
/* Wants SGIs without active state */
bool nassgireq;
struct vgic_irq *spis;
struct vgic_io_device dist_iodev;
+ struct vgic_io_device cpuif_iodev;
bool has_its;
bool table_write_in_progress;
@@ -366,10 +386,12 @@ struct vgic_cpu {
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
+extern struct static_key_false vgic_v3_has_v2_compat;
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -390,9 +412,12 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
+u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu);
+u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu);
+u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
+
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
-#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
@@ -400,6 +425,7 @@ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
+void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
@@ -427,13 +453,14 @@ struct kvm_kernel_irq_routing_entry;
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
- struct kvm_kernel_irq_routing_entry *irq_entry);
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu);
+bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
+
/* CPU HP callbacks */
void kvm_vgic_cpu_up(void);
void kvm_vgic_cpu_down(void);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 28c3fb2bef0d..fbf0c3a65f59 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/resource_ext.h>
@@ -24,6 +25,7 @@ struct irq_domain_ops;
#define _LINUX
#endif
#include <acpi/acpi.h>
+#include <acpi/acpi_numa.h>
#ifdef CONFIG_ACPI
@@ -35,12 +37,11 @@ struct irq_domain_ops;
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_numa.h>
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI")
#define __init_or_acpilib
#define __initdata_or_acpilib
#else
@@ -107,6 +108,7 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
ACPI_IRQ_MODEL_LPIC,
+ ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
};
@@ -220,6 +222,17 @@ void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
+static inline struct acpi_table_header *acpi_get_table_pointer(char *signature, u32 instance)
+{
+ struct acpi_table_header *table;
+	acpi_status status = acpi_get_table(signature, instance, &table);
+
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENOENT);
+ return table;
+}
+DEFINE_FREE(acpi_put_table, struct acpi_table_header *, if (!IS_ERR_OR_NULL(_T)) acpi_put_table(_T))
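
A hedged sketch of how the scope-based helper above is meant to be used (the
surrounding function is hypothetical):

	struct acpi_table_header *table __free(acpi_put_table) =
		acpi_get_table_pointer(ACPI_SIG_MADT, 0);

	if (IS_ERR(table))
		return PTR_ERR(table);
	/* table is put automatically when it goes out of scope. */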
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init_or_acpilib acpi_table_parse_entries(char *id,
unsigned long table_size, int entry_id,
@@ -237,11 +250,6 @@ acpi_table_parse_cedt(enum acpi_cedt_type id,
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
-{
- return gicc->flags & ACPI_MADT_ENABLED;
-}
-
#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
@@ -264,6 +272,12 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif
+#ifdef CONFIG_RISCV
+void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa);
+#else
+static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { }
+#endif
+
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
@@ -279,6 +293,9 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
+
+int __init acpi_get_madt_revision(void);
+
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
/* Processor _CTS control */
@@ -304,6 +321,8 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+acpi_handle acpi_get_processor_handle(int cpu);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -323,14 +342,16 @@ static inline bool acpi_sci_irq_valid(void)
}
extern int sbf_port;
-extern unsigned long acpi_realmode_flags;
int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
+typedef struct fwnode_handle *(*acpi_gsi_domain_disp_fn)(u32);
+
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *(*)(u32));
+ acpi_gsi_domain_disp_fn fn);
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void);
void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
@@ -356,6 +377,7 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
@@ -380,7 +402,7 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-typedef void (*wmi_notify_handler) (u32 value, void *context);
+typedef void (*wmi_notify_handler) (union acpi_object *data, void *context);
int wmi_instance_count(const char *guid);
@@ -395,7 +417,6 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance,
extern acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data);
extern acpi_status wmi_remove_notify_handler(const char *guid);
-extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
@@ -576,6 +597,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000
#define OSC_SB_PRM_SUPPORT 0x00200000
#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
@@ -745,25 +767,29 @@ int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
int acpi_gtdt_map_ppi(int type);
bool acpi_gtdt_c3stop(int type);
-int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
-static inline void acpi_arch_set_root_pointer(u64 addr)
+static __always_inline void acpi_arch_set_root_pointer(u64 addr)
{
}
#endif
#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
-static inline u64 acpi_arch_get_root_pointer(void)
+static __always_inline u64 acpi_arch_get_root_pointer(void)
{
return 0;
}
#endif
+int acpi_get_local_u64_address(acpi_handle handle, u64 *addr);
int acpi_get_local_address(acpi_handle handle, u32 *addr);
const char *acpi_get_subsystem_id(acpi_handle handle);
+#ifdef CONFIG_ACPI_MRRM
+int acpi_mrrm_max_mem_region(void);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -777,8 +803,6 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define acpi_dev_uid_match(adev, uid2) (adev && false)
#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
-#include <acpi/acpi_numa.h>
-
struct fwnode_handle;
static inline bool acpi_dev_found(const char *hid)
@@ -847,6 +871,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline bool has_acpi_companion(struct device *dev)
{
return false;
@@ -1076,8 +1105,29 @@ static inline bool acpi_sleep_state_supported(u8 sleep_state)
return false;
}
+static inline acpi_handle acpi_get_processor_handle(int cpu)
+{
+ return NULL;
+}
+
+static inline int acpi_mrrm_max_mem_region(void)
+{
+ return 1;
+}
+
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
extern void arch_post_acpi_subsys_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -1098,20 +1148,22 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
void (*check)(void);
void (*restore)(void);
};
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
-int acpi_get_lps0_constraint(struct acpi_device *adev);
#else /* CONFIG_SUSPEND && CONFIG_X86 */
-static inline int acpi_get_lps0_constraint(struct device *dev)
+static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+ return -ENODEV;
+}
+static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
- return ACPI_STATE_UNKNOWN;
}
#endif /* CONFIG_SUSPEND && CONFIG_X86 */
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
@@ -1152,8 +1204,6 @@ int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
-void acpi_ec_mark_gpe_for_wake(void);
-void acpi_ec_set_gpe_wake_mask(u8 action);
int acpi_subsys_restore_early(struct device *dev);
#else
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
@@ -1164,6 +1214,12 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
+#endif
+
+#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP)
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+#else
static inline void acpi_ec_mark_gpe_for_wake(void) {}
static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
@@ -1299,9 +1355,6 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
-
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
struct acpi_probe_entry *);
@@ -1332,6 +1385,8 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr);
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
@@ -1399,13 +1454,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
@@ -1451,18 +1499,25 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
- return 0;
+ return -ENODEV;
}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index);
#else
static inline
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
{
return -EINVAL;
}
+static inline const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_ACPI_LPIT
@@ -1489,6 +1544,9 @@ int find_acpi_cpu_topology(unsigned int cpu, int level);
int find_acpi_cpu_topology_cluster(unsigned int cpu);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus);
+int find_acpi_cache_level_from_id(u32 cache_id);
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus);
#else
static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
{
@@ -1510,13 +1568,20 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return -EINVAL;
}
+static inline void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id,
+ cpumask_t *cpus) { }
+static inline int find_acpi_cache_level_from_id(u32 cache_id)
+{
+ return -ENOENT;
+}
+static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id,
+ cpumask_t *cpus)
+{
+ return -ENOENT;
+}
#endif
-#ifdef CONFIG_ARM64
-void acpi_arm_init(void);
-#else
-static inline void acpi_arm_init(void) { }
-#endif
+void acpi_arch_init(void);
#ifdef CONFIG_ACPI_PCC
void acpi_init_pcc(void);
@@ -1547,18 +1612,6 @@ static inline void acpi_use_parent_companion(struct device *dev)
ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
}
-#ifdef CONFIG_ACPI_HMAT
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access);
-#else
-static inline int hmat_update_target_coordinates(int nid,
- struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
#ifdef CONFIG_ACPI_NUMA
bool acpi_node_backed_by_real_pxm(int nid);
#else
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index 72cedb916a9c..e748b2877602 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -11,10 +11,11 @@
#ifndef __LINUX_ACPI_DMA_H
#define __LINUX_ACPI_DMA_H
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+struct device;
/**
* struct acpi_dma_spec - slave device DMA resources
@@ -65,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
(struct acpi_dma_spec *, struct acpi_dma *),
void *data);
-void devm_acpi_dma_controller_free(struct device *dev);
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index);
@@ -94,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev,
{
return -ENODEV;
}
-static inline void devm_acpi_dma_controller_free(struct device *dev)
-{
-}
static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
struct device *dev, size_t index)
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 50d88bf1498d..0ded9220d379 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -26,6 +26,19 @@ static inline u32 acpi_pm_read_early(void)
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
+/**
+ * Register a callback for suspend and resume events
+ *
+ * @cb: Callback triggered on suspend and resume
+ * @data: Data passed with the callback
+ */
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
+
+/**
+ * Remove the registered callback for suspend and resume events
+ */
+void acpi_pmtmr_unregister_suspend_resume_callback(void);
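
Sketched usage (the callback name and its body are illustrative):

	static void example_pmtmr_cb(void *data, bool suspend)
	{
		/* suspend == true on suspend, false on resume */
	}

	acpi_pmtmr_register_suspend_resume_callback(example_pmtmr_cb, NULL);
	/* ... */
	acpi_pmtmr_unregister_suspend_resume_callback();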
+
#else
static inline u32 acpi_pm_read_early(void)
diff --git a/include/linux/acpi_rimt.h b/include/linux/acpi_rimt.h
new file mode 100644
index 000000000000..fad3adc4d899
--- /dev/null
+++ b/include/linux/acpi_rimt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ACPI_RIMT_H
+#define _ACPI_RIMT_H
+
+#ifdef CONFIG_ACPI_RIMT
+int rimt_iommu_register(struct device *dev);
+#else
+static inline int rimt_iommu_register(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT)
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in);
+#else
+static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _ACPI_RIMT_H */
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
new file mode 100644
index 000000000000..37962ba530df
--- /dev/null
+++ b/include/linux/adi-axi-common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#include <linux/types.h>
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_FPGA_INFO 0x001C
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+/**
+ * adi_axi_pcore_ver_gteq() - check if a version is satisfied
+ * @version: the full version read from the hardware
+ * @major: the major version to compare against
+ * @minor: the minor version to compare against
+ *
+ * ADI AXI IP Cores use semantic versioning, so this can be used to check for
+ * feature availability.
+ *
+ * Return: true if the version is greater than or equal to the specified
+ * major and minor version, false otherwise.
+ */
+static inline bool adi_axi_pcore_ver_gteq(u32 version, u32 major, u32 minor)
+{
+ return ADI_AXI_PCORE_VER_MAJOR(version) > (major) ||
+ (ADI_AXI_PCORE_VER_MAJOR(version) == (major) &&
+ ADI_AXI_PCORE_VER_MINOR(version) >= (minor));
+}
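
For example, gating a feature on core version >= 10.2 (the MMIO base and the
enable helper are hypothetical):

	u32 version = readl(base + ADI_AXI_REG_VERSION);

	if (adi_axi_pcore_ver_gteq(version, 10, 2))
		enable_new_feature();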
+
+#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
+#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
+#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
+
+enum adi_axi_fpga_technology {
+ ADI_AXI_FPGA_TECH_UNKNOWN = 0,
+ ADI_AXI_FPGA_TECH_SERIES7,
+ ADI_AXI_FPGA_TECH_ULTRASCALE,
+ ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS,
+};
+
+enum adi_axi_fpga_family {
+ ADI_AXI_FPGA_FAMILY_UNKNOWN = 0,
+ ADI_AXI_FPGA_FAMILY_ARTIX,
+ ADI_AXI_FPGA_FAMILY_KINTEX,
+ ADI_AXI_FPGA_FAMILY_VIRTEX,
+ ADI_AXI_FPGA_FAMILY_ZYNQ,
+};
+
+enum adi_axi_fpga_speed_grade {
+ ADI_AXI_FPGA_SPEED_UNKNOWN = 0,
+ ADI_AXI_FPGA_SPEED_1 = 10,
+ ADI_AXI_FPGA_SPEED_1L = 11,
+ ADI_AXI_FPGA_SPEED_1H = 12,
+ ADI_AXI_FPGA_SPEED_1HV = 13,
+ ADI_AXI_FPGA_SPEED_1LV = 14,
+ ADI_AXI_FPGA_SPEED_2 = 20,
+ ADI_AXI_FPGA_SPEED_2L = 21,
+ ADI_AXI_FPGA_SPEED_2LV = 22,
+ ADI_AXI_FPGA_SPEED_3 = 30,
+};
+
+#endif /* ADI_AXI_COMMON_H_ */
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
index c637e0997f6d..d83c9175828f 100644
--- a/include/linux/adreno-smmu-priv.h
+++ b/include/linux/adreno-smmu-priv.h
@@ -45,11 +45,16 @@ struct adreno_smmu_fault_info {
* TTBR0 translation is enabled with the specified cfg
* @get_fault_info: Called by the GPU fault handler to get information about
* the fault
- * @set_stall: Configure whether stall on fault (CFCFG) is enabled. Call
- * before set_ttbr0_cfg(). If stalling on fault is enabled,
- * the GPU driver must call resume_translation()
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. If
+ * stalling on fault is enabled, the GPU driver must call
+ * resume_translation()
* @resume_translation: Resume translation after a fault
*
+ * @set_prr_bit: [optional] Configure the GPU's Partially Resident
+ * Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR register with
+ * the physical address of PRR page passed from GPU
+ * driver.
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
@@ -67,6 +72,8 @@ struct adreno_smmu_priv {
void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
void (*set_stall)(const void *cookie, bool enabled);
void (*resume_translation)(const void *cookie, bool terminate);
+ void (*set_prr_bit)(const void *cookie, bool set);
+ void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
};
#endif /* __ADRENO_SMMU_PRIV_H */
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 4b97f38f3fcf..02940be66324 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -16,10 +16,26 @@
#define AER_CORRECTABLE 2
#define DPC_FATAL 3
+/*
+ * AER and DPC capabilities TLP Logging register sizes (PCIe r6.2, sec 7.8.4
+ * & 7.9.14).
+ */
+#define PCIE_STD_NUM_TLP_HEADERLOG 4
+#define PCIE_STD_MAX_TLP_PREFIXLOG 4
+#define PCIE_STD_MAX_TLP_HEADERLOG (PCIE_STD_NUM_TLP_HEADERLOG + 10)
+
struct pci_dev;
struct pcie_tlp_log {
- u32 dw[4];
+ union {
+ u32 dw[PCIE_STD_MAX_TLP_HEADERLOG];
+ struct {
+ u32 _do_not_use[PCIE_STD_NUM_TLP_HEADERLOG];
+ u32 prefix[PCIE_STD_MAX_TLP_PREFIXLOG];
+ };
+ };
+ u8 header_len; /* Length of the Logged TLP Header in DWORDs */
+ bool flit; /* TLP was logged when in Flit mode */
};
struct aer_capability_regs {
@@ -37,8 +53,6 @@ struct aer_capability_regs {
u16 uncor_err_source;
};
-int pcie_read_tlp_log(struct pci_dev *dev, int where, struct pcie_tlp_log *log);
-
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
int pcie_aer_is_native(struct pci_dev *dev);
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 05e758b8b894..3ffa5341dce2 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -20,12 +20,6 @@ enum alarmtimer_type {
ALARM_BOOTTIME_FREEZER,
};
-enum alarmtimer_restart {
- ALARMTIMER_NORESTART,
- ALARMTIMER_RESTART,
-};
-
-
#define ALARMTIMER_STATE_INACTIVE 0x00
#define ALARMTIMER_STATE_ENQUEUED 0x01
@@ -42,14 +36,14 @@ enum alarmtimer_restart {
struct alarm {
struct timerqueue_node node;
struct hrtimer timer;
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
+ void (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
int state;
void *data;
};
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+ void (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index c4a0b23846d8..dcb1d37dabc2 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -11,6 +11,7 @@
#define ALCOR_SD_CARD 0
#define ALCOR_MS_CARD 1
+#define DRV_NAME_ALCOR_PCI "alcor_pci"
#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc"
#define DRV_NAME_ALCOR_PCI_MS "alcor_ms"
diff --git a/include/linux/align.h b/include/linux/align.h
index 2b4acec7b95a..55debf105a5d 100644
--- a/include/linux/align.h
+++ b/include/linux/align.h
@@ -2,14 +2,6 @@
#ifndef _LINUX_ALIGN_H
#define _LINUX_ALIGN_H
-#include <linux/const.h>
-
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#include <vdso/align.h>
#endif /* _LINUX_ALIGN_H */
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index abd24016a900..d40ac39bfbe8 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -30,6 +30,21 @@ struct alloc_tag {
struct alloc_tag_counters __percpu *counters;
} __aligned(8);
+struct alloc_tag_kernel_section {
+ struct alloc_tag *first_tag;
+ unsigned long count;
+};
+
+struct alloc_tag_module_section {
+ union {
+ unsigned long start_addr;
+ struct alloc_tag *first_tag;
+ };
+ unsigned long end_addr;
+ /* used size */
+ unsigned long size;
+};
+
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
#define CODETAG_EMPTY ((void *)1)
@@ -48,12 +63,19 @@ static inline void set_codetag_empty(union codetag_ref *ref)
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
-static inline void set_codetag_empty(union codetag_ref *ref) {}
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = NULL;
+}
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
#ifdef CONFIG_MEM_ALLOC_PROFILING
+#define ALLOC_TAG_SECTION_NAME "alloc_tags"
+
struct codetag_bytes {
struct codetag *ct;
s64 bytes;
@@ -66,30 +88,42 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
return container_of(ct, struct alloc_tag, ct);
}
-#ifdef ARCH_NEEDS_WEAK_PER_CPU
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
- * Instead we will accound all module allocations to a single counter.
+ * Instead we will account all module allocations to a single counter.
*/
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static struct alloc_tag _alloc_tag __used __aligned(8) \
- __section("alloc_tags") = { \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
.ct = CODE_TAG_INIT, \
.counters = &_shared_alloc_tag };
-#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
+
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
#define DEFINE_ALLOC_TAG(_alloc_tag) \
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
static struct alloc_tag _alloc_tag __used __aligned(8) \
- __section("alloc_tags") = { \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
.ct = CODE_TAG_INIT, \
.counters = &_alloc_tag_cntr };
-#endif /* ARCH_NEEDS_WEAK_PER_CPU */
+#endif /* MODULE */
+
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
@@ -118,11 +152,11 @@ static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
- WARN_ONCE(ref && ref->ct,
+ WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
"alloc_tag was not cleared (got tag for %s:%u)\n",
ref->ct->filename, ref->ct->lineno);
- WARN_ONCE(!tag, "current->alloc_tag not set");
+ WARN_ONCE(!tag, "current->alloc_tag not set\n");
}
static inline void alloc_tag_sub_check(union codetag_ref *ref)
@@ -135,9 +169,21 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif
/* Caller should verify both ref and tag to be valid */
-static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return false;
+
ref->ct = &tag->ct;
+ return true;
+}
+
+static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ if (unlikely(!__alloc_tag_ref_set(ref, tag)))
+ return false;
+
/*
	 * We need to increment the call counter every time we have a new
* allocation or when we split a large allocation into smaller ones.
@@ -145,25 +191,13 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag
* counter because when we free each part the counter will be decremented.
*/
this_cpu_inc(tag->counters->calls);
-}
-
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
+ return true;
}
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
- alloc_tag_add_check(ref, tag);
- if (!ref || !tag)
- return;
-
- __alloc_tag_ref_set(ref, tag);
- this_cpu_add(tag->counters->bytes, bytes);
+ if (likely(alloc_tag_ref_set(ref, tag)))
+ this_cpu_add(tag->counters->bytes, bytes);
}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
@@ -187,6 +221,16 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
ref->ct = NULL;
}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag)
+{
+ tag->ct.flags |= CODETAG_FLAG_INACCURATE;
+}
+
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag)
+{
+ return !!(tag->ct.flags & CODETAG_FLAG_INACCURATE);
+}
+
#define alloc_tag_record(p) ((p) = current->alloc_tag)
#else /* CONFIG_MEM_ALLOC_PROFILING */
@@ -196,15 +240,22 @@ static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag) {}
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag) { return false; }
#define alloc_tag_record(p) do {} while (0)
#endif /* CONFIG_MEM_ALLOC_PROFILING */
#define alloc_hooks_tag(_tag, _do_alloc) \
({ \
- struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \
- typeof(_do_alloc) _res = _do_alloc; \
- alloc_tag_restore(_tag, _old); \
+ typeof(_do_alloc) _res; \
+ if (mem_alloc_profiling_enabled()) { \
+ struct alloc_tag * __maybe_unused _old; \
+ _old = alloc_tag_save(_tag); \
+ _res = _do_alloc; \
+ alloc_tag_restore(_tag, _old); \
+ } else \
+ _res = _do_alloc; \
_res; \
})
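
Illustrative use, accounting an allocation to a previously saved tag
(pairing with kmalloc_noprof(), the untagged allocator variant, is an
assumption based on how the alloc_hooks wrappers are used elsewhere):

	void *p = alloc_hooks_tag(tag, kmalloc_noprof(size, GFP_KERNEL));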
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 958a55bcc708..9946276aff73 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -105,7 +105,7 @@ enum amba_vendor {
AMBA_VENDOR_LSI = 0xb6,
};
-extern struct bus_type amba_bustype;
+extern const struct bus_type amba_bustype;
#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
@@ -121,6 +121,7 @@ extern struct bus_type amba_bustype;
#ifdef CONFIG_ARM_AMBA
int __amba_driver_register(struct amba_driver *, struct module *);
void amba_driver_unregister(struct amba_driver *);
+bool dev_is_amba(const struct device *dev);
#else
static inline int __amba_driver_register(struct amba_driver *drv,
struct module *owner)
@@ -130,6 +131,10 @@ static inline int __amba_driver_register(struct amba_driver *drv,
static inline void amba_driver_unregister(struct amba_driver *drv)
{
}
+static inline bool dev_is_amba(const struct device *dev)
+{
+ return false;
+}
#endif
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 2b90c48a6a87..edcee9f5335a 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -12,30 +12,16 @@
struct amd_iommu;
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
- u32 ga_tag;
- u32 prev_ga_tag;
- u64 base;
- bool is_guest_mode;
- struct vcpu_data *vcpu_data;
- void *ir_data;
-};
-
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
struct pci_dev;
-extern int amd_iommu_detect(void);
+extern void amd_iommu_detect(void);
#else /* CONFIG_AMD_IOMMU */
-static inline int amd_iommu_detect(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
#endif /* CONFIG_AMD_IOMMU */
@@ -44,10 +30,8 @@ static inline int amd_iommu_detect(void) { return -ENODEV; }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
-
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
extern int amd_iommu_deactivate_guest_mode(void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -58,13 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
@@ -87,8 +70,10 @@ struct amd_iommu *get_amd_iommu(unsigned int idx);
#ifdef CONFIG_KVM_AMD_SEV
int amd_iommu_snp_disable(void);
+extern bool amd_iommu_sev_tio_supported(void);
#else
static inline int amd_iommu_snp_disable(void) { return 0; }
+static inline bool amd_iommu_sev_tio_supported(void) { return false; }
#endif
#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
index b4f818205216..6fa510f419c0 100644
--- a/include/linux/amd-pmf-io.h
+++ b/include/linux/amd-pmf-io.h
@@ -18,10 +18,12 @@
* enum sfh_message_type - Query the SFH message type
* @MT_HPD: Message ID to know the Human presence info from MP2 FW
* @MT_ALS: Message ID to know the Ambient light info from MP2 FW
+ * @MT_SRA: Message ID to know the SRA data from MP2 FW
*/
enum sfh_message_type {
MT_HPD,
MT_ALS,
+ MT_SRA,
};
/**
@@ -40,10 +42,23 @@ enum sfh_hpd_info {
* struct amd_sfh_info - get HPD sensor info from MP2 FW
* @ambient_light: Populates the ambient light information
* @user_present: Populates the user presence information
+ * @platform_type: Operating modes (clamshell, flat, tent, etc.)
+ * @laptop_placement: Device states (ontable, onlap, outbag)
*/
struct amd_sfh_info {
u32 ambient_light;
u8 user_present;
+ u32 platform_type;
+ u32 laptop_placement;
+};
+
+enum laptop_placement {
+ LP_UNKNOWN = 0,
+ ON_TABLE,
+ ON_LAP_MOTION,
+ IN_BAG,
+ OUT_OF_BAG,
+ LP_UNDEFINED,
};
int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
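
A short sketch of querying the new SRA message type; error handling is trimmed and the pr_debug consumer is illustrative:

        static void example_check_placement(void)
        {
                struct amd_sfh_info info = {};

                if (amd_get_sfh_info(&info, MT_SRA))
                        return;

                /* react to thermally interesting placements */
                if (info.laptop_placement == IN_BAG ||
                    info.laptop_placement == ON_LAP_MOTION)
                        pr_debug("constrained placement: %u\n",
                                 info.laptop_placement);
        }
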
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
deleted file mode 100644
index d58fc022ec46..000000000000
--- a/include/linux/amd-pstate.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/include/linux/amd-pstate.h
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Meng Li <li.meng@amd.com>
- */
-
-#ifndef _LINUX_AMD_PSTATE_H
-#define _LINUX_AMD_PSTATE_H
-
-#include <linux/pm_qos.h>
-
-#define AMD_CPPC_EPP_PERFORMANCE 0x00
-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
-#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
-#define AMD_CPPC_EPP_POWERSAVE 0xFF
-
-/*********************************************************************
- * AMD P-state INTERFACE *
- *********************************************************************/
-/**
- * struct amd_aperf_mperf
- * @aperf: actual performance frequency clock count
- * @mperf: maximum performance frequency clock count
- * @tsc: time stamp counter
- */
-struct amd_aperf_mperf {
- u64 aperf;
- u64 mperf;
- u64 tsc;
-};
-
-/**
- * struct amd_cpudata - private CPU data for AMD P-State
- * @cpu: CPU number
- * @req: constraint request to apply
- * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
- * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
- * priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
- * @min_limit_freq: Cached value of policy->min (in khz)
- * @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
- * @nominal_freq: the frequency (in khz) that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value (in khz)
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- * @hw_prefcore: check whether HW supports preferred core featue.
- * Only when hw_prefcore and early prefcore param are true,
- * AMD P-State driver supports preferred core featue.
- * @epp_policy: Last saved policy used to set energy-performance preference
- * @epp_cached: Cached CPPC energy-performance preference value
- * @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
- *
- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
- * represents all the attributes and goals that AMD P-State requests at runtime.
- */
-struct amd_cpudata {
- int cpu;
-
- struct freq_qos_request req[2];
- u64 cppc_req_cached;
-
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
- u32 prefcore_ranking;
- u32 min_limit_perf;
- u32 max_limit_perf;
- u32 min_limit_freq;
- u32 max_limit_freq;
-
- u32 max_freq;
- u32 min_freq;
- u32 nominal_freq;
- u32 lowest_nonlinear_freq;
-
- struct amd_aperf_mperf cur;
- struct amd_aperf_mperf prev;
-
- u64 freq;
- bool boost_supported;
- bool hw_prefcore;
-
- /* EPP feature related attributes*/
- s16 epp_policy;
- s16 epp_cached;
- u32 policy;
- u64 cppc_cap1_cached;
- bool suspended;
-};
-
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
-static const char * const amd_pstate_mode_string[] = {
- [AMD_PSTATE_UNDEFINED] = "undefined",
- [AMD_PSTATE_DISABLE] = "disable",
- [AMD_PSTATE_PASSIVE] = "passive",
- [AMD_PSTATE_ACTIVE] = "active",
- [AMD_PSTATE_GUIDED] = "guided",
- NULL,
-};
-
-struct quirk_entry {
- u32 nominal_freq;
- u32 lowest_freq;
-};
-
-#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/include/linux/annotate.h b/include/linux/annotate.h
new file mode 100644
index 000000000000..2f1599c9e573
--- /dev/null
+++ b/include/linux/annotate.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ANNOTATE_H
+#define _LINUX_ANNOTATE_H
+
+#include <linux/objtool_types.h>
+
+#ifdef CONFIG_OBJTOOL
+
+#define __ASM_ANNOTATE(section, label, type) \
+ .pushsection section, "M", @progbits, 8; \
+ .long label - ., type; \
+ .popsection
+
+#ifndef __ASSEMBLY__
+
+#define ASM_ANNOTATE_LABEL(label, type) \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, label, type))
+
+#define ASM_ANNOTATE(type) \
+ "911: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, 911b, type))
+
+#define ASM_ANNOTATE_DATA(type) \
+ "912: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_data, 912b, type))
+
+#else /* __ASSEMBLY__ */
+
+.macro ANNOTATE type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_insn, .Lhere_\@, \type)
+.endm
+
+.macro ANNOTATE_DATA type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_data, .Lhere_\@, \type)
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+#ifndef __ASSEMBLY__
+#define ASM_ANNOTATE_LABEL(label, type) ""
+#define ASM_ANNOTATE(type)
+#define ASM_ANNOTATE_DATA(type)
+#else /* __ASSEMBLY__ */
+.macro ANNOTATE type
+.endm
+.macro ANNOTATE_DATA type
+.endm
+#endif /* __ASSEMBLY__ */
+#endif /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Annotate away the various 'relocation to !ENDBR' complaints, knowing that
+ * these relocations will never be used for indirect calls.
+ */
+#define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR)
+#define ANNOTATE_NOENDBR_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOENDBR))
+
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+#define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE)
+/*
+ * See linux/instrumentation.h
+ */
+#define ANNOTATE_INSTR_BEGIN(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_BEGIN)
+#define ANNOTATE_INSTR_END(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_END)
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS)
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL)
+/*
+ * Use objtool to validate the entry requirement that all code paths do
+ * VALIDATE_UNRET_END before RET.
+ *
+ * NOTE: The macro must be used at the beginning of a global symbol, otherwise
+ * it will be ignored.
+ */
+#define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN)
+/*
+ * This should be used to refer to an instruction that is considered
+ * terminating, like a noreturn CALL or UD2 when we know they are not -- e.g.
+ * WARN using UD2.
+ */
+#define ANNOTATE_REACHABLE(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_REACHABLE)
+/*
+ * This should not be used; it annotates away CFI violations. There are a few
+ * valid use cases like kexec handover to the next kernel image, and there is
+ * no security concern there.
+ *
+ * There are also a few real issues annotated away, like EFI because we can't
+ * control the EFI code.
+ */
+#define ANNOTATE_NOCFI_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOCFI))
+
+/*
+ * Annotate a special section entry. This enables livepatch module generation
+ * to find and extract individual special section entries as needed.
+ */
+#define ANNOTATE_DATA_SPECIAL ASM_ANNOTATE_DATA(ANNOTYPE_DATA_SPECIAL)
+
+#else /* __ASSEMBLY__ */
+#define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR
+#define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE
+/* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */
+/* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */
+#define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS
+#define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL
+#define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN
+#define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE
+#define ANNOTATE_NOCFI_SYM ANNOTATE type=ANNOTYPE_NOCFI
+#define ANNOTATE_DATA_SPECIAL ANNOTATE_DATA type=ANNOTYPE_DATA_SPECIAL
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_ANNOTATE_H */
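
A minimal sketch of the C-side annotations in use; the indirect-call wrapper is hypothetical, but the pattern of concatenating the annotation string ahead of the asm template matches how these macros are consumed:

        static inline void example_indirect_call(void (*fn)(void))
        {
                /* tell objtool this raw indirect call is retpoline-safe;
                 * the annotation emits a .discard.annotate_insn entry and
                 * adds no runtime code
                 */
                asm volatile(ANNOTATE_RETPOLINE_SAFE
                             "call *%0\n\t"
                             : : "r" (fn) : "memory");
        }
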
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index b721f360d759..ebd7f8935f96 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -11,21 +11,9 @@
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);
-#ifdef CONFIG_ACPI_CPPC_LIB
-void topology_init_cpu_capacity_cppc(void);
-#endif
-
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-DECLARE_PER_CPU(unsigned long, cpu_scale);
-
-static inline unsigned long topology_get_cpu_scale(int cpu)
-{
- return per_cpu(cpu_scale, cpu);
-}
-
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
@@ -49,6 +37,7 @@ enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
SCALE_FREQ_SOURCE_CPPC,
+ SCALE_FREQ_SOURCE_VIRT,
};
struct scale_freq_data {
@@ -91,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+
+#ifndef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (0)
+#endif
+
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
@@ -100,6 +94,21 @@ void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
void freq_inv_set_max_ratio(int cpu, u64 max_rate);
-#endif
+
+/*
+ * Architectures like ARM64 don't have a reliable architectural way to get SMT
+ * information and depend on the firmware (ACPI/OF) to report it. A non-SMT core
+ * won't initialize thread_id, so we can use this to detect an SMT implementation.
+ */
+static inline bool topology_core_has_smt(int cpu)
+{
+ return cpu_topology[cpu].thread_id != -1;
+}
+
+#else
+
+static inline bool topology_core_has_smt(int cpu) { return false; }
+
+#endif /* CONFIG_GENERIC_ARCH_TOPOLOGY */
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
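
A small sketch using the new topology_core_has_smt() helper; the counting loop is illustrative:

        static int example_count_smt_cpus(void)
        {
                int cpu, n = 0;

                /* the !CONFIG_GENERIC_ARCH_TOPOLOGY stub returns false,
                 * so this safely counts zero there
                 */
                for_each_possible_cpu(cpu)
                        if (topology_core_has_smt(cpu))
                                n++;
                return n;
        }
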
diff --git a/include/linux/args.h b/include/linux/args.h
index 8ff60a54eb7d..2e8e65d975c7 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -17,9 +17,9 @@
* that as _n.
*/
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+/* This counts to 15. Any more and it will return the 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/* Concatenate two parameters, but allow them to be expanded beforehand. */
#define __CONCAT(a, b) a ## b
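
For reference, a sketch of how the widened counter expands (the argument names are arbitrary):

        /*
         *   COUNT_ARGS()        -> 0   (##X swallows the leading comma)
         *   COUNT_ARGS(a)       -> 1
         *   COUNT_ARGS(a, b, c) -> 3
         *   ...
         *   fifteen arguments   -> 15; a sixteenth argument would land in
         *   the _n slot itself, so callers must stay at 15 or fewer.
         */
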
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 083f85653716..50b47eba7d01 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -7,6 +7,11 @@
#include <linux/args.h>
#include <linux/init.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/uuid.h>
+#endif
+
#include <uapi/linux/const.h>
/*
@@ -107,14 +112,81 @@
ARM_SMCCC_FUNC_QUERY_CALL_UID)
/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM UUID_INIT(\
+ 0x28b46fb6, 0x2ec5, 0x11e9, \
+ 0xa9, 0xca, 0x4b, 0x56, \
+ 0x4d, 0x00, 0x3a, 0x74)
/* KVM "vendor specific" services */
#define ARM_SMCCC_KVM_FUNC_FEATURES 0
#define ARM_SMCCC_KVM_FUNC_PTP 1
+/* Start of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2
+#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3
+#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6
+#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
+/* End of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER 64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS 65
+
#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
#define ARM_SMCCC_KVM_NUM_FUNCS 128
@@ -137,6 +209,42 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_PTP)
+#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1
@@ -227,8 +335,6 @@ u32 arm_smccc_get_version(void);
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
-extern u64 smccc_has_sve_hint;
-
/**
* arm_smccc_get_soc_id_version()
*
@@ -247,6 +353,57 @@ s32 arm_smccc_get_soc_id_version(void);
*/
s32 arm_smccc_get_soc_id_revision(void);
+#ifndef __ASSEMBLY__
+
+/*
+ * Returns whether a specific hypervisor UUID is advertised for the
+ * Vendor Specific Hypervisor Service range.
+ */
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *uuid);
+
+static inline uuid_t smccc_res_to_uuid(u32 r0, u32 r1, u32 r2, u32 r3)
+{
+ uuid_t uuid = {
+ .b = {
+ [0] = (r0 >> 0) & 0xff,
+ [1] = (r0 >> 8) & 0xff,
+ [2] = (r0 >> 16) & 0xff,
+ [3] = (r0 >> 24) & 0xff,
+
+ [4] = (r1 >> 0) & 0xff,
+ [5] = (r1 >> 8) & 0xff,
+ [6] = (r1 >> 16) & 0xff,
+ [7] = (r1 >> 24) & 0xff,
+
+ [8] = (r2 >> 0) & 0xff,
+ [9] = (r2 >> 8) & 0xff,
+ [10] = (r2 >> 16) & 0xff,
+ [11] = (r2 >> 24) & 0xff,
+
+ [12] = (r3 >> 0) & 0xff,
+ [13] = (r3 >> 8) & 0xff,
+ [14] = (r3 >> 16) & 0xff,
+ [15] = (r3 >> 24) & 0xff,
+ },
+ };
+
+ return uuid;
+}
+
+static inline u32 smccc_uuid_to_reg(const uuid_t *uuid, int reg)
+{
+ u32 val = 0;
+
+ val |= (u32)(uuid->b[4 * reg + 0] << 0);
+ val |= (u32)(uuid->b[4 * reg + 1] << 8);
+ val |= (u32)(uuid->b[4 * reg + 2] << 16);
+ val |= (u32)(uuid->b[4 * reg + 3] << 24);
+
+ return val;
+}
+
+#endif /* !__ASSEMBLY__ */
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -327,15 +484,6 @@ struct arm_smccc_quirk {
};
/**
- * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls
- *
- * Sets the SMCCC hint bit to indicate if there is live state in the SVE
- * registers, this modifies x0 in place and should never be called from C
- * code.
- */
-asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0);
-
-/**
* __arm_smccc_smc() - make SMC calls
* @a0-a7: arguments passed in registers 0 to 7
* @res: result values from registers 0 to 3
@@ -402,20 +550,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-/* nVHE hypervisor doesn't have a current thread so needs separate checks */
-#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__)
-
-#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \
- ARM64_SVE)
-#define smccc_sve_clobbers "x16", "x30", "cc",
-
-#else
-
-#define SMCCC_SVE_CHECK
-#define smccc_sve_clobbers
-
-#endif
-
#define __constraint_read_2 "r" (arg0)
#define __constraint_read_3 __constraint_read_2, "r" (arg1)
#define __constraint_read_4 __constraint_read_3, "r" (arg2)
@@ -486,12 +620,11 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r3 asm("r3"); \
CONCATENATE(__declare_arg_, \
COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
- asm volatile(SMCCC_SVE_CHECK \
- inst "\n" : \
+ asm volatile(inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
: CONCATENATE(__constraint_read_, \
COUNT_ARGS(__VA_ARGS__)) \
- : smccc_sve_clobbers "memory"); \
+ : "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -540,7 +673,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
asm ("" : \
: CONCATENATE(__constraint_read_, \
COUNT_ARGS(__VA_ARGS__)) \
- : smccc_sve_clobbers "memory"); \
+ : "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
@@ -577,5 +710,45 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
+#ifdef CONFIG_ARM64
+
+#define __fail_smccc_1_2(___res) \
+ do { \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_2_invoke() - make an SMCCC v1.2 compliant call
+ *
+ * @args: SMC args are in the a0..a17 fields of the arm_smccc_1_2_regs structure
+ * @res: result values from registers 0 to 17
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_2_invoke(args, res) ({ \
+ struct arm_smccc_1_2_regs *__args = args; \
+ struct arm_smccc_1_2_regs *__res = res; \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_2_hvc(__args, __res); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_2_smc(__args, __res); \
+ break; \
+ default: \
+ __fail_smccc_1_2(__res); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+#endif /*CONFIG_ARM64*/
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
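
Two hedged sketches of the interfaces added above: decoding a vendor-hypervisor UID response into a uuid_t, and making a conduit-agnostic SMCCC v1.2 call (the function ID below is made up):

        static bool example_is_kvm_hyp(const struct arm_smccc_res *res)
        {
                static const uuid_t kvm_uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
                uuid_t uuid = smccc_res_to_uuid(res->a0, res->a1,
                                                res->a2, res->a3);

                return uuid_equal(&uuid, &kvm_uuid);
        }

        static void example_smccc_1_2_call(void)
        {
                struct arm_smccc_1_2_regs args = { .a0 = 0xc4000030 }; /* illustrative ID */
                struct arm_smccc_1_2_regs res;

                if (arm_smccc_1_2_invoke(&args, &res) == SMCCC_CONDUIT_NONE)
                        return;
                /* on success, res.a0..a17 carry the extended return values */
        }
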
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index c82d56768101..81e603839c4a 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -73,6 +73,11 @@
#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A)
+#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B)
+#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C)
+#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D)
+#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E)
/*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -107,6 +112,7 @@
FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
+#define FFA_VERSION_1_2 FFA_PACK_VERSION_INFO(1, 2)
/**
* FF-A specification mentions explicitly about '4K pages'. This should
@@ -122,6 +128,7 @@
#define FFA_FEAT_RXTX_MIN_SZ_4K 0
#define FFA_FEAT_RXTX_MIN_SZ_64K 1
#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+#define FFA_FEAT_RXTX_MIN_SZ_MASK GENMASK(1, 0)
/* FFA Bus/Device/Driver related */
struct ffa_device {
@@ -149,7 +156,7 @@ struct ffa_driver {
struct device_driver driver;
};
-#define to_ffa_driver(d) container_of(d, struct ffa_driver, driver)
+#define to_ffa_driver(d) container_of_const(d, struct ffa_driver, driver)
static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
{
@@ -161,25 +168,31 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
return dev_get_drvdata(&fdev->dev);
}
+struct ffa_partition_info;
+
#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops);
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name);
void ffa_driver_unregister(struct ffa_driver *driver);
+void ffa_devices_unregister(void);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
#else
-static inline
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+static inline struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
return NULL;
}
static inline void ffa_device_unregister(struct ffa_device *dev) {}
+static inline void ffa_devices_unregister(void) {}
+
static inline int
ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name)
@@ -212,6 +225,9 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
extern const struct bus_type ffa_bus_type;
+/* The FF-A 1.0 partition structure lacks the uuid[4] */
+#define FFA_1_0_PARTITON_INFO_SZ (8)
+
/* FFA transport related */
struct ffa_partition_info {
u16 id;
@@ -226,8 +242,12 @@ struct ffa_partition_info {
#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
/* partition runs in the AArch64 execution state. */
#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+/* partition supports receipt of direct request2 */
+#define FFA_PARTITION_DIRECT_REQ2_RECV BIT(9)
+/* partition can send direct request2. */
+#define FFA_PARTITION_DIRECT_REQ2_SEND BIT(10)
u32 properties;
- u32 uuid[4];
+ uuid_t uuid;
};
static inline
@@ -245,6 +265,10 @@ bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
#define ffa_partition_supports_direct_recv(dev) \
ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
+#define ffa_partition_supports_direct_req2_recv(dev) \
+ (ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_REQ2_RECV) && \
+ !dev->mode_32bit)
+
/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
struct ffa_send_direct_data {
unsigned long data0; /* w3/x3 */
@@ -260,6 +284,13 @@ struct ffa_indirect_msg_hdr {
u32 offset;
u32 send_recv_id;
u32 size;
+ u32 res1;
+ uuid_t uuid;
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
+struct ffa_send_direct_data2 {
+ unsigned long data[14]; /* x4-x17 */
};
struct ffa_mem_region_addr_range {
@@ -307,6 +338,7 @@ struct ffa_mem_region_attributes {
* an `struct ffa_mem_region_addr_range`.
*/
u32 composite_off;
+ u8 impdef_val[16];
u64 reserved;
};
@@ -386,15 +418,31 @@ struct ffa_mem_region {
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+ u32 sz;
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+ sz = sizeof(*ep_mem_access);
+ else
+ sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+ return sz;
+}
+
static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
- u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+ u32 offset = count * ffa_emad_size_get(ffa_version);
/*
* Prior to v1.1, the endpoint memory descriptor array started at
* offset 32 (i.e. the offset of ep_mem_offset in the current structure)
*/
- if (ffa_version <= FFA_VERSION_1_0)
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
offset += offsetof(struct ffa_mem_region, ep_mem_offset);
else
offset += sizeof(struct ffa_mem_region);
@@ -423,6 +471,8 @@ struct ffa_msg_ops {
int (*sync_send_receive)(struct ffa_device *dev,
struct ffa_send_direct_data *data);
int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
+ int (*sync_send_receive2)(struct ffa_device *dev,
+ struct ffa_send_direct_data2 *data);
};
struct ffa_mem_ops {
@@ -437,6 +487,7 @@ struct ffa_cpu_ops {
typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+typedef void (*ffa_fwk_notifier_cb)(int notify_id, void *cb_data, void *buf);
struct ffa_notifier_ops {
int (*sched_recv_cb_register)(struct ffa_device *dev,
@@ -445,6 +496,10 @@ struct ffa_notifier_ops {
int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
ffa_notifier_cb cb, void *cb_data, int notify_id);
int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*fwk_notify_request)(struct ffa_device *dev,
+ ffa_fwk_notifier_cb cb, void *cb_data,
+ int notify_id);
+ int (*fwk_notify_relinquish)(struct ffa_device *dev, int notify_id);
int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
u16 vcpu);
};
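
A hedged sketch of driving the new direct request2 path, assuming the usual dev->ops->msg_ops layout of struct ffa_ops:

        static int example_ffa_send(struct ffa_device *dev,
                                    struct ffa_send_direct_data2 *data2,
                                    struct ffa_send_direct_data *data)
        {
                const struct ffa_msg_ops *ops = dev->ops->msg_ops;

                /* prefer the x4-x17 register ABI when the partition takes it */
                if (ffa_partition_supports_direct_req2_recv(dev))
                        return ops->sync_send_receive2(dev, data2);

                return ops->sync_send_receive(dev, data);
        }
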
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
new file mode 100644
index 000000000000..7f00c5285a32
--- /dev/null
+++ b/include/linux/arm_mpam.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Arm Ltd. */
+
+#ifndef __LINUX_ARM_MPAM_H
+#define __LINUX_ARM_MPAM_H
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+struct mpam_msc;
+
+enum mpam_msc_iface {
+ MPAM_IFACE_MMIO, /* a real MPAM MSC */
+ MPAM_IFACE_PCC, /* a fake MPAM MSC */
+};
+
+enum mpam_class_types {
+ MPAM_CLASS_CACHE, /* Caches, e.g. L2, L3 */
+ MPAM_CLASS_MEMORY, /* Main memory */
+ MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */
+};
+
+#define MPAM_CLASS_ID_DEFAULT 255
+
+#ifdef CONFIG_ACPI_MPAM
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc);
+
+int acpi_mpam_count_msc(void);
+#else
+static inline int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ return -EINVAL;
+}
+
+static inline int acpi_mpam_count_msc(void) { return -EINVAL; }
+#endif
+
+#ifdef CONFIG_ARM64_MPAM_DRIVER
+int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id, int component_id);
+#else
+static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id,
+ int component_id)
+{
+ return -EINVAL;
+}
+#endif
+
+/**
+ * mpam_register_requestor() - Register a requestor with the MPAM driver
+ * @partid_max: The maximum PARTID value the requestor can generate.
+ * @pmg_max: The maximum PMG value the requestor can generate.
+ *
+ * Registers a requestor with the MPAM driver to ensure the chosen system-wide
+ * minimum PARTID and PMG values will allow the requestor's features to be used.
+ *
+ * Returns an error if the registration is too late, and a larger PARTID/PMG
+ * value has been advertised to user-space. In this case the requestor should
+ * not use its MPAM features. Returns 0 on success.
+ */
+int mpam_register_requestor(u16 partid_max, u8 pmg_max);
+
+#endif /* __LINUX_ARM_MPAM_H */
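
A minimal sketch of the registration contract described in the kernel-doc above; the caller and the register programming step are illustrative:

        static int example_enable_requestor(u16 partid_max, u8 pmg_max)
        {
                int err = mpam_register_requestor(partid_max, pmg_max);

                if (err)
                        return err;     /* too late: wider values already exposed */

                /* safe to start generating PARTID/PMG values now */
                return 0;
        }
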
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 255701e1251b..f652a5028b59 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,12 +46,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
-void __init sdei_init(void);
+void __init acpi_sdei_init(void);
void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
-static inline void sdei_init(void) { }
+static inline void acpi_sdei_init(void) { }
static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
index 83f9c6e1e5e9..b41bce82a191 100644
--- a/include/linux/asn1_decoder.h
+++ b/include/linux/asn1_decoder.h
@@ -9,6 +9,7 @@
#define _LINUX_ASN1_DECODER_H
#include <linux/asn1.h>
+#include <linux/types.h>
struct asn1_decoder;
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
index 08cd0c2ad34f..d17484dffb74 100644
--- a/include/linux/asn1_encoder.h
+++ b/include/linux/asn1_encoder.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/asn1.h>
#include <linux/asn1_ber_bytecode.h>
-#include <linux/bug.h>
#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
unsigned char *
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5cc73d7e5b52..1ca9f9e05f4f 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -168,11 +168,6 @@ async_xor_offs(struct page *dest, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit);
-
-struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
struct page **src_list, unsigned int *src_offset,
int src_cnt, size_t len, enum sum_check_flags *result,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 792e10a09787..54b416e26995 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,7 @@ enum {
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
+ ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
@@ -566,6 +567,7 @@ struct ata_bmdma_prd {
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7)
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
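
A brief sketch of the new lock test; per identify word 128 (ATA_ID_DLF), bits 2:0 all set mean security is supported, enabled and locked:

        static bool example_dev_is_usable(const u16 *id)
        {
                /* a locked device rejects media access until unlocked */
                return !ata_id_is_locked(id);
        }
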
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
deleted file mode 100644
index 76860a461ed2..000000000000
--- a/include/linux/ath9k_platform.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2008 Atheros Communications Inc.
- * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_ATH9K_PLATFORM_H
-#define _LINUX_ATH9K_PLATFORM_H
-
-#define ATH9K_PLAT_EEP_MAX_WORDS 2048
-
-struct ath9k_platform_data {
- const char *eeprom_name;
-
- u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
- u8 *macaddr;
-
- int led_pin;
- u32 gpio_mask;
- u32 gpio_val;
-
- u32 bt_active_pin;
- u32 bt_priority_pin;
- u32 wlan_active_pin;
-
- bool endian_check;
- bool is_clk_25mhz;
- bool tx_gain_buffalo;
- bool disable_2ghz;
- bool disable_5ghz;
- bool led_active_high;
-
- int (*get_mac_revision)(void);
- int (*external_reset)(void);
-
- bool use_eeprom;
-};
-
-#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 9b02961d65ee..70807c679f1a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
+ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
@@ -249,6 +250,12 @@ static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
ATM_SKB(skb)->atm_options = vcc->atm_options;
}
+static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
+ &sk_atm(vcc)->sk_wmem_alloc));
+}
+
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc);
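
A hedged sketch of the new hook pair: pre_send() validates a frame before the core commits accounting, and atm_return_tx() undoes the charge if the driver must drop the frame later. The queue-full predicate is hypothetical:

        static bool example_hw_queue_full(void);        /* hypothetical */

        static int example_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
        {
                if (skb->len > vcc->qos.txtp.max_sdu)
                        return -EMSGSIZE;
                return 0;
        }

        static int example_send(struct atm_vcc *vcc, struct sk_buff *skb)
        {
                if (example_hw_queue_full()) {
                        atm_return_tx(vcc, skb);
                        dev_kfree_skb_any(skb);
                        return -ENOBUFS;
                }
                /* ... hand skb to hardware ... */
                return 0;
        }
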
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 956bcba5dbf2..2f9d36b72bd8 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index debd487fe971..37ab6314a9f7 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1276,7 +1276,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg(v, old, new);
}
@@ -1298,7 +1298,7 @@ static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_acquire(v, old, new);
}
@@ -1321,7 +1321,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_release(v, old, new);
}
@@ -1343,13 +1343,13 @@ static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -2854,7 +2854,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg(v, old, new);
}
@@ -2876,7 +2876,7 @@ static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_acquire(v, old, new);
}
@@ -2899,7 +2899,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_release(v, old, new);
}
@@ -2921,13 +2921,13 @@ static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4432,7 +4432,7 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg(v, old, new);
}
@@ -4454,7 +4454,7 @@ static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
}
@@ -4477,7 +4477,7 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_release(v, old, new);
}
@@ -4499,13 +4499,13 @@ static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
+ instrument_read_write(old, sizeof(*old));
return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
}
/**
* atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// ce5b65e0f1f8a276268b667194581d24bed219d4
+// f618ac667f868941a84ce0ab2242f1786e049ed4
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 3ef844b3ab8a..f86b29d90877 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index e4004d1e6725..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -61,14 +61,8 @@ int attribute_container_device_trigger_safe(struct device *dev,
int (*undo)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0050ef288ab3..536f8ee8da81 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -37,6 +37,8 @@ struct audit_watch;
struct audit_tree;
struct sk_buff;
struct kern_ipc_perm;
+struct lsm_id;
+struct lsm_prop;
struct audit_krule {
u32 pflags;
@@ -147,6 +149,10 @@ extern unsigned compat_signal_class[];
#define AUDIT_TTY_ENABLE BIT(0)
#define AUDIT_TTY_LOG_PASSWD BIT(1)
+/* bit values for audit_cfg_lsm */
+#define AUDIT_CFG_LSM_SECCTX_SUBJECT BIT(0)
+#define AUDIT_CFG_LSM_SECCTX_OBJECT BIT(1)
+
struct filename;
#define AUDIT_OFF 0
@@ -185,6 +191,8 @@ extern void audit_log_path_denied(int type,
const char *operation);
extern void audit_log_lost(const char *message);
+extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
+extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
@@ -210,6 +218,8 @@ extern u32 audit_enabled;
extern int audit_signal_info(int sig, struct task_struct *t);
+extern void audit_cfg_lsm(const struct lsm_id *lsmid, int flags);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -245,6 +255,16 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline void audit_log_path_denied(int type, const char *operation)
{ }
+static inline int audit_log_subj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
+static inline int audit_log_obj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
@@ -269,6 +289,9 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
return 0;
}
+static inline void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
+{ }
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -417,7 +440,7 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
extern void __audit_openat2_how(struct open_how *how);
-extern void __audit_log_kern_module(char *name);
+extern void __audit_log_kern_module(const char *name);
extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
@@ -519,7 +542,7 @@ static inline void audit_openat2_how(struct open_how *how)
__audit_openat2_how(how);
}
-static inline void audit_log_kern_module(char *name)
+static inline void audit_log_kern_module(const char *name)
{
if (!audit_dummy_context())
__audit_log_kern_module(name);
@@ -527,7 +550,7 @@ static inline void audit_log_kern_module(char *name)
static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
- if (!audit_dummy_context())
+ if (audit_enabled)
__audit_fanotify(response, friar);
}
@@ -677,9 +700,8 @@ static inline void audit_mmap_fd(int fd, int flags)
static inline void audit_openat2_how(struct open_how *how)
{ }
-static inline void audit_log_kern_module(char *name)
-{
-}
+static inline void audit_log_kern_module(const char *name)
+{ }
static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{ }
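
A small sketch of the new prop-based helpers side by side; where the prop values come from is left open here (LSM hooks in practice):

        static void example_log_both_ctx(struct audit_buffer *ab,
                                         struct lsm_prop *subj,
                                         struct lsm_prop *obj)
        {
                /* both helpers compile to no-op stubs when !CONFIG_AUDIT */
                audit_log_subj_ctx(ab, subj);
                audit_log_obj_ctx(ab, obj);
        }
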
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index de21d9d24a95..4086afd0cc6b 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -58,6 +58,10 @@
* in
* @name: Match name found by the auxiliary device driver,
* @id: unique identifier if multiple devices of the same name are exported,
+ * @sysfs: embedded struct which holds all sysfs-related fields,
+ * @sysfs.irqs: xarray of the irq indices used by the device,
+ * @sysfs.lock: Synchronize irq sysfs creation,
+ * @sysfs.irq_dir_exists: whether the "irqs" directory exists,
*
* An auxiliary_device represents a part of its parent device's functionality.
* It is given a name that, combined with the registering drivers
@@ -139,6 +143,11 @@ struct auxiliary_device {
struct device dev;
const char *name;
u32 id;
+ struct {
+ struct xarray irqs;
+ struct mutex lock; /* Synchronize irq sysfs creation */
+ bool irq_dir_exists;
+ } sysfs;
};
/**
@@ -203,7 +212,7 @@ static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
return container_of(dev, struct auxiliary_device, dev);
}
-static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+static inline const struct auxiliary_driver *to_auxiliary_drv(const struct device_driver *drv)
{
return container_of(drv, struct auxiliary_driver, driver);
}
@@ -212,8 +221,24 @@ int auxiliary_device_init(struct auxiliary_device *auxdev);
int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+#ifdef CONFIG_SYSFS
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq);
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev,
+ int irq);
+#else /* CONFIG_SYSFS */
+static inline int
+auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ return 0;
+}
+
+static inline void
+auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {}
+#endif
+
static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
{
+ mutex_destroy(&auxdev->sysfs.lock);
put_device(&auxdev->dev);
}
@@ -229,6 +254,23 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+void auxiliary_device_destroy(void *auxdev);
+
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+
+#define devm_auxiliary_device_create(dev, devname, platform_data) \
+ __devm_auxiliary_device_create(dev, KBUILD_MODNAME, devname, \
+ platform_data, 0)
+
/**
* module_auxiliary_driver() - Helper macro for registering an auxiliary driver
* @__auxiliary_driver: auxiliary driver struct
@@ -244,8 +286,4 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
#define module_auxiliary_driver(__auxiliary_driver) \
module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
-
#endif /* _AUXILIARY_BUS_H_ */
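
A minimal sketch of the new devm creation helper; the "foo" device name and the NULL-on-failure contract are assumptions for illustration:

        static int example_parent_probe(struct device *parent, void *pdata)
        {
                struct auxiliary_device *adev;

                /* creates KBUILD_MODNAME.foo with id 0; torn down
                 * automatically when 'parent' unbinds
                 */
                adev = devm_auxiliary_device_create(parent, "foo", pdata);
                if (!adev)
                        return -ENODEV;

                return 0;
        }
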
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 8e177b67e82f..11bdab5522fd 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -89,6 +89,9 @@ enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
+enum virtchnl_bw_limit_type {
+ VIRTCHNL_BW_SHAPER = 0,
+};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
@@ -129,8 +132,8 @@ enum virtchnl_ops {
VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
@@ -151,6 +154,14 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ /* opcode 58 and 59 are reserved */
+ VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+ VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
+ /* opcode 62 - 65 are reserved */
+ VIRTCHNL_OP_GET_QOS_CAPS = 66,
+ /* opcode 68 through 111 are reserved */
+ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+ VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_MAX,
};
@@ -247,6 +258,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@@ -260,6 +272,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
+#define VIRTCHNL_VF_CAP_PTP BIT(31)
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -299,6 +313,60 @@ struct virtchnl_txq_info {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+ VIRTCHNL_RXDID_0_16B_BASE = 0,
+ VIRTCHNL_RXDID_1_32B_BASE = 1,
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
+ VIRTCHNL_RXDID_7_HW_RSVD = 7,
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
+ /* 22 through 63 are reserved */
+};
+
+#define VIRTCHNL_RXDID_BIT(x) BIT_ULL(VIRTCHNL_RXDID_##x)
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+ VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE),
+ VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE),
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC),
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW),
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB),
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL),
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2),
+ VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD),
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC),
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN),
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4),
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6),
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW),
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP),
+ /* 22 through 63 are reserved */
+};
+
+/* virtchnl_rxq_info_flags - definition of bits in the flags field of the
+ * virtchnl_rxq_info structure.
+ *
+ * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping
+ *
+ * Other flag bits are currently reserved and they may be extended in the
+ * future.
+ */
+enum virtchnl_rxq_info_flags {
+ VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
@@ -321,8 +389,14 @@ struct virtchnl_rxq_info {
u32 databuffer_size;
u32 max_pkt_size;
u8 crc_disable;
- u8 rxdid;
- u8 pad1[2];
+ /* see enum virtchnl_rx_desc_ids;
+ * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
+ * that when the offload is not supported, the descriptor format aligns
+ * with VIRTCHNL_RXDID_1_32B_BASE.
+ */
+ enum virtchnl_rx_desc_ids rxdid:8;
+ enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */
+ u8 pad1;
u64 dma_ring_addr;
/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
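
A short sketch of a VF filling the extended Rx queue config; the two 8-bit enum bitfields plus one pad byte preserve the wire layout of the old 'u8 rxdid; u8 pad1[2]':

        static void example_fill_rxq(struct virtchnl_rxq_info *rxq)
        {
                rxq->rxdid = VIRTCHNL_RXDID_2_FLEX_SQ_NIC;  /* flex descriptor */
                rxq->flags = VIRTCHNL_PTP_RX_TSTAMP;        /* request Rx timestamps */
        }
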
@@ -900,18 +974,19 @@ struct virtchnl_rss_lut {
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
#define virtchnl_rss_lut_LEGACY_SIZEOF 6
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
-struct virtchnl_rss_hena {
- u64 hena;
+struct virtchnl_rss_hashcfg {
+ /* Bits defined by enum libie_filter_pctype */
+ u64 hashcfg;
};
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
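A hedged sketch of a VIRTCHNL_OP_SET_RSS_HASHCFG payload; LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP is assumed to be one of the enum libie_filter_pctype values the comment refers to and is not defined in this header:

struct virtchnl_rss_hashcfg cfg = {
	/* hash TCP-over-IPv4 flows; pctype value assumed, see libie */
	.hashcfg = BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP),
};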
/* Type of RSS algorithm */
enum virtchnl_rss_algorithm {
@@ -1022,10 +1097,6 @@ struct virtchnl_filter {
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
-struct virtchnl_supported_rxdids {
- u64 supported_rxdids;
-};
-
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -1121,6 +1192,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
@@ -1181,6 +1253,17 @@ enum virtchnl_proto_hdr_type {
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_AH,
VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_GTPC,
+ VIRTCHNL_PROTO_HDR_ECPRI,
+ VIRTCHNL_PROTO_HDR_L2TPV2,
+ VIRTCHNL_PROTO_HDR_PPP,
+ /* IPv4 and IPv6 Fragment header types are only associated to
+ * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+ * cannot be used independently.
+ */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ VIRTCHNL_PROTO_HDR_GRE,
};
/* Protocol header field within a protocol header. */
@@ -1203,6 +1286,7 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_IPV4_DSCP,
VIRTCHNL_PROTO_HDR_IPV4_TTL,
VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
/* IPV6 */
VIRTCHNL_PROTO_HDR_IPV6_SRC =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
@@ -1210,18 +1294,34 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_IPV6_TC,
VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* IPV6 Prefix */
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
/* TCP */
VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
/* UDP */
VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
/* SCTP */
VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
/* GTPU_IP */
VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
@@ -1245,6 +1345,28 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
VIRTCHNL_PROTO_HDR_PFCP_SEID,
+ /* GTPC */
+ VIRTCHNL_PROTO_HDR_GTPC_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+ /* ECPRI */
+ VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+ VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+ /* IPv4 Dummy Fragment */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+ /* IPv6 Extension Fragment */
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+ /* GTPU_DWN/UP */
+ VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+ VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+ /* L2TPv2 */
+ VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+ VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
};
struct virtchnl_proto_hdr {
@@ -1266,13 +1388,22 @@ struct virtchnl_proto_hdrs {
u8 pad[3];
/**
* specify where the protocol header starts from.
+ * must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
**/
- int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
- struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ u32 count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
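A minimal sketch of building a raw-packet request with the new union member. pkt, pkt_mask and pkt_len are assumed inputs; kzalloc() leaves tunnel_level and count at 0, matching the "must be 0" rule in the comment above (whether other fields carry sentinel values for raw requests is driver convention, not defined here):

struct virtchnl_proto_hdrs *hdrs;

if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
	return -EINVAL;

hdrs = kzalloc(sizeof(*hdrs), GFP_KERNEL);	/* tunnel_level and count stay 0 */
if (!hdrs)
	return -ENOMEM;
hdrs->raw.pkt_len = pkt_len;
memcpy(hdrs->raw.spec, pkt, pkt_len);
memcpy(hdrs->raw.mask, pkt_mask, pkt_len);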
@@ -1315,7 +1446,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
struct virtchnl_filter_action_set {
/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
- int count;
+ u32 count;
struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};
@@ -1405,6 +1536,141 @@ struct virtchnl_fdir_del {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC BIT(2)
+
+/**
+ * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF.
+ * @caps: On send, VF sets what capabilities it requests. On reply, PF
+ * indicates what has been enabled for this VF. The PF shall not set
+ * bits which were not requested by the VF.
+ * @rsvd: Reserved bits for future extension.
+ *
+ * Structure that defines the PTP capabilities available to the VF. The VF
+ * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the @caps field
+ * indicating what capabilities it is requesting. The PF will respond with
+ * the same message, using the virtchnl_ptp_caps structure to indicate what
+ * is enabled for the VF.
+ *
+ * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
+ * receive timestamps enabled in the flexible descriptors. Note that this
+ * requires a VF to also negotiate to enable advanced flexible descriptors in
+ * the receive path instead of the default legacy descriptor format.
+ *
+ * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
+ * via the VIRTCHNL_OP_1588_PTP_GET_TIME command.
+ *
+ * Note that in the future, additional capability flags may be added which
+ * indicate additional extended support. All fields marked as reserved by this
+ * header will be set to zero. VF implementations should verify this to ensure
+ * that future extensions do not break compatibility.
+ */
+struct virtchnl_ptp_caps {
+ u32 caps;
+ u8 rsvd[44];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
+
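A sketch of the VF side of this handshake; the transport that carries VIRTCHNL_OP_1588_PTP_GET_CAPS is elided:

struct virtchnl_ptp_caps caps = {
	.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
		VIRTCHNL_1588_PTP_CAP_READ_PHC,
};

/* send VIRTCHNL_OP_1588_PTP_GET_CAPS with &caps as payload; the PF's
 * reply carries the subset of bits it actually enabled
 */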
+/**
+ * struct virtchnl_phc_time - Contains the 64 bits of PHC clock time in ns.
+ * @time: PHC time in nanoseconds
+ * @rsvd: Reserved for future extension
+ *
+ * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64 bits
+ * of PHC clock time in nanoseconds.
+ *
+ * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
+ * the PHC. This op is available in case direct access via the PHC registers
+ * is not available.
+ */
+struct virtchnl_phc_time {
+ u64 time;
+ u8 rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
+struct virtchnl_shaper_bw {
+ /* Unit is Kbps */
+ u32 committed;
+ u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+ u8 tc_num;
+ u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT 0
+#define VIRTCHNL_ABITER_ETS 2
+ u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT 1
+ u8 weight;
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+ u16 vsi_id;
+ u16 num_elem;
+ struct virtchnl_qos_cap_elem cap[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
+#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
+
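Because cap[] is a flexible array, buffers are sized with virtchnl_struct_size() (wired up further down in this patch); a sketch, with num_tc an assumed input:

struct virtchnl_qos_cap_list *caps;
size_t len = virtchnl_struct_size(caps, cap, num_tc);

caps = kzalloc(len, GFP_KERNEL);
if (!caps)
	return -ENOMEM;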
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+ u16 queue_id;
+ u8 tc;
+ u8 pad;
+ struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+ u16 vsi_id;
+ u16 num_queues;
+ struct virtchnl_queue_bw cfg[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
+#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
+
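A sketch of shaping one queue to 100 Mbit/s committed and 200 Mbit/s peak; bw_cfg is assumed to have been allocated with virtchnl_struct_size() as in the previous sketch, and vsi_id is an assumed input:

bw_cfg->vsi_id = vsi_id;
bw_cfg->num_queues = 1;
bw_cfg->cfg[0].queue_id = 0;
bw_cfg->cfg[0].tc = 0;
bw_cfg->cfg[0].shaper.committed = 100000;	/* Kbps */
bw_cfg->cfg[0].shaper.peak = 200000;		/* Kbps */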
+enum virtchnl_queue_type {
+ VIRTCHNL_QUEUE_TYPE_TX = 0,
+ VIRTCHNL_QUEUE_TYPE_RX = 1,
+};
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl_queue_chunk {
+ /* see enum virtchnl_queue_type */
+ s32 type;
+ u16 start_queue_id;
+ u16 num_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
+
+struct virtchnl_quanta_cfg {
+ u16 quanta_size;
+ u16 pad;
+ struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+
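A sketch of a quanta configuration that passes the validation added below, where both quanta_size and queue_select.num_queues must be non-zero:

struct virtchnl_quanta_cfg q_cfg = {
	.quanta_size = 1024,
	.queue_select = {
		.type = VIRTCHNL_QUEUE_TYPE_TX,
		.start_queue_id = 0,
		.num_queues = 4,
	},
};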
#define __vss_byone(p, member, count, old) \
(struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
@@ -1427,6 +1693,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
__vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
__vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
__vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \
+ __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \
__vss(virtchnl_rss_key, __vss_byone, p, m, c), \
__vss(virtchnl_rss_lut, __vss_byone, p, m, c))
@@ -1562,10 +1830,10 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
valid_len = sizeof(struct virtchnl_rss_hfunc);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct virtchnl_rss_hena);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ valid_len = sizeof(struct virtchnl_rss_hashcfg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
@@ -1626,6 +1894,41 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
valid_len = sizeof(struct virtchnl_vlan_setting);
break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_queues_bw_cfg *q_bw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+
+ valid_len = virtchnl_struct_size(q_bw, cfg,
+ q_bw->num_queues);
+ if (q_bw->num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ valid_len = sizeof(struct virtchnl_quanta_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_quanta_cfg *q_quanta =
+ (struct virtchnl_quanta_cfg *)msg;
+
+ if (q_quanta->quanta_size == 0 ||
+ q_quanta->queue_select.num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ valid_len = sizeof(struct virtchnl_ptp_caps);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ valid_len = sizeof(struct virtchnl_phc_time);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 2ad261082bba..0217c1073735 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -63,6 +63,8 @@ enum wb_reason {
struct wb_completion {
atomic_t cnt;
wait_queue_head_t *waitq;
+ unsigned long progress_stamp; /* The jiffies when slow progress is detected */
+ unsigned long wait_start; /* The jiffies when waiting for the writeback work to finish */
};
#define __WB_COMPLETION_INIT(_waitq) \
@@ -152,6 +154,10 @@ struct bdi_writeback {
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
struct list_head b_attached; /* attached inodes, protected by list_lock */
struct list_head offline_node; /* anchored at offline_cgwbs */
+ struct work_struct switch_work; /* work used to perform inode switching
+ * to this wb */
+ struct llist_head switch_wbs_ctxs; /* queued contexts for
+ * writeback switching */
union {
struct work_struct release_work;
@@ -164,7 +170,9 @@ struct backing_dev_info {
u64 id;
struct rb_node rb_node; /* keyed by ->id */
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
+ /* max readahead in PAGE_SIZE units */
+ unsigned long __data_racy ra_pages;
+
unsigned long io_pages; /* max allowed IO size */
struct kref refcnt; /* Reference counter for the structure */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 8e7af9a03b41..0c8342747cab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -66,16 +66,6 @@ static inline void wb_stat_mod(struct bdi_writeback *wb,
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- wb_stat_mod(wb, item, 1);
-}
-
-static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- wb_stat_mod(wb, item, -1);
-}
-
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
return percpu_counter_read_positive(&wb->stat[item]);
@@ -118,12 +108,10 @@ int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit
*
* BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
* should contribute to accounting
- * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
#define BDI_CAP_WRITEBACK (1 << 0)
-#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
-#define BDI_CAP_STRICTLIMIT (1 << 2)
+#define BDI_CAP_STRICTLIMIT (1 << 1)
extern struct backing_dev_info noop_backing_dev_info;
@@ -249,6 +237,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
(!lockdep_is_held(&inode->i_lock) &&
!lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
@@ -288,10 +277,11 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
rcu_read_lock();
/*
- * Paired with store_release in inode_switch_wbs_work_fn() and
+ * Paired with a release fence in inode_do_switch_wbs() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = inode_state_read_once(inode) & I_WB_SWITCH;
+ smp_rmb();
if (unlikely(cookie->locked))
xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
index 4b61b0e57720..1476a6ed1bfd 100644
--- a/include/linux/backing-file.h
+++ b/include/linux/backing-file.h
@@ -14,9 +14,8 @@
struct backing_file_ctx {
const struct cred *cred;
- struct file *user_file;
- void (*accessed)(struct file *);
- void (*end_write)(struct file *);
+ void (*accessed)(struct file *file);
+ void (*end_write)(struct kiocb *iocb, ssize_t);
};
struct file *backing_file_open(const struct path *user_path, int flags,
@@ -31,13 +30,13 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
struct kiocb *iocb, int flags,
struct backing_file_ctx *ctx);
-ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags,
struct backing_file_ctx *ctx);
ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
- struct file *out, loff_t *ppos, size_t len,
- unsigned int flags,
+ struct file *out, struct kiocb *iocb,
+ size_t len, unsigned int flags,
struct backing_file_ctx *ctx);
int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
struct backing_file_ctx *ctx);
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 19a1c0e22629..f29a9ef1052e 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -10,9 +10,7 @@
#define _LINUX_BACKLIGHT_H
#include <linux/device.h>
-#include <linux/fb.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
#include <linux/types.h>
/**
@@ -66,24 +64,6 @@ enum backlight_type {
BACKLIGHT_TYPE_MAX,
};
-/**
- * enum backlight_notification - the type of notification
- *
- * The notifications that is used for notification sent to the receiver
- * that registered notifications using backlight_register_notifier().
- */
-enum backlight_notification {
- /**
- * @BACKLIGHT_REGISTERED: The backlight device is registered.
- */
- BACKLIGHT_REGISTERED,
-
- /**
- * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered.
- */
- BACKLIGHT_UNREGISTERED,
-};
-
/** enum backlight_scale - the type of scale used for brightness values
*
* The type of scale used for brightness values.
@@ -209,15 +189,19 @@ struct backlight_properties {
* attribute: /sys/class/backlight/<backlight>/bl_power
* When the power property is updated update_status() is called.
*
- * The possible values are: (0: full on, 1 to 3: power saving
- * modes; 4: full off), see FB_BLANK_XXX.
+ * The possible values are: (0: full on, 4: full off), see
+ * BACKLIGHT_POWER constants.
*
- * When the backlight device is enabled @power is set
- * to FB_BLANK_UNBLANK. When the backlight device is disabled
- * @power is set to FB_BLANK_POWERDOWN.
+ * When the backlight device is enabled, @power is set to
+ * BACKLIGHT_POWER_ON. When the backlight device is disabled,
+ * @power is set to BACKLIGHT_POWER_OFF.
*/
int power;
+#define BACKLIGHT_POWER_ON (0)
+#define BACKLIGHT_POWER_OFF (4)
+#define BACKLIGHT_POWER_REDUCED (1) // deprecated; don't use in new code
+
/**
* @type: The type of backlight supported.
*
@@ -293,11 +277,6 @@ struct backlight_device {
const struct backlight_ops *ops;
/**
- * @fb_notif: The framebuffer notifier block
- */
- struct notifier_block fb_notif;
-
- /**
* @entry: List entry of all registered backlight devices
*/
struct list_head entry;
@@ -308,15 +287,7 @@ struct backlight_device {
struct device dev;
/**
- * @fb_bl_on: The state of individual fbdev's.
- *
- * Multiple fbdev's may share one backlight device. The fb_bl_on
- * records the state of the individual fbdev.
- */
- bool fb_bl_on[FB_MAX];
-
- /**
- * @use_count: The number of uses of fb_bl_on.
+ * @use_count: The number of unblanked displays.
*/
int use_count;
};
@@ -346,7 +317,7 @@ static inline int backlight_enable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.state &= ~BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -361,7 +332,7 @@ static inline int backlight_disable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
bd->props.state |= BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -380,7 +351,7 @@ static inline int backlight_disable(struct backlight_device *bd)
*/
static inline bool backlight_is_blank(const struct backlight_device *bd)
{
- return bd->props.power != FB_BLANK_UNBLANK ||
+ return bd->props.power != BACKLIGHT_POWER_ON ||
bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}
@@ -417,13 +388,27 @@ void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
-int backlight_register_notifier(struct notifier_block *nb);
-int backlight_unregister_notifier(struct notifier_block *nb);
struct backlight_device *backlight_device_get_by_name(const char *name);
struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
int backlight_device_set_brightness(struct backlight_device *bd,
unsigned long brightness);
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+#else
+static inline void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+static inline void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+#endif
+
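With the notifier chain gone, display drivers report blank transitions to the backlight core directly; a sketch, where display_dev is assumed to be the display's struct device:

/* display goes from unblanked to blanked */
backlight_notify_blank_all(display_dev, false, true);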
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
/**
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index 670f2dae692f..996493917f36 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -48,11 +48,11 @@ struct badblocks_context {
int ack;
};
-int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors);
-int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
- int acknowledged);
-int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
+int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors);
+bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
+ int acknowledged);
+bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors);
void ack_all_badblocks(struct badblocks *bb);
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack);
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
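Callers now test a boolean result from badblocks_set()/badblocks_clear() instead of an int return code; a sketch:

if (!badblocks_set(bb, sector, nr_sectors, 1 /* acknowledged */))
	pr_warn("failed to record bad range %llu+%llu\n",
		(unsigned long long)sector, (unsigned long long)nr_sectors);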
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 5ca2d5699620..7cfe48769239 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -4,12 +4,13 @@
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Balloon page migration makes use of the general non-lru movable page
+ * Balloon page migration makes use of the general "movable_ops page migration"
* feature.
*
* page->private is used to reference the responsible balloon device.
- * page->mapping is used in context of non-lru page migration to reference
- * the address space operations for page isolation/migration/compaction.
+ * That these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
@@ -17,12 +18,10 @@
* and safely perform balloon's page compaction and migration we must, always,
* ensure following these simple rules:
*
- * i. when updating a balloon's page ->mapping element, strictly do it under
- * the following lock order, independently of the far superior
- * locking scheme (lru_lock, balloon_lock):
+ * i. Setting the PG_movable_ops flag and page->private with the following
+ * lock order
* +-page_lock(page);
* +--spin_lock_irq(&b_dev_info->pages_lock);
- * ... page->mapping updates here ...
*
* ii. isolation or dequeueing procedure must remove the page from balloon
* device page list under b_dev_info->pages_lock.
@@ -78,6 +77,15 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
#ifdef CONFIG_BALLOON_COMPACTION
extern const struct movable_operations balloon_mops;
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -92,68 +100,34 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, &balloon_mops);
- set_page_private(page, (unsigned long)balloon);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
+ SetPageMovableOps(page);
+ set_page_private(page, (unsigned long)balloon);
+ }
list_add(&page->lru, &balloon->pages);
}
-/*
- * balloon_page_delete - delete a page from balloon's page list and clear
- * the page->private assignement accordingly.
- * @page : page to be released from balloon's page list
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before deleting a page from the balloon device.
- */
-static inline void balloon_page_delete(struct page *page)
+static inline gfp_t balloon_mapping_gfp_mask(void)
{
- __ClearPageOffline(page);
- __ClearPageMovable(page);
- set_page_private(page, 0);
- /*
- * No touch page.lru field once @page has been isolated
- * because VM is using the field.
- */
- if (!PageIsolated(page))
- list_del(&page->lru);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ return GFP_HIGHUSER_MOVABLE;
+ return GFP_HIGHUSER;
}
/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
+ * balloon_page_finalize - prepare a balloon page that was removed from the
+ * balloon list for release to the page allocator
+ * @page: page to be released to the page allocator
+ *
+ * Caller must ensure that the page is locked.
*/
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+static inline void balloon_page_finalize(struct page *page)
{
- return (struct balloon_dev_info *)page_private(page);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ set_page_private(page, 0);
+ /* PageOffline is sticky until the page is freed to the buddy. */
}
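A sketch of the release path the helper is meant for; per the comment above, the caller holds the page lock and has already dequeued the page under pages_lock:

balloon_page_finalize(page);
unlock_page(page);
__free_page(page);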
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER_MOVABLE;
-}
-
-#else /* !CONFIG_BALLOON_COMPACTION */
-
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline void balloon_page_delete(struct page *page)
-{
- __ClearPageOffline(page);
- list_del(&page->lru);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
-}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
-
/*
* balloon_page_push - insert a page into a page list.
* @head : pointer to list
diff --git a/include/linux/base64.h b/include/linux/base64.h
index 660d4cb1ef31..a2c6c9222da3 100644
--- a/include/linux/base64.h
+++ b/include/linux/base64.h
@@ -8,9 +8,15 @@
#include <linux/types.h>
+enum base64_variant {
+ BASE64_STD, /* RFC 4648 (standard) */
+ BASE64_URLSAFE, /* RFC 4648 (base64url) */
+ BASE64_IMAP, /* RFC 3501 */
+};
+
#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
-int base64_encode(const u8 *src, int len, char *dst);
-int base64_decode(const char *src, int len, u8 *dst);
+int base64_encode(const u8 *src, int len, char *dst, bool padding, enum base64_variant variant);
+int base64_decode(const char *src, int len, u8 *dst, bool padding, enum base64_variant variant);
#endif /* _LINUX_BASE64_H */
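With the new parameters, callers choose an alphabet and whether '=' padding is used; a minimal sketch encoding 32 bytes (data is an assumed input) as a URL-safe, unpadded token:

char out[BASE64_CHARS(32)];
int n;

n = base64_encode(data, 32, out, false, BASE64_URLSAFE);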
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 7615f8d7b1ed..e4b6ce953ddb 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index f8254fd53e15..40a7da3ef50e 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -5,8 +5,8 @@
#ifndef __BCM47XX_SPROM_H
#define __BCM47XX_SPROM_H
+#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
struct ssb_sprom;
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
index c8c7f01159fe..48830bf18042 100644
--- a/include/linux/bcm963xx_nvram.h
+++ b/include/linux/bcm963xx_nvram.h
@@ -81,25 +81,21 @@ static int __maybe_unused bcm963xx_nvram_checksum(
const struct bcm963xx_nvram *nvram,
u32 *expected_out, u32 *actual_out)
{
+ const u32 zero = 0;
u32 expected, actual;
size_t len;
if (nvram->version <= 4) {
expected = nvram->checksum_v4;
- len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V4_SIZE;
} else {
expected = nvram->checksum_v5;
- len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V5_SIZE;
}
- /*
- * Calculate the CRC32 value for the nvram with a checksum value
- * of 0 without modifying or copying the nvram by combining:
- * - The CRC32 of the nvram without the checksum value
- * - The CRC32 of a zero checksum value (which is also 0)
- */
- actual = crc32_le_combine(
- crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+ /* Calculate the CRC32 of the nvram with the checksum field set to 0. */
+ actual = crc32_le(~0, nvram, len - sizeof(u32));
+ actual = crc32_le(actual, &zero, sizeof(u32));
if (expected_out)
*expected_out = expected;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 68da8dba5162..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -203,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70f97f685bff..65abd5ab8836 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -19,13 +19,13 @@ struct linux_binprm {
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
+ unsigned long argmin; /* rlimit marker for copy_strings() */
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
- unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/* Should an execfd be passed to userspace? */
have_execfd:1,
@@ -42,7 +42,14 @@ struct linux_binprm {
* Set when errors can no longer be returned to the
* original userspace.
*/
- point_of_no_return:1;
+ point_of_no_return:1,
+ /* Set when "comm" must come from the dentry. */
+ comm_from_dentry:1,
+ /*
+ * Set by user space to check executability according to the
+ * caller's environment.
+ */
+ is_check:1;
struct file *executable; /* Executable to pass to the interpreter */
struct file *interpreter;
struct file *file;
@@ -57,7 +64,7 @@ struct linux_binprm {
const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
int execfd; /* File descriptor of the executable */
- unsigned long loader, exec;
+ unsigned long exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
@@ -83,7 +90,6 @@ struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *);
- int (*load_shlib)(struct file *);
#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
new file mode 100644
index 000000000000..21e4652dcfd2
--- /dev/null
+++ b/include/linux/bio-integrity.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BIO_INTEGRITY_H
+#define _LINUX_BIO_INTEGRITY_H
+
+#include <linux/bio.h>
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+ BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */
+ BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */
+ BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */
+ BIP_CHECK_GUARD = 1 << 5, /* guard check */
+ BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
+ BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
+
+ BIP_MEMPOOL = 1 << 15, /* buffer backed by mempool */
+};
+
+struct bio_integrity_payload {
+ struct bvec_iter bip_iter;
+
+ unsigned short bip_vcnt; /* # of integrity bio_vecs */
+ unsigned short bip_max_vcnt; /* integrity bio_vec slots */
+ unsigned short bip_flags; /* control flags */
+ u16 app_tag; /* application tag value */
+
+ struct bio_vec *bip_vec;
+};
+
+#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \
+ BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+
+#define bip_for_each_vec(bvl, bip, iter) \
+ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
+
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
+ for_each_bio(_bio) \
+ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_opf & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (bip)
+ return bip->bip_flags & flag;
+
+ return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+ return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ sector_t seed)
+{
+ bip->bip_iter.bi_sector = seed;
+}
+
+void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
+ struct bio_vec *bvecs, unsigned int nr_vecs);
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+ unsigned int nr);
+int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
+ unsigned int offset);
+int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
+void bio_integrity_unmap_user(struct bio *bio);
+bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
+void bio_integrity_trim(struct bio *bio);
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ return NULL;
+}
+
+static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
+{
+ return -EINVAL;
+}
+
+static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ return -EINVAL;
+}
+
+static inline void bio_integrity_unmap_user(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_prep(struct bio *bio)
+{
+ return true;
+}
+
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+ unsigned int bytes_done)
+{
+}
+
+static inline void bio_integrity_trim(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ return false;
+}
+
+static inline struct bio_integrity_payload *
+bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
+void bio_integrity_free_buf(struct bio_integrity_payload *bip);
+
+#endif /* _LINUX_BIO_INTEGRITY_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index d5379548d684..ad2d57908c1c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -11,6 +11,7 @@
#include <linux/uio.h>
#define BIO_MAX_VECS 256U
+#define BIO_MAX_INLINE_VECS UIO_MAXIOV
struct queue_limits;
@@ -19,9 +20,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
return min(nr_segs, BIO_MAX_VECS);
}
-#define bio_prio(bio) (bio)->bi_ioprio
-#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
-
#define bio_iter_iovec(bio, iter) \
bvec_iter_bvec((bio)->bi_io_vec, (iter))
@@ -293,7 +291,7 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
fi->folio = page_folio(bvec->bv_page);
fi->offset = bvec->bv_offset +
- PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
fi->_seg_count = bvec->bv_len;
fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
fi->_next = folio_next(fi->folio);
@@ -321,74 +319,13 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
#define bio_for_each_folio_all(fi, bio) \
for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
-enum bip_flags {
- BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
- BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
- BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
- BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
- BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
- BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */
- BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */
-};
-
-/*
- * bio integrity payload
- */
-struct bio_integrity_payload {
- struct bio *bip_bio; /* parent bio */
-
- struct bvec_iter bip_iter;
-
- unsigned short bip_vcnt; /* # of integrity bio_vecs */
- unsigned short bip_max_vcnt; /* integrity bio_vec slots */
- unsigned short bip_flags; /* control flags */
-
- struct bvec_iter bio_iter; /* for rewinding parent bio */
-
- struct work_struct bip_work; /* I/O completion */
-
- struct bio_vec *bip_vec;
- struct bio_vec bip_inline_vecs[];/* embedded bvec array */
-};
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
-{
- if (bio->bi_opf & REQ_INTEGRITY)
- return bio->bi_integrity;
-
- return NULL;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
-
- if (bip)
- return bip->bip_flags & flag;
-
- return false;
-}
-
-static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
-{
- return bip->bip_iter.bi_sector;
-}
-
-static inline void bip_set_seed(struct bio_integrity_payload *bip,
- sector_t seed)
-{
- bip->bip_iter.bi_sector = seed;
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
-struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
- unsigned *segs, struct bio_set *bs, unsigned max_bytes);
+int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes, unsigned len_align);
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+ u8 gaps_bit);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -468,9 +405,13 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
-extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, blk_opf_t opf);
+static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
+ unsigned short max_vecs, blk_opf_t opf)
+{
+ bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf);
+}
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
@@ -479,16 +420,38 @@ int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
size_t len, size_t off);
-extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
- unsigned int, unsigned int);
-int bio_add_zone_append_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
size_t off);
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
+
+/**
+ * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
+ * @kaddr: kernel virtual address to add
+ * @len: length in bytes to add
+ *
+ * Calculate how many bio_vecs need to be allocated to add the kernel virtual
+ * address range in [@kaddr:@len] in the worst case.

+ */
+static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
+{
+ if (is_vmalloc_addr(kaddr))
+ return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
+ return 1;
+}
+
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
+
+int submit_bio_wait(struct bio *bio);
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op);
+
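A sketch of the pattern these helpers enable for a possibly-vmalloc'ed buffer; buf, len and bdev are assumed inputs:

struct bio *bio;
int ret = -ENOMEM;

bio = bio_alloc(bdev, bio_add_max_vecs(buf, len), REQ_OP_WRITE, GFP_KERNEL);
/* cannot fail: nr_vecs was sized with bio_add_max_vecs() above */
if (bio_add_vmalloc(bio, buf, len))
	ret = submit_bio_wait(bio);
bio_put(bio);

For physically contiguous kernel memory, bdev_rw_virt() wraps the same sequence in a single call.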
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask);
+
+void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
@@ -695,10 +658,6 @@ struct bio_set {
mempool_t bio_pool;
mempool_t bvec_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t bio_integrity_pool;
- mempool_t bvec_integrity_pool;
-#endif
unsigned int back_pad;
/*
@@ -721,95 +680,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
return bs->bio_slab != NULL;
}
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define bip_for_each_vec(bvl, bip, iter) \
- for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
-
-#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
- for_each_bio(_bio) \
- bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
-
-int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
-extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern bool bio_integrity_prep(struct bio *);
-extern void bio_integrity_advance(struct bio *, unsigned int);
-extern void bio_integrity_trim(struct bio *);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
-extern int bioset_integrity_create(struct bio_set *, int);
-extern void bioset_integrity_free(struct bio_set *);
-extern void bio_integrity_init(void);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline void *bio_integrity(struct bio *bio)
-{
- return NULL;
-}
-
-static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
-{
- return 0;
-}
-
-static inline void bioset_integrity_free (struct bio_set *bs)
-{
- return;
-}
-
-static inline bool bio_integrity_prep(struct bio *bio)
-{
- return true;
-}
-
-static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask)
-{
- return 0;
-}
-
-static inline void bio_integrity_advance(struct bio *bio,
- unsigned int bytes_done)
-{
- return;
-}
-
-static inline void bio_integrity_trim(struct bio *bio)
-{
- return;
-}
-
-static inline void bio_integrity_init(void)
-{
- return;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
- return false;
-}
-
-static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
- unsigned int nr)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
-{
- return 0;
-}
-
-static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
- ssize_t len, u32 seed)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
/*
* Mark a bio as polled. Note that for async polled IO, the caller must
* expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
@@ -829,6 +699,23 @@ static inline void bio_clear_polled(struct bio *bio)
bio->bi_opf &= ~REQ_POLLED;
}
+/**
+ * bio_is_zone_append - is this a zone append bio?
+ * @bio: bio to check
+ *
+ * Check if @bio is a zone append operation. Core block layer code and end_io
+ * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
+ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
+ * it is not natively supported.
+ */
+static inline bool bio_is_zone_append(struct bio *bio)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return false;
+ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+}
+
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index bbc4730a6505..c0989b5b0407 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -13,7 +13,7 @@
* Don't use this unless you really need to: spin_lock() and spin_unlock()
* are significantly faster.
*/
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
@@ -38,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
/*
* Return true if it was acquired
*/
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -54,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
/*
* bit-based spin_unlock()
*/
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -71,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* non-atomic version, which can be used eg. if the bit lock itself is
* protecting the rest of the flags in the word.
*/
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 63928f173223..126dc5b380af 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <linux/typecheck.h>
#include <asm/byteorder.h>
/*
@@ -16,6 +17,7 @@
* FIELD_{GET,PREP} macros take as first parameter shifted mask
* from which they extract the base mask and shift amount.
* Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
*
* Example:
*
@@ -38,8 +40,7 @@
* FIELD_PREP(REG_FIELD_D, 0x40);
*
* Modify:
- * reg &= ~REG_FIELD_C;
- * reg |= FIELD_PREP(REG_FIELD_C, c);
+ * FIELD_MODIFY(REG_FIELD_C, &reg, c);
*/
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
@@ -60,7 +61,7 @@
#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
@@ -69,13 +70,33 @@
~((_mask) >> __bf_shf(_mask)) & \
(0 + (_val)) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
- __bf_cast_unsigned(_reg, ~0ull), \
- _pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
+#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \
+ __bf_cast_unsigned(reg, ~0ull), \
+ pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ __BF_FIELD_CHECK_REG(mask, reg, pfx); \
+ })
+
+#define __FIELD_PREP(mask, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ ((typeof(mask))(val) << __bf_shf(mask)) & (mask); \
+ })
+
+#define __FIELD_GET(mask, reg, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, 0U, pfx); \
+ (typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \
+ })
+
/**
* FIELD_MAX() - produce the maximum value representable by a field
* @_mask: shifted mask defining the field's length and position
@@ -112,8 +133,8 @@
*/
#define FIELD_PREP(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
- ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ __BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \
+ __FIELD_PREP(_mask, _val, "FIELD_PREP: "); \
})
#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
@@ -152,8 +173,25 @@
*/
#define FIELD_GET(_mask, _reg) \
({ \
- __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
- (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ __BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \
+ __FIELD_GET(_mask, _reg, "FIELD_GET: "); \
+ })
+
+/**
+ * FIELD_MODIFY() - modify a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg_p: pointer to the memory that should be updated
+ * @_val: value to store in the bitfield
+ *
+ * FIELD_MODIFY() modifies the set of bits in @_reg_p specified by @_mask,
+ * by replacing them with the bitfield value passed in as @_val.
+ */
+#define FIELD_MODIFY(_mask, _reg_p, _val) \
+ ({ \
+ typecheck_pointer(_reg_p); \
+ __BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
+ *(_reg_p) &= ~(_mask); \
+ *(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
})
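Usage of the new macro, continuing the Modify example above (read_reg()/write_reg() are placeholders for the surrounding driver code):

reg = read_reg();
FIELD_MODIFY(REG_FIELD_C, &reg, c);
write_reg(reg);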
extern void __compiletime_error("value doesn't fit into mask")
@@ -172,14 +210,14 @@ static __always_inline u64 field_mask(u64 field)
}
#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
-static __always_inline __##type type##_encode_bits(base v, base field) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
{ \
if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
__field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
-static __always_inline __##type type##_replace_bits(__##type old, \
- base val, base field) \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
+ base val, base field) \
{ \
return (old & ~to(field)) | type##_encode_bits(val, field); \
} \
@@ -188,7 +226,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \
{ \
*p = (*p & ~to(field)) | type##_encode_bits(val, field); \
} \
-static __always_inline base type##_get_bits(__##type v, base field) \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
{ \
return (from(v) & field)/field_multiplier(field); \
}
@@ -203,4 +241,62 @@ __MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
+#define __field_prep(mask, val) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __val = (val); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__val << __shift) & __mask; \
+ })
+
+#define __field_get(mask, reg) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __reg = (reg); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__reg & __mask) >> __shift; \
+ })
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val) \
+ (__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+ : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg) \
+ (__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+ : __field_get(mask, reg))
+
#endif
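A sketch of the runtime-mask case these docs describe, with masks looked up from a table; lane and reg are assumed inputs:

static const u32 lane_mask[] = { GENMASK(7, 0), GENMASK(15, 8) };

u32 speed = field_get(lane_mask[lane], reg);

reg &= ~lane_mask[lane];
reg |= field_prep(lane_mask[lane], speed + 1);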
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
index 17caeca94cab..53d3e1b32d3d 100644
--- a/include/linux/bitmap-str.h
+++ b/include/linux/bitmap-str.h
@@ -2,12 +2,14 @@
#ifndef __LINUX_BITMAP_STR_H
#define __LINUX_BITMAP_STR_H
+#include <linux/types.h>
+
int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
-extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
-extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 8c4768c44a01..b0395e4ccf90 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -23,7 +23,7 @@ struct device;
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Functions implementations that are architecture
- * specific are in various include/asm-<arch>/bitops.h headers
+ * specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
@@ -45,6 +45,7 @@ struct device;
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns Hamming Weight of dst
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
@@ -165,6 +166,8 @@ bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
@@ -203,12 +206,12 @@ unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
* the bit offset of all zero areas this function finds is multiples of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
@@ -228,7 +231,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -238,7 +241,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
memset(dst, 0, len);
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -248,8 +251,8 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int len = bitmap_size(nbits);
@@ -262,14 +265,26 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
/*
* Copy bitmap and clear tail bits in last word.
*/
-static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}
+static inline void bitmap_copy_and_extend(unsigned long *to,
+ const unsigned long *from,
+ unsigned int count, unsigned int size)
+{
+ unsigned int copy = BITS_TO_LONGS(count);
+
+ memcpy(to, from, copy * sizeof(long));
+ if (count % BITS_PER_LONG)
+ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
+ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
+}
+
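A hedged sketch of a typical bitmap_copy_and_extend() call site, assuming a hypothetical driver that widens a caller-sized mask into a fixed-size hardware mask (all names illustrative):

#define HW_MASK_BITS 256

static void hw_load_mask(unsigned long *hw_mask,
			 const unsigned long *user_mask, unsigned int user_bits)
{
	/* copies user_bits bits, clears the partial tail word, zeroes the rest */
	bitmap_copy_and_extend(hw_mask, user_mask, user_bits, HW_MASK_BITS);
}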
/*
* On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
* machines the order of hi and lo parts of numbers match the bitmap structure.
@@ -306,16 +321,18 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
-static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -323,8 +340,21 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 | *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_or(dst, src1, src2, nbits);
+ }
+}
+
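A small worked example of the bitmap_weighted_or() semantics (a sketch, not part of the patch): the OR is stored in dst and its population count is returned in one pass.

static unsigned int example_weighted_or(void)
{
	DECLARE_BITMAP(a, 8)   = { 0x0f };
	DECLARE_BITMAP(b, 8)   = { 0xf0 };
	DECLARE_BITMAP(dst, 8);

	/* dst becomes 0xff; the return value is hweight(dst), i.e. 8 */
	return bitmap_weighted_or(dst, a, b, 8);
}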
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -332,16 +362,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -356,8 +387,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline bool bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -376,10 +407,9 @@ static inline bool bitmap_equal(const unsigned long *src1,
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
-static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
@@ -387,9 +417,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline bool bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2,
- unsigned int nbits)
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -397,8 +426,8 @@ static inline bool bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline bool bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -406,7 +435,8 @@ static inline bool bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -414,7 +444,8 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -448,8 +479,8 @@ unsigned long bitmap_weight_andnot(const unsigned long *src1,
return __bitmap_weight_andnot(src1, src2, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
@@ -464,8 +495,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
@@ -480,8 +511,9 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -489,8 +521,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -498,11 +531,12 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
@@ -541,12 +575,13 @@ static inline void bitmap_replace(unsigned long *dst,
* ...0..11...0..10
* dst: 0000001100000010
*
- * A relationship exists between bitmap_scatter() and bitmap_gather().
+ * A relationship exists between bitmap_scatter() and bitmap_gather(). See
+ * bitmap_gather() for a detailed description of the gather operation. TL;DR:
* bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
- * See bitmap_scatter() for details related to this relationship.
*/
-static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -588,7 +623,9 @@ static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
* dst: 0000000000011010
*
* A relationship exists between bitmap_gather() and bitmap_scatter(). See
- * bitmap_scatter() for the bitmap scatter detailed operations.
+ * bitmap_scatter() for a detailed description of the scatter operation. TL;DR:
+ * bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation.
+ *
+ * Suppose scattered was computed using bitmap_scatter(scattered, src, mask, n).
* The operation bitmap_gather(result, scattered, mask, n) leads to a result
* equal or equivalent to src.
@@ -599,8 +636,9 @@ static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
* bitmap_scatter(res, src, mask, n) and a call to
* bitmap_scatter(res, result, mask, n) will lead to the same res value.
*/
-static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
- const unsigned long *mask, unsigned int nbits)
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
unsigned int n = 0;
unsigned int bit;
@@ -611,9 +649,9 @@ static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
__assign_bit(n++, dst, test_bit(bit, src));
}
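A round-trip sketch tying the scatter and gather documentation above together (values illustrative): gathering a scattered bitmap with the same mask recovers a value equivalent to the source.

static void example_scatter_gather(void)
{
	DECLARE_BITMAP(src, 16)  = { 0x001a };
	DECLARE_BITMAP(mask, 16) = { 0x3d40 };
	DECLARE_BITMAP(tmp, 16);
	DECLARE_BITMAP(out, 16);

	bitmap_scatter(tmp, src, mask, 16);
	/* out now holds a value equal or equivalent to src */
	bitmap_gather(out, tmp, mask, 16);
}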
-static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
@@ -628,7 +666,8 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
* This is the complement to __bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*/
-static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
bitmap_clear(bitmap, pos, BIT(order));
}
@@ -644,7 +683,8 @@ static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos
* Returns: 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
-static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
unsigned int len = BIT(order);
@@ -668,7 +708,8 @@ static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos
* Returns: the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
-static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
@@ -722,7 +763,7 @@ static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bi
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
bitmap_from_arr64(dst, &mask, 64);
}
@@ -737,9 +778,8 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
* @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
* value is undefined.
*/
-static inline unsigned long bitmap_read(const unsigned long *map,
- unsigned long start,
- unsigned long nbits)
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
size_t index = BIT_WORD(start);
unsigned long offset = start % BITS_PER_LONG;
@@ -772,8 +812,9 @@ static inline unsigned long bitmap_read(const unsigned long *map,
*
* For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_write(unsigned long *map, unsigned long value,
- unsigned long start, unsigned long nbits)
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
size_t index;
unsigned long offset;
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 46d4bdc634c0..ea7898cc5903 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -8,7 +8,6 @@
#include <uapi/linux/kernel.h>
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
@@ -47,12 +46,17 @@ extern unsigned long __sw_hweight64(__u64 w);
__builtin_constant_p(*(const unsigned long *)(addr))) ? \
const##op(nr, addr) : op(nr, addr))
+/*
+ * The following macros are non-atomic versions of their non-underscored
+ * counterparts.
+ */
#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
@@ -225,6 +229,37 @@ static inline int get_count_order_long(unsigned long l)
}
/**
+ * parity8 - get the parity of a u8 value
+ * @value: the value to be examined
+ *
+ * Determine the parity of the u8 argument.
+ *
+ * Returns:
+ * 0 for even parity, 1 for odd parity
+ *
+ * Note: This function informs you about the current parity. Example to bail
+ * out when parity is odd:
+ *
+ * if (parity8(val) == 1)
+ * return -EBADMSG;
+ *
+ * If you need to calculate a parity bit, draw the conclusion from this result
+ * yourself. Example to enforce odd parity, with the parity bit being bit 7:
+ *
+ * if (parity8(val) == 0)
+ * val ^= BIT(7);
+ */
+static inline int parity8(u8 val)
+{
+ /*
+ * One explanation of this algorithm:
+ * https://funloop.org/codex/problem/parity/README.html
+ */
+ val ^= val >> 4;
+ return (0x6996 >> (val & 0xf)) & 1;
+}
+
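To make the 0x6996 constant above less magic: after `val ^= val >> 4` only the low nibble matters, and 0x6996 (binary 0110 1001 1001 0110) is a 16-entry lookup table whose bit n holds the parity of n. A usage sketch:

static void example_parity(void)
{
	u8 val = 0xb5;	/* 0b10110101: five bits set, so parity is odd */

	if (parity8(val) == 1)
		pr_debug("odd parity detected\n");
}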
+/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
@@ -232,7 +267,7 @@ static inline int get_count_order_long(unsigned long l)
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
-static inline unsigned int __ffs64(u64 word)
+static inline __attribute_const__ unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 0eb24d21aac2..a40cc861b3a7 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -2,16 +2,15 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
#include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/*
* Create a contiguous bitmask starting at bit position @l and ending at
@@ -19,21 +18,72 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#if !defined(__ASSEMBLY__)
+
+/*
+ * Missing asm support
+ *
+ * GENMASK_U*() and BIT_U*() depend on BITS_PER_TYPE() which relies on sizeof(),
+ * something not available in asm. Nevertheless, fixed width integers are a C
+ * concept. Assembly code can rely on the long and long long versions instead.
+ */
+
#include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __is_constexpr((l) > (h)), (l) > (h), 0)))
-#else
+#include <linux/compiler.h>
+#include <linux/overflow.h>
+
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
+
+/*
+ * Generate a mask for the specified type @t. Additional checks are made to
+ * guarantee the value returned fits in that type, relying on the
+ * -Wshift-count-overflow compiler check to detect incompatible arguments.
+ * For example, all these create build errors or warnings:
+ *
+ * - GENMASK(15, 20): wrong argument order
+ * - GENMASK(72, 15): doesn't fit unsigned long
+ * - GENMASK_U32(33, 15): doesn't fit in a u32
+ */
+#define GENMASK_TYPE(t, h, l) \
+ ((t)(GENMASK_INPUT_CHECK(h, l) + \
+ (type_max(t) << (l) & \
+ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
+#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
+#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
+#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
+#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
+
+/*
+ * Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
+ * following examples generate compiler warnings due to -Wshift-count-overflow:
+ *
+ * - BIT_U8(8)
+ * - BIT_U32(-1)
+ * - BIT_U32(40)
+ */
+#define BIT_INPUT_CHECK(type, nr) \
+ BUILD_BUG_ON_ZERO(const_true((nr) >= BITS_PER_TYPE(type)))
+
+#define BIT_TYPE(type, nr) ((type)(BIT_INPUT_CHECK(type, nr) + BIT_ULL(nr)))
+
+#define BIT_U8(nr) BIT_TYPE(u8, nr)
+#define BIT_U16(nr) BIT_TYPE(u16, nr)
+#define BIT_U32(nr) BIT_TYPE(u32, nr)
+#define BIT_U64(nr) BIT_TYPE(u64, nr)
+
+#else /* defined(__ASSEMBLY__) */
+
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
-#endif
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#endif /* !defined(__ASSEMBLY__) */
#endif /* __LINUX_BITS_H */
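To see why GENMASK_TYPE() works, take GENMASK_U8(5, 2): type_max(u8) is 0xff, so 0xff << 2 yields 0x3fc and 0xff >> (8 - 1 - 5) yields 0x3f; their AND, cast back to u8, is 0x3c, exactly bits 5..2. A few compile-time sanity checks (a sketch, not part of the patch):

static_assert(GENMASK_U8(5, 2) == 0x3c);
static_assert(GENMASK_U16(15, 0) == 0xffff);
static_assert(GENMASK_U32(31, 16) == 0xffff0000u);
static_assert(BIT_U8(7) == 0x80);
/* GENMASK_U8(8, 2) and BIT_U8(8) would trip -Wshift-count-overflow */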
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
index 90ab33cb5d0e..4f39e9cd7576 100644
--- a/include/linux/blk-crypto-profile.h
+++ b/include/linux/blk-crypto-profile.h
@@ -57,6 +57,62 @@ struct blk_crypto_ll_ops {
int (*keyslot_evict)(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot);
+
+ /**
+ * @derive_sw_secret: Derive the software secret from a hardware-wrapped
+ * key in ephemerally-wrapped form.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * Must return 0 on success, -EBADMSG if the key is invalid, or another
+ * -errno code on other errors.
+ */
+ int (*derive_sw_secret)(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
+ /**
+ * @import_key: Create a hardware-wrapped key by importing a raw key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*import_key)(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @generate_key: Generate a hardware-wrapped key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*generate_key)(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @prepare_key: Prepare a hardware-wrapped key to be used.
+ *
+ * Prepare a hardware-wrapped key to be used by converting it from
+ * long-term wrapped form to ephemerally-wrapped form. This only needs
+ * to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED is supported.
+ *
+ * On success, must write the key in ephemerally-wrapped form to
+ * @eph_key and return its size in bytes. On failure, must return
+ * -EBADMSG if the key is invalid, or another -errno on other error.
+ */
+ int (*prepare_key)(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
};
/**
@@ -85,6 +141,12 @@ struct blk_crypto_profile {
unsigned int max_dun_bytes_supported;
/**
+ * @key_types_supported: A bitmask of the supported key types:
+ * BLK_CRYPTO_KEY_TYPE_RAW and/or BLK_CRYPTO_KEY_TYPE_HW_WRAPPED.
+ */
+ unsigned int key_types_supported;
+
+ /**
* @modes_supported: Array of bitmasks that specifies whether each
* combination of crypto mode and data unit size is supported.
* Specifically, the i'th bit of modes_supported[crypto_mode] is set if
@@ -143,6 +205,17 @@ void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
const struct blk_crypto_profile *child);
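Putting the new declarations together, a hedged sketch of the hardware-wrapped key lifecycle they imply (error handling abbreviated; the profile pointer is assumed to come from the driver):

static int example_hw_wrapped_key(struct blk_crypto_profile *profile,
				  u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int lt_size;

	/* 1) create a long-term key (blk_crypto_import_key() for raw keys) */
	lt_size = blk_crypto_generate_key(profile, lt_key);
	if (lt_size < 0)
		return lt_size;

	/* 2) convert to ephemerally-wrapped form before use */
	return blk_crypto_prepare_key(profile, lt_key, lt_size, eph_key);
}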
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 5e5822c18ee4..58b0c5254a67 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -6,7 +6,9 @@
#ifndef __LINUX_BLK_CRYPTO_H
#define __LINUX_BLK_CRYPTO_H
+#include <linux/minmax.h>
#include <linux/types.h>
+#include <uapi/linux/blk-crypto.h>
enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_INVALID,
@@ -17,7 +19,55 @@ enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_MAX,
};
-#define BLK_CRYPTO_MAX_KEY_SIZE 64
+/*
+ * Supported types of keys. Must be bitflags due to their use in
+ * blk_crypto_profile::key_types_supported.
+ */
+enum blk_crypto_key_type {
+ /*
+ * Raw keys (i.e. "software keys"). These keys are simply kept in raw,
+ * plaintext form in kernel memory.
+ */
+ BLK_CRYPTO_KEY_TYPE_RAW = 0x1,
+
+ /*
+ * Hardware-wrapped keys. These keys are only present in kernel memory
+ * in ephemerally-wrapped form, and they can only be unwrapped by
+ * dedicated hardware. For details, see the "Hardware-wrapped keys"
+ * section of Documentation/block/inline-encryption.rst.
+ */
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED = 0x2,
+};
+
+/*
+ * Currently the maximum raw key size is 64 bytes, as that is the key size of
+ * BLK_ENCRYPTION_MODE_AES_256_XTS which takes the longest key.
+ *
+ * The maximum hardware-wrapped key size depends on the hardware's key wrapping
+ * algorithm, which is a hardware implementation detail, so it isn't precisely
+ * specified. But currently 128 bytes is plenty in practice. Implementations
+ * are recommended to wrap a 32-byte key for the hardware KDF with AES-256-GCM,
+ * which should result in a size closer to 64 bytes than 128.
+ *
+ * Both of these values can trivially be increased if ever needed.
+ */
+#define BLK_CRYPTO_MAX_RAW_KEY_SIZE 64
+#define BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE 128
+
+#define BLK_CRYPTO_MAX_ANY_KEY_SIZE \
+ MAX(BLK_CRYPTO_MAX_RAW_KEY_SIZE, BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
+
+/*
+ * Size of the "software secret" which can be derived from a hardware-wrapped
+ * key. This is currently always 32 bytes. Note, the choice of 32 bytes
+ * assumes that the software secret is only used directly for algorithms that
+ * don't require more than a 256-bit key to get the desired security strength.
+ * If it were to be used e.g. directly as an AES-256-XTS key, then this would
+ * need to be increased (which is possible if hardware supports it, but care
+ * would need to be taken to avoid breaking users who need exactly 32 bytes).
+ */
+#define BLK_CRYPTO_SW_SECRET_SIZE 32
+
/**
* struct blk_crypto_config - an inline encryption key's crypto configuration
* @crypto_mode: encryption algorithm this key is for
@@ -26,20 +76,23 @@ enum blk_crypto_mode_num {
* ciphertext. This is always a power of 2. It might be e.g. the
* filesystem block size or the disk sector size.
* @dun_bytes: the maximum number of bytes of DUN used when using this key
+ * @key_type: the type of this key -- either raw or hardware-wrapped
*/
struct blk_crypto_config {
enum blk_crypto_mode_num crypto_mode;
unsigned int data_unit_size;
unsigned int dun_bytes;
+ enum blk_crypto_key_type key_type;
};
/**
* struct blk_crypto_key - an inline encryption key
- * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this
- * key
+ * @crypto_cfg: the crypto mode, data unit size, key type, and other
+ * characteristics of this key and how it will be used
* @data_unit_size_bits: log2 of data_unit_size
- * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode)
- * @raw: the raw bytes of this key. Only the first @size bytes are used.
+ * @size: size of this key in bytes. The size of a raw key is fixed for a given
+ * crypto mode, but the size of a hardware-wrapped key can vary.
+ * @bytes: the bytes of this key. Only the first @size bytes are significant.
*
* A blk_crypto_key is immutable once created, and many bios can reference it at
* the same time. It must not be freed until all bios using it have completed
@@ -49,7 +102,7 @@ struct blk_crypto_key {
struct blk_crypto_config crypto_cfg;
unsigned int data_unit_size_bits;
unsigned int size;
- u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+ u8 bytes[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
};
#define BLK_CRYPTO_MAX_IV_SIZE 32
@@ -87,7 +140,9 @@ bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
unsigned int bytes,
const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+ const u8 *key_bytes, size_t key_size,
+ enum blk_crypto_key_type key_type,
enum blk_crypto_mode_num crypto_mode,
unsigned int dun_bytes,
unsigned int data_unit_size);
@@ -103,6 +158,10 @@ bool blk_crypto_config_supported_natively(struct block_device *bdev,
bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg);
+int blk_crypto_derive_sw_secret(struct block_device *bdev,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
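Given the widened blk_crypto_init_key() prototype above, a hedged sketch of initializing a raw AES-256-XTS key under the new signature (key material and the dun_bytes/data_unit_size parameters are illustrative):

static int example_init_key(struct blk_crypto_key *key, const u8 *raw)
{
	/* AES-256-XTS takes the full 64-byte raw key */
	return blk_crypto_init_key(key, raw, BLK_CRYPTO_MAX_RAW_KEY_SIZE,
				   BLK_CRYPTO_KEY_TYPE_RAW,
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8 /* dun_bytes */, 4096 /* data_unit_size */);
}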
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline bool bio_has_crypt_ctx(struct bio *bio)
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index e253e7bd0d17..a6b84206eb94 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -3,55 +3,58 @@
#define _LINUX_BLK_INTEGRITY_H
#include <linux/blk-mq.h>
+#include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
struct request;
+/*
+ * Maximum contiguous integrity buffer allocation.
+ */
+#define BLK_INTEGRITY_MAX_SIZE SZ_2M
+
enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_NOVERIFY = 1 << 0,
+ BLK_INTEGRITY_NOGENERATE = 1 << 1,
BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+ BLK_INTEGRITY_REF_TAG = 1 << 3,
+ BLK_INTEGRITY_STACKED = 1 << 4,
};
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- unsigned char tuple_size;
- unsigned char pi_offset;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
+const char *blk_integrity_profile_name(struct blk_integrity *bi);
+bool queue_limits_stack_integrity(struct queue_limits *t,
+ struct queue_limits *b);
+static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
+ struct block_device *bdev)
+{
+ return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits);
+}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-void blk_integrity_unregister(struct gendisk *);
-int blk_integrity_compare(struct gendisk *, struct gendisk *);
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes);
+int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp);
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter);
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
{
- struct blk_integrity *bi = &disk->queue->integrity;
+ return q->limits.integrity.metadata_size;
+}
- if (!bi->profile)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ if (!blk_integrity_queue_supports_integrity(disk->queue))
return NULL;
-
- return bi;
+ return &disk->queue->limits.integrity;
}
static inline struct blk_integrity *
@@ -60,18 +63,6 @@ bdev_get_integrity(struct block_device *bdev)
return blk_get_integrity(bdev->bd_disk);
}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return q->integrity.profile;
-}
-
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
-
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
{
@@ -97,7 +88,7 @@ static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
unsigned int sectors)
{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+ return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
}
static inline bool blk_integrity_rq(struct request *rq)
@@ -106,54 +97,59 @@ static inline bool blk_integrity_rq(struct request *rq)
}
/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
+ * Return the current bvec that contains the integrity data. bip_iter may be
+ * advanced to iterate over the integrity data.
*/
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
+ rq->bio->bi_integrity->bip_iter);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
struct bio *b)
{
return 0;
}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
struct scatterlist *s)
{
return 0;
}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes)
{
- return NULL;
+ return -EINVAL;
}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
+static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
{
return false;
}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
{
- return 0;
+ return false;
}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
+ return NULL;
}
-static inline void blk_integrity_unregister(struct gendisk *d)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
+ return NULL;
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
{
+ return false;
}
static inline unsigned short
queue_max_integrity_segments(const struct request_queue *q)
@@ -177,9 +173,11 @@ static inline int blk_integrity_rq(struct request *rq)
return 0;
}
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
{
- return NULL;
+ /* the optimizer will remove all calls to this function */
+ return (struct bio_vec){ };
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
#endif /* _LINUX_BLK_INTEGRITY_H */
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
new file mode 100644
index 000000000000..cb88fc791fbd
--- /dev/null
+++ b/include/linux/blk-mq-dma.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BLK_MQ_DMA_H
+#define BLK_MQ_DMA_H
+
+#include <linux/blk-mq.h>
+#include <linux/pci-p2pdma.h>
+
+struct blk_map_iter {
+ struct bvec_iter iter;
+ struct bio *bio;
+ struct bio_vec *bvecs;
+ bool is_integrity;
+};
+
+struct blk_dma_iter {
+ /* Output address range for this iteration */
+ dma_addr_t addr;
+ u32 len;
+ struct pci_p2pdma_map_state p2pdma;
+
+ /* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
+ blk_status_t status;
+
+ /* Internal to blk_rq_dma_map_iter_* */
+ struct blk_map_iter iter;
+};
+
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+
+/**
+ * blk_rq_dma_map_coalesce - were all segments coalesced?
+ * @state: DMA state to check
+ *
+ * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
+ * single DMA range.
+ */
+static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
+{
+ return dma_use_iova(state);
+}
+
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req: request to unmap
+ * @dma_dev: device to unmap from
+ * @state: DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @map: peer-to-peer mapping type
+ *
+ * Returns %false if the caller needs to manually unmap every DMA segment
+ * mapped using @iter, or %true if no work is left to be done.
+ */
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum pci_p2pdma_map_type map)
+{
+ if (map == PCI_P2PDMA_MAP_BUS_ADDR)
+ return true;
+
+ if (dma_use_iova(state)) {
+ unsigned int attrs = 0;
+
+ if (map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+ attrs |= DMA_ATTR_MMIO;
+
+ dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+ attrs);
+ return true;
+ }
+
+ return !dma_need_unmap(dma_dev);
+}
+#endif /* BLK_MQ_DMA_H */
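The header above suggests the following consumption pattern, sketched here with an illustrative hw_desc_add() standing in for driver-specific descriptor programming; iter.status is only meaningful once an iterator call has returned false:

static blk_status_t example_dma_map(struct request *req, struct device *dma_dev,
				    struct dma_iova_state *state)
{
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, state, &iter))
		return iter.status;

	do {
		/* program one coalesced range (illustrative helper) */
		hw_desc_add(req, iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, state, &iter));

	return iter.status;
}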
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
deleted file mode 100644
index ca544e1d3508..000000000000
--- a/include/linux/blk-mq-pci.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_PCI_H
-#define _LINUX_BLK_MQ_PCI_H
-
-struct blk_mq_queue_map;
-struct pci_dev;
-
-void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
-
-#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
deleted file mode 100644
index 13226e9b22dd..000000000000
--- a/include/linux/blk-mq-virtio.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_VIRTIO_H
-#define _LINUX_BLK_MQ_VIRTIO_H
-
-struct blk_mq_queue_map;
-struct virtio_device;
-
-void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
- struct virtio_device *vdev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 89ba6b16fe8b..cae9e857aea4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -9,6 +9,7 @@
#include <linux/prefetch.h>
#include <linux/srcu.h>
#include <linux/rw_hint.h>
+#include <linux/rwsem.h>
struct blk_mq_tags;
struct blk_flush_queue;
@@ -27,38 +28,61 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
* request flags */
typedef __u32 __bitwise req_flags_t;
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* use hctx->sched_tags */
-#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8))
-/* use an I/O scheduler for this request */
-#define RQF_USE_SCHED ((__force req_flags_t)(1 << 9))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* account into disk and partition IO statistics */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* track IO completion time */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The request completion needs to be signaled to zone write pluging. */
-#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20))
-/* ->timeout has been called, don't expire again */
-#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-#define RQF_RESV ((__force req_flags_t)(1 << 23))
+/* Keep rqf_name[] in sync with the definitions below */
+enum rqf_flags {
+ /* drive already may have started this one */
+ __RQF_STARTED,
+ /* request for flush sequence */
+ __RQF_FLUSH_SEQ,
+ /* merge of different types, fail separately */
+ __RQF_MIXED_MERGE,
+ /* don't call prep for this one */
+ __RQF_DONTPREP,
+ /* use hctx->sched_tags */
+ __RQF_SCHED_TAGS,
+ /* use an I/O scheduler for this request */
+ __RQF_USE_SCHED,
+ /* vaguely specified driver internal error. Ignored by block layer */
+ __RQF_FAILED,
+ /* don't warn about errors */
+ __RQF_QUIET,
+ /* account into disk and partition IO statistics */
+ __RQF_IO_STAT,
+ /* runtime pm request */
+ __RQF_PM,
+ /* on IO scheduler merge hash */
+ __RQF_HASHED,
+ /* track IO completion time */
+ __RQF_STATS,
+ /* Look at ->special_vec for the actual data payload instead of the
+ bio chain. */
+ __RQF_SPECIAL_PAYLOAD,
+ /* request completion needs to be signaled to zone write plugging. */
+ __RQF_ZONE_WRITE_PLUGGING,
+ /* ->timeout has been called, don't expire again */
+ __RQF_TIMED_OUT,
+ __RQF_RESV,
+ __RQF_BITS
+};
+
+#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED))
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
+#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP))
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED))
+#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED))
+#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET))
+#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT))
+#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM))
+#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED))
+#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS))
+#define RQF_SPECIAL_PAYLOAD \
+ ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
+#define RQF_ZONE_WRITE_PLUGGING \
+ ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT))
+#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -126,19 +150,21 @@ struct request {
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
-
-#ifdef CONFIG_BLK_DEV_INTEGRITY
unsigned short nr_integrity_segments;
-#endif
+
+ /*
+ * The lowest set bit across the address gaps between physical segments.
+ * This provides information necessary for DMA optimization opportunities,
+ * such as testing whether the segments can be coalesced against the
+ * device's IOMMU granule.
+ */
+ unsigned char phys_gap_bit;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *crypt_ctx;
struct blk_crypto_keyslot *crypt_keyslot;
#endif
- enum rw_hint write_hint;
- unsigned short ioprio;
-
enum mq_rq_state state;
atomic_t ref;
@@ -190,6 +216,14 @@ struct request {
void *end_io_data;
};
+/*
+ * Returns a mask with all bits starting at req->phys_gap_bit set to 1.
+ */
+static inline unsigned long req_phys_gap_mask(const struct request *req)
+{
+ return ~(((1UL << req->phys_gap_bit) >> 1) - 1);
+}
+
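A sketch of how req_phys_gap_mask() might be consulted, assuming a hypothetical check of whether every inter-segment gap stays inside the IOMMU granule so the segments can be coalesced into one IOVA mapping:

static bool example_can_coalesce(struct request *req, unsigned long granule)
{
	/* no gap bit may fall below the granule boundary */
	return !(req_phys_gap_mask(req) & (granule - 1));
}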
static inline enum req_op req_op(const struct request *req)
{
return req->cmd_flags & REQ_OP_MASK;
@@ -202,7 +236,9 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
static inline unsigned short req_get_ioprio(struct request *req)
{
- return req->ioprio;
+ if (req->bio)
+ return req->bio->bi_ioprio;
+ return 0;
}
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
@@ -210,62 +246,61 @@ static inline unsigned short req_get_ioprio(struct request *req)
#define rq_dma_dir(rq) \
(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-#define rq_list_add(listptr, rq) do { \
- (rq)->rq_next = *(listptr); \
- *(listptr) = rq; \
-} while (0)
-
-#define rq_list_add_tail(lastpptr, rq) do { \
- (rq)->rq_next = NULL; \
- **(lastpptr) = rq; \
- *(lastpptr) = &rq->rq_next; \
-} while (0)
-
-#define rq_list_pop(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) { \
- __req = *(listptr); \
- *(listptr) = __req->rq_next; \
- } \
- __req; \
-})
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+ return rl->head == NULL;
+}
-#define rq_list_peek(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) \
- __req = *(listptr); \
- __req; \
-})
+static inline void rq_list_init(struct rq_list *rl)
+{
+ rl->head = NULL;
+ rl->tail = NULL;
+}
-#define rq_list_for_each(listptr, pos) \
- for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = NULL;
+ if (rl->tail)
+ rl->tail->rq_next = rq;
+ else
+ rl->head = rq;
+ rl->tail = rq;
+}
+
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = rl->head;
+ rl->head = rq;
+ if (!rl->tail)
+ rl->tail = rq;
+}
-#define rq_list_for_each_safe(listptr, pos, nxt) \
- for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
- pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+ struct request *rq = rl->head;
-#define rq_list_next(rq) (rq)->rq_next
-#define rq_list_empty(list) ((list) == (struct request *) NULL)
+ if (rq) {
+ rl->head = rl->head->rq_next;
+ if (!rl->head)
+ rl->tail = NULL;
+ rq->rq_next = NULL;
+ }
-/**
- * rq_list_move() - move a struct request from one list to another
- * @src: The source list @rq is currently in
- * @dst: The destination list that @rq will be appended to
- * @rq: The request to move
- * @prev: The request preceding @rq in @src (NULL if @rq is the head)
- */
-static inline void rq_list_move(struct request **src, struct request **dst,
- struct request *rq, struct request *prev)
+ return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
{
- if (prev)
- prev->rq_next = rq->rq_next;
- else
- *src = rq->rq_next;
- rq_list_add(dst, rq);
+ return rl->head;
}
+#define rq_list_for_each(rl, pos) \
+ for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt) \
+ for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \
+ pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
+
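A hedged sketch of a driver-side queue_rqs() consumer using the struct-based list API above (the per-request dispatch function is illustrative):

static void example_queue_rqs(struct rq_list *rqlist)
{
	struct request *rq;

	/* head and tail pointers are maintained by the helpers */
	while ((rq = rq_list_pop(rqlist)))
		example_dispatch_one(rq);	/* illustrative dispatch */
}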
/**
* enum blk_eh_timer_return - How the timeout handler should proceed
* @BLK_EH_DONE: The block driver completed the command or will complete it at
@@ -278,9 +313,6 @@ enum blk_eh_timer_return {
BLK_EH_RESET_TIMER,
};
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
-
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
@@ -491,6 +523,11 @@ enum hctx_type {
* request_queue.tag_set_list.
* @srcu: Use as lock when type of the request queue is blocking
* (BLK_MQ_F_BLOCKING).
+ * @tags_srcu: SRCU used to defer freeing of tags page_list to prevent
+ * use-after-free when iterating tags.
+ * @update_nr_hwq_lock:
+ * Synchronizes nr_hw_queues updates with disk addition/removal
+ * and elevator switching.
*/
struct blk_mq_tag_set {
const struct blk_mq_ops *ops;
@@ -512,6 +549,9 @@ struct blk_mq_tag_set {
struct mutex tag_list_lock;
struct list_head tag_list;
struct srcu_struct *srcu;
+ struct srcu_struct tags_srcu;
+
+ struct rw_semaphore update_nr_hwq_lock;
};
/**
@@ -553,7 +593,7 @@ struct blk_mq_ops {
* empty the @rqlist completely, then the rest will be queued
* individually by the block layer upon return.
*/
- void (*queue_rqs)(struct request **rqlist);
+ void (*queue_rqs)(struct rq_list *rqlist);
/**
* @get_budget: Reserve budget before queue request, once .queue_rq is
@@ -644,8 +684,8 @@ struct blk_mq_ops {
#endif
};
+/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
- BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
/*
* Set when this device requires underlying blk-mq device for
@@ -653,37 +693,35 @@ enum {
*/
BLK_MQ_F_STACKING = 1 << 2,
BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
- BLK_MQ_F_BLOCKING = 1 << 5,
- /* Do not allow an I/O scheduler to be configured. */
- BLK_MQ_F_NO_SCHED = 1 << 6,
+ BLK_MQ_F_BLOCKING = 1 << 4,
+
+ /*
+ * Alloc tags on a round-robin base instead of the first available one.
+ */
+ BLK_MQ_F_TAG_RR = 1 << 5,
+
/*
* Select 'none' during queue registration in case of a single hwq
* or shared hwqs instead of 'mq-deadline'.
*/
- BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
- BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
- BLK_MQ_F_ALLOC_POLICY_BITS = 1,
-
- BLK_MQ_S_STOPPED = 0,
- BLK_MQ_S_TAG_ACTIVE = 1,
- BLK_MQ_S_SCHED_RESTART = 2,
-
- /* hw queue is inactive after all its CPUs become offline */
- BLK_MQ_S_INACTIVE = 3,
-
- BLK_MQ_MAX_DEPTH = 10240,
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
- BLK_MQ_CPU_WORK_BATCH = 8,
+ BLK_MQ_F_MAX = 1 << 7,
};
-#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
- ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
- ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
-#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
- ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
- << BLK_MQ_F_ALLOC_POLICY_START_BIT)
+#define BLK_MQ_MAX_DEPTH (10240)
#define BLK_MQ_NO_HCTX_IDX (-1U)
+enum {
+ /* Keep hctx_state_name[] in sync with the definitions below */
+ BLK_MQ_S_STOPPED,
+ BLK_MQ_S_TAG_ACTIVE,
+ BLK_MQ_S_SCHED_RESTART,
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE,
+ BLK_MQ_S_MAX
+};
+
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
struct queue_limits *lim, void *queuedata,
struct lock_class_key *lkclass);
@@ -748,6 +786,7 @@ struct blk_mq_tags {
* request pool
*/
spinlock_t lock;
+ struct rcu_head rcu_head;
};
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
@@ -831,12 +870,6 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
*/
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
- /*
- * passthrough io doesn't use iostat accounting, cgroup stats
- * and io scheduler functionalities.
- */
- if (blk_rq_is_passthrough(rq))
- return false;
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}
@@ -845,28 +878,46 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
return rq->rq_flags & RQF_RESV;
}
-/*
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to the batch
+ * @iob: The batch to add the request to
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
* Batched completions only work when there is no I/O error and no special
* ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
*/
static inline bool blk_mq_add_to_batch(struct request *req,
- struct io_comp_batch *iob, int ioerror,
+ struct io_comp_batch *iob, bool is_error,
void (*complete)(struct io_comp_batch *))
{
/*
- * blk_mq_end_request_batch() can't end request allocated from
- * sched tags
+ * Check various conditions that exclude batch processing:
+ * 1) No batch container
+ * 2) Has scheduler data attached
+ * 3) Not a passthrough request and end_io set
+ * 4) Not a passthrough request and failed with an error
*/
- if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
- (req->end_io && !blk_rq_is_passthrough(req)))
+ if (!iob)
+ return false;
+ if (req->rq_flags & RQF_SCHED_TAGS)
return false;
+ if (!blk_rq_is_passthrough(req)) {
+ if (req->end_io)
+ return false;
+ if (is_error)
+ return false;
+ }
if (!iob->complete)
iob->complete = complete;
else if (iob->complete != complete)
return false;
iob->need_ts |= blk_mq_need_time_stamp(req);
- rq_list_add(&iob->req_list, req);
+ rq_list_add_tail(&iob->req_list, req);
return true;
}
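A hedged sketch of the completion-side idiom inside a driver's completion handler: try to batch, and fall back to an individual completion when batching is refused (the batch-completion callback is illustrative):

if (!blk_mq_add_to_batch(req, iob, status != BLK_STS_OK,
			 example_complete_batch))
	blk_mq_complete_request(req);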
@@ -893,14 +944,34 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+ unsigned int memflags = memalloc_noio_save();
+
+ blk_mq_freeze_queue_nomemsave(q);
+ return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ memalloc_noio_restore(memflags);
+}
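The memflags handshake above pairs as in this sketch; the __must_check return forces callers to carry the NOIO cookie from freeze to the matching unfreeze:

unsigned int memflags;

memflags = blk_mq_freeze_queue(q);	/* enters a NOIO allocation scope */
/* ... update queue state that must not race with in-flight I/O ... */
blk_mq_unfreeze_queue(q, memflags);	/* restores the allocation context */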
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
+void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
+void blk_freeze_queue_start_non_owner(struct request_queue *q);
+unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
+unsigned int blk_mq_num_online_queues(unsigned int max_queues);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+ struct device *dev, unsigned int offset);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -944,8 +1015,20 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
return rq + 1;
}
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ rcu_read_lock();
+ hctx = rcu_dereference(q->queue_hw_ctx)[id];
+ rcu_read_unlock();
+
+ return hctx;
+}
+
#define queue_for_each_hw_ctx(q, hctx, i) \
- xa_for_each(&(q)->hctx_table, (i), (hctx))
+ for ((i) = 0; (i) < (q)->nr_hw_queues && \
+ ({ hctx = queue_hctx((q), i); 1; }); (i)++)
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
@@ -957,15 +1040,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq)
rq->q->mq_ops->cleanup_rq(rq);
}
-static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
- unsigned int nr_segs)
-{
- rq->nr_phys_segments = nr_segs;
- rq->__data_len = bio->bi_iter.bi_size;
- rq->bio = rq->biotail = bio;
- rq->ioprio = bio_prio(bio);
-}
-
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
struct lock_class_key *key);
@@ -997,8 +1071,8 @@ int blk_rq_map_user_io(struct request *, struct rq_map_data *,
int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *, gfp_t);
int blk_rq_unmap_user(struct bio *);
-int blk_rq_map_kern(struct request_queue *, struct request *, void *,
- unsigned int, gfp_t);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
@@ -1139,14 +1213,31 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
return max_t(unsigned short, rq->nr_phys_segments, 1);
}
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+ struct req_iterator rq_iter;
+ struct bio_vec bv;
+ unsigned int nr_bvec = 0;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvec++;
+
+ return nr_bvec;
+}
+
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+ struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
{
struct scatterlist *last_sg = NULL;
- return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+ return __blk_rq_map_sg(rq, sglist, &last_sg);
}
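
/*
 * Example (sketch): mapping a request to a scatterlist no longer takes the
 * request_queue. The sg_table is assumed to have been allocated with at
 * least blk_rq_nr_phys_segments(rq) entries.
 */
static blk_status_t example_map_rq(struct request *rq, struct sg_table *sgt)
{
	int nents = blk_rq_map_sg(rq, sgt->sgl);

	if (nents <= 0)
		return BLK_STS_IOERR;
	sgt->nents = nents;
	return BLK_STS_OK;
}
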
void blk_dump_rq_flags(struct request *, char *);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 781c4500491b..5dc061d318a4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -71,6 +71,9 @@ struct block_device {
struct partition_meta_info *bd_meta_info;
int bd_writers;
+#ifdef CONFIG_SECURITY
+ void *bd_security;
+#endif
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
@@ -162,6 +165,11 @@ typedef u16 blk_short_t;
*/
#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
+/*
+ * Invalid size or alignment.
+ */
+#define BLK_STS_INVAL ((__force blk_status_t)19)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -190,10 +198,6 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
-struct bio_issue {
- u64 value;
-};
-
typedef __u32 __bitwise blk_opf_t;
typedef unsigned int blk_qc_t;
@@ -212,7 +216,20 @@ struct bio {
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
enum rw_hint bi_write_hint;
+ u8 bi_write_stream;
blk_status_t bi_status;
+
+ /*
+	 * The bvec gap bit records the lowest set bit among the address
+	 * offsets between all bi_io_vecs. It is initialized only after the
+	 * bio has been split to the hardware limits (see bio_split_io_at())
+	 * and may be used to decide on DMA optimizations when mapping the
+	 * bio. The value is compared against a power-of-two mask whose
+	 * result depends on any bit set within the mask, so storing the
+	 * lowest set bit is enough to know whether any segment gap collides
+	 * with the mask.
+ */
+ u8 bi_bvec_gap_bit;
+
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
@@ -233,7 +250,8 @@ struct bio {
* on release of the bio.
*/
struct blkcg_gq *bi_blkg;
- struct bio_issue bi_issue;
+ /* Time that this bio was issued. */
+ u64 issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
u64 bi_iocost_cost;
#endif
@@ -243,11 +261,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -262,25 +278,22 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
-
- /*
- * We can inline a number of vecs at the end of the bio, to avoid
- * double allocations for a small number of bio_vecs. This member
- * MUST obviously be kept at the very end of the bio.
- */
- struct bio_vec bi_inline_vecs[];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
+{
+ return (struct bio_vec *)(bio + 1);
+}
+
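/*
 * Example (sketch): with bi_inline_vecs removed from struct bio, callers
 * that used to reference the flexible array member now compute the same
 * address from the allocation, e.g.:
 *
 *	bio->bi_io_vec = bio_inline_vecs(bio);
 */
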
/*
* bio flags
*/
enum {
BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
- BIO_BOUNCED, /* bio is a bounce bio */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
@@ -290,6 +303,14 @@ enum {
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ /*
+	 * This bio has completed bps throttling at single-tg granularity,
+	 * which is different from BIO_BPS_THROTTLED. The flag must be
+	 * cleared when the bio is enqueued into the sq->queued list of the
+	 * upper tg, or when it is about to be dispatched. Since blk-throttle
+	 * and rq_qos are not on the same hierarchical level, the value is
+	 * reused.
+ */
+ BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
@@ -332,15 +353,15 @@ enum req_op {
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
- REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)11,
/* Close a zone */
- REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13,
/* Transition a zone to full */
- REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)15,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = (__force blk_opf_t)13,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)17,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)15,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19,
/* Driver private requests */
REQ_OP_DRV_IN = (__force blk_opf_t)34,
@@ -349,6 +370,7 @@ enum req_op {
REQ_OP_LAST = (__force blk_opf_t)36,
};
+/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
__REQ_FAILFAST_DEV = /* no driver retries of device errors */
REQ_OP_BITS,
@@ -370,7 +392,7 @@ enum req_flag_bits {
__REQ_SWAP, /* swap I/O */
__REQ_DRV, /* for driver use */
__REQ_FS_PRIVATE, /* for file system (submitter) use */
-
+ __REQ_ATOMIC, /* for atomic write operations */
/*
* Command specific flags, keep last:
*/
@@ -402,6 +424,7 @@ enum req_flag_bits {
#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
@@ -456,15 +479,13 @@ static inline bool op_is_discard(blk_opf_t op)
}
/*
- * Check if a bio or request operation is a zone management operation, with
- * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
- * due to its different handling in the block layer and device response in
- * case of command failure.
+ * Check if a bio or request operation is a zone management operation.
*/
static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aefdda9f4ec7..72e34acd439c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -25,6 +25,7 @@
#include <linux/uuid.h>
#include <linux/xarray.h>
#include <linux/file.h>
+#include <linux/lockdep.h>
struct module;
struct request_queue;
@@ -37,6 +38,7 @@ struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
+struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;
@@ -105,13 +107,21 @@ enum {
struct disk_events;
struct badblocks;
+enum blk_integrity_checksum {
+ BLK_INTEGRITY_CSUM_NONE = 0,
+ BLK_INTEGRITY_CSUM_IP = 1,
+ BLK_INTEGRITY_CSUM_CRC = 2,
+ BLK_INTEGRITY_CSUM_CRC64 = 3,
+} __packed;
+
struct blk_integrity {
- const struct blk_integrity_profile *profile;
unsigned char flags;
- unsigned char tuple_size;
+ enum blk_integrity_checksum csum_type;
+ unsigned char metadata_size;
unsigned char pi_offset;
unsigned char interval_exp;
unsigned char tag_size;
+ unsigned char pi_tuple_size;
};
typedef unsigned int __bitwise blk_mode_t;
@@ -163,6 +173,7 @@ struct gendisk {
#define GD_ADDED 4
#define GD_SUPPRESS_PART_SCAN 5
#define GD_OWNS_QUEUE 6
+#define GD_ZONE_APPEND_USED 7
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
@@ -174,7 +185,6 @@ struct gendisk {
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED
@@ -186,13 +196,13 @@ struct gendisk {
*/
unsigned int nr_zones;
unsigned int zone_capacity;
- unsigned long *conv_zones_bitmap;
- unsigned int zone_wplugs_hash_bits;
- spinlock_t zone_wplugs_lock;
- struct mempool_s *zone_wplugs_pool;
- struct hlist_head *zone_wplugs_hash;
- struct list_head zone_wplugs_err_list;
- struct work_struct zone_wplugs_work;
+ unsigned int last_zone_capacity;
+ u8 __rcu *zones_cond;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
+	struct mempool_s *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -210,6 +220,8 @@ struct gendisk {
* devices that do not have multiple independent access ranges.
*/
struct blk_independent_access_ranges *ia_ranges;
+
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
};
/**
@@ -260,9 +272,21 @@ static inline dev_t disk_devt(struct gendisk *disk)
return MKDEV(disk->major, disk->first_minor);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER)
+ * however we constrain this to what we can validate and test.
+ */
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+#else
+#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
+#endif
+
+/*
+ * blk_validate_limits() already checks the block size, so drivers rarely
+ * need to call this directly.
+ */
static inline int blk_validate_block_size(unsigned long bsize)
{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
return -EINVAL;
return 0;
@@ -274,17 +298,79 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
+/* flags set by the driver in queue_limits.features */
+typedef unsigned int __bitwise blk_features_t;
+
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))
+
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))
+
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))
+
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))
+
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))
+
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))
+
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))
+
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))
+
+/* supports DAX */
+#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))
+
+/* supports I/O polling */
+#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))
+
+/* is a zoned device */
+#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
+
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
+
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+ ((__force blk_features_t)(1u << 15))
+
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
+ ((__force blk_features_t)(1u << 16))
+
/*
- * BLK_BOUNCE_NONE: never bounce (default)
- * BLK_BOUNCE_HIGH: bounce all highmem pages
+ * Flags automatically inherited when stacking limits.
*/
-enum blk_bounce {
- BLK_BOUNCE_NONE,
- BLK_BOUNCE_HIGH,
-};
+#define BLK_FEAT_INHERIT_MASK \
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
+
+/* internal flags in queue_limits.flags */
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
+
+/* passthrough command IO accounting */
+#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))
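
/*
 * Example (sketch): instead of toggling queue flags after the fact, a
 * driver now declares these properties up front in the queue_limits it
 * passes to disk allocation. The values shown are illustrative.
 */
static void example_init_limits(struct queue_limits *lim)
{
	/* volatile write cache with FUA support, non-rotational device */
	lim->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
	lim->features &= ~BLK_FEAT_ROTATIONAL;
}
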
struct queue_limits {
- enum blk_bounce bounce;
+ blk_features_t features;
+ blk_flags_t flags;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -294,6 +380,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int max_fast_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
unsigned int alignment_offset;
@@ -304,19 +391,32 @@ struct queue_limits {
unsigned int max_user_discard_sectors;
unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_wzeroes_unmap_sectors;
+ unsigned int max_hw_wzeroes_unmap_sectors;
+ unsigned int max_user_wzeroes_unmap_sectors;
+ unsigned int max_hw_zone_append_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_boundary_sectors;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
+
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
- unsigned char misaligned;
- unsigned char discard_misaligned;
- unsigned char raid_partial_stripes_expensive;
- bool zoned;
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
+
unsigned int max_open_zones;
unsigned int max_active_zones;
@@ -326,16 +426,25 @@ struct queue_limits {
* due to possible offsets.
*/
unsigned int dma_alignment;
+ unsigned int dma_pad_mask;
+
+ struct blk_integrity integrity;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void disk_set_zoned(struct gendisk *disk);
+int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
+ unsigned int idx, struct blk_report_zones_args *args);
+
+int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zone);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors);
int blk_revalidate_disk_zones(struct gendisk *disk);
@@ -386,7 +495,7 @@ struct request_queue {
*/
unsigned long queue_flags;
- unsigned int rq_timeout;
+ unsigned int __data_racy rq_timeout;
unsigned int queue_depth;
@@ -394,9 +503,14 @@ struct request_queue {
/* hw dispatch queues */
unsigned int nr_hw_queues;
- struct xarray hctx_table;
+ struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
struct percpu_ref q_usage_counter;
+ struct lock_class_key io_lock_cls_key;
+ struct lockdep_map io_lockdep_map;
+
+ struct lock_class_key q_lock_cls_key;
+ struct lockdep_map q_lockdep_map;
struct request *last_merge;
@@ -413,10 +527,6 @@ struct request_queue {
struct queue_limits limits;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity integrity;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
#ifdef CONFIG_PM
struct device *dev;
enum rpm_status rpm_status;
@@ -438,8 +548,6 @@ struct request_queue {
*/
int id;
- unsigned int dma_pad_mask;
-
/*
* queue settings
*/
@@ -480,8 +588,22 @@ struct request_queue {
struct blk_flush_queue *fq;
struct list_head flush_list;
+ /*
+ * Protects against I/O scheduler switching, particularly when updating
+	 * q->elevator. Since the elevator update code path may also modify
+	 * q->nr_requests and wbt latency, this lock also protects the sysfs
+	 * attrs nr_requests and wbt_lat_usec. Additionally the nr_hw_queues
+	 * update may modify hctx tags, reserved-tags and cpumask, so this
+	 * lock also helps protect the hctx sysfs/debugfs attrs. To ensure
+	 * proper locking order during an elevator or nr_hw_queue update,
+	 * first freeze the queue, then acquire ->elevator_lock.
+ */
+ struct mutex elevator_lock;
+
struct mutex sysfs_lock;
- struct mutex sysfs_dir_lock;
+ /*
+ * Protects queue limits and also sysfs attribute read_ahead_kb.
+ */
struct mutex limits_lock;
/*
@@ -498,6 +620,16 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+#ifdef CONFIG_LOCKDEP
+ struct task_struct *mq_freeze_owner;
+ int mq_freeze_owner_depth;
+ /*
+	 * Records the disk & queue state in the current context; used when
+	 * unfreezing the queue.
+ */
+ bool mq_freeze_disk_dead;
+ bool mq_freeze_queue_dying;
+#endif
wait_queue_head_t mq_freeze_wq;
/*
* Protect concurrent access to q_usage_counter by
@@ -515,65 +647,46 @@ struct request_queue {
* Serializes all debugfs metadata operations using the above dentries.
*/
struct mutex debugfs_mutex;
-
- bool mq_sysfs_init_done;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
-#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
-#define QUEUE_FLAG_DYING 1 /* queue being torn down */
-#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
-#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
-#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
-#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
-#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
-#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 17 /* Write back caching */
-#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
-#define QUEUE_FLAG_DAX 19 /* device supports DAX */
-#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
-#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
-#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
-#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
-#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
-#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
-#define QUEUE_FLAG_SQ_SCHED 30 /* single queue style io dispatch */
-#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE 31 /* quiesce_tagset skip the queue*/
-
-#define QUEUE_FLAG_MQ_DEFAULT ((1UL << QUEUE_FLAG_IO_STAT) | \
- (1UL << QUEUE_FLAG_SAME_COMP) | \
- (1UL << QUEUE_FLAG_NOWAIT))
+enum {
+ QUEUE_FLAG_DYING, /* queue being torn down */
+ QUEUE_FLAG_NOMERGES, /* disable merge attempts */
+ QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */
+ QUEUE_FLAG_FAIL_IO, /* fake timeout */
+ QUEUE_FLAG_NOXMERGES, /* No extended merges */
+ QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */
+ QUEUE_FLAG_INIT_DONE, /* queue is initialized */
+ QUEUE_FLAG_STATS, /* track IO start and completion times */
+ QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
+ QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
+ QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
+ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
+ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
+ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
+ QUEUE_FLAG_MAX
+};
+
+#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_stable_writes(q) \
- test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
-#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
-#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_zone_resetall(q) \
- test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_pci_p2pdma(q) \
- test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_passthrough_stat(q) \
+ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
+#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
+#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
@@ -589,7 +702,11 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
- test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
+ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
@@ -619,15 +736,8 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
-}
-
-#ifdef CONFIG_BLK_DEV_ZONED
-unsigned int bdev_nr_zones(struct block_device *bdev);
-
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (q->limits.features & BLK_FEAT_ZONED);
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
@@ -637,18 +747,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline void disk_set_max_open_zones(struct gendisk *disk,
- unsigned int max_open_zones)
-{
- disk->queue->limits.max_open_zones = max_open_zones;
-}
-
-static inline void disk_set_max_active_zones(struct gendisk *disk,
- unsigned int max_active_zones)
-{
- disk->queue->limits.max_active_zones = max_active_zones;
-}
-
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return bdev->bd_disk->queue->limits.max_open_zones;
@@ -659,36 +757,6 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
return bdev->bd_disk->queue->limits.max_active_zones;
}
-bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline unsigned int disk_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
-static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
-{
- return 0;
-}
-static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
-{
- return 0;
-}
-static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
-{
- return false;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
if (q->queue_depth)
@@ -707,6 +775,9 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
#define for_each_bio(_bio) \
for (; _bio; _bio = _bio->bi_next)
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
@@ -738,13 +809,13 @@ static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
atomic_andnot(flag, &bdev->__bd_flags);
}
-static inline int get_disk_ro(struct gendisk *disk)
+static inline bool get_disk_ro(struct gendisk *disk)
{
return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
test_bit(GD_READ_ONLY, &disk->state);
}
-static inline int bdev_read_only(struct block_device *bdev)
+static inline bool bdev_read_only(struct block_device *bdev)
{
return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}
@@ -782,6 +853,114 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
(sb->s_blocksize_bits - SECTOR_SHIFT);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
+}
+
+/**
+ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
+ * write plugging
+ * @bio: The BIO being submitted
+ *
+ * Return true whenever @bio execution needs to be handled through zone
+ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
+ */
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ enum req_op op = bio_op(bio);
+
+ /*
+ * Only zoned block devices have a zone write plug hash table. But not
+ * all of them have one (e.g. DM devices may not need one).
+ */
+ if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
+ return false;
+
+ /* Only write operations need zone write plugging. */
+ if (!op_is_write(op))
+ return false;
+
+ /* Ignore empty flush */
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+ return false;
+
+ /* Ignore BIOs that already have been handled by zone write plugging. */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
+ return false;
+
+ /*
+ * All zone write operations must be handled through zone write plugging
+ * using blk_zone_plug_bio().
+ */
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
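
/*
 * Example (sketch): the intended submission-path pairing. The cheap
 * bio_needs_zone_write_plugging() check gates the call to
 * blk_zone_plug_bio(), which returns true when it has taken ownership of
 * the BIO.
 */
static bool example_plug_zoned_bio(struct bio *bio, unsigned int nr_segs)
{
	if (bio_needs_zone_write_plugging(bio))
		return blk_zone_plug_bio(bio, nr_segs);
	return false;
}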
+
+/**
+ * disk_zone_capacity - returns the zone capacity of zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the zone being queried
+ *
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
+{
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
+
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
+}
+
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
+{
+ return disk_zone_capacity(bdev->bd_disk, pos);
+}
+
+bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
+
+static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
+{
+ return false;
+}
+
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ return false;
+}
+
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
+
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void put_disk(struct gendisk *disk);
@@ -839,6 +1018,8 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
@@ -879,14 +1060,15 @@ static inline bool bio_straddles_zones(struct bio *bio)
}
/*
- * Return how much of the chunk is left to be used for I/O at a given offset.
+ * Return how much within the boundary is left to be used for I/O at a given
+ * offset.
*/
-static inline unsigned int blk_chunk_sectors_left(sector_t offset,
- unsigned int chunk_sectors)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+ unsigned int boundary_sectors)
{
- if (unlikely(!is_power_of_2(chunk_sectors)))
- return chunk_sectors - sector_div(offset, chunk_sectors);
- return chunk_sectors - (offset & (chunk_sectors - 1));
+ if (unlikely(!is_power_of_2(boundary_sectors)))
+ return boundary_sectors - sector_div(offset, boundary_sectors);
+ return boundary_sectors - (offset & (boundary_sectors - 1));
}
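
/*
 * Example: with boundary_sectors = 256 (a power of two) and offset = 1000,
 * 1000 & 255 = 232, so 256 - 232 = 24 sectors remain before the boundary.
 */
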
/**
@@ -898,19 +1080,20 @@ static inline unsigned int blk_chunk_sectors_left(sector_t offset,
* the caller can modify. The caller must call queue_limits_commit_update()
* to finish the update.
*
- * Context: process context. The caller must have frozen the queue or ensured
- * that there is outstanding I/O by other means.
+ * Context: process context.
*/
static inline struct queue_limits
queue_limits_start_update(struct request_queue *q)
- __acquires(q->limits_lock)
{
mutex_lock(&q->limits_lock);
return q->limits;
}
+int queue_limits_commit_update_frozen(struct request_queue *q,
+ struct queue_limits *lim);
int queue_limits_commit_update(struct request_queue *q,
struct queue_limits *lim);
int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+int blk_validate_limits(struct queue_limits *lim);
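
/*
 * Example (sketch): the atomic limits-update pattern. The _frozen variant
 * freezes the queue around the commit, so the caller does not have to.
 */
static int example_set_logical_block_size(struct request_queue *q,
					  unsigned int lbs)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.logical_block_size = lbs;
	return queue_limits_commit_update_frozen(q, &lim);
}
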
/**
* queue_limits_cancel_update - cancel an atomic update of queue limits
@@ -926,36 +1109,38 @@ static inline void queue_limits_cancel_update(struct request_queue *q)
}
/*
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_ERASE from the I/O
+ * completion handler when the device returned an indicator that the respective
+ * feature is not actually supported. They are racy and the driver needs to
+ * cope with that. Try to avoid this scheme if you can.
+ */
+static inline void blk_queue_disable_discard(struct request_queue *q)
+{
+ q->limits.max_discard_sectors = 0;
+}
+
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
+{
+ q->limits.max_secure_erase_sectors = 0;
+}
+
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
+{
+ q->limits.max_write_zeroes_sectors = 0;
+ q->limits.max_wzeroes_unmap_sectors = 0;
+}
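
/*
 * Example (sketch): typical use from an I/O completion handler once the
 * device has indicated that discard is not actually supported.
 */
static void example_handle_unsupported(struct request *req, blk_status_t sts)
{
	if (sts == BLK_STS_NOTSUPP && req_op(req) == REQ_OP_DISCARD)
		blk_queue_disable_discard(req->q);
}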
+
+/*
* Access functions for manipulating queue properties
*/
-extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-void blk_queue_max_secure_erase_sectors(struct request_queue *q,
- unsigned int max_sectors);
-extern void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors);
-extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
- unsigned int max_zone_append_sectors);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-void blk_queue_zone_write_granularity(struct request_queue *q,
- unsigned int size);
-extern void blk_queue_alignment_offset(struct request_queue *q,
- unsigned int alignment);
-void disk_update_readahead(struct gendisk *disk);
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
sector_t offset, const char *pfx);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
@@ -967,6 +1152,11 @@ extern void blk_put_queue(struct request_queue *);
void blk_mark_disk_dead(struct gendisk *disk);
+struct rq_list {
+ struct request *head;
+ struct request *tail;
+};
+
#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
@@ -980,10 +1170,10 @@ void blk_mark_disk_dead(struct gendisk *disk);
* blk_flush_plug() is called.
*/
struct blk_plug {
- struct request *mq_list; /* blk-mq requests */
+ struct rq_list mq_list; /* blk-mq requests */
/* if ios_left is > 1, we can batch tag/rq allocations */
- struct request *cached_rq;
+ struct rq_list cached_rqs;
u64 cur_ktime;
unsigned short nr_ios;
@@ -1076,6 +1266,7 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
@@ -1116,14 +1307,10 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-/*
- * Default upper limit for the software max_sectors limit used for
- * regular file system I/O. This can be increased through sysfs.
- *
- * Not to be confused with the max_hw_sector limit that is entirely
- * controlled by the driver, usually based on hardware limits.
- */
-#define BLK_DEF_MAX_SECTORS_CAP 2560u
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+ return &bdev_get_queue(bdev)->limits;
+}
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
@@ -1165,24 +1352,9 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l)
-{
- unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors);
-
- return min_not_zero(l->max_zone_append_sectors, max_sectors);
-}
-
-static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
-{
- if (!blk_queue_is_zoned(q))
- return 0;
-
- return queue_limits_max_zone_append_sectors(&q->limits);
-}
-
static inline bool queue_emulates_zone_append(struct request_queue *q)
{
- return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
+ return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
}
static inline bool bdev_emulates_zone_append(struct block_device *bdev)
@@ -1193,7 +1365,7 @@ static inline bool bdev_emulates_zone_append(struct block_device *bdev)
static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
- return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+ return bdev_limits(bdev)->max_zone_append_sectors;
}
static inline unsigned int bdev_max_segments(struct block_device *bdev)
@@ -1201,14 +1373,16 @@ static inline unsigned int bdev_max_segments(struct block_device *bdev)
return queue_max_segments(bdev_get_queue(bdev));
}
-static inline unsigned queue_logical_block_size(const struct request_queue *q)
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
{
- int retval = 512;
-
- if (q && q->limits.logical_block_size)
- retval = q->limits.logical_block_size;
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
- return retval;
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
+{
+ return q->limits.logical_block_size;
}
static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
@@ -1231,7 +1405,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q)
return q->limits.io_min;
}
-static inline int bdev_io_min(struct block_device *bdev)
+static inline unsigned int bdev_io_min(struct block_device *bdev)
{
return queue_io_min(bdev_get_queue(bdev));
}
@@ -1241,7 +1415,7 @@ static inline unsigned int queue_io_opt(const struct request_queue *q)
return q->limits.io_opt;
}
-static inline int bdev_io_opt(struct block_device *bdev)
+static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
return queue_io_opt(bdev_get_queue(bdev));
}
@@ -1263,28 +1437,29 @@ unsigned int bdev_discard_alignment(struct block_device *bdev);
static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.max_discard_sectors;
+ return bdev_limits(bdev)->max_discard_sectors;
}
static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.discard_granularity;
+ return bdev_limits(bdev)->discard_granularity;
}
static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
+ return bdev_limits(bdev)->max_secure_erase_sectors;
}
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_zeroes_sectors;
+ return bdev_limits(bdev)->max_write_zeroes_sectors;
+}
- return 0;
+static inline unsigned int
+bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
static inline bool bdev_nonrot(struct block_device *bdev)
@@ -1294,29 +1469,38 @@ static inline bool bdev_nonrot(struct block_device *bdev)
static inline bool bdev_synchronous(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_SYNCHRONOUS,
- &bdev_get_queue(bdev)->queue_flags);
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
static inline bool bdev_stable_writes(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_STABLE_WRITES,
- &bdev_get_queue(bdev)->queue_flags);
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
+ return true;
+ return q->limits.features & BLK_FEAT_STABLE_WRITES;
+}
+
+static inline bool blk_queue_write_cache(struct request_queue *q)
+{
+ return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}
static inline bool bdev_write_cache(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
+ return blk_queue_write_cache(bdev_get_queue(bdev));
}
static inline bool bdev_fua(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
+ return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}
static inline bool bdev_nowait(struct block_device *bdev)
{
- return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}
static inline bool bdev_is_zoned(struct block_device *bdev)
@@ -1338,6 +1522,12 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
return q->limits.chunk_sectors;
}
+static inline sector_t bdev_zone_start(struct block_device *bdev,
+ sector_t sector)
+{
+ return sector & ~(bdev_zone_sectors(bdev) - 1);
+}
+
static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
sector_t sector)
{
@@ -1356,9 +1546,43 @@ static inline bool bdev_is_zone_start(struct block_device *bdev,
return bdev_offset_from_zone_start(bdev, sector) == 0;
}
-static inline int queue_dma_alignment(const struct request_queue *q)
+/* Check whether @sector is a multiple of the zone size. */
+static inline bool bdev_is_zone_aligned(struct block_device *bdev,
+ sector_t sector)
{
- return q ? q->limits.dma_alignment : 511;
+ return bdev_is_zone_start(bdev, sector);
+}
+
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+
+static inline unsigned int queue_dma_alignment(const struct request_queue *q)
+{
+ return q->limits.dma_alignment;
+}
+
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_max;
+}
+
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min;
+}
+
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
+}
+
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
@@ -1366,17 +1590,17 @@ static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
return queue_dma_alignment(bdev_get_queue(bdev));
}
-static inline bool bdev_iter_is_aligned(struct block_device *bdev,
- struct iov_iter *iter)
+static inline unsigned int
+blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
- return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
- bdev_logical_block_size(bdev) - 1);
+ return lim->dma_alignment | lim->dma_pad_mask;
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
+static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
unsigned int len)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
return !(addr & alignment) && !(len & alignment);
}
@@ -1429,13 +1653,14 @@ struct block_device_operations {
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
void (*unlock_native_capacity) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*getgeo)(struct gendisk *, struct hd_geometry *);
int (*set_read_only)(struct block_device *bdev, bool ro);
void (*free_disk)(struct gendisk *disk);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
char *(*devnode)(struct gendisk *disk, umode_t *mode);
/* returns the length of the identifier or a negative errno: */
int (*get_unique_id)(struct gendisk *disk, u8 id[16],
@@ -1490,7 +1715,7 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
-int bdev_read_only(struct block_device *bdev);
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
int set_blocksize(struct file *file, int size);
int lookup_bdev(const char *pathname, dev_t *dev);
@@ -1547,10 +1772,6 @@ int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-/* just for blk-cgroup, don't use elsewhere */
-struct block_device *blkdev_get_no_open(dev_t dev);
-void blkdev_put_no_open(struct block_device *bdev);
-
struct block_device *I_BDEV(struct inode *inode);
struct block_device *file_bdev(struct file *bdev_file);
bool disk_live(struct gendisk *disk);
@@ -1562,7 +1783,7 @@ int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
-void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
@@ -1580,7 +1801,8 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
static inline void sync_bdevs(bool wait)
{
}
-static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
{
}
static inline void printk_all_partitions(void)
@@ -1597,11 +1819,58 @@ int bdev_thaw(struct block_device *bdev);
void bdev_fput(struct file *bdev_file);
struct io_comp_batch {
- struct request *req_list;
+ struct rq_list req_list;
bool need_ts;
void (*complete)(struct io_comp_batch *);
};
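
/*
 * Example (sketch): req_list is now a struct rq_list, so an empty batch is
 * detected with rq_list_empty() on the embedded list rather than a NULL
 * head-pointer check.
 */
static inline void example_flush_batch(struct io_comp_batch *iob)
{
	if (iob->complete && !rq_list_empty(&iob->req_list))
		iob->complete(iob);
}
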
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+ struct queue_limits *limits)
+{
+ unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+ limits->atomic_write_hw_boundary);
+
+ return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
+}
+
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
+{
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
+
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev))
+ return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+ limits);
+
+ return true;
+}
+
+static inline unsigned int
+bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
+}
+
+static inline unsigned int
+bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
+}
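
/*
 * Example (sketch): sizing a torn-write-safe request. A write qualifies
 * for REQ_ATOMIC only if the device (and partition offset) supports atomic
 * writes and the length is a power of two within the advertised unit range.
 */
static bool example_can_write_atomically(struct block_device *bdev,
					 unsigned int bytes)
{
	return bdev_can_atomic_write(bdev) && is_power_of_2(bytes) &&
	       bytes >= bdev_atomic_write_unit_min_bytes(bdev) &&
	       bytes <= bdev_atomic_write_unit_max_bytes(bdev);
}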
+
+static inline int bio_split_rw_at(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
+{
+ return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
+}
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 122c62e561fc..05c8754456aa 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -14,11 +14,12 @@
#include <linux/sysfs.h>
struct blk_trace {
+ int version;
int trace_state;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
- u16 act_mask;
+ u64 act_mask;
u64 start_lba;
u64 end_lba;
u32 pid;
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
new file mode 100644
index 000000000000..47c34990cf23
--- /dev/null
+++ b/include/linux/bnxt/hsi.h
@@ -0,0 +1,11166 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2025 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
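
/*
 * Example (sketch): walking a chain of encapsulated TLVs. This assumes the
 * length field covers the whole TLV including its header; real consumers
 * live in the bnxt_en driver.
 */
static const struct tlv *example_next_tlv(const struct tlv *cur)
{
	if ((cur->flags & TLV_FLAGS_MORE) == TLV_FLAGS_MORE_LAST)
		return NULL;
	return (const struct tlv *)((const u8 *)cur + le16_to_cpu(cur->length));
}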
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
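+
+/*
+ * Usage sketch (illustrative only, not part of the generated
+ * interface): the HWRM_* command IDs above are the values a driver
+ * places in the req_type field of the common request header, e.g.:
+ *
+ *	struct hwrm_ver_get_input req = {0};
+ *
+ *	req.req_type = cpu_to_le16(HWRM_VER_GET);
+ */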
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
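+
+/*
+ * Usage sketch (illustrative only): a response's error_code is
+ * compared against these values; HWRM_ERR_CODE_BUSY is the usual cue
+ * to back off and retry, while any other non-zero code is a hard
+ * failure.
+ *
+ *	u16 err = le16_to_cpu(resp->error_code);
+ *
+ *	if (err == HWRM_ERR_CODE_BUSY)
+ *		return -EAGAIN;
+ *	if (err != HWRM_ERR_CODE_SUCCESS)
+ *		return -EIO;
+ */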
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 133
+#define HWRM_VERSION_STR "1.10.3.133"
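+
+/*
+ * Note (illustrative): HWRM_VERSION_STR is simply
+ * HWRM_VERSION_MAJOR.MINOR.UPDATE.RSVD rendered as a dotted string,
+ * i.e. 1.10.3.133 for this revision of the interface.
+ */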
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
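+
+/*
+ * Usage sketch (illustrative only; hwrm_send() and bp are hypothetical
+ * stand-ins for a driver's message transport): the caller advertises
+ * the interface version it was built against when querying firmware.
+ *
+ *	struct hwrm_ver_get_input req = {0};
+ *
+ *	req.req_type = cpu_to_le16(HWRM_VER_GET);
+ *	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ *	req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ *	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ *	rc = hwrm_send(bp, &req, sizeof(req));
+ */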
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
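+
+/*
+ * Usage sketch (illustrative only): dev_caps_cfg is a capability
+ * bitmask; firmware that sets SHORT_CMD_REQUIRED will only accept the
+ * short command format, so a driver typically latches both bits.
+ *
+ *	u32 caps = le32_to_cpu(resp->dev_caps_cfg);
+ *
+ *	if ((caps & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ *	    (caps & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ *		use_short_cmd = true;
+ */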
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
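+
+/*
+ * Usage sketch (illustrative only): one plausible decode of
+ * req_buf_addr_v, where bit 0 of the low dword is the valid bit and
+ * the remaining bits hold the (at least 2-byte aligned) request
+ * buffer address.
+ *
+ *	u64 addr = ((u64)le32_to_cpu(cmpl->req_buf_addr_v[1]) << 32) |
+ *		   (le32_to_cpu(cmpl->req_buf_addr_v[0]) &
+ *		    FWD_REQ_CMPL_REQ_BUF_ADDR_MASK);
+ */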
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x52UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
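+
+/*
+ * Usage sketch (illustrative only; handle_link_change() is a
+ * hypothetical handler): once the valid bit matches the current pass
+ * through the completion ring, async events are dispatched on
+ * event_id.
+ *
+ *	switch (le16_to_cpu(cmpl->event_id)) {
+ *	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ *		handle_link_change(cmpl);
+ *		break;
+ *	}
+ */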
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
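+
+/*
+ * Usage sketch (illustrative only): event_data1 packs the new link
+ * state in bit 0 and the port ID in bits 4..19.
+ *
+ *	u32 data1 = le32_to_cpu(cmpl->event_data1);
+ *	bool link_up = data1 &
+ *	    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
+ *	u16 port_id = (data1 &
+ *	    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
+ *	    ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;
+ */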
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
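+
+/*
+ * Usage sketch (illustrative only): bits 16..31 of event_data1 tell
+ * the driver how long to wait before re-establishing contact with
+ * firmware, in 100 ms ticks.
+ *
+ *	u32 data1 = le32_to_cpu(cmpl->event_data1);
+ *	u32 delay_ms = 100 * ((data1 &
+ *	    ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
+ *	    ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT);
+ */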
+
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
+
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
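+
+/*
+ * Usage sketch (illustrative only): the most significant 16 bits of
+ * the PHC time are carried in bits 4..19 of event_data1.
+ *
+ *	u32 data1 = le32_to_cpu(cmpl->event_data1);
+ *	u16 phc_msb = (data1 &
+ *	    ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>
+ *	    ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT;
+ */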
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
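+
+/*
+ * Usage sketch (illustrative only): the PPS timestamp is split across
+ * the two data words; the upper 16 bits sit in bits 4..19 of
+ * event_data2 and the lower 32 bits fill event_data1.
+ *
+ *	u64 ts = ((u64)((le32_to_cpu(cmpl->event_data2) &
+ *	    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
+ *	    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT) << 32) |
+ *	    le32_to_cpu(cmpl->event_data1);
+ */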
+
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+};
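+
+/*
+ * Usage sketch (illustrative only): all ERROR_REPORT events share this
+ * base layout; the low byte of event_data1 selects which of the more
+ * specific error-report structures below applies.
+ *
+ *	u8 err_type = le32_to_cpu(cmpl->event_data1) &
+ *	    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK;
+ */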
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
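The NVM report packs two fields into event_data1 (the base error type in bits 7:0, the failed operation in bits 15:8) while event_data2 carries the faulting NVM address. Because the *_NVM_ERR_TYPE_* values are defined pre-shifted, the masked value compares against them directly. A sketch under that assumption, with `cmpl` a hypothetical pointer to the completion:

u32 op   = data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK;
u32 addr = le32_to_cpu(cmpl->event_data2);   /* ERR_ADDR occupies all 32 bits */

if (op == ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE)
        pr_err("NVM write error at 0x%x\n", addr);
else if (op == ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE)
        pr_err("NVM erase error at 0x%x\n", addr);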
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
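Here event_data2 reports both temperatures and event_data1 adds which threshold fired plus the crossing direction; TRANSITION_DIR is a single bit, so a plain AND suffices. A decode sketch, with `data1`/`data2` the CPU-order words:

u8  cur = data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK;
u8  thr = (data2 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>
          ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT;
bool rising = data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR;
u32  level  = data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK;
/* level compares directly against the pre-shifted *_THRESHOLD_TYPE_{WARN,CRITICAL,FATAL,SHUTDOWN} values */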
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
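The enables word gates which optional fields firmware reads: vf_id is honored only when VF_ID_VALID is set, and only the RESETVF level needs it. A request-building sketch, assuming the HWRM_FUNC_RESET opcode constant from the command enumeration earlier in this header; the send path is omitted:

struct hwrm_func_reset_input req = {};

req.req_type         = cpu_to_le16(HWRM_FUNC_RESET);
req.enables          = cpu_to_le32(FUNC_RESET_REQ_ENABLES_VF_ID_VALID);
req.vf_id            = cpu_to_le16(vf_id);   /* caller-supplied VF index */
req.func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF;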
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
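The same enables convention applies to hwrm_func_vf_cfg: the VF driver sets one bit per field it wants firmware to act on and leaves the rest zero. A sketch requesting just an MTU and a default MAC, with `mac` a caller-supplied 6-byte array:

struct hwrm_func_vf_cfg_input req = {};

req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU |
                          FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
req.mtu = cpu_to_le16(9000);
memcpy(req.dflt_mac_addr, mac, sizeof(req.dflt_mac_addr));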
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
+
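Feature discovery against this response is a straight bit test; flags, flags_ext, flags_ext2 and flags_ext3 are little-endian on the wire, so convert once and test. A sketch with a hypothetical helper name:

static bool qcaps_supports_udp_gso(const struct hwrm_func_qcaps_output *resp)
{
        return le32_to_cpu(resp->flags_ext2) &
               FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED;
}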
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1408b/176B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
+ u8 valid;
+};
+
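The min_bw/max_bw words are self-describing: bits 27:0 carry the value, bit 28 (SCALE) selects bits vs. bytes, and bits 31:29 give the unit multiplier. A decode sketch over a valid response `resp`:

u32 bw     = le32_to_cpu(resp->min_bw);
u32 value  = (bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >> FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
bool bytes = bw & FUNC_QCFG_RESP_MIN_BW_SCALE;               /* set => bytes, clear => bits */
u32 unit   = bw & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK;  /* compare with *_UNIT_{MEGA,KILO,BASE,GIGA,...} */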
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ u8 pcie_compliance;
+ u8 unused_2;
+};
+
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
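A PF-side configuration request follows the same enables discipline, plus fid to address the target function. A sketch setting the admin MTU, with `fid` and `new_mtu` caller inputs and send/response handling omitted:

struct hwrm_func_cfg_input req = {};

req.fid       = cpu_to_le16(fid);
req.enables   = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
                            FUNC_CFG_REQ_ENABLES_MRU);
req.admin_mtu = cpu_to_le16(new_mtu);
req.mru       = cpu_to_le16(new_mtu);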
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
+ u8 unused_0[7];
+};
+
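When hwrm_func_cfg fails, this per-command code refines the generic error_code; the asset-test codes identify which resource reservation could not be satisfied. A partial decode sketch with a hypothetical function name:

static const char *func_cfg_err_str(u8 code)
{
        switch (code) {
        case FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE:
                return "partition bandwidth out of range";
        case FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED:
                return "tx ring asset test failed";
        case FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL:
                return "scheduler queue config failed";
        default:
                return "unknown";
        }
}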
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
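All counters here are 64-bit little-endian, so aggregation is just conversion plus addition; clear_seq appears intended to let a poller notice an intervening counter clear between samples. A summing sketch over a valid response `resp`:

u64 rx_pkts    = le64_to_cpu(resp->rx_ucast_pkts) +
                 le64_to_cpu(resp->rx_mcast_pkts) +
                 le64_to_cpu(resp->rx_bcast_pkts);
u64 rx_dropped = le64_to_cpu(resp->rx_discard_pkts) +
                 le64_to_cpu(resp->rx_drop_pkts);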
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
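vf_req_fwd and async_event_fwd are 256-bit bitmaps (8 x 32-bit words) indexed by command/event id: the driver sets one bit per event it wants forwarded and flags the array via enables. A sketch with a hypothetical helper name, assuming that indexing convention:

static void rgtr_fwd_event(struct hwrm_func_drv_rgtr_input *req, u16 event_id)
{
        req->async_event_fwd[event_id / 32] |= cpu_to_le32(1U << (event_id % 32));
        req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
}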
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
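+/*
+ * hwrm_func_drv_qver queries the type and version of the driver
+ * registered on the function identified by fid.
+ */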
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
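+/*
+ * hwrm_func_resource_qcaps reports the minimum and maximum of each
+ * resource (rings, VNICs, stat contexts, key contexts, ...) available
+ * to the function, plus the VF reservation strategy in use.
+ */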
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
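+/*
+ * hwrm_func_vf_resource_cfg lets the PF assign min/max resource ranges
+ * to a VF; the output reports what the firmware actually reserved.
+ */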
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
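+/*
+ * hwrm_func_backing_store_qcaps returns the entry sizes and entry-count
+ * limits for each context type (QP, SRQ, CQ, VNIC, stats, TQM, MRAV,
+ * TIM, key contexts) that must be backed by host memory.
+ */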
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
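+/*
+ * tqm_fp_ring_cfg describes the backing store for one TQM fast-path
+ * ring: the low nibble of the first byte selects the PBL indirection
+ * level and the high nibble the page size, mirroring the
+ * *_pg_size_*_lvl fields used elsewhere in this file.
+ */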
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
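+/*
+ * hwrm_func_backing_store_cfg programs the page directory, entry count
+ * and entry size for each context type advertised by
+ * backing_store_qcaps. Every *_pg_size_*_lvl byte packs the PBL level
+ * in bits 3:0 and the page size in bits 7:4.
+ */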
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
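+/*
+ * hwrm_error_recovery_qcfg returns the register map and timing the host
+ * uses for firmware health monitoring and error recovery. Each *_reg
+ * value encodes the address space in bits 1:0 and the register offset
+ * in bits 31:2.
+ */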
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
+
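+/*
+ * hwrm_func_echo_response echoes event_data1/2 from an echo_request
+ * async event back to the firmware, letting it confirm that the driver
+ * is still processing events.
+ */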
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
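+/*
+ * hwrm_func_ptp_pin_qcfg reports how many TSIO pins exist and the
+ * current state and usage (PPS/sync in or out) of pins 0-3.
+ */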
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
+
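+/*
+ * hwrm_func_ptp_pin_cfg enables or disables individual TSIO pins and
+ * sets their usage; only the fields selected in enables are applied.
+ */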
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
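+/*
+ * hwrm_func_ptp_cfg configures PTP behavior on the function: PPS event
+ * routing, DLL- or externally-driven frequency adjustment, and an
+ * absolute PHC time set via ptp_set_time.
+ */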
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
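+/*
+ * hwrm_func_ptp_ts_query reads back the latest PPS event timestamp
+ * and/or the PCIe PTM local/system time pair, as selected by flags.
+ */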
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
+
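+/*
+ * hwrm_func_ptp_ext_cfg assigns the PHC master and secondary functions
+ * and the failover timer used when the master stops driving the clock.
+ */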
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
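+/*
+ * hwrm_func_ptp_ext_qcfg reports the current PHC master/secondary
+ * assignment and details of the last failover event.
+ */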
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
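+/*
+ * hwrm_func_backing_store_cfg_v2 supersedes the monolithic
+ * backing_store_cfg command: each call configures one (type, instance)
+ * backing store, and BS_CFG_ALL_DONE marks the final call of the
+ * sequence.
+ */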
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
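+/*
+ * hwrm_func_backing_store_qcfg_v2 reads back the current configuration
+ * of one (type, instance) backing store.
+ */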
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
+
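One pattern worth noting in hwrm_port_phy_qcfg_output: the link_speed codes are in units of 100 Mbps (1GB = 0xa, 100GB = 0x3e8), with 0xffff reserved as a sentinel for 10 Mbps, and the SPEEDS2 force codes extend the same scheme by adding 1 for PAM4-56 and 2 for PAM4-112 signalling (50GB = 0x1f4, 50GB_PAM4_56 = 0x1f5, 100GB_PAM4_112 = 0x3ea). A hedged, stand-alone sketch of the conversion (editorial, not part of the patch):

/* Editorial sketch, not part of the patch: converting the fw link_speed
 * code to Mbps. The values above imply units of 100 Mbps, with 0xffff
 * as the 10 Mbps sentinel (PORT_PHY_QCFG_RESP_LINK_SPEED_10MB).
 */
#include <stdint.h>
#include <stdio.h>

#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL

static unsigned int fw_link_speed_to_mbps(uint16_t code)
{
	if (code == PORT_PHY_QCFG_RESP_LINK_SPEED_10MB)
		return 10;
	return code * 100;	/* 0xa -> 1000, 0x3e8 -> 100000 */
}

int main(void)
{
	printf("%u Mbps\n", fw_link_speed_to_mbps(0x1f4));  /* 50000 */
	printf("%u Mbps\n", fw_link_speed_to_mbps(0xffff)); /* 10 */
	return 0;
}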
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __le64 ptp_adj_phase;
+};
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
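+
+/*
+ * Minimal usage sketch for hwrm_port_qstats (illustration only, not
+ * part of this patch; the helper name and the message plumbing around
+ * it are hypothetical). The request carries two DMA addresses, and
+ * firmware writes a tx_port_stats block and an rx_port_stats block to
+ * them.
+ */
+#if 0	/* example, not compiled */
+static void example_fill_port_qstats(struct hwrm_port_qstats_input *req,
+				     u16 port_id, dma_addr_t tx_map,
+				     dma_addr_t rx_map)
+{
+	req->port_id = cpu_to_le16(port_id);
+	req->tx_stat_host_addr = cpu_to_le64(tx_map);
+	req->rx_stat_host_addr = cpu_to_le64(rx_map);
+}
+#endif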
+
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
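+
+/*
+ * Note: the *_cos0..7 counters above are per class-of-service queue,
+ * and each PFC priority contributes a (duration_us, transitions) pair:
+ * 16 CoS counters plus 8 * 2 PFC counters = 32 * 8B = 256B, matching
+ * the size annotation.
+ */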
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
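+
+/*
+ * Minimal usage sketch for hwrm_port_ts_query (illustration only; the
+ * helper is hypothetical). PATH is a single-bit flag selecting the TX
+ * or RX timestamp, while setting CURRENT_TIME instead asks for the
+ * current time; ptp_seq_id and ptp_hdr_offset are honoured only when
+ * their enables bits are set.
+ */
+#if 0	/* example, not compiled */
+static void example_fill_ts_query(struct hwrm_port_ts_query_input *req,
+				  u16 port_id, bool rx, u32 ptp_seq)
+{
+	u32 flags = rx ? PORT_TS_QUERY_REQ_FLAGS_PATH_RX :
+			 PORT_TS_QUERY_REQ_FLAGS_PATH_TX;
+
+	req->flags = cpu_to_le32(flags);
+	req->port_id = cpu_to_le16(port_id);
+	req->enables = cpu_to_le16(PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID);
+	req->ptp_seq_id = cpu_to_le32(ptp_seq);
+}
+#endif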
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
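+
+/*
+ * Note: tx_lpi_timer_low and valid_tx_lpi_timer_high each pack a
+ * 24-bit LPI timer value into bits 0..23 of a dword, with the top byte
+ * reserved (the "valid" in the latter's name looks historical; the
+ * response's actual valid byte is the trailing u8 above). A decode
+ * sketch (hypothetical helper, not part of this patch):
+ */
+#if 0	/* example, not compiled */
+static u32 example_tx_lpi_timer_low(struct hwrm_port_phy_qcaps_output *resp)
+{
+	return le32_to_cpu(resp->tx_lpi_timer_low) &
+	       PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+}
+#endif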
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
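+
+/*
+ * Note: the i2c write and read messages mirror each other: both
+ * address a device through i2c_slave_addr plus an optional bank/page
+ * (gated by the enables bits), and both move up to 64 bytes through
+ * the data[16] dword array, with data_length apparently giving the
+ * byte count. This pair is typically used for SFP/QSFP module EEPROM
+ * access (an observation about usage, not stated by the header).
+ */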
+
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
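+
+/*
+ * Note: cl45_mdio presumably selects IEEE 802.3 Clause 45 addressing,
+ * in which case dev_addr names the MMD device on the PHY; for Clause
+ * 22, reg_addr alone identifies the register (inferred from the field
+ * names, not spelled out in the header).
+ */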
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
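+
+/*
+ * Note: the request above repeats one 10-byte block per LED
+ * (id, state, color, pad, blink_on, blink_off, group_id, pad) four
+ * times, and the enables bitmap at the top gates, per LED, which of
+ * those fields firmware should apply; num_leds bounds how many of the
+ * blocks are meaningful.
+ */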
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
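+
+/*
+ * Note: in the service-profile enums above, LOSSLESS and LOSSLESS_ROCE
+ * share the value 0x1 (the latter reads as a RoCE-era alias of the
+ * former), and the per-queue *_service_profile_type bytes are bitmaps
+ * (ROCE/NIC/CNP) that appear meaningful only when queue_cfg_info has
+ * USE_PROFILE_TYPE set (the gating is inferred from the flag name).
+ */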
+
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
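+
+/*
+ * Note: in the flags word above, bits 0..7 enable PFC itself per
+ * priority and bits 8..15 enable the PFC watchdog per priority, so
+ * priority N always maps to bits N and N + 8; the qcfg response a few
+ * structures up echoes the same layout.
+ */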
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
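+
+/*
+ * Note: unlike the qcfg request, whose PATH flag is a single bit, this
+ * cfg request uses a two-bit PATH field (mask 0x3) so it can also
+ * express BIDIR; the enables bitmap selects which of the eight
+ * priN_cos_queue_id mappings are actually being programmed.
+ */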
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_2[4];
+ u8 valid;
+};
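+
+/*
+ * Note on the min_bw/max_bw encoding used throughout the cos2bw
+ * messages: bits 0..27 carry the bandwidth value, bit 28 selects the
+ * scale (bits vs bytes) and bits 29..31 select the unit (mega, kilo,
+ * base, giga, 0.01-percent, or invalid). A decode sketch (hypothetical
+ * helpers, not part of this patch):
+ */
+#if 0	/* example, not compiled */
+static u32 example_bw_value(__le32 bw)
+{
+	return le32_to_cpu(bw) &
+	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK;
+}
+
+static bool example_bw_scale_is_bytes(__le32 bw)
+{
+	return le32_to_cpu(bw) &
+	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE;
+}
+
+static u32 example_bw_unit(__le32 bw)
+{
+	return (le32_to_cpu(bw) &
+		QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK) >>
+	       QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT;
+}
+#endif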
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_1[5];
+};
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
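+
+/*
+ * Illustrative sketch: every hwrm_*_input above starts with the same
+ * 16-byte header (req_type, cmpl_ring, seq_id, target_id, resp_addr)
+ * and every hwrm_*_output ends in a `valid` byte. Firmware DMAs the
+ * response into the buffer at resp_addr and writes `valid` last, so a
+ * nonzero `valid` tells the host the full response has landed. A
+ * hypothetical poll, assuming a coherently mapped response buffer:
+ */
+static inline bool
+bnxt_example_resp_ready(const struct hwrm_queue_cos2bw_cfg_output *resp)
+{
+	/* nonzero once firmware has finished writing the response */
+	return READ_ONCE(resp->valid) != 0;
+}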
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_pfcwd_timeout;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
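+
+/*
+ * Illustrative sketch: a request field is only honored when its bit in
+ * `enables` is set, so single fields can be updated without disturbing
+ * the rest. E.g. changing just the MRU of a VNIC:
+ */
+static inline void bnxt_example_vnic_set_mru(struct hwrm_vnic_update_input *req,
+					     u32 vnic_id, u16 mru)
+{
+	req->vnic_id = cpu_to_le32(vnic_id);
+	req->enables = cpu_to_le32(VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+	req->mru = cpu_to_le16(mru);
+}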
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
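+
+/*
+ * Illustrative sketch: TPA/GRO is enabled per VNIC through the flag
+ * bits, and aggregation of tunneled flows is opted into per
+ * encapsulation via tnl_tpa_en_bitmap (itself gated by the TNL_TPA_EN
+ * enables bit). E.g. TPA with GRO, aggregating VXLAN and Geneve:
+ */
+static inline void bnxt_example_tpa_cfg(struct hwrm_vnic_tpa_cfg_input *req)
+{
+	req->flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
+				 VNIC_TPA_CFG_REQ_FLAGS_GRO);
+	req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
+	req->tnl_tpa_en_bitmap =
+		cpu_to_le32(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN |
+			    VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE);
+}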
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
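+
+/*
+ * Illustrative sketch: hash_type selects which flow types feed the RSS
+ * hash and ring_select_mode picks the hash function. A 4-tuple
+ * TCP-over-IPv4/IPv6 Toeplitz setup; the DMA addresses of the ring
+ * group table and the 40-byte hash key are assumed already mapped:
+ */
+static inline void bnxt_example_rss_cfg(struct hwrm_vnic_rss_cfg_input *req,
+					dma_addr_t grp_tbl, dma_addr_t key_tbl)
+{
+	req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+				     VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
+	req->ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ;
+	req->ring_grp_tbl_addr = cpu_to_le64(grp_tbl);
+	req->hash_key_tbl_addr = cpu_to_le64(key_tbl);
+}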
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:768b/96B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
+};
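+
+/*
+ * Illustrative sketch: a ring is described to firmware by the DMA
+ * address of its page table, its type and its length in entries;
+ * optional attachments (stats context, NQ, ...) are switched on via
+ * `enables`. E.g. an RX ring, with the ids left to the caller:
+ */
+static inline void bnxt_example_rx_ring_alloc(struct hwrm_ring_alloc_input *req,
+					      dma_addr_t pg_tbl, u32 entries)
+{
+	req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+	req->page_tbl_addr = cpu_to_le64(pg_tbl);
+	req->length = cpu_to_le32(entries);
+	req->enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID |
+				   RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+	/* stat_ctx_id and nq_ring_id must then be filled by the caller */
+}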
+
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
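+
+/*
+ * Editorial note, not from the generated file: the terse members above
+ * are, as used by the bnxt_en driver, cr = completion ring id, rr = RX
+ * ring id, ar = RX aggregation ring id and sc = statistics context id,
+ * binding the four resources of one RX queue into a group:
+ */
+static inline void bnxt_example_grp_alloc(struct hwrm_ring_grp_alloc_input *req,
+					  u16 cp, u16 rx, u16 agg, u16 stats)
+{
+	req->cr = cpu_to_le16(cp);	/* completion ring */
+	req->rr = cpu_to_le16(rx);	/* RX ring */
+	req->ar = cpu_to_le16(agg);	/* RX aggregation ring */
+	req->sc = cpu_to_le16(stats);	/* statistics context */
+}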
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
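+
+/*
+ * Illustrative sketch: the returned flow_id is itself a packed word --
+ * bits 0..29 carry the value, bit 30 the internal/external type and
+ * bit 31 the RX/TX direction. Hypothetical decode helpers:
+ */
+static inline u32 bnxt_example_flow_value(__le32 flow_id)
+{
+	return le32_to_cpu(flow_id) &
+	       CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK;
+}
+
+static inline bool bnxt_example_flow_is_tx(__le32 flow_id)
+{
+	return !!(le32_to_cpu(flow_id) &
+		  CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR);
+}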
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
+};
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
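+/*
+ * N-tuple filters back ethtool n-tuple and aRFS steering in the
+ * bnxt_en driver. Per HWRM convention, a match field in the request
+ * is only interpreted when its corresponding bit in 'enables' is
+ * set; the *_MASK fields allow wildcarding individual bits of the
+ * addresses and ports.
+ */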
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
+ __le16 dst_id;
+ __le16 rfs_ring_tbl_idx;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
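+/*
+ * When a command fails, the generic error_code in the response can
+ * be supplemented by a command-specific *_cmd_err structure such as
+ * the one below, overlaid on the response buffer, giving a more
+ * precise reason for the failure.
+ */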
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
+ u8 unused_0[7];
+};
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
+struct hwrm_cfa_flow_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[2];
+ __le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
+ __le64 ext_flow_handle;
+ __le32 flow_counter_id;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
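+/*
+ * flow_handle is the 16-bit legacy handle; ext_flow_handle is the
+ * 64-bit form used when firmware advertises
+ * CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED.
+ * The driver passes whichever form the firmware supports.
+ */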
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
+struct hwrm_cfa_flow_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ __le16 unused_0;
+ __le32 flow_counter_id;
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[6];
+ __le32 flow_key_data[130];
+ __le32 flow_action_info[30];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ __le16 flow_hits;
+ u8 unused_0[5];
+ u8 valid;
+};
+
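+/*
+ * VFR = VF representor. In switchdev mode the PF creates a
+ * representor netdev per VF; rx_cfa_code identifies representor
+ * traffic on receive and tx_cfa_action steers transmits from the
+ * representor to the VF.
+ */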
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
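+/*
+ * EEM (Extended Exact Match) places flow tables in host memory
+ * rather than in on-chip TCAM. The key0/key1/record/efc/fid context
+ * IDs in the cfg/qcfg messages refer to host memory contexts
+ * registered with firmware separately.
+ */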
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
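+/*
+ * ctx_hw_stats and ctx_hw_stats_ext describe the DMA buffer layout
+ * that firmware writes for a statistics context (see
+ * hwrm_stat_ctx_alloc below): counters land at stats_dma_addr every
+ * update_period_ms. The _ext layout, used on newer chips, replaces
+ * the tpa_* counters with the finer-grained rx_tpa_* set.
+ */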
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
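+/*
+ * Layouts DMA'd to pcie_stat_host_addr by hwrm_pcie_qstats above.
+ * pcie_stat_size in the request and response lets host and firmware
+ * agree on how much of the structure (v1 at 96B vs. v2 at 568B) is
+ * actually filled in.
+ */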
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
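+/*
+ * hwrm_fw_reset selects which embedded processor to reset.
+ * selfrst_status in the response reports how the host-visible self
+ * reset will happen: not at all, as soon as possible, on the next
+ * PCIe reset, or immediately.
+ */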
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
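+/*
+ * Structured data (LLDP/DCBX configuration and similar) is
+ * exchanged as a chain of blocks, each preceded by hwrm_struct_hdr.
+ * next_offset points to the following header; a value of
+ * STRUCT_HDR_NEXT_OFFSET_LAST (0) terminates the chain.
+ */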
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ __le32 cag_vector_ctrl;
+ __le16 st_tag;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE 0xcUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
+
+#endif /* _BNXT_HSI_H_ */
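
For orientation (this sketch is not part of the patch): the fw_status_reg and hcomm_status layouts above are plain mask/value encodings, so a consumer can validate the shared-memory block and test firmware readiness with two masked compares. The helper names below are invented for illustration:

static bool hcomm_signature_valid(const struct hcomm_status *hs)
{
	/* Signature occupies bits 31:8 of sig_ver, version bits 7:0. */
	return (hs->sig_ver & HCOMM_STATUS_SIGNATURE_MASK) ==
	       HCOMM_STATUS_SIGNATURE_VAL;
}

static bool fw_is_ready(u32 fw_status)
{
	/* READY is a code value in the low 16 bits, not a single flag bit. */
	return (fw_status & FW_STATUS_REG_CODE_MASK) ==
	       FW_STATUS_REG_CODE_READY;
}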
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 3f4b4ac527ca..25df9260d206 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -290,7 +290,7 @@ int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
void __init _xbc_exit(bool early);
-static inline void xbc_exit(void)
+static __always_inline void xbc_exit(void)
{
_xbc_exit(false);
}
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index cffa38a73618..4c506e76a808 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -6,11 +6,10 @@
#include <linux/kmemleak.h>
/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
+ * Types for free bootmem stored in the low bits of page->private.
*/
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+enum bootmem_type {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1,
SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
MIX_SECTION_INFO,
NODE_INFO,
@@ -19,11 +18,23 @@ enum {
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+ unsigned long nr_pages);
void get_page_bootmem(unsigned long info, struct page *page,
- unsigned long type);
+ enum bootmem_type type);
void put_page_bootmem(struct page *page);
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return (unsigned long)page->private & 0xf;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return (unsigned long)page->private >> 4;
+}
+
/*
* Any memory allocated via the memblock allocator and not via the
* buddy will be marked reserved already in the memmap. For those
@@ -31,7 +42,7 @@ void put_page_bootmem(struct page *page);
*/
static inline void free_bootmem_page(struct page *page)
{
- unsigned long magic = page->index;
+ enum bootmem_type type = bootmem_type(page);
/*
* The reserve_bootmem_region sets the reserved flag on bootmem
@@ -39,7 +50,7 @@ static inline void free_bootmem_page(struct page *page)
*/
VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
- if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ if (type == SECTION_INFO || type == MIX_SECTION_INFO)
put_page_bootmem(page);
else
VM_BUG_ON_PAGE(1, page);
@@ -49,12 +60,27 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
+static inline void register_page_bootmem_memmap(unsigned long section_nr,
+ struct page *map, unsigned long nr_pages)
+{
+}
+
static inline void put_page_bootmem(struct page *page)
{
}
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return SECTION_INFO;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return 0;
+}
+
static inline void get_page_bootmem(unsigned long info, struct page *page,
- unsigned long type)
+ enum bootmem_type type)
{
}
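
Editor's note on the encoding implied by the accessors above: the bootmem type now sits in the low four bits of page->private (bootmem_type() masks with 0xf) and the payload is shifted up by four (bootmem_info()). That is also why MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE drops from 12 to 1: the type only has to fit in the nibble, it no longer has to be a recognizable magic value in unsigned long space. A sketch of the matching setter side; pack_bootmem() is a hypothetical name (the real store is done by get_page_bootmem()):

static inline unsigned long pack_bootmem(unsigned long info,
					 enum bootmem_type type)
{
	/* Low 4 bits carry the type, the remaining bits the caller's payload. */
	return (info << 4) | (unsigned long)type;
}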
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index 0985221d5478..c9e6b26abab6 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -63,6 +63,7 @@ struct cgroup_bpf {
*/
struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u64 revisions[MAX_CGROUP_BPF_ATTACH_TYPE];
/* list of cgroup shared storages */
struct list_head storages;
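
Editor's aside on the new revisions[] member: one u64 per attach type, presumably bumped whenever that type's prog list is modified so that a later query can tell whether the list changed under it. A hedged sketch of that intent; the helper name is invented:

static void cgroup_bpf_bump_revision(struct cgroup_bpf *bpf,
				     enum cgroup_bpf_attach_type atype)
{
	/* Monotonic per-attach-type counter. */
	bpf->revisions[atype]++;
}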
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index fb3c3e7181e6..d1eb5c7729cb 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -77,9 +77,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
-#define for_each_cgroup_storage_type(stype) \
- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
-
struct bpf_cgroup_storage_map;
struct bpf_storage_buffer {
@@ -103,7 +100,6 @@ struct bpf_cgroup_storage {
struct bpf_cgroup_link {
struct bpf_link link;
struct cgroup *cgroup;
- enum bpf_attach_type type;
};
struct bpf_prog_list {
@@ -111,10 +107,10 @@ struct bpf_prog_list {
struct bpf_prog *prog;
struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u32 flags;
};
-int cgroup_bpf_inherit(struct cgroup *cgrp);
-void cgroup_bpf_offline(struct cgroup *cgrp);
+void __init cgroup_bpf_lifetime_notifier_init(void);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
@@ -124,7 +120,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
- struct sockaddr *uaddr,
+ struct sockaddr_unsized *uaddr,
int *uaddrlen,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
@@ -138,7 +134,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
- struct ctl_table *table, int write,
+ const struct ctl_table *table, int write,
char **buf, size_t *pcount, loff_t *ppos,
enum cgroup_bpf_attach_type atype);
@@ -209,7 +205,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \
+ if (__sk && __sk == skb_to_full_sk(skb) && \
cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
CGROUP_INET_EGRESS); \
@@ -242,8 +238,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
- atype, NULL, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, NULL); \
__ret; \
})
@@ -252,8 +249,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
- atype, t_ctx, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
@@ -270,8 +268,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
- atype, NULL, &__flags); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
@@ -390,14 +389,6 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
__ret; \
})
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
- copy_from_sockptr(&__ret, optlen, sizeof(int)); \
- __ret; \
-})
-
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
max_optlen, retval) \
({ \
@@ -434,12 +425,12 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
-const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
-static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_lifetime_notifier_init(void)
+{
+ return;
+}
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype,
@@ -472,12 +463,6 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
-static inline const struct bpf_func_proto *
-cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
-{
- return NULL;
-}
-
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -518,7 +503,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
@@ -526,8 +510,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
-#define for_each_cgroup_storage_type(stype) for (; false; )
-
#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
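
One idiom worth spelling out from the !CONFIG_CGROUP_BPF stubs above: each disabled hook is a GCC statement expression such as ({ 0; }), so it still yields an int (0 meaning no program ran, i.e. allow) and call sites need no #ifdef of their own. A generic sketch of the pattern, with invented names:

#ifdef CONFIG_CGROUP_BPF
#define RUN_CGROUP_HOOK(sk)	__run_cgroup_hook(sk)
#else
#define RUN_CGROUP_HOOK(sk)	({ 0; })	/* stub: evaluates to 0 */
#endif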
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5e694a308081..6498be4c44f8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -7,6 +7,7 @@
#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>
+#include <crypto/sha2.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
@@ -30,6 +31,7 @@
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
+#include <asm/rqspinlock.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -108,6 +110,7 @@ struct bpf_map_ops {
long (*map_pop_elem)(struct bpf_map *map, void *value);
long (*map_peek_elem)(struct bpf_map *map, void *value);
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+ int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
@@ -203,8 +206,25 @@ enum btf_field_type {
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
BPF_REFCOUNT = (1 << 9),
BPF_WORKQUEUE = (1 << 10),
+ BPF_UPTR = (1 << 11),
+ BPF_RES_SPIN_LOCK = (1 << 12),
+ BPF_TASK_WORK = (1 << 13),
};
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+};
+
+#ifdef CONFIG_CGROUP_BPF
+# define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+#else
+# define for_each_cgroup_storage_type(stype) for (; false; )
+#endif /* CONFIG_CGROUP_BPF */
+
typedef void (*btf_dtor_kfunc_t)(void *);
struct btf_field_kptr {
@@ -238,9 +258,11 @@ struct btf_record {
u32 cnt;
u32 field_mask;
int spin_lock_off;
+ int res_spin_lock_off;
int timer_off;
int wq_off;
int refcount_off;
+ int task_work_off;
struct btf_field fields[];
};
@@ -256,7 +278,22 @@ struct bpf_list_node_kern {
void *owner;
} __attribute__((aligned(8)));
+/* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program which FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ enum bpf_attach_type expected_attach_type;
+};
+
struct bpf_map {
+ u8 sha[SHA256_DIGEST_SIZE];
const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
@@ -275,7 +312,7 @@ struct bpf_map {
u32 btf_value_type_id;
u32 btf_vmlinux_value_type_id;
struct btf *btf;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
@@ -288,23 +325,16 @@ struct bpf_map {
struct rcu_head rcu;
};
atomic64_t writecnt;
- /* 'Ownership' of program-containing map is claimed by the first program
- * that is going to use this map or by the first program which FD is
- * stored in the map to make sure that all callers and callees have the
- * same prog type, JITed flag and xdp_has_frags flag.
- */
- struct {
- spinlock_t lock;
- enum bpf_prog_type type;
- bool jited;
- bool xdp_has_frags;
- } owner;
+ spinlock_t owner_lock;
+ struct bpf_map_owner *owner;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
bool free_after_mult_rcu_gp;
bool free_after_rcu_gp;
atomic64_t sleepable_refcnt;
s64 __percpu *elem_count;
+ u64 cookie; /* write-once */
+ char *excl_prog_sha;
};
static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -312,6 +342,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
switch (type) {
case BPF_SPIN_LOCK:
return "bpf_spin_lock";
+ case BPF_RES_SPIN_LOCK:
+ return "bpf_res_spin_lock";
case BPF_TIMER:
return "bpf_timer";
case BPF_WORKQUEUE:
@@ -321,6 +353,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "kptr";
case BPF_KPTR_PERCPU:
return "percpu_kptr";
+ case BPF_UPTR:
+ return "uptr";
case BPF_LIST_HEAD:
return "bpf_list_head";
case BPF_LIST_NODE:
@@ -331,17 +365,27 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
return "bpf_rb_node";
case BPF_REFCOUNT:
return "bpf_refcount";
+ case BPF_TASK_WORK:
+ return "bpf_task_work";
default:
WARN_ON_ONCE(1);
return "unknown";
}
}
+#if IS_ENABLED(CONFIG_DEBUG_KERNEL)
+#define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#else
+#define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif
+
static inline u32 btf_field_type_size(enum btf_field_type type)
{
switch (type) {
case BPF_SPIN_LOCK:
return sizeof(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return sizeof(struct bpf_res_spin_lock);
case BPF_TIMER:
return sizeof(struct bpf_timer);
case BPF_WORKQUEUE:
@@ -349,6 +393,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
return sizeof(u64);
case BPF_LIST_HEAD:
return sizeof(struct bpf_list_head);
@@ -360,6 +405,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_rb_node);
case BPF_REFCOUNT:
return sizeof(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return sizeof(struct bpf_task_work);
default:
WARN_ON_ONCE(1);
return 0;
@@ -371,6 +418,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
switch (type) {
case BPF_SPIN_LOCK:
return __alignof__(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return __alignof__(struct bpf_res_spin_lock);
case BPF_TIMER:
return __alignof__(struct bpf_timer);
case BPF_WORKQUEUE:
@@ -378,6 +427,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
return __alignof__(u64);
case BPF_LIST_HEAD:
return __alignof__(struct bpf_list_head);
@@ -389,6 +439,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_rb_node);
case BPF_REFCOUNT:
return __alignof__(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return __alignof__(struct bpf_task_work);
default:
WARN_ON_ONCE(1);
return 0;
@@ -413,11 +465,14 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_RB_ROOT:
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
case BPF_SPIN_LOCK:
+ case BPF_RES_SPIN_LOCK:
case BPF_TIMER:
case BPF_WORKQUEUE:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ case BPF_TASK_WORK:
break;
default:
WARN_ON_ONCE(1);
@@ -506,6 +561,25 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}
+static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
+{
+ unsigned long *src_uptr, *dst_uptr;
+ const struct btf_field *field;
+ int i;
+
+ if (!btf_record_has_field(rec, BPF_UPTR))
+ return;
+
+ for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
+ if (field->type != BPF_UPTR)
+ continue;
+
+ src_uptr = src + field->offset;
+ dst_uptr = dst + field->offset;
+ swap(*src_uptr, *dst_uptr);
+ }
+}
+
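A sketch of the intended call pattern (buffer names assumed): the plain value bytes are copied first, then the BPF_UPTR words are swapped rather than duplicated, so exactly one copy owns each pinned user pointer and the other side keeps the stale ones for cleanup:

	copy_map_value(map, selem_data, staged_value);
	/* move, don't duplicate, the pinned user pointers */
	bpf_obj_swap_uptrs(map->record, selem_data, staged_value);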
static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
u32 curr_off = 0;
@@ -535,6 +609,7 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
+void bpf_task_work_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
@@ -588,6 +663,16 @@ int map_check_no_btf(const struct bpf_map *map,
bool bpf_map_meta_equal(const struct bpf_map *meta0,
const struct bpf_map *meta1);
+static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
+{
+ return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK);
+}
+
+void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
+
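Taken together, a value-free path can use these as a guard (sketch; the value pointer is assumed): timers, workqueues and task_work embedded in the value must be cancelled before the memory is reused:

	if (bpf_map_has_internal_structs(map))
		bpf_map_free_internal_structs(map, value);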
+int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
+ struct bpf_dynptr *ptr__uninit);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
@@ -634,6 +719,7 @@ enum bpf_type_flag {
*/
PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+ /* MEM can be uninitialized. */
MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
/* DYNPTR points to memory local to the bpf program. */
@@ -694,12 +780,30 @@ enum bpf_type_flag {
/* DYNPTR points to xdp_buff */
DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
+	/* MEM is being written to, often combined with MEM_UNINIT. Absence
+	 * of MEM_WRITE means that MEM is only being read. MEM_WRITE without
+	 * MEM_UNINIT means the memory must already be initialized, since it
+	 * is also read.
+ */
+ MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to skb_metadata_end()-skb_metadata_len() */
+ DYNPTR_TYPE_SKB_META = BIT(19 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to file */
+ DYNPTR_TYPE_FILE = BIT(20 + BPF_BASE_TYPE_BITS),
+
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
- | DYNPTR_TYPE_XDP)
+ | DYNPTR_TYPE_XDP | DYNPTR_TYPE_SKB_META | DYNPTR_TYPE_FILE)
/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
@@ -731,8 +835,6 @@ enum bpf_arg_type {
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
- ARG_PTR_TO_INT, /* pointer to int */
- ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
@@ -743,7 +845,7 @@ enum bpf_arg_type {
ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
- ARG_PTR_TO_KPTR, /* pointer to referenced kptr */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
@@ -754,10 +856,10 @@ enum bpf_arg_type {
ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
- /* pointer to memory does not need to be initialized, helper function must fill
- * all bytes or clear them in error case.
+ /* Pointer to memory does not need to be initialized, since helper function
+ * fills all bytes or clears them in error case.
*/
- ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
/* Pointer to valid memory of size known at compile time. */
ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
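As an illustration, a hypothetical helper that fully overwrites a caller-supplied buffer could compose these flags in its prototype; ARG_CONST_SIZE pairs the buffer with its length:

static const struct bpf_func_proto bpf_fill_buf_proto = {
	.func		= bpf_fill_buf,	/* hypothetical helper */
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_UNINIT | MEM_WRITE,
	.arg2_type	= ARG_CONST_SIZE,
};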
@@ -807,6 +909,12 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
union {
struct {
@@ -889,14 +997,11 @@ enum bpf_reg_type {
* additional context, assume the value is non-null.
*/
PTR_TO_BTF_ID,
- /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
- * been checked for null. Used primarily to inform the verifier
- * an explicit null check is required for this struct.
- */
PTR_TO_MEM, /* reg points to valid memory region */
PTR_TO_ARENA,
PTR_TO_BUF, /* reg points to a read/write buffer */
PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_INSN, /* reg points to a bpf program instruction */
CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
__BPF_REG_TYPE_MAX,
@@ -905,6 +1010,10 @@ enum bpf_reg_type {
PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
+ */
PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is
@@ -919,14 +1028,17 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
+ bool is_ldsx;
union {
int ctx_field_size;
struct {
struct btf *btf;
u32 btf_id;
+ u32 ref_obj_id;
};
};
struct bpf_verifier_log *log; /* for verbose logs */
+	bool is_retval; /* is accessing function return value? */
};
static inline void
@@ -945,6 +1057,21 @@ static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
+/* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an
+ * atomic load or store, and false if it is a read-modify-write instruction.
+ */
+static inline bool
+bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn)
+{
+ switch (atomic_insn->imm) {
+ case BPF_LOAD_ACQ:
+ case BPF_STORE_REL:
+ return true;
+ default:
+ return false;
+ }
+}
+
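A JIT back end would typically branch on this predicate (sketch; the emitter names are assumptions):

	if (bpf_atomic_is_load_store(insn))
		emit_ordered_load_store(ctx, insn);	/* BPF_LOAD_ACQ / BPF_STORE_REL */
	else
		emit_atomic_rmw(ctx, insn);		/* add/and/or/xor/xchg/cmpxchg */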
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
@@ -965,6 +1092,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -1003,14 +1132,6 @@ struct bpf_prog_offload {
u32 jited_len;
};
-enum bpf_cgroup_storage_type {
- BPF_CGROUP_STORAGE_SHARED,
- BPF_CGROUP_STORAGE_PERCPU,
- __BPF_CGROUP_STORAGE_MAX
-};
-
-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
-
/* The longest tracepoint has 12 args.
* See include/trace/bpf_probe.h
*/
@@ -1021,7 +1142,7 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_REG_ARGS 5
-/* The argument is a structure. */
+/* The argument is a structure or a union. */
#define BTF_FMODEL_STRUCT_ARG BIT(0)
/* The argument is signed. */
@@ -1143,6 +1264,18 @@ typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return flags & BPF_TRAMP_F_CALL_ORIG && !(flags & BPF_TRAMP_F_SKIP_FRAME);
+}
+#else
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return false;
+}
+#endif
+
struct bpf_ksym {
unsigned long start;
unsigned long end;
@@ -1269,17 +1402,39 @@ enum bpf_dynptr_type {
BPF_DYNPTR_TYPE_SKB,
/* Underlying data is a xdp_buff */
BPF_DYNPTR_TYPE_XDP,
+ /* Points to skb_metadata_end()-skb_metadata_len() */
+ BPF_DYNPTR_TYPE_SKB_META,
+ /* Underlying data is a file */
+ BPF_DYNPTR_TYPE_FILE,
};
-int bpf_dynptr_check_size(u32 size);
-u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
-const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
-void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
+int bpf_dynptr_check_size(u64 size);
+u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
+int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
+ void *src, u64 len, u64 flags);
+void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
+ void *buffer__opt, u64 buffer__szk);
+
+static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
+{
+ u64 size = __bpf_dynptr_size(ptr);
+
+ if (len > size || offset > size - len)
+ return -E2BIG;
+
+ return 0;
+}
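The two comparisons are ordered to avoid u64 wraparound: with size = 16, offset = ~0ULL and len = 1, a naive "offset + len > size" wraps to 0 and passes, whereas "offset > size - len" correctly rejects it. Typical use is a simple guard:

	if (bpf_dynptr_check_off_len(ptr, offset, len))
		return -E2BIG;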
#ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
@@ -1351,7 +1506,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
@@ -1360,12 +1516,14 @@ void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
- struct bpf_trampoline *tr)
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
- struct bpf_trampoline *tr)
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
@@ -1427,6 +1585,8 @@ struct bpf_ctx_arg_aux {
enum bpf_reg_type reg_type;
struct btf *btf;
u32 btf_id;
+ u32 ref_obj_id;
+ bool refcounted;
};
struct btf_mod_pair {
@@ -1436,6 +1596,37 @@ struct btf_mod_pair {
struct bpf_kfunc_desc_tab;
+enum bpf_stream_id {
+ BPF_STDOUT = 1,
+ BPF_STDERR = 2,
+};
+
+struct bpf_stream_elem {
+ struct llist_node node;
+ int total_len;
+ int consumed_len;
+ char str[];
+};
+
+enum {
+ /* 100k bytes */
+ BPF_STREAM_MAX_CAPACITY = 100000ULL,
+};
+
+struct bpf_stream {
+ atomic_t capacity;
+ struct llist_head log; /* list of in-flight stream elements in LIFO order */
+
+ struct mutex lock; /* lock protecting backlog_{head,tail} */
+ struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */
+ struct llist_node *backlog_tail; /* tail of the list above */
+};
+
+struct bpf_stream_stage {
+ struct llist_head log;
+ int len;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
@@ -1449,11 +1640,14 @@ struct bpf_prog_aux {
u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ u32 attach_st_ops_member_off;
u32 ctx_arg_info_size;
u32 max_rdonly_access;
u32 max_rdwr_access;
+ u32 subprog_start;
struct btf *attach_btf;
- const struct bpf_ctx_arg_aux *ctx_arg_info;
+ struct bpf_ctx_arg_aux *ctx_arg_info;
+ void __percpu *priv_stack_ptr;
struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
struct bpf_prog *dst_prog;
struct bpf_trampoline *dst_trampoline;
@@ -1469,12 +1663,22 @@ struct bpf_prog_aux {
bool xdp_has_frags;
bool exception_cb;
bool exception_boundary;
+ bool is_extended; /* true if extended by freplace program */
+ bool jits_use_priv_stack;
+ bool priv_stack_requested;
+ bool changes_pkt_data;
+ bool might_sleep;
+ bool kprobe_write_ctx;
+ u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
+ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
struct bpf_arena *arena;
+ void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
const char *attach_func_name;
struct bpf_prog **func;
+ struct bpf_prog_aux *main_prog_aux;
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab;
@@ -1485,6 +1689,7 @@ struct bpf_prog_aux {
#endif
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
+ const struct bpf_struct_ops *st_ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
struct btf_mod_pair *used_btfs;
@@ -1533,6 +1738,7 @@ struct bpf_prog_aux {
struct work_struct work;
struct rcu_head rcu;
};
+ struct bpf_stream stream[2];
};
struct bpf_prog {
@@ -1556,7 +1762,10 @@ struct bpf_prog {
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
+ union {
+ u8 digest[SHA256_DIGEST_SIZE];
+ u8 tag[BPF_TAG_SIZE];
+ };
struct bpf_prog_stats __percpu *stats;
int __percpu *active;
unsigned int (*bpf_func)(const void *ctx,
@@ -1584,6 +1793,10 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
+
+ u32 flags;
+ enum bpf_attach_type attach_type;
+
/* rcu is used before freeing, work can be used to schedule that
* RCU-based freeing before that, so they never overlap
*/
@@ -1591,6 +1804,11 @@ struct bpf_link {
struct rcu_head rcu;
struct work_struct work;
};
+	/* whether the BPF link itself has "sleepable" semantics, which can
+	 * differ from the underlying BPF program having "sleepable" semantics,
+	 * as the link's semantics are determined by the target attach hook
+ */
+ bool sleepable;
};
struct bpf_link_ops {
@@ -1600,8 +1818,10 @@ struct bpf_link_ops {
*/
void (*dealloc)(struct bpf_link *link);
/* deallocate link resources callback, called after RCU grace period;
- * if underlying BPF program is sleepable we go through tasks trace
- * RCU GP and then "classic" RCU GP
+ * if either the underlying BPF program is sleepable or BPF link's
+ * target hook is sleepable, we'll go through tasks trace RCU GP and
+ * then "classic" RCU GP; this need for chaining tasks trace and
+ * classic RCU GPs is designated by setting bpf_link->sleepable flag
*/
void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
@@ -1612,6 +1832,7 @@ struct bpf_link_ops {
struct bpf_link_info *info);
int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
};
struct bpf_tramp_link {
@@ -1627,7 +1848,6 @@ struct bpf_shim_tramp_link {
struct bpf_tracing_link {
struct bpf_tramp_link link;
- enum bpf_attach_type attach_type;
struct bpf_trampoline *trampoline;
struct bpf_prog *tgt_prog;
};
@@ -1714,12 +1934,14 @@ struct btf_member;
* reason, if this callback is not defined, the check is skipped as
* the struct_ops map will have final verification performed in
* @reg.
- * @type: BTF type.
- * @value_type: Value type.
+ * @cfi_stubs: Pointer to a structure of stub functions for CFI. These stubs
+ * provide the correct Control Flow Integrity hashes for the
+ * trampolines generated by BPF struct_ops.
+ * @owner: The module that owns this struct_ops. Used for module reference
+ * counting to ensure the module providing the struct_ops cannot be
+ * unloaded while in use.
* @name: The name of the struct bpf_struct_ops object.
* @func_models: Func models
- * @type_id: BTF type id.
- * @value_id: BTF value id.
*/
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
@@ -1730,9 +1952,9 @@ struct bpf_struct_ops {
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
- int (*reg)(void *kdata);
- void (*unreg)(void *kdata);
- int (*update)(void *kdata, void *old_kdata);
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
int (*validate)(void *kdata);
void *cfi_stubs;
struct module *owner;
@@ -1794,6 +2016,7 @@ struct bpf_struct_ops_common_value {
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
@@ -1818,6 +2041,7 @@ static inline void bpf_module_put(const void *data, struct module *owner)
module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
+u32 bpf_struct_ops_id(const void *kdata);
#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
@@ -1850,6 +2074,10 @@ static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
@@ -1870,13 +2098,18 @@ static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_op
#endif
+int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
+ const struct bpf_ctx_arg_aux *info, u32 cnt);
+
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
- int cgroup_atype);
+ int cgroup_atype,
+ enum bpf_attach_type attach_type);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
- int cgroup_atype)
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
{
return -EOPNOTSUPP;
}
@@ -1897,6 +2130,12 @@ struct bpf_array {
};
};
+/*
+ * The bpf_array_get_next_key() function may be used for all array-like
+ * maps, i.e., maps with u32 keys in the range [0, ..., max_entries)
+ */
+int bpf_array_get_next_key(struct bpf_map *map, void *key, void *next_key);
+
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33
@@ -1905,6 +2144,7 @@ struct bpf_array {
*/
enum {
BPF_MAX_LOOPS = 8 * 1024 * 1024,
+ BPF_MAX_TIMED_LOOPS = 0xffff,
};
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
@@ -1941,6 +2181,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+ kfree(map->owner);
+}
+
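GFP_ATOMIC is needed because the owner record is expected to be created lazily under the new owner_lock spinlock; a sketch of the assumed first-attach path:

	spin_lock(&map->owner_lock);
	if (!map->owner)
		map->owner = bpf_map_owner_alloc(map);
	spin_unlock(&map->owner_lock);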
struct bpf_event_entry {
struct perf_event *event;
struct file *perf_file;
@@ -1961,6 +2211,8 @@ int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
+const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void);
+
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
@@ -2119,26 +2371,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
* rcu-protected dynamically sized maps.
*/
static __always_inline u32
-bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
might_fault();
+ RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
- rcu_read_lock_trace();
migrate_disable();
run_ctx.is_uprobe = true;
- array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
- if (unlikely(!array))
- goto out;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
@@ -2153,12 +2404,16 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
rcu_read_unlock();
}
bpf_reset_run_ctx(old_run_ctx);
-out:
migrate_enable();
- rcu_read_unlock_trace();
return ret;
}
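With this change the caller owns both the tasks-trace RCU section and the dereference; a caller-side sketch (field names assumed):

	rcu_read_lock_trace();
	ret = bpf_prog_run_array_uprobe(
			rcu_dereference_check(call->prog_array,
					      rcu_read_lock_trace_held()),
			ctx, bpf_prog_run);
	rcu_read_unlock_trace();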
+bool bpf_jit_bypass_spec_v1(void);
+bool bpf_jit_bypass_spec_v4(void);
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held())
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@@ -2185,6 +2440,7 @@ extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
+extern const struct file_operations bpf_token_fops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
@@ -2221,12 +2477,39 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
+void bpf_obj_free_task_work(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase refcnt. To also increase refcnt use corresponding
+ * bpf_map_get() and btf_get_by_fd() functions.
+ */
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &btf_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
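A caller-side sketch, assuming the scoped fd helpers from <linux/cleanup.h>: the returned pointer borrows the file's reference, so a refcount must be taken before it escapes the fd scope:

	CLASS(fd, f)(ufd);
	struct bpf_map *map = __bpf_map_get(f);

	if (IS_ERR(map))
		return PTR_ERR(map);
	bpf_map_inc(map);	/* keep the map alive past this scope */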
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
@@ -2250,19 +2533,28 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
-int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
+
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
unsigned long nr_pages, struct page **page_array);
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
+void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);
#else
+/*
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
+ */
#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
kmalloc_node(_size, _flags, _node)
+#define bpf_map_kmalloc_nolock(_map, _size, _flags, _node) \
+ kmalloc_nolock(_size, _flags, _node)
#define bpf_map_kzalloc(_map, _size, _flags) \
kzalloc(_size, _flags)
#define bpf_map_kvcalloc(_map, _n, _size, _flags) \
@@ -2316,23 +2608,32 @@ static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+ return bpf_jit_bypass_spec_v1() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+ return bpf_jit_bypass_spec_v4() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
- const struct bpf_link_ops *ops, struct bpf_prog *prog);
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type);
+void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
@@ -2342,6 +2643,9 @@ void bpf_token_inc(struct bpf_token *token);
void bpf_token_put(struct bpf_token *token);
int bpf_token_create(union bpf_attr *attr);
struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
@@ -2440,7 +2744,7 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
-bool bpf_iter_prog_supported(struct bpf_prog *prog);
+int bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
@@ -2464,7 +2768,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
@@ -2492,7 +2796,7 @@ struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;
-void __dev_flush(void);
+void __dev_flush(struct list_head *flush_list);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
@@ -2500,12 +2804,12 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
- struct bpf_prog *xdp_prog);
+ const struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress);
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress);
-void __cpu_map_flush(void);
+void __cpu_map_flush(struct list_head *flush_list);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
@@ -2641,9 +2945,8 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
-bool dev_check_flush(void);
-bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2681,7 +2984,13 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, enum bpf_attach_type attach_type)
+{
+}
+
+static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable)
{
}
@@ -2704,6 +3013,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
{
}
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
static inline void bpf_link_put(struct bpf_link *link)
{
}
@@ -2731,7 +3045,14 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void __dev_flush(void)
+static inline int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __dev_flush(struct list_head *flush_list)
{
}
@@ -2764,20 +3085,20 @@ struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
return 0;
}
static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress)
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
{
return 0;
}
-static inline void __cpu_map_flush(void)
+static inline void __cpu_map_flush(struct list_head *flush_list)
{
}
@@ -2912,6 +3233,11 @@ static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
}
+
+static inline void bpf_prog_report_arena_violation(bool write, unsigned long addr,
+ unsigned long fault_ip)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
static __always_inline int
@@ -2926,8 +3252,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
return ret;
}
-void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
- struct btf_mod_pair *used_btfs, u32 len);
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
@@ -3155,6 +3480,38 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+#if defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL)
+
+struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags);
+struct bpf_key *bpf_lookup_system_key(u64 id);
+void bpf_key_put(struct bpf_key *bkey);
+int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring);
+
+#else
+static inline struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags)
+{
+ return NULL;
+}
+
+static inline struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+ return NULL;
+}
+
+static inline void bpf_key_put(struct bpf_key *bkey)
+{
+}
+
+static inline int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -3176,7 +3533,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
@@ -3184,6 +3543,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -3258,8 +3618,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -3281,8 +3641,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
-static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
- struct bpf_dynptr_kern *ptr)
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr)
{
return -EOPNOTSUPP;
}
@@ -3350,12 +3710,14 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
#endif /* CONFIG_INET */
enum bpf_text_poke_type {
+ BPF_MOD_NOP,
BPF_MOD_CALL,
BPF_MOD_JUMP,
};
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
- void *addr1, void *addr2);
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t, void *old_addr,
+ void *new_addr);
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
struct bpf_prog *new, struct bpf_prog *old);
@@ -3369,6 +3731,16 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#define MAX_BPRINTF_VARARGS 12
#define MAX_BPRINTF_BUF 1024
+/* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
+ * arguments representation.
+ */
+#define MAX_BPRINTF_BIN_ARGS 512
+
+struct bpf_bprintf_buffers {
+ char bin_args[MAX_BPRINTF_BIN_ARGS];
+ char buf[MAX_BPRINTF_BUF];
+};
+
struct bpf_bprintf_data {
u32 *bin_args;
char *buf;
@@ -3376,9 +3748,33 @@ struct bpf_bprintf_data {
bool get_buf;
};
-int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
+int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
+void bpf_put_buffers(void);
+
+void bpf_prog_stream_init(struct bpf_prog *prog);
+void bpf_prog_stream_free(struct bpf_prog *prog);
+int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
+void bpf_stream_stage_init(struct bpf_stream_stage *ss);
+void bpf_stream_stage_free(struct bpf_stream_stage *ss);
+__printf(2, 3)
+int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...);
+int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
+ enum bpf_stream_id stream_id);
+int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
+
+#define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__)
+#define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss)
+
+#define bpf_stream_stage(ss, prog, stream_id, expr) \
+ ({ \
+ bpf_stream_stage_init(&ss); \
+ (expr); \
+ bpf_stream_stage_commit(&ss, prog, stream_id); \
+ bpf_stream_stage_free(&ss); \
+ })
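Putting the pieces together, a staged multi-line report could be emitted into a program's stderr stream like this (sketch):

	struct bpf_stream_stage ss;

	bpf_stream_stage(ss, prog, BPF_STDERR, ({
		bpf_stream_printk(ss, "deadlock detected\n");
		bpf_stream_dump_stack(ss);
	}));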
#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
@@ -3414,4 +3810,34 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog)
return prog->aux->func_idx != 0;
}
+int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
+ const char **linep, int *nump);
+struct bpf_prog *bpf_prog_find_from_stack(void);
+
+int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog);
+int bpf_insn_array_ready(struct bpf_map *map);
+void bpf_insn_array_release(struct bpf_map *map);
+void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
+void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
+
+#ifdef CONFIG_BPF_SYSCALL
+void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+#else
+static inline void
+bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
+{
+}
+#endif
+
+static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
+{
+ if (flags & ~allowed_flags)
+ return -EINVAL;
+
+ if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
+
+ return 0;
+}
+
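For instance, a lookup path that honours only BPF_F_LOCK can guard itself as follows (sketch):

	int err = bpf_map_check_op_flags(map, attr->flags, BPF_F_LOCK);

	if (err)
		return err;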
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index dcddb0aef7d8..66432248cd81 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -18,9 +18,6 @@
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
-#define bpf_rcu_lock_held() \
- (rcu_read_lock_held() || rcu_read_lock_trace_held() || \
- rcu_read_lock_bh_held())
struct bpf_local_storage_map_bucket {
struct hlist_head list;
raw_spinlock_t lock;
@@ -56,9 +53,7 @@ struct bpf_local_storage_map {
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
- struct bpf_mem_alloc selem_ma;
- struct bpf_mem_alloc storage_ma;
- bool bpf_ma;
+ bool use_kmalloc_nolock;
};
struct bpf_local_storage_data {
@@ -77,7 +72,13 @@ struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
struct hlist_node snode; /* Linked to bpf_local_storage */
struct bpf_local_storage __rcu *local_storage;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct hlist_node free_node; /* used to postpone
+ * bpf_selem_free
+ * after raw_spin_unlock
+ */
+ };
/* 8 bytes hole */
/* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
@@ -94,6 +95,7 @@ struct bpf_local_storage {
*/
struct rcu_head rcu;
raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ bool use_kmalloc_nolock;
};
/* U16_MAX is much more than enough for sk local storage
@@ -127,7 +129,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
struct bpf_local_storage_cache *cache,
- bool bpf_ma);
+ bool use_kmalloc_nolock);
void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
@@ -181,10 +183,9 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
- bool charge_mem, gfp_t gfp_flags);
+ bool swap_uptrs, gfp_t gfp_flags);
void bpf_selem_free(struct bpf_local_storage_elem *selem,
- struct bpf_local_storage_map *smap,
bool reuse_now);
int
@@ -195,7 +196,7 @@ bpf_local_storage_alloc(void *owner,
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
- void *value, u64 map_flags, gfp_t gfp_flags);
+ void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 1de7ece5d36d..643809cc78c3 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
@@ -45,6 +46,13 @@ void bpf_inode_storage_free(struct inode *inode);
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags);
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str);
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog);
+
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -78,6 +86,24 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
{
}
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index aaf004d94322..e45162ef59bb 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -33,6 +33,9 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+/* Check the allocation size for kmalloc equivalent allocator */
+int bpf_mem_alloc_check_size(bool percpu, size_t size);
+
/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 9f2a6b83b49e..b13de31e163f 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -133,6 +133,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_INSN_ARRAY, insn_array_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
@@ -146,6 +147,7 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
+BPF_LINK_TYPE(BPF_LINK_TYPE_SOCKMAP, sockmap)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 50aa87f8d77f..130bcbd66f60 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -23,44 +23,8 @@
* (in the "-8,-16,...,-512" form)
*/
#define TMP_STR_BUF_LEN 320
-
-/* Liveness marks, used for registers and spilled-regs (in stack slots).
- * Read marks propagate upwards until they find a write mark; they record that
- * "one of this state's descendants read this reg" (and therefore the reg is
- * relevant for states_equal() checks).
- * Write marks collect downwards and do not propagate; they record that "the
- * straight-line code that reached this state (from its parent) wrote this reg"
- * (and therefore that reads propagated from this state or its descendants
- * should not propagate to its parent).
- * A state with a write mark can receive read marks; it just won't propagate
- * them to its parent, since the write mark is a property, not of the state,
- * but of the link between it and its parent. See mark_reg_read() and
- * mark_stack_slot_read() in kernel/bpf/verifier.c.
- */
-enum bpf_reg_liveness {
- REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
- REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
- REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
- REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
- REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
- REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
-};
-
-/* For every reg representing a map value or allocated object pointer,
- * we consider the tuple of (ptr, id) for them to be unique in verifier
- * context and consider them to not alias each other for the purposes of
- * tracking lock state.
- */
-struct bpf_active_lock {
- /* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
- * there's no active lock held, and other fields have no
- * meaning. If non-NULL, it indicates that a lock is held and
- * id member has the reg->id of the register which can be >= 0.
- */
- void *ptr;
- /* This will be reg->id */
- u32 id;
-};
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
#define ITER_PREFIX "bpf_iter_"
@@ -73,7 +37,10 @@ enum bpf_iter_state {
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
- /* Fixed part of pointer offset, pointer types only */
+ /*
+ * Fixed part of pointer offset, pointer types only.
+ * Or constant delta between "linked" scalars with the same ID.
+ */
s32 off;
union {
/* valid when type == PTR_TO_PACKET */
@@ -126,6 +93,14 @@ struct bpf_reg_state {
int depth:30;
} iter;
+ /* For irq stack slots */
+ struct {
+ enum {
+ IRQ_NATIVE_KFUNC,
+ IRQ_LOCK_KFUNC,
+ } kfunc_class;
+ } irq;
+
/* Max size from any of the above. */
struct {
unsigned long raw1;
@@ -167,6 +142,13 @@ struct bpf_reg_state {
* Similarly to dynptrs, we use ID to track "belonging" of a reference
* to a specific instance of bpf_iter.
*/
+ /*
+ * Upper bit of ID is used to remember relationship between "linked"
+ * registers. Example:
+ * r1 = r2; both will have r1->id == r2->id == N
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ */
+#define BPF_ADD_CONST (1U << 31)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -208,8 +190,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* parentage chain for liveness checking */
- struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
@@ -222,7 +202,6 @@ struct bpf_reg_state {
* patching which only happens after main verification finished.
*/
s32 subreg_def;
- enum bpf_reg_liveness live;
/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
bool precise;
};
@@ -237,6 +216,7 @@ enum bpf_stack_slot_type {
*/
STACK_DYNPTR,
STACK_ITER,
+ STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -254,6 +234,17 @@ struct bpf_stack_state {
};
struct bpf_reference_state {
+	/* Each reference object has a type. The REF_TYPE_* values are
+	 * distinct bit flags, so related types can be tested together
+	 * with masks such as REF_TYPE_LOCK_MASK below.
+	 */
+ enum ref_state_type {
+ REF_TYPE_PTR = (1 << 1),
+ REF_TYPE_IRQ = (1 << 2),
+ REF_TYPE_LOCK = (1 << 3),
+ REF_TYPE_RES_LOCK = (1 << 4),
+ REF_TYPE_RES_LOCK_IRQ = (1 << 5),
+ REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
+ } type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
*/
@@ -262,17 +253,10 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
- /* There can be a case like:
- * main (frame 0)
- * cb (frame 1)
- * func (frame 3)
- * cb (frame 4)
- * Hence for frame 4, if callback_ref just stored boolean, it would be
- * impossible to distinguish nested callback refs. Hence store the
- * frameno and compare that to callback_ref in check_reference_leak when
- * exiting a callback function.
- */
- int callback_ref;
+ /* Use to keep track of the source object of a lock, to ensure
+ * it matches on unlock.
+ */
+ void *ptr;
};
struct bpf_retval_range {
@@ -319,8 +303,6 @@ struct bpf_func_state {
u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
- int acquired_refs;
- struct bpf_reference_state *refs;
/* The state of the stack. Each element of the array describes BPF_REG_SIZE
* (i.e. 8) bytes worth of stack memory.
* stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -349,7 +331,11 @@ enum {
INSN_F_SPI_MASK = 0x3f, /* 6 bits */
INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
- INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+ INSN_F_STACK_ACCESS = BIT(9),
+
+ INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+ /* total 12 bits are used now. */
};
static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
@@ -358,9 +344,13 @@ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
struct bpf_jmp_history_entry {
u32 idx;
/* insn idx can't be bigger than 1 million */
- u32 prev_idx : 22;
- /* special flags, e.g., whether insn is doing register stack spill/load */
- u32 flags : 10;
+ u32 prev_idx : 20;
+ /* special INSN_F_xxx flags */
+ u32 flags : 12;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
};
/* Maximum number of register states that can exist at once */
@@ -369,6 +359,8 @@ struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
+ /* Acquired reference states */
+ struct bpf_reference_state *refs;
/*
* 'branches' field is the number of branches left to explore:
* 0 - all possible paths from this state reached bpf_exit or
@@ -418,30 +410,25 @@ struct bpf_verifier_state {
u32 insn_idx;
u32 curframe;
- struct bpf_active_lock active_lock;
+ u32 acquired_refs;
+ u32 active_locks;
+ u32 active_preempt_locks;
+ u32 active_irq_id;
+ u32 active_lock_id;
+ void *active_lock_ptr;
+ u32 active_rcu_locks;
+
bool speculative;
- bool active_rcu_lock;
- u32 active_preempt_lock;
- /* If this state was ever pointed-to by other state's loop_entry field
- * this flag would be set to true. Used to avoid freeing such states
- * while they are still in use.
- */
- bool used_as_loop_entry;
bool in_sleepable;
+ bool cleaned;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
- /* If this state is a part of states loop this field points to some
- * parent of this state such that:
- * - it is also a member of the same states loop;
- * - DFS states traversal starting from initial state visits loop_entry
- * state before this state.
- * Used to compute topmost loop entry for state loops.
- * State loops might appear because of open coded iterators logic.
- * See get_loop_entry() for more information.
- */
- struct bpf_verifier_state *loop_entry;
+ /* if this state is a backedge state then equal_state
+ * records cached state to which this state is equal.
+ */
+ struct bpf_verifier_state *equal_state;
/* jmp history recorded from first to last.
* backtracking is using it to go from last to first.
* For most states jmp_history_cnt is [0-3].
@@ -492,8 +479,10 @@ struct bpf_verifier_state {
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
- struct bpf_verifier_state_list *next;
- int miss_cnt, hit_cnt;
+ struct list_head node;
+ u32 miss_cnt;
+ u32 hit_cnt:31;
+ u32 in_free_list:1;
};
struct bpf_loop_inline_state {
@@ -520,6 +509,15 @@ struct bpf_map_ptr_state {
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
+/*
+ * An array of BPF instructions.
+ * Primary usage: return value of bpf_insn_successors.
+ */
+struct bpf_iarray {
+ int cnt;
+ u32 items[];
+};
+
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
@@ -551,17 +549,28 @@ struct bpf_insn_aux_data {
/* remember the offset of node field within type to rewrite */
u64 insert_off;
};
+ struct bpf_iarray *jt; /* jump table for gotox or bpf_tailcall call instruction */
struct btf_struct_meta *kptr_struct_meta;
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
- bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+ bool nospec; /* do not execute this instruction speculatively */
+ bool nospec_result; /* result is unsafe under speculation, nospec must follow */
bool zext_dst; /* this insn zero extends dst reg */
bool needs_zext; /* alu op needs to clear upper bits */
- bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
+ bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
+ u8 arg_prog:4;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
@@ -575,6 +584,13 @@ struct bpf_insn_aux_data {
* accepts callback function as a parameter.
*/
bool calls_callback;
+ /*
+ * CFG strongly connected component this instruction belongs to,
+ * zero if it is a singleton SCC.
+ */
+ u32 scc;
+ /* registers alive before this instruction. */
+ u16 live_regs_before;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -625,12 +641,24 @@ struct bpf_subprog_arg_info {
};
};
+enum priv_stack_mode {
+ PRIV_STACK_UNKNOWN,
+ NO_PRIV_STACK,
+ PRIV_STACK_ADAPTIVE,
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
+ u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
u16 stack_depth; /* max. stack depth used by this function */
u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
bool has_tail_call: 1;
bool tail_call_reachable: 1;
bool has_ld_abs: 1;
@@ -638,8 +666,13 @@ struct bpf_subprog_info {
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
+ bool changes_pkt_data: 1;
+ bool might_sleep: 1;
+ u8 arg_cnt:3;
- u8 arg_cnt;
+ enum priv_stack_mode priv_stack_mode;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
@@ -667,6 +700,40 @@ struct bpf_idset {
u32 ids[BPF_ID_MAP_SIZE];
};
+/* see verifier.c:compute_scc_callchain() */
+struct bpf_scc_callchain {
+ /* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
+ u32 callsites[MAX_CALL_FRAMES - 1];
+ /* last frame in a chain is identified by SCC id */
+ u32 scc;
+};
+
+/* verifier state waiting for propagate_backedges() */
+struct bpf_scc_backedge {
+ struct bpf_scc_backedge *next;
+ struct bpf_verifier_state state;
+};
+
+struct bpf_scc_visit {
+ struct bpf_scc_callchain callchain;
+ /* first state in current verification path that entered SCC
+ * identified by the callchain
+ */
+ struct bpf_verifier_state *entry_state;
+ struct bpf_scc_backedge *backedges; /* list of backedges */
+ u32 num_backedges;
+};
+
+/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
+ * but having different bpf_scc_callchain->callsites.
+ */
+struct bpf_scc_info {
+ u32 num_visits;
+ struct bpf_scc_visit visits[];
+};
+
+struct bpf_liveness;
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -682,12 +749,17 @@ struct bpf_verifier_env {
bool test_state_freq; /* test verifier with different pruning frequency */
bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
- struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_verifier_state_list *free_list;
+ /* Search pruning optimization, array of list_heads for
+ * lists of struct bpf_verifier_state_list.
+ */
+ struct list_head *explored_states;
+ struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
+ struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY map's to be relocated */
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
+ u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
u32 id_gen; /* used to generate unique reg IDs */
u32 hidden_subprog_cnt; /* number of hidden subprogs */
int exception_callback_subprog;
@@ -714,7 +786,14 @@ struct bpf_verifier_env {
struct {
int *insn_state;
int *insn_stack;
+ /*
+ * vector of instruction indexes sorted in post-order, grouped by subprogram,
+ * see bpf_subprog_info->postorder_start.
+ */
+ int *insn_postorder;
int cur_stack;
+ /* current position in the insn_postorder vector */
+ int cur_postorder;
} cfg;
struct backtrack_state bt;
struct bpf_jmp_history_entry *cur_hist_ent;
@@ -737,6 +816,9 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ u32 free_list_size;
+ u32 explored_states_size;
+ u32 num_backedges;
bpfptr_t fd_array;
/* bit mask to keep track of whether a register has been accessed
@@ -746,10 +828,21 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used to temporary hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
/* buffer used to generate temporary string representations,
* e.g., in reg_type_str() to generate reg_type string
*/
char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
+ struct bpf_scc_callchain callchain_buf;
+ struct bpf_liveness *liveness;
+ /* array of pointers to bpf_scc_info indexed by SCC id */
+ struct bpf_scc_info **scc_info;
+ u32 scc_cnt;
+ struct bpf_iarray *succ;
+ struct bpf_iarray *gotox_tmp_buf;
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
@@ -777,6 +870,19 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
u32 insn_off,
const char *prefix_fmt, ...);
+#define verifier_bug_if(cond, env, fmt, args...) \
+ ({ \
+ bool __cond = (cond); \
+ if (unlikely(__cond)) \
+ verifier_bug(env, fmt " (" #cond ")", ##args); \
+ (__cond); \
+ })
+#define verifier_bug(env, fmt, args...) \
+ ({ \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
+ })
+
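A minimal usage sketch of the two macros above; the function name, the check, and the -EFAULT policy are illustrative, not part of this patch:

	/* Hypothetical call site: report an internal verifier invariant
	 * violation to both the kernel log and the verifier log.
	 * verifier_bug_if() evaluates to the tested condition, so it can
	 * directly gate an early return.
	 */
	static int check_frame(struct bpf_verifier_env *env, u32 frameno)
	{
		if (verifier_bug_if(frameno >= MAX_CALL_FRAMES, env,
				    "frameno %u out of range", frameno))
			return -EFAULT;
		return 0;
	}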
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@@ -844,8 +950,8 @@ static inline u32 type_flag(u32 type)
/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
- return prog->type == BPF_PROG_TYPE_EXT ?
- prog->aux->dst_prog->type : prog->type;
+ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
+ prog->aux->saved_dst_prog_type : prog->type;
}
static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
@@ -854,7 +960,9 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
case BPF_PROG_TYPE_TRACING:
return prog->expected_attach_type != BPF_TRACE_ITER;
case BPF_PROG_TYPE_STRUCT_OPS:
+ return prog->aux->jits_use_priv_stack;
case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_SYSCALL:
return false;
default:
return true;
@@ -893,6 +1001,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type)
type == PTR_TO_XDP_SOCK;
}
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
env->scratched_regs |= 1U << regno;
@@ -940,13 +1053,38 @@ static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_
return !(off % BPF_REG_SIZE);
}
+static inline bool insn_is_gotox(struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_JA &&
+ BPF_SRC(insn->code) == BPF_X;
+}
+
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);
-void print_verifier_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state, bool print_all);
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno);
+
+struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
+struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
+void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
+bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
+
+int bpf_stack_liveness_init(struct bpf_verifier_env *env);
+void bpf_stack_liveness_free(struct bpf_verifier_env *env);
+int bpf_update_live_stack(struct bpf_verifier_env *env);
+int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
+void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
+int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
+int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
+int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
+void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 1af241525a17..f6e0795db484 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -67,7 +67,7 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
{
- void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);
+ void *p = kvmalloc_node_align_noprof(len, 1, GFP_USER | __GFP_NOWARN, NUMA_NO_NODE);
if (!p)
return ERR_PTR(-ENOMEM);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 1394ba302367..115a964f3006 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -137,6 +137,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RSVD 0x0060
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
@@ -183,6 +184,12 @@
#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
+ * Broadcom Synchronous Ethernet Controls (expansion register 0x0E)
+ */
+#define BCM_EXP_SYNC_ETHERNET (MII_BCM54XX_EXP_SEL_ER + 0x0E)
+#define BCM_EXP_SYNC_ETHERNET_MII_LITE BIT(11)
+
+/*
* BCM5482: Shadow registers
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
* register to access.
@@ -271,12 +278,100 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BroadR-Reach LRE Registers. */
+#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */
+#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */
+#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */
+#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */
+#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */
+#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */
+#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */
+#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */
+#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */
+#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */
+#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */
+#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */
+
+/* LRE control register. */
+#define LRECR_RESET 0x8000 /* Reset to default state */
+#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */
+#define LRECR_LDSRES 0x2000 /* Restart LDS Process */
+#define LRECR_LDSEN 0x1000 /* LDS Enable */
+#define LRECR_PDOWN 0x0800 /* Enable low power state */
+#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */
+#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */
+#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */
+#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */
+#define LRECR_1PAIR 0x0000 /* Select 1 Pair */
+#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */
+#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */
+
+/* LRE status register. */
+#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */
+#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */
+#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */
+#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */
+#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */
+#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define LRESR_RESV 0x0080 /* Unused... */
+#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */
+#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */
+#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */
+#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */
+#define LRESR_LSTATUS 0x0004 /* Link status */
+#define LRESR_JCD 0x0002 /* Jabber detected */
+#define LRESR_ERCAP 0x0001 /* Ext-reg capability */
+
+/* LDS Auto-Negotiation Advertised Ability. */
+#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */
+#define LREANAA_PAUSE 0x4000 /* Can pause */
+#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */
+#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */
+#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */
+#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */
+#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */
+
+#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \
+ LREANAA_100_2PAIR | LREANAA_10_2PAIR | \
+ LREANAA_10_1PAIR)
+
+#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL
+
+/* LDS Link Partner Ability. */
+#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */
+#define LRELPA_PAUSE 0x4000 /* Supports pause capability */
+#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */
+#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */
+#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */
+#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */
+#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */
+
+/* LDS Expansion register. */
+#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */
+#define LDSE_MASTER 0x4000 /* Master / Slave */
+#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */
+#define LDSE_PAIRS_SHIFT 12
+#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */
+#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */
+#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */
+#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */
+
/* BCM54810 Registers */
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54811 Registers */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A)
+/* Access Control Override Enable */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15)
+/* Access Control Override Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14)
+/* Access Control Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13)
+
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index f9e56fd12a9f..f06976ffb63f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -75,6 +75,10 @@
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */
+#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */
+#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
+#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
+#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -82,7 +86,7 @@
* as to avoid issues such as the compiler inlining or eliding either a static
* kfunc, or a global kfunc in an LTO build.
*/
-#define __bpf_kfunc __used noinline
+#define __bpf_kfunc __used __retain __noclone noinline
#define __bpf_kfunc_start_defs() \
__diag_push(); \
@@ -140,6 +144,7 @@ extern const struct file_operations btf_fops;
const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
+const struct btf_header *btf_header(const struct btf *btf);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
@@ -212,8 +217,13 @@ int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
bool btf_is_module(const struct btf *btf);
+bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+struct btf *btf_base_btf(const struct btf *btf);
+bool btf_type_is_i32(const struct btf_type *t);
+bool btf_type_is_i64(const struct btf_type *t);
+bool btf_type_is_primitive(const struct btf_type *t);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
@@ -339,11 +349,21 @@ static inline u8 btf_int_offset(const struct btf_type *t)
return BTF_INT_OFFSET(*(u32 *)(t + 1));
}
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
}
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
@@ -478,6 +498,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t)
return (struct btf_param *)(t + 1);
}
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
+{
+ return (struct btf_decl_tag *)(t + 1);
+}
+
static inline int btf_id_cmp_func(const void *a, const void *b)
{
const int *pa = a, *pb = b;
@@ -500,6 +525,7 @@ bool btf_param_match_suffix(const struct btf *btf,
const char *suffix);
int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
u32 arg_no);
+u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, int off);
struct bpf_verifier_log;
@@ -515,9 +541,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *
}
#endif
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
+
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
@@ -531,18 +586,52 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_projection_of(const char *pname, const char *tname);
bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, enum bpf_prog_type prog_type,
int arg);
int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
bool btf_types_are_same(const struct btf *btf1, u32 id1,
const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
+
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
{
return NULL;
}
+
+static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+}
+
+static inline int btf_relocate(void *log, struct btf *btf, const struct btf *base_btf,
+ __u32 **map_ids)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline __u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ return NULL;
+}
+
static inline const char *btf_name_by_offset(const struct btf *btf,
u32 offset)
{
@@ -589,16 +678,9 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
{
return false;
}
-#endif
-
-static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
{
- if (!btf_type_is_ptr(t))
- return false;
-
- t = btf_type_skip_modifiers(btf, t->type, NULL);
-
- return btf_type_is_struct(t);
+ return -EOPNOTSUPP;
}
-
+#endif
#endif
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index c0e3e1426a82..139bdececdcf 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[];
extern u32 bpf_cgroup_btf_id[];
extern u32 bpf_local_storage_map_btf_id[];
extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index e022e40b099e..b16b88bfbc3e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -34,6 +34,7 @@ enum bh_state_bits {
BH_Meta, /* Buffer contains metadata */
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
BH_Defer_Completion, /* Defer AIO completion to workqueue */
+ BH_Migrate, /* Buffer is being migrated (norefs) */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -53,7 +54,7 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
* filesystem and block layers. Nowadays the basic I/O unit
* is the bio, and buffer_heads are used for extracting block
* mappings (via a get_block_t call), for tracking state within
- * a page (via a page_mapping) and for wrapping bio submission
+ * a folio (via a folio_mapping) and for wrapping bio submission
* for backward compatibility reasons (e.g. submit_bh).
*/
struct buffer_head {
@@ -182,7 +183,6 @@ static inline unsigned long bh_offset(const struct buffer_head *bh)
BUG_ON(!PagePrivate(page)); \
((struct buffer_head *)page_private(page)); \
})
-#define page_has_buffers(page) PagePrivate(page)
#define folio_buffers(folio) folio_get_private(folio)
void buffer_check_dirty_writeback(struct folio *folio,
@@ -199,8 +199,7 @@ void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
gfp_t gfp);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
struct buffer_head *create_empty_buffers(struct folio *folio,
unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
@@ -224,6 +223,8 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
+struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
+ sector_t block, unsigned size);
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
@@ -258,21 +259,19 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- struct page **pagep, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ struct folio **foliop, get_block_t *get_block);
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block);
-int block_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int generic_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
+int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *);
+int generic_write_end(const struct kiocb *, struct address_space *,
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
-int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, struct page **, void **,
+int cont_write_begin(const struct kiocb *, struct address_space *, loff_t,
+ unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-void block_commit_write(struct page *page, unsigned int from, unsigned int to);
+void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -399,6 +398,12 @@ sb_find_get_block(struct super_block *sb, sector_t block)
return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
+static inline struct buffer_head *
+sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
+{
+ return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
+}
+
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 348acf2558f3..17a4933c611b 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -42,6 +42,7 @@ void bug_get_file_line(struct bug_entry *bug, const char **file,
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
+enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs);
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
@@ -62,6 +63,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
struct bug_entry;
+
+static inline enum bug_trap_type
+report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_BUG;
+}
+
static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line)
{
@@ -73,15 +81,23 @@ static inline void generic_bug_clear_once(void) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
/*
* Since detected data corruption should stop operation on the affected
* structures. Return value must be checked and sanely acted on by caller.
*/
static inline __must_check bool check_data_corruption(bool v) { return v; }
-#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) \
check_data_corruption(({ \
bool corruption = unlikely(condition); \
if (corruption) { \
+ if (addr) \
+ mem_dump_obj(addr); \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
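A hedged sketch of a caller updated for the new @addr parameter, so mem_dump_obj() can report the suspect object's provenance; the list check below is illustrative, not from this patch:

	static bool list_next_valid(struct list_head *entry)
	{
		/* true if the forward link is consistent; on corruption the
		 * macro dumps @entry and, depending on config, BUGs or warns.
		 */
		return !CHECK_DATA_CORRUPTION(entry->next->prev != entry, entry,
					      "list corruption at %px\n", entry);
	}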
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 3aa3640f8c18..2cfbb4c65c78 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -4,17 +4,17 @@
#include <linux/compiler.h>
-#ifdef __CHECKER__
-#define BUILD_BUG_ON_ZERO(e) (0)
-#else /* __CHECKER__ */
/*
* Force a compilation error if condition is true, but also produce a
* result (of value 0 and type int), so the expression can be used
* e.g. in a structure initializer (or where-ever else comma expressions
* aren't permitted).
+ *
+ * Takes an optional error message as the second argument. If omitted,
+ * defaults to the stringification of the tested expression.
*/
-#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
-#endif /* __CHECKER__ */
+#define BUILD_BUG_ON_ZERO(e, ...) \
+ __BUILD_BUG_ON_ZERO_MSG(e, ##__VA_ARGS__, #e " is true")
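A hedged example of the new optional-message form; both wrapper macros and the struct are made up for illustration:

	/* Default message: a failure reports "(n) & 1 is true". */
	#define MUST_BE_EVEN(n)	BUILD_BUG_ON_ZERO((n) & 1)

	/* Custom message via the optional second argument. */
	#define MUST_BE_POW2(n) \
		BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0, "n must be a power of 2")

	struct ring {
		/* comma-expression form remains usable in declarations */
		unsigned int slots[8 + MUST_BE_POW2(8)];
	};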
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 20aa3c2d89f7..831c1b4b626c 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -7,8 +7,8 @@
#define BUILD_ID_SIZE_MAX 20
struct vm_area_struct;
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size);
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
@@ -18,4 +18,29 @@ void init_vmlinux_build_id(void);
static inline void init_vmlinux_build_id(void) { }
#endif
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault);
+void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz);
+const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz);
+void freader_cleanup(struct freader *r);
+
#endif
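A minimal sketch of the freader API declared above, assuming the final argument controls whether the reader may take page faults; the function itself is hypothetical:

	#include <linux/elf.h>

	static int read_elf_magic(struct file *file)
	{
		struct freader r;
		const void *p;
		char buf[64];
		int err = 0;

		freader_init_from_file(&r, buf, sizeof(buf), file, true);
		p = freader_fetch(&r, 0, SELFMAG);	/* first 4 bytes */
		if (!p)
			err = r.err;
		else if (memcmp(p, ELFMAG, SELFMAG))
			err = -EINVAL;
		freader_cleanup(&r);
		return err;
	}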
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
index 18e0a2fc3816..eaa7a3f54450 100644
--- a/include/linux/bus/stm32_firewall_device.h
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -114,27 +114,30 @@ void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 su
#else /* CONFIG_STM32_FIREWALL */
-int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
- unsigned int nb_firewall);
+static inline int stm32_firewall_get_firewall(struct device_node *np,
+ struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
{
return -ENODEV;
}
-int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
{
return -ENODEV;
}
-void stm32_firewall_release_access(struct stm32_firewall *firewall)
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
{
}
-int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
{
return -ENODEV;
}
-void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
{
}
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index bd1e361b351c..3fc0efa0825b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -22,11 +22,8 @@ struct page;
* @bv_len: Number of bytes in the address range.
* @bv_offset: Start of the address range relative to the start of @bv_page.
*
- * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
- *
- * nth_page(@bv_page, n) == @bv_page + n
- *
- * This holds because page_is_mergeable() checks the above property.
+ * All pages within a bio_vec starting from @bv_page are contiguous and
+ * can simply be iterated (see bvec_advance()).
*/
struct bio_vec {
struct page *bv_page;
@@ -57,9 +54,12 @@ static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
* @offset: offset into the folio
*/
static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
- unsigned int len, unsigned int offset)
+ size_t len, size_t offset)
{
- bvec_set_page(bv, &folio->page, len, offset);
+ unsigned long nr = offset / PAGE_SIZE;
+
+ WARN_ON_ONCE(len > UINT_MAX);
+ bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
}
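A worked example of the folio-aware offset split above, assuming 4 KiB pages; the numbers are made up:

	/* For a 4-page (16 KiB) folio with len = 2048 and offset = 5000:
	 *
	 *   nr             = 5000 / 4096 = 1   (second page of the folio)
	 *   in-page offset = 5000 % 4096 = 904
	 *
	 * so the bvec is set to folio_page(folio, 1) at offset 904.
	 */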
/**
@@ -184,6 +184,12 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
@@ -280,4 +286,13 @@ static inline void *bvec_virt(struct bio_vec *bvec)
return page_address(bvec->bv_page) + bvec->bv_offset;
}
+/**
+ * bvec_phys - return the physical address for a bvec
+ * @bvec: bvec to return the physical address for
+ */
+static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
+{
+ return page_to_phys(bvec->bv_page) + bvec->bv_offset;
+}
+
#endif /* __LINUX_BVEC_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index c9a4c96c9943..55a44199de87 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -173,6 +173,38 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
}
}
+static inline void le64_to_cpu_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __le64_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le64s(buf);
+ buf++;
+ }
+}
+
+static inline void memcpy_from_le32(u32 *dst, const __le32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = le32_to_cpu(src[i]);
+}
+
+static inline void memcpy_to_le32(__le32 *dst, const u32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = cpu_to_le32(src[i]);
+}
+
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 0ecb17bb6883..e69768f50d53 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -3,14 +3,37 @@
#define __LINUX_CACHE_H
#include <uapi/linux/kernel.h>
+#include <vdso/cache.h>
#include <asm/cache.h>
#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
-#ifndef SMP_CACHE_BYTES
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
+/**
+ * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+ * @x: value to align
+ *
+ * On some architectures, the L2 ("SMP") cacheline size is bigger than L1,
+ * and sometimes this needs to be accounted for.
+ *
+ * Return: aligned value.
+ */
+#ifndef SMP_CACHE_ALIGN
+#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
+#endif
+
+/*
+ * ``__aligned_largest`` aligns a field to the value most optimal for the
+ * target architecture's memory operations. Expose the actual value so it
+ * can be used anywhere else.
+ */
+#ifndef __LARGEST_ALIGN
+#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
+#endif
+
+#ifndef LARGEST_ALIGN
+#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
#endif
/*
@@ -37,10 +60,6 @@
#define __ro_after_init __section(".data..ro_after_init")
#endif
-#ifndef ____cacheline_aligned
-#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#endif
-
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
@@ -95,6 +114,39 @@
__u8 __cacheline_group_end__##GROUP[0]
#endif
+/**
+ * __cacheline_group_begin_aligned - declare an aligned group start
+ * @GROUP: name of the group
+ * @...: optional group alignment
+ *
+ * The following block inside a struct:
+ *
+ * __cacheline_group_begin_aligned(grp);
+ * field a;
+ * field b;
+ * __cacheline_group_end_aligned(grp);
+ *
+ * will always be aligned to either the specified alignment or
+ * ``SMP_CACHE_BYTES``.
+ */
+#define __cacheline_group_begin_aligned(GROUP, ...) \
+ __cacheline_group_begin(GROUP) \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+/**
+ * __cacheline_group_end_aligned - declare an aligned group end
+ * @GROUP: name of the group
+ * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
+ *
+ * Note that the end marker is aligned to sizeof(long) to allow more precise
+ * size assertion. It also declares a padding at the end to avoid next field
+ * falling into this cacheline.
+ */
+#define __cacheline_group_end_aligned(GROUP, ...) \
+ __cacheline_group_end(GROUP) __aligned(sizeof(long)); \
+ struct { } __cacheline_group_pad__##GROUP \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
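A hedged sketch of the aligned-group markers in a concrete struct; the struct and its fields are hypothetical:

	struct foo_stats {
		__cacheline_group_begin_aligned(rx_hot);
		u64	rx_packets;
		u64	rx_bytes;
		__cacheline_group_end_aligned(rx_hot);
		/* the end-marker padding keeps this off the rx_hot cacheline */
		u64	tx_dropped;
	};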
#ifndef CACHELINE_ASSERT_GROUP_MEMBER
#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h
new file mode 100644
index 000000000000..cc81c5733e31
--- /dev/null
+++ b/include/linux/cache_coherency.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache coherency maintenance operation device drivers
+ *
+ * Copyright Huawei 2025
+ */
+#ifndef _LINUX_CACHE_COHERENCY_H_
+#define _LINUX_CACHE_COHERENCY_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+struct cc_inval_params {
+ phys_addr_t addr;
+ size_t size;
+};
+
+struct cache_coherency_ops_inst;
+
+struct cache_coherency_ops {
+ int (*wbinv)(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp);
+ int (*done)(struct cache_coherency_ops_inst *cci);
+};
+
+struct cache_coherency_ops_inst {
+ struct kref kref;
+ struct list_head node;
+ const struct cache_coherency_ops *ops;
+};
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci);
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci);
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+ size_t size);
+/**
+ * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance
+ * @ops: Cache maintenance operations
+ * @drv_struct: structure that contains the struct cache_coherency_ops_inst
+ * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct.
+ *
+ * This allocates a driver-specific structure and initializes the
+ * cache_coherency_ops_inst embedded in the drv_struct. On success, the
+ * returned pointer must be freed via cache_coherency_ops_instance_put().
+ *
+ * Returns a &drv_struct * on success, %NULL on error.
+ */
+#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct cache_coherency_ops_inst, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \
+ sizeof(drv_struct)); \
+ })
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci);
+
+#endif
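A hedged sketch of a driver embedding the ops instance as the kernel-doc above requires (member at offset zero); all foo_* names are hypothetical:

	struct foo_cc_dev {
		struct cache_coherency_ops_inst cci;	/* must be first */
		void __iomem *regs;
	};

	static int foo_wbinv(struct cache_coherency_ops_inst *cci,
			     struct cc_inval_params *invp)
	{
		/* write back + invalidate [invp->addr, invp->addr + invp->size) */
		return 0;
	}

	static const struct cache_coherency_ops foo_ops = {
		.wbinv = foo_wbinv,
	};

	static struct foo_cc_dev *foo_probe(void)
	{
		struct foo_cc_dev *fdev;

		fdev = cache_coherency_ops_instance_alloc(&foo_ops,
							  struct foo_cc_dev, cci);
		if (!fdev)
			return NULL;
		if (cache_coherency_ops_instance_register(&fdev->cci)) {
			cache_coherency_ops_instance_put(&fdev->cci);
			return NULL;
		}
		return fdev;
	}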
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2cb15fe4fe12..c8f4f0a0b874 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,7 +3,8 @@
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
-#include <linux/cpumask.h>
+#include <linux/cpuhplock.h>
+#include <linux/cpumask_types.h>
#include <linux/smp.h>
struct device_node;
@@ -113,26 +114,40 @@ int acpi_get_cache_info(unsigned int cpu,
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
/*
- * Get the id of the cache associated with @cpu at level @level.
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
* cpuhp lock must be held.
*/
-static inline int get_cpu_cacheinfo_id(int cpu, int level)
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
int i;
+ lockdep_assert_cpus_held();
+
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == level) {
if (ci->info_list[i].attributes & CACHE_ID)
- return ci->info_list[i].id;
- return -1;
+ return &ci->info_list[i];
+ return NULL;
}
}
- return -1;
+ return NULL;
}
-#ifdef CONFIG_ARM64
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
+
+ return ci ? ci->id : -1;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
#define use_arch_cache_info() (true)
#else
#define use_arch_cache_info() (false)
@@ -140,8 +155,14 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level)
#ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
#else
#include <asm/cachetype.h>
+
+#ifndef cpu_icache_is_aliasing
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#endif
+
#endif
#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
new file mode 100644
index 000000000000..13cd6469e7e5
--- /dev/null
+++ b/include/linux/call_once.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_CALL_ONCE_H
+#define _LINUX_CALL_ONCE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#define ONCE_NOT_STARTED 0
+#define ONCE_RUNNING 1
+#define ONCE_COMPLETED 2
+
+struct once {
+ atomic_t state;
+ struct mutex lock;
+};
+
+static inline void __once_init(struct once *once, const char *name,
+ struct lock_class_key *key)
+{
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ __mutex_init(&once->lock, name, key);
+}
+
+#define once_init(once) \
+do { \
+ static struct lock_class_key __key; \
+ __once_init((once), #once, &__key); \
+} while (0)
+
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed. Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
+{
+ int r, state;
+
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return 0;
+
+ guard(mutex)(&once->lock);
+ state = atomic_read(&once->state);
+ if (unlikely(state != ONCE_NOT_STARTED))
+ return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ r = cb(once);
+ if (r < 0)
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ else
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+ return r;
+}
+
+#endif /* _LINUX_CALL_ONCE_H */
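A minimal usage sketch of call_once(); the device-init scenario is hypothetical:

	static struct once hw_once;	/* once_init(&hw_once) at module init */

	static int hw_first_use(struct once *once)
	{
		/* expensive one-time setup; a negative return leaves
		 * @once in ONCE_NOT_STARTED so a later call can retry.
		 */
		return 0;
	}

	static int hw_open(void)
	{
		/* cheap after first success: a single acquire load */
		return call_once(&hw_once, hw_first_use);
	}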
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index 9b8a9c39614b..726d909e87ce 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#ifndef _CAN_BITTIMING_H
@@ -14,8 +14,14 @@
#define CAN_BITRATE_UNSET 0
#define CAN_BITRATE_UNKNOWN (-1U)
-#define CAN_CTRLMODE_TDC_MASK \
+#define CAN_CTRLMODE_FD_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+#define CAN_CTRLMODE_XL_TDC_MASK \
+ (CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TDC_MANUAL)
+#define CAN_CTRLMODE_TDC_AUTO_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_XL_TDC_AUTO)
+#define CAN_CTRLMODE_TDC_MANUAL_MASK \
+ (CAN_CTRLMODE_TDC_MANUAL | CAN_CTRLMODE_XL_TDC_MANUAL)
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
@@ -81,6 +87,11 @@ struct can_tdc {
u32 tdcf;
};
+/* The transceiver decoding margin corresponds to t_Decode in ISO 11898-2 */
+#define CAN_PWM_DECODE_NS 5
+/* Maximum PWM symbol duration. Corresponds to t_SymbolNom_MAX - t_Decode */
+#define CAN_PWM_NS_MAX (205 - CAN_PWM_DECODE_NS)
+
/*
* struct can_tdc_const - CAN hardware-dependent constant for
* Transmission Delay Compensation
@@ -114,27 +125,85 @@ struct can_tdc_const {
u32 tdcf_max;
};
+/*
+ * struct can_pwm - CAN Pulse-Width Modulation (PWM) parameters
+ *
+ * @pwms: pulse width modulation short phase
+ * @pwml: pulse width modulation long phase
+ * @pwmo: pulse width modulation offset
+ */
+struct can_pwm {
+ u32 pwms;
+ u32 pwml;
+ u32 pwmo;
+};
+
+/*
+ * struct can_pwm_const - CAN hardware-dependent constants for Pulse-Width
+ * Modulation (PWM)
+ *
+ * @pwms_min: PWM short phase minimum value. Must be at least 1.
+ * @pwms_max: PWM short phase maximum value
+ * @pwml_min: PWM long phase minimum value. Must be at least 1.
+ * @pwml_max: PWM long phase maximum value
+ * @pwmo_min: PWM offset phase minimum value
+ * @pwmo_max: PWM offset phase maximum value
+ */
+struct can_pwm_const {
+ u32 pwms_min;
+ u32 pwms_max;
+ u32 pwml_min;
+ u32 pwml_max;
+ u32 pwmo_min;
+ u32 pwmo_max;
+};
+
+struct data_bittiming_params {
+ const struct can_bittiming_const *data_bittiming_const;
+ struct can_bittiming data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct can_pwm_const *pwm_const;
+ union {
+ struct can_tdc tdc;
+ struct can_pwm pwm;
+ };
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
+};
+
#ifdef CONFIG_CAN_CALC_BITTIMING
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported);
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported);
+
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
- netdev_err(dev, "bit-timing calculation not available\n");
+	NL_SET_ERR_MSG(extack, "bit-timing calculation not available");
return -EINVAL;
}
static inline void
can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+
+static inline int
+can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
{
+ NL_SET_ERR_MSG(extack,
+		       "bit-timing calculation not available: manually provide PWML and PWMS");
+ return -EINVAL;
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -149,6 +218,39 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const unsigned int bitrate_const_cnt,
struct netlink_ext_ack *extack);
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack);
+
+/*
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers use instead an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
+ */
+static inline s32 can_get_relative_tdco(const struct data_bittiming_params *dbt_params)
+{
+ const struct can_bittiming *dbt = &dbt_params->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)dbt_params->tdc.tdco - sample_point_in_tc;
+}
+
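A worked example of the conversion above, with made-up bit-timing values:

	/* Assume dbt = { .prop_seg = 3, .phase_seg1 = 4, .brp = 2 } and
	 * tdc.tdco = 20. With CAN_SYNC_SEG = 1:
	 *
	 *   sample_point_in_tc = (1 + 3 + 4) * 2 = 16
	 *   relative TDCO      = 20 - 16       = 4
	 *
	 * i.e. the secondary sample point lies 4 minimum time quanta
	 * after the sample point.
	 */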
/*
* can_bit_time() - Duration of one bit
*
@@ -162,4 +264,17 @@ static inline unsigned int can_bit_time(const struct can_bittiming *bt)
return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
}
+/* Duration of one bit in minimum time quanta */
+static inline unsigned int can_bit_time_tqmin(const struct can_bittiming *bt)
+{
+ return can_bit_time(bt) * bt->brp;
+}
+
+/* Convert a duration from minimum time quanta to nanoseconds */
+static inline u32 can_tqmin_to_ns(u32 tqmin, u32 clock_freq)
+{
+ return DIV_U64_ROUND_CLOSEST(mul_u32_u32(tqmin, NSEC_PER_SEC),
+ clock_freq);
+}
+
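A worked example of the two helpers above, with made-up numbers:

	/* For can_bit_time() = 10 tq, brp = 4 and an 80 MHz CAN clock:
	 *
	 *   can_bit_time_tqmin()          = 10 * 4 = 40 minimum time quanta
	 *   can_tqmin_to_ns(40, 80000000) = 40 * 10^9 / (80 * 10^6) = 500 ns
	 */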
#endif /* !_CAN_BITTIMING_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1b92aed49363..f6416a56e95d 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -45,16 +45,11 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- const struct can_bittiming_const *bittiming_const,
- *data_bittiming_const;
- struct can_bittiming bittiming, data_bittiming;
- const struct can_tdc_const *tdc_const;
- struct can_tdc tdc;
-
+ const struct can_bittiming_const *bittiming_const;
+ struct can_bittiming bittiming;
+ struct data_bittiming_params fd, xl;
unsigned int bitrate_const_cnt;
const u32 *bitrate_const;
- const u32 *data_bitrate_const;
- unsigned int data_bitrate_const_cnt;
u32 bitrate_max;
struct can_clock clock;
@@ -77,69 +72,22 @@ struct can_priv {
struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
- int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
- int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
};
-static inline bool can_tdc_is_enabled(const struct can_priv *priv)
+static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
{
- return !!(priv->ctrlmode & CAN_CTRLMODE_TDC_MASK);
+ return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
-/*
- * can_get_relative_tdco() - TDCO relative to the sample point
- *
- * struct can_tdc::tdco represents the absolute offset from TDCV. Some
- * controllers use instead an offset relative to the Sample Point (SP)
- * such that:
- *
- * SSP = TDCV + absolute TDCO
- * = TDCV + SP + relative TDCO
- *
- * -+----------- one bit ----------+-- TX pin
- * |<--- Sample Point --->|
- *
- * --+----------- one bit ----------+-- RX pin
- * |<-------- TDCV -------->|
- * |<------------------------>| absolute TDCO
- * |<--- Sample Point --->|
- * | |<->| relative TDCO
- * |<------------- Secondary Sample Point ------------>|
- */
-static inline s32 can_get_relative_tdco(const struct can_priv *priv)
+static inline bool can_xl_tdc_is_enabled(const struct can_priv *priv)
{
- const struct can_bittiming *dbt = &priv->data_bittiming;
- s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- return (s32)priv->tdc.tdco - sample_point_in_tc;
-}
-
-/* helper to define static CAN controller features at device creation time */
-static inline int __must_check can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- if (priv->ctrlmode_supported & static_mode) {
- netdev_warn(dev,
- "Controller features can not be supported and static at the same time\n");
- return -EINVAL;
- }
- priv->ctrlmode = static_mode;
-
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
-
- return 0;
+ return !!(priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
}
static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
@@ -152,22 +100,6 @@ static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
-/* drop skb if it does not contain a valid CAN frame for sending */
-static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
- netdev_info_once(dev,
- "interface in listen only mode, dropping skb\n");
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
- }
-
- return can_dropped_invalid_skb(dev, skb);
-}
-
void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
@@ -179,14 +111,27 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
+#if IS_ENABLED(CONFIG_CAN_NETLINK)
struct can_priv *safe_candev_priv(struct net_device *dev);
+#else
+static inline struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ return NULL;
+}
+#endif
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
-int can_change_mtu(struct net_device *dev, int new_mtu);
-int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
+void can_set_default_mtu(struct net_device *dev);
+int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode);
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg);
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -195,6 +140,53 @@ int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
+const char *can_get_ctrlmode_str(u32 ctrlmode);
+
+static inline bool can_dev_in_xl_only_mode(struct can_priv *priv)
+{
+ const u32 mixed_mode = CAN_CTRLMODE_FD | CAN_CTRLMODE_XL;
+
+	/* When CAN XL is enabled but FD is disabled, we are running in
+ * the so-called 'CANXL-only mode' where the error signalling is
+ * disabled. This helper function determines the required value
+ * to disable error signalling in the CAN XL controller.
+ * The so-called CC/FD/XL 'mixed mode' requires error signalling.
+ */
+ return ((priv->ctrlmode & mixed_mode) == CAN_CTRLMODE_XL);
+}
+
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 silent_mode = priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED);
+
+ if (silent_mode) {
+ netdev_info_once(dev, "interface in %s mode, dropping skb\n",
+ can_get_ctrlmode_str(silent_mode));
+ goto invalid_skb;
+ }
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
+ netdev_info_once(dev, "CAN FD is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ if (can_dev_in_xl_only_mode(priv) && !can_is_canxl_skb(skb)) {
+ netdev_info_once(dev,
+ "Error signaling is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+
+invalid_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
void can_state_get_by_berr_counter(const struct net_device *dev,
const struct can_berr_counter *bec,
enum can_state *tx_state,
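The reworked can_dev_dropped_skb() above is intended as the first check in a
driver's ndo_start_xmit() handler; it consumes the skb and accounts it as
tx_dropped on rejection. A short sketch, assuming a hypothetical foo driver::

	static netdev_tx_t foo_can_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		/* Rejects silent-mode interfaces, CAN FD frames with FD
		 * disabled, and non-XL frames in CANXL-only mode.
		 */
		if (can_dev_dropped_skb(dev, skb))
			return NETDEV_TX_OK;

		/* ... hand the frame to the controller ... */
		return NETDEV_TX_OK;
	}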
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index f38772fd0c07..d3788a3d0942 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -2,8 +2,8 @@
/*
* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PUCAN_H
#define PUCAN_H
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 0c356a517991..1fb08922552c 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -139,7 +139,6 @@ static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
}
#ifdef CONFIG_MULTIUSER
-extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
@@ -150,10 +149,6 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
extern bool ns_capable_setid(struct user_namespace *ns, int cap);
#else
-static inline bool has_capability(struct task_struct *t, int cap)
-{
- return true;
-}
static inline bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap)
{
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index 60693a145894..559353ad64ac 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -74,7 +74,7 @@ enum cc_attr {
CC_ATTR_GUEST_UNROLL_STRING_IO,
/**
- * @CC_ATTR_SEV_SNP: Guest SNP is active.
+ * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
*
* The platform/OS is running as a guest/virtual machine and actively
* using AMD SEV-SNP features.
@@ -82,14 +82,12 @@ enum cc_attr {
CC_ATTR_GUEST_SEV_SNP,
/**
- * @CC_ATTR_HOTPLUG_DISABLED: Hotplug is not supported or disabled.
+ * @CC_ATTR_GUEST_SNP_SECURE_TSC: SNP Secure TSC is active.
*
- * The platform/OS is running as a guest/virtual machine does not
- * support CPU hotplug feature.
- *
- * Examples include TDX Guest.
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP Secure TSC feature.
*/
- CC_ATTR_HOTPLUG_DISABLED,
+ CC_ATTR_GUEST_SNP_SECURE_TSC,
/**
* @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
@@ -98,6 +96,14 @@ enum cc_attr {
* enabled to run SEV-SNP guests.
*/
CC_ATTR_HOST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_SNP_SECURE_AVIC: Secure AVIC mode is active.
+ *
+ * The host kernel is running with the necessary features enabled
+ * to run SEV-SNP guests with full Secure AVIC capabilities.
+ */
+ CC_ATTR_SNP_SECURE_AVIC,
};
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
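Code queries these attributes through the existing cc_platform_has()
predicate. A minimal sketch of gating on the new Secure TSC attribute (the
calling function is hypothetical)::

	#include <linux/cc_platform.h>

	static bool foo_use_secure_tsc(void)
	{
		/* True only in an SEV-SNP guest with Secure TSC active. */
		return cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC);
	}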
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 98c6fd0b39b6..b907e6c2307d 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -62,7 +62,6 @@ struct cdrom_device_info {
__u8 last_sense;
__u8 media_written; /* dirty flag, DVD+RW bookkeeping */
unsigned short mmc3_profile; /* current MMC3 profile */
- int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
bool opened_for_data;
__s64 last_media_change_ms;
@@ -77,7 +76,7 @@ struct cdrom_device_ops {
unsigned int clearing, int slot);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
- int (*select_speed) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
diff --git a/include/linux/cdx/bitfield.h b/include/linux/cdx/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/include/linux/cdx/bitfield.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
+#endif /* CDX_BITFIELD_H */
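The populate and extract macros above pair up around the little-endian
struct cdx_dword. A small sketch, assuming a hypothetical message word with a
16-bit INDEX field in bits 0-15 and a 16-bit FLAGS field in bits 16-31::

	#define FOO_INDEX_LBN	0
	#define FOO_INDEX_WIDTH	16
	#define FOO_FLAGS_LBN	16
	#define FOO_FLAGS_WIDTH	16

	struct cdx_dword dword;
	u32 index;

	/* Write both fields; the unnamed bits are zero-filled. */
	CDX_POPULATE_DWORD_2(dword, FOO_INDEX, 5, FOO_FLAGS, 0x3);

	/* Read a field back, converting from little-endian on the way. */
	index = CDX_DWORD_FIELD(dword, FOO_INDEX);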
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index b57118aaa679..b1ba97f6c9ad 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -211,7 +211,7 @@ struct cdx_driver {
};
#define to_cdx_driver(_drv) \
- container_of(_drv, struct cdx_driver, driver)
+ container_of_const(_drv, struct cdx_driver, driver)
/* Macro to avoid include chaining to get THIS_MODULE */
#define cdx_driver_register(drv) \
@@ -234,7 +234,7 @@ int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver,
*/
void cdx_driver_unregister(struct cdx_driver *cdx_driver);
-extern struct bus_type cdx_bus_type;
+extern const struct bus_type cdx_bus_type;
/**
* cdx_dev_reset - Reset CDX device
diff --git a/include/linux/cdx/edac_cdx_pcol.h b/include/linux/cdx/edac_cdx_pcol.h
new file mode 100644
index 000000000000..749db33bb482
--- /dev/null
+++ b/include/linux/cdx/edac_cdx_pcol.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+#include <linux/cdx/mcdi.h>
+
+#define MC_CMD_EDAC_GET_DDR_CONFIG_OUT_WORD_LENGTH_LEN 4
+/* Number of registers for the DDR controller */
+#define MC_CMD_GET_DDR_CONFIG_OFST 4
+#define MC_CMD_GET_DDR_CONFIG_LEN 4
+
+/***********************************/
+/* MC_CMD_EDAC_GET_DDR_CONFIG
+ * Provides detailed configuration for the DDR controller of the given index.
+ */
+#define MC_CMD_EDAC_GET_DDR_CONFIG 0x3
+
+/* MC_CMD_EDAC_GET_DDR_CONFIG_IN msgrequest */
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_OFST 0
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN 4
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/include/linux/cdx/mcdi.h b/include/linux/cdx/mcdi.h
new file mode 100644
index 000000000000..74075305cba4
--- /dev/null
+++ b/include/linux/cdx/mcdi.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "linux/cdx/bitfield.h"
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT (60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted but the MC rejected it for lack of
+ *	resources, as the MC has too many outstanding commands. The command
+ *	will be retried once another command returns.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
+ * with CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated NIC
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
+#endif /* CDX_MCDI_H */
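Putting the pieces together, a hedged sketch of issuing the EDAC DDR-config
command from edac_cdx_pcol.h through cdx_mcdi_rpc(); the cdx handle is assumed
to be set up elsewhere, and treating the dword at MC_CMD_GET_DDR_CONFIG_OFST
as the result word is an assumption made for illustration::

	MCDI_DECLARE_BUF(inbuf, MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_DDR_CONFIG_OFST + MC_CMD_GET_DDR_CONFIG_LEN);
	size_t outlen;
	u32 word;
	int rc;

	/* Select DDR controller 0 in the request payload. */
	MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, 0);

	rc = cdx_mcdi_rpc(cdx, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (!rc)
		word = MCDI_DWORD(outbuf, GET_DDR_CONFIG);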
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index ee1d0e5f9789..c7f2c63b3bc3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -504,20 +504,6 @@ struct ceph_mds_request_head_legacy {
#define CEPH_MDS_REQUEST_HEAD_VERSION 3
-struct ceph_mds_request_head_old {
- __le16 version; /* struct version */
- __le64 oldest_client_tid;
- __le32 mdsmap_epoch; /* on client */
- __le32 flags; /* CEPH_MDS_FLAG_* */
- __u8 num_retry, num_fwd; /* count retry, fwd attempts */
- __le16 num_releases; /* # include cap/lease release records */
- __le32 op; /* mds op code */
- __le32 caller_uid, caller_gid;
- __le64 ino; /* use this ino for openc, mkdir, mknod,
- etc. (if replaying) */
- union ceph_mds_request_args_ext args;
-} __attribute__ ((packed));
-
struct ceph_mds_request_head {
__le16 version; /* struct version */
__le64 oldest_client_tid;
@@ -808,7 +794,7 @@ struct ceph_mds_caps {
struct ceph_mds_cap_peer {
__le64 cap_id;
- __le32 seq;
+ __le32 issue_seq;
__le32 mseq;
__le32 mds;
__u8 flags;
@@ -822,7 +808,7 @@ struct ceph_mds_cap_release {
struct ceph_mds_cap_item {
__le64 ino;
__le64 cap_id;
- __le32 migrate_seq, seq;
+ __le32 migrate_seq, issue_seq;
} __attribute__ ((packed));
#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 04f3ace5787b..8fc1aed64113 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -6,7 +6,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ceph/types.h>
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 4497d0a6772c..63e0e2aa1ce9 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -4,7 +4,7 @@
#include <linux/ceph/ceph_debug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
@@ -306,8 +306,7 @@ struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
extern void ceph_reset_client_addr(struct ceph_client *client);
-extern int __ceph_open_session(struct ceph_client *client,
- unsigned long started);
+extern int __ceph_open_session(struct ceph_client *client);
extern int ceph_open_session(struct ceph_client *client);
int ceph_wait_for_latest_osdmap(struct ceph_client *client,
unsigned long timeout);
@@ -317,12 +316,6 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
-extern int ceph_copy_user_to_page_vector(struct page **pages,
- const void __user *data,
- loff_t off, size_t len);
-extern void ceph_copy_to_page_vector(struct page **pages,
- const void *data,
- loff_t off, size_t len);
extern void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 1717cc57cdac..6aa4c6478c9f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -2,6 +2,7 @@
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
+#include <crypto/sha2.h>
#include <linux/bvec.h>
#include <linux/crypto.h>
#include <linux/kref.h>
@@ -412,7 +413,8 @@ struct ceph_connection_v2_info {
struct ceph_msg_data_cursor in_cursor;
struct ceph_msg_data_cursor out_cursor;
- struct crypto_shash *hmac_tfm; /* post-auth signature */
+ struct hmac_sha256_key hmac_key; /* post-auth signature */
+ bool hmac_key_set;
struct crypto_aead *gcm_tfm; /* on-wire encryption */
struct aead_request *gcm_req;
struct crypto_wait gcm_wait;
@@ -548,12 +550,12 @@ void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
void ceph_con_process_message(struct ceph_connection *con);
int ceph_con_in_msg_alloc(struct ceph_connection *con,
struct ceph_msg_header *hdr, int *skip);
-void ceph_con_get_out_msg(struct ceph_connection *con);
+struct ceph_msg *ceph_con_get_out_msg(struct ceph_connection *con);
/* messenger_v1.c */
int ceph_con_v1_try_read(struct ceph_connection *con);
int ceph_con_v1_try_write(struct ceph_connection *con);
-void ceph_con_v1_revoke(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con, struct ceph_msg *msg);
void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
bool ceph_con_v1_opened(struct ceph_connection *con);
void ceph_con_v1_reset_session(struct ceph_connection *con);
@@ -562,7 +564,7 @@ void ceph_con_v1_reset_protocol(struct ceph_connection *con);
/* messenger_v2.c */
int ceph_con_v2_try_read(struct ceph_connection *con);
int ceph_con_v2_try_write(struct ceph_connection *con);
-void ceph_con_v2_revoke(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg);
void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
bool ceph_con_v2_opened(struct ceph_connection *con);
void ceph_con_v2_reset_session(struct ceph_connection *con);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index f66f6aac74f6..50b14a5661c7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -449,8 +449,6 @@ extern int ceph_osdc_init(struct ceph_osd_client *osdc,
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
@@ -492,9 +490,6 @@ extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
unsigned int which,
@@ -511,9 +506,6 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
unsigned int which, struct iov_iter *iter);
-extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
@@ -628,8 +620,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
-int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
- struct ceph_osd_linger_request *lreq);
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 5dead8486fd8..879bec0863aa 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -17,12 +17,6 @@ struct ceph_pagelist {
refcount_t refcnt;
};
-struct ceph_pagelist_cursor {
- struct ceph_pagelist *pl; /* pagelist, for error checking */
- struct list_head *page_lru; /* page in list */
- size_t room; /* room remaining to reset to */
-};
-
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -33,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
-extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
-extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
__le64 ev = cpu_to_le64(v);
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 6617d9c68d86..83e6613d12ae 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -28,13 +28,6 @@
extern unsigned char * cfag12864b_buffer;
/*
- * Get the refresh rate of the LCD
- *
- * Returns the refresh rate (hertz).
- */
-extern unsigned int cfag12864b_getrate(void);
-
-/*
* Enable refreshing
*
* Returns 0 if successful (anyone was using it),
@@ -50,16 +43,6 @@ extern unsigned char cfag12864b_enable(void);
extern void cfag12864b_disable(void);
/*
- * Is enabled refreshing? (is anyone using the module?)
- *
- * Returns 0 if refreshing is not enabled (anyone is using it),
- * or != 0 if refreshing is enabled (someone is using it).
- *
- * Useful for buffer read-only modules.
- */
-extern unsigned char cfag12864b_isenabled(void);
-
-/*
* Is the module inited?
*/
extern unsigned char cfag12864b_isinited(void);
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index f0df518e11dd..1fd22ea6eba4 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -11,14 +11,9 @@
#include <linux/module.h>
#include <asm/cfi.h>
-#ifndef cfi_get_offset
-static inline int cfi_get_offset(void)
-{
- return 0;
-}
-#endif
+#ifdef CONFIG_CFI
+extern bool cfi_warn;
-#ifdef CONFIG_CFI_CLANG
enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
unsigned long *target, u32 type);
@@ -27,7 +22,45 @@ static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
{
return report_cfi_failure(regs, addr, NULL, 0);
}
-#endif /* CONFIG_CFI_CLANG */
+
+#ifndef cfi_get_offset
+/*
+ * Returns the CFI prefix offset. By default, the compiler emits only
+ * a 4-byte CFI type hash before the function. If an architecture
+ * uses -fpatchable-function-entry=N,M where M>0 to change the prefix
+ * offset, they must override this function.
+ */
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+#endif
+
+#ifndef cfi_get_func_hash
+static inline u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
+
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
+
+ return hash;
+}
+#endif
+
+/* CFI type hashes for BPF function types */
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+
+#else /* CONFIG_CFI */
+
+static inline int cfi_get_offset(void) { return 0; }
+static inline u32 cfi_get_func_hash(void *func) { return 0; }
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+
+#endif /* CONFIG_CFI */
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
bool is_cfi_trap(unsigned long addr);
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
index 6b8713675765..a86af9bc8bdc 100644
--- a/include/linux/cfi_types.h
+++ b/include/linux/cfi_types.h
@@ -8,7 +8,7 @@
#ifdef __ASSEMBLY__
#include <linux/linkage.h>
-#ifdef CONFIG_CFI_CLANG
+#ifdef CONFIG_CFI
/*
* Use the __kcfi_typeid_<function> type identifier symbol to
* annotate indirectly called assembly functions. The compiler emits
@@ -29,17 +29,40 @@
#define SYM_TYPED_START(name, linkage, align...) \
SYM_TYPED_ENTRY(name, linkage, align)
-#else /* CONFIG_CFI_CLANG */
+#else /* CONFIG_CFI */
#define SYM_TYPED_START(name, linkage, align...) \
SYM_START(name, linkage, align)
-#endif /* CONFIG_CFI_CLANG */
+#endif /* CONFIG_CFI */
#ifndef SYM_TYPED_FUNC_START
#define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \
+ " .type " #name ",\%object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .4byte __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_CFI_TYPES_H */
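The DEFINE_CFI_TYPE() macro added above lets an architecture seed the declared
cfi_bpf_hash objects from a function's KCFI type id. A sketch, assuming a
representative BPF dispatch prototype::

	struct bpf_insn;

	/* The compiler emits __kcfi_typeid___bpf_prog_runX for this. */
	extern unsigned int __bpf_prog_runX(const void *ctx,
					    const struct bpf_insn *insn);

	/* Expands to: u32 cfi_bpf_hash __ro_after_init = <type id> */
	DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);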
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ea48c861cd36..b760a3c470a5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -71,9 +71,6 @@ enum {
/* Cgroup is frozen. */
CGRP_FROZEN,
-
- /* Control group has to be killed. */
- CGRP_KILL,
};
/* cgroup_root->flags */
@@ -94,6 +91,12 @@ enum {
* cgroup_threadgroup_rwsem. This makes hot path operations such as
* forks and exits into the slow path and more expensive.
*
+ * Alleviate the contention between fork, exec, exit operations and
+ * writing to cgroup.procs by taking a per threadgroup rwsem instead of
+ * the global cgroup_threadgroup_rwsem. Fork and other operations
+ * from threads in different thread groups no longer contend with
+ * writing to cgroup.procs.
+ *
* The static usage pattern of creating a cgroup, enabling controllers,
* and then seeding it with CLONE_INTO_CGROUP doesn't require write
* locking cgroup_threadgroup_rwsem and thus doesn't benefit from
@@ -119,7 +122,12 @@ enum {
/*
* Enable hugetlb accounting for the memory controller.
*/
- CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+
+ /*
+ * Enable legacy local pids.events.
+ */
+ CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};
/* cftype->flags */
@@ -138,6 +146,17 @@ enum {
__CFTYPE_ADDED = (1 << 18),
};
+enum cgroup_attach_lock_mode {
+ /* Default */
+ CGRP_ATTACH_LOCK_GLOBAL,
+
+ /* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
+ CGRP_ATTACH_LOCK_NONE,
+
+ /* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
+ CGRP_ATTACH_LOCK_PER_THREADGROUP,
+};
+
/*
* cgroup_file is the handle for a file instance created in a cgroup which
* is used, for example, to generate file changed notifications. This can
@@ -167,13 +186,31 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * Depending on the context, this field is initialized
+ * via css_rstat_init() at different places:
+ *
+ * when css is associated with cgroup::self
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init()
+ * when css->cgroup is not the root cgroup
+ * performed in cgroup_create()
+ * when css is associated with a subsystem
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init_subsys() in the non-early path
+ * when css->cgroup is not the root cgroup
+ * performed in css_create()
+ */
+ struct css_rstat_cpu __percpu *rstat_cpu;
+
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
- /* flush target list anchored at cgrp->rstat_css_list */
- struct list_head rstat_css_node;
-
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -205,6 +242,24 @@ struct cgroup_subsys_state {
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
+
+ /*
+ * Keep track of total numbers of visible descendant CSSes.
+ * The total number of dying CSSes is tracked in
+ * css->cgroup->nr_dying_subsys[ssid].
+ * Protected by cgroup_mutex.
+ */
+ int nr_descendants;
+
+ /*
+ * A singly-linked list of css structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * css_rstat_flush().
+ *
+ * Protected by rstat_base_lock when css is cgroup::self.
+ * Protected by css->ss->rstat_ss_lock otherwise.
+ */
+ struct cgroup_subsys_state *rstat_flush_next;
};
/*
@@ -310,14 +365,15 @@ struct cgroup_base_stat {
#ifdef CONFIG_SCHED_CORE
u64 forceidle_sum;
#endif
+ u64 ntime;
};
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
- * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * per-cpu in css_rstat_cpu which is then lazily propagated up the
* hierarchy on reads.
*
- * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
+ * When a stat gets updated, the css_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
@@ -329,10 +385,26 @@ struct cgroup_base_stat {
* frequency decreases the cost of each read.
*
* This struct hosts both the fields which implement the above -
- * updated_children and updated_next - and the fields which track basic
- * resource statistics on top of it - bsync, bstat and last_bstat.
+ * updated_children and updated_next.
+ */
+struct css_rstat_cpu {
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next. updated_children is terminated by its container css.
+ */
+ struct cgroup_subsys_state *updated_children;
+ struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+
+ struct llist_node lnode; /* lockless list for update */
+ struct cgroup_subsys_state *owner; /* back pointer */
+};
+
+/*
+ * This struct hosts the fields which track basic resource statistics on
+ * top of it - bsync, bstat and last_bstat.
*/
-struct cgroup_rstat_cpu {
+struct cgroup_rstat_base_cpu {
/*
* ->bsync protects ->bstat. These are the only fields which get
* updated in the hot path.
@@ -359,20 +431,6 @@ struct cgroup_rstat_cpu {
* deltas to propagate to the per-cpu subtree_bstat.
*/
struct cgroup_base_stat last_subtree_bstat;
-
- /*
- * Child cgroups with stat updates on this cpu since the last read
- * are linked on the parent's ->updated_children through
- * ->updated_next.
- *
- * In addition to being more compact, singly-linked list pointing
- * to the cgroup makes it unnecessary for each per-cpu struct to
- * point back to the associated cgroup.
- *
- * Protected by per-cpu cgroup_rstat_cpu_lock.
- */
- struct cgroup *updated_children; /* terminated by self cgroup */
- struct cgroup *updated_next; /* NULL iff not on the list */
};
struct cgroup_freezer_state {
@@ -380,7 +438,7 @@ struct cgroup_freezer_state {
bool freeze;
/* Should the cgroup actually be frozen? */
- int e_freeze;
+ bool e_freeze;
/* Fields below are protected by css_set_lock */
@@ -392,6 +450,23 @@ struct cgroup_freezer_state {
* frozen, SIGSTOPped, and PTRACEd.
*/
int nr_frozen_tasks;
+
+ /* Freeze time data consistency protection */
+ seqcount_spinlock_t freeze_seq;
+
+ /*
+ * Most recent time the cgroup was requested to freeze.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 freeze_start_nsec;
+
+ /*
+ * Total duration the cgroup has spent freezing.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 frozen_nsec;
};
struct cgroup {
@@ -443,6 +518,9 @@ struct cgroup {
int nr_threaded_children; /* # of live threaded child cgroups */
+ /* sequence number for cgroup.kill, serialized by css_set_lock. */
+ unsigned int kill_seq;
+
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
@@ -465,6 +543,12 @@ struct cgroup {
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+ /*
+ * Keep track of total number of dying CSSes at and below this cgroup.
+ * Protected by cgroup_mutex.
+ */
+ int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
+
struct cgroup_root *root;
/*
@@ -492,23 +576,23 @@ struct cgroup {
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
- /* per-cpu recursive resource statistics */
- struct cgroup_rstat_cpu __percpu *rstat_cpu;
- struct list_head rstat_css_list;
-
/*
- * Add padding to separate the read mostly rstat_cpu and
- * rstat_css_list into a different cacheline from the following
- * rstat_flush_next and *bstat fields which can have frequent updates.
+ * Depending on the context, this field is initialized via
+ * css_rstat_init() at different places:
+ *
+ * when cgroup is the root cgroup
+ * performed in cgroup_setup_root()
+ * otherwise
+ * performed in cgroup_create()
*/
- CACHELINE_PADDING(_pad_);
+ struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
/*
- * A singly-linked list of cgroup structures to be rstat flushed.
- * This is a scratch field to be used exclusively by
- * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ * Add padding to keep the read mostly rstat per-cpu pointer on a
+ * different cacheline than the following *bstat fields which can have
+ * frequent updates.
*/
- struct cgroup *rstat_flush_next;
+ CACHELINE_PADDING(_pad_);
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
@@ -534,9 +618,6 @@ struct cgroup {
/* used to store eBPF programs */
struct cgroup_bpf bpf;
- /* If there is block congestion on this cgroup. */
- atomic_t congestion_count;
-
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
@@ -598,9 +679,8 @@ struct cgroup_root {
*/
struct cftype {
/*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
+ * Name of the subsystem is prepended in cgroup_file_name().
+ * Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
@@ -676,9 +756,7 @@ struct cftype {
__poll_t (*poll)(struct kernfs_open_file *of,
struct poll_table_struct *pt);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
-#endif
};
/*
@@ -692,6 +770,7 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_killed)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
@@ -701,7 +780,6 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
@@ -771,20 +849,32 @@ struct cgroup_subsys {
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
+
+ spinlock_t rstat_ss_lock;
+ struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+extern bool cgroup_enable_per_threadgroup_rwsem;
+
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
- * using a percpu_rw_semaphore.
+ * using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
+ * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
+ if (cgroup_enable_per_threadgroup_rwsem)
+ down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
@@ -795,6 +885,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
+ if (cgroup_enable_per_threadgroup_rwsem)
+ up_read(&tsk->signal->cgroup_threadgroup_rwsem);
percpu_up_read(&cgroup_threadgroup_rwsem);
}
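The two helpers must bracket the whole threadgroup-changing operation so that
the per-threadgroup rwsem always nests inside the global one. A minimal
sketch::

	cgroup_threadgroup_change_begin(tsk);
	/* fork/exec/exit-style update of tsk's threadgroup */
	cgroup_threadgroup_change_end(tsk);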
@@ -842,14 +934,12 @@ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
-#else
- return 0;
-#endif
}
+#endif
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
@@ -859,13 +949,13 @@ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
-#endif
}
+#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 2150ca60394b..bc892e3b37ee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -10,8 +10,8 @@
*/
#include <linux/sched.h>
-#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -19,6 +19,7 @@
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
@@ -26,11 +27,10 @@
#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
+#include <linux/cgroup_namespace.h>
struct kernel_clone_args;
-#ifdef CONFIG_CGROUPS
-
/*
* All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
@@ -40,7 +40,9 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
-enum {
+#ifdef CONFIG_CGROUPS
+
+enum css_task_iter_flags {
CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
@@ -66,10 +68,16 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+enum cgroup_lifetime_events {
+ CGROUP_LIFETIME_ONLINE,
+ CGROUP_LIFETIME_OFFLINE,
+};
+
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
+extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -113,6 +121,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);
@@ -128,9 +137,10 @@ extern void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
-void cgroup_exit(struct task_struct *p);
-void cgroup_release(struct task_struct *p);
-void cgroup_free(struct task_struct *p);
+void cgroup_task_exit(struct task_struct *p);
+void cgroup_task_dead(struct task_struct *p);
+void cgroup_task_release(struct task_struct *p);
+void cgroup_task_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
@@ -343,7 +353,23 @@ static inline u64 cgroup_id(const struct cgroup *cgrp)
*/
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
- return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+ return css->flags & CSS_DYING;
+}
+
+static inline bool css_is_online(struct cgroup_subsys_state *css)
+{
+ return css->flags & CSS_ONLINE;
+}
+
+static inline bool css_is_self(struct cgroup_subsys_state *css)
+{
+ if (css == &css->cgroup->self) {
+ /* cgroup::self should not have subsystem association */
+ WARN_ON(css->ss != NULL);
+ return true;
+ }
+
+ return false;
}
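With CSS_DYING set under cgroup_mutex at kill time, css_is_dying() is now a
cheap flag test rather than a refcount probe. A sketch of the usual guard in
a cgroup_mutex-protected walk::

	/* Skip CSSes that have already been killed. */
	if (css_is_dying(css))
		continue;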
static inline void cgroup_get(struct cgroup *cgrp)
@@ -631,6 +657,7 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *__cgroup_get_from_id(u64 id);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
@@ -654,9 +681,10 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
-static inline void cgroup_exit(struct task_struct *p) {}
-static inline void cgroup_release(struct task_struct *p) {}
-static inline void cgroup_free(struct task_struct *p) {}
+static inline void cgroup_task_exit(struct task_struct *p) {}
+static inline void cgroup_task_dead(struct task_struct *p) {}
+static inline void cgroup_task_release(struct task_struct *p) {}
+static inline void cgroup_task_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
@@ -687,10 +715,8 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
/*
* cgroup scalable recursive statistics.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
-void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(struct cgroup *cgrp);
+void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void css_rstat_flush(struct cgroup_subsys_state *css);
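The rstat entry points now take the css rather than the cgroup. A sketch of
the updated calling convention, assuming a subsystem state pointer ss_css::

	/* Mark ss_css updated on this CPU; cheap, called from hot paths. */
	css_rstat_updated(ss_css, smp_processor_id());

	/* Fold all pending per-cpu deltas before reading totals. */
	css_rstat_flush(ss_css);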
/*
* Basic resource stats.
@@ -766,50 +792,6 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
-struct cgroup_namespace {
- struct ns_common ns;
- struct user_namespace *user_ns;
- struct ucounts *ucounts;
- struct css_set *root_cset;
-};
-
-extern struct cgroup_namespace init_cgroup_ns;
-
-#ifdef CONFIG_CGROUPS
-
-void free_cgroup_ns(struct cgroup_namespace *ns);
-
-struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
- struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns);
-
-int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns);
-
-#else /* !CONFIG_CGROUPS */
-
-static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
-static inline struct cgroup_namespace *
-copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns)
-{
- return old_ns;
-}
-
-#endif /* !CONFIG_CGROUPS */
-
-static inline void get_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns)
- refcount_inc(&ns->ns.count);
-}
-
-static inline void put_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns && refcount_dec_and_test(&ns->ns.count))
- free_cgroup_ns(ns);
-}
-
#ifdef CONFIG_CGROUPS
void cgroup_enter_frozen(void);
@@ -855,4 +837,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_dmem.h b/include/linux/cgroup_dmem.h
new file mode 100644
index 000000000000..dd4869f1d736
--- /dev/null
+++ b/include/linux/cgroup_dmem.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _CGROUP_DMEM_H
+#define _CGROUP_DMEM_H
+
+#include <linux/types.h>
+#include <linux/llist.h>
+
+struct dmem_cgroup_pool_state;
+
+/* Opaque definition of a cgroup region, used internally */
+struct dmem_cgroup_region;
+
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *name_fmt, ...) __printf(2,3);
+void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region);
+int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
+void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size);
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low);
+
+void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool);
+#else
+static inline __printf(2,3) struct dmem_cgroup_region *
+dmem_cgroup_register_region(u64 size, const char *name_fmt, ...)
+{
+ return NULL;
+}
+
+static inline void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
+{ }
+
+static inline int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
+{
+ *ret_pool = NULL;
+
+ if (ret_limit_pool)
+ *ret_limit_pool = NULL;
+
+ return 0;
+}
+
+static inline void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
+{ }
+
+static inline
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low)
+{
+ return true;
+}
+
+static inline void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
+{ }
+
+#endif
+#endif /* _CGROUP_DMEM_H */
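The charge and uncharge calls are symmetric, and the !CONFIG_CGROUP_DMEM
stubs return success with a NULL pool, so callers can be written once for
both configurations. A hedged sketch of an allocation path, with the region
set up elsewhere and the backing allocator hypothetical::

	struct dmem_cgroup_pool_state *pool;
	int ret;

	ret = dmem_cgroup_try_charge(region, size, &pool, NULL);
	if (ret)
		return ret;	/* over a cgroup limit */

	if (!foo_alloc_backing(size)) {	/* hypothetical allocator */
		dmem_cgroup_uncharge(pool, size);
		return -ENOMEM;
	}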
diff --git a/include/linux/cgroup_namespace.h b/include/linux/cgroup_namespace.h
new file mode 100644
index 000000000000..78a8418558a4
--- /dev/null
+++ b/include/linux/cgroup_namespace.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CGROUP_NAMESPACE_H
+#define _LINUX_CGROUP_NAMESPACE_H
+
+#include <linux/ns_common.h>
+
+struct cgroup_namespace {
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct css_set *root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct cgroup_namespace, ns);
+}
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(u64 flags,
+ struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns);
+
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ ns_ref_inc(ns);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns_ref_put(ns))
+ free_cgroup_ns(ns);
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(u64 flags, struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns)
+{
+ return old_ns;
+}
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }
+
+#endif /* !CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_NAMESPACE_H */
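The refcount helpers now route through ns_ref_inc()/ns_ref_put(), but the
ownership pattern for callers is unchanged. A minimal sketch::

	/* Pin the namespace while another context holds a pointer to it. */
	get_cgroup_ns(ns);
	/* ... use ns->root_cset etc. ... */
	put_cgroup_ns(ns);	/* the final put calls free_cgroup_ns() */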
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 445235487230..3fd0bcbf3080 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -65,6 +65,10 @@ SUBSYS(rdma)
SUBSYS(misc)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+SUBSYS(dmem)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index c2d09bc4f976..8d41b917c77d 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -1,8 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GUARDS_H
-#define __LINUX_GUARDS_H
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/args.h>
+
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe the lock is held for the remainder of the "if ()" block not
+ * the remainder of "func()".
+ *
+ * The ACQUIRE() macro can be used in all places that guard() can be
+ * used and additionally support conditional locks::
+ *
+ * DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
+ * ...
+ * ACQUIRE(pci_dev_try, lock)(dev);
+ * rc = ACQUIRE_ERR(pci_dev_try, &lock);
+ * if (rc)
+ * return rc;
+ * // @lock is held
+ *
+ * Now, when a function uses both __free() and guard()/ACQUIRE(), or
+ * multiple instances of __free(), the LIFO order of variable definition
+ * order matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the file. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ *		INIT_LIST_HEAD(&obj->node);
+ *		list_add(&obj->node, &list);
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
/*
* DEFINE_FREE(name, type, free):
@@ -59,24 +208,46 @@
*/
#define DEFINE_FREE(_name, _type, _free) \
- static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+ static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
#define __free(_name) __cleanup(__free_##_name)
-#define __get_and_null_ptr(p) \
- ({ __auto_type __ptr = &(p); \
- __auto_type __val = *__ptr; \
- *__ptr = NULL; __val; })
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
-static inline __must_check
+static __always_inline __must_check
const volatile void * __must_check_fn(const volatile void *val)
{ return val; }
#define no_free_ptr(p) \
- ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
+ ((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))
#define return_ptr(p) return no_free_ptr(p)
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
/*
* DEFINE_CLASS(name, type, exit, init, init_args...):
@@ -90,12 +261,16 @@ const volatile void * __must_check_fn(const volatile void *val)
* CLASS(name, var)(args...):
* declare the variable @var as an instance of the named class
*
+ * CLASS_INIT(name, var, init_expr):
+ * declare the variable @var as an instance of the named class with
+ * custom initialization expression.
+ *
* Ex.
*
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
*
* CLASS(fdget, f)(fd);
- * if (!f.file)
+ * if (fd_empty(f))
* return -EBADF;
*
* // use 'f' without concern
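+ *
+ * A minimal CLASS_INIT() sketch (reusing the fdget class above), which
+ * bypasses the class constructor with a custom init expression:
+ *
+ *	CLASS_INIT(fdget, f, fdget(fd));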
@@ -103,22 +278,34 @@ const volatile void * __must_check_fn(const volatile void *val)
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
typedef _type class_##_name##_t; \
-static inline void class_##_name##_destructor(_type *p) \
+static __always_inline void class_##_name##_destructor(_type *p) \
{ _type _T = *p; _exit; } \
-static inline _type class_##_name##_constructor(_init_args) \
+static __always_inline _type class_##_name##_constructor(_init_args) \
{ _type t = _init; return t; }
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
typedef class_##_name##_t class_##_name##ext##_t; \
-static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
{ class_##_name##_destructor(p); } \
-static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
{ class_##_name##_t t = _init; return t; }
#define CLASS(_name, var) \
class_##_name##_t var __cleanup(class_##_name##_destructor) = \
class_##_name##_constructor
+#define CLASS_INIT(_name, _var, _init_expr) \
+ class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr)
+
+#define __scoped_class(_name, var, _label, args...) \
+ for (CLASS(_name, var)(args); ; ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_class(_name, var, args...) \
+ __scoped_class(_name, var, __UNIQUE_ID(label), args)
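+
+/*
+ * A usage sketch (reusing the fdget class above): the instance lives
+ * only for the braced block that follows, not the remainder of the
+ * enclosing function:
+ *
+ *	scoped_class(fdget, f, fd) {
+ *		if (fd_empty(f))
+ *			return -EBADF;
+ *		// use 'f' only within this block
+ *	}
+ */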
/*
* DEFINE_GUARD(name, type, lock, unlock):
@@ -146,34 +333,125 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
* similar to scoped_guard(), except it does fail when the lock
* acquire fails.
*
+ * Only for conditional locks.
+ *
+ * ACQUIRE(name, var):
+ * a named instance of the (guard) class, suitable for conditional
+ * locks when paired with ACQUIRE_ERR().
+ *
+ * ACQUIRE_ERR(name, &var):
+ * a helper that is effectively a PTR_ERR() conversion of the guard
+ * pointer. Returns 0 when the lock was acquired and a negative
+ * error code otherwise.
*/
-#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
- DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
+#define DEFINE_CLASS_IS_UNCONDITIONAL(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
- { return *_T; }
+ { return (void *)1; }
+
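+/*
+ * True if @_ptr encodes failure, i.e. is NULL or an ERR_PTR() value: in
+ * unsigned arithmetic, (_rc - 1) wraps 0 (NULL) around to ULONG_MAX and
+ * maps the errno range into the top MAX_ERRNO values, so one comparison
+ * covers both cases.
+ */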
+#define __GUARD_IS_ERR(_ptr) \
+ ({ \
+ unsigned long _rc = (__force unsigned long)(_ptr); \
+ unlikely((_rc - 1) >= -MAX_ERRNO - 1); \
+ })
+
+#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
+ static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { \
+ void *_ptr = (void *)(__force unsigned long)*(_exp); \
+ if (IS_ERR(_ptr)) { \
+ _ptr = NULL; \
+ } \
+ return _ptr; \
+ } \
+ static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
+ { \
+ long _rc = (__force unsigned long)*(_exp); \
+ if (!_rc) { \
+ _rc = -EBUSY; \
+ } \
+ if (!IS_ERR_VALUE(_rc)) { \
+ _rc = 0; \
+ } \
+ return _rc; \
+ }
+
+#define DEFINE_CLASS_IS_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
+
+#define DEFINE_CLASS_IS_COND_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
-#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ DEFINE_CLASS_IS_GUARD(_name)
+
+#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
- ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
class_##_name##_t _T) \
- static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
- { return class_##_name##_lock_ptr(_T); }
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+/*
+ * Default binary condition; success on 'true'.
+ */
+#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
+ DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
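+
+/*
+ * Example (sketch): the 3-argument form suits trylock-style primitives
+ * that return nonzero on success, while the 4-argument form takes an
+ * explicit success condition for primitives such as
+ * mutex_lock_interruptible() that return 0 on success:
+ *
+ *	DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+ *	DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+ */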
#define guard(_name) \
CLASS(_name, __UNIQUE_ID(guard))
#define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __guard_err(_name) class_##_name##_lock_err
+#define __is_cond_ptr(_name) class_##_name##_is_conditional
+
+#define ACQUIRE(_name, _var) CLASS(_name, _var)
+#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)
+
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition lets the
+ * compiler be sure that, for unconditional locks, the body of the loop
+ * (caller-provided code glued to the else clause) cannot be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is
+ * too hard for the compiler to deduce (even though it could be proven true
+ * for unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+ for (CLASS(_name, scope)(args); \
+ __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+ ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_guard(_name, args...) \
+ __scoped_guard(_name, __UNIQUE_ID(label), args)
-#define scoped_guard(_name, args...) \
- for (CLASS(_name, scope)(args), \
- *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+ for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+ if (!__guard_ptr(_name)(&scope)) { \
+ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+ _fail; \
+_label: \
+ break; \
+ } else
-#define scoped_cond_guard(_name, _fail, args...) \
- for (CLASS(_name, scope)(args), \
- *done = NULL; !done; done = (void *)1) \
- if (!__guard_ptr(_name)(&scope)) _fail; \
- else
+#define scoped_cond_guard(_name, _fail, args...) \
+ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
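+
+/*
+ * A usage sketch (assuming a conditional 'mutex_intr' guard class):
+ *
+ *	scoped_cond_guard(mutex_intr, return -EINTR, &lock) {
+ *		// the lock is held here
+ *	}
+ */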
/*
* Additional helper macros for generating lock guards with types, either for
@@ -201,19 +479,15 @@ typedef struct { \
__VA_ARGS__; \
} class_##_name##_t; \
\
-static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
{ \
- if (_T->lock) { _unlock; } \
+ if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
} \
\
-static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
-{ \
- return _T->lock; \
-}
-
+__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
-static inline class_##_name##_t class_##_name##_constructor(_type *l) \
+static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
{ \
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
_lock; \
@@ -221,7 +495,7 @@ static inline class_##_name##_t class_##_name##_constructor(_type *l) \
}
#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
-static inline class_##_name##_t class_##_name##_constructor(void) \
+static __always_inline class_##_name##_t class_##_name##_constructor(void) \
{ \
class_##_name##_t _t = { .lock = (void*)1 }, \
*_T __maybe_unused = &_t; \
@@ -230,21 +504,31 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
}
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
__DEFINE_LOCK_GUARD_0(_name, _lock)
-#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
EXTEND_CLASS(_name, _ext, \
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
- if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ int _RET = (_lock); \
+ if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
_t; }), \
typeof_member(class_##_name##_t, lock) l) \
- static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
- { return class_##_name##_lock_ptr(_T); }
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
+ DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)
+#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
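+
+/*
+ * Example (sketch): extend a 'spinlock' lock guard with a trylock
+ * variant; within the lock expression, _T->lock is the pointer being
+ * guarded:
+ *
+ *	DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+ */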
-#endif /* __LINUX_GUARDS_H */
+#endif /* _LINUX_CLEANUP_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4a537260f655..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -394,6 +394,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
NULL, (flags), (fixed_rate), 0, 0, true)
/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
+/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
* @dev: device that is registering this clock
@@ -609,6 +623,24 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
NULL, (flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
/**
+ * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
* devm_clk_hw_register_gate_parent_data - register a gate clock with the
* clock framework
* @dev: device that is registering this clock
@@ -675,13 +707,15 @@ struct clk_div_table {
* CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
* for the divider register. Setting this flag makes the register accesses
* big endian.
+ * CLK_DIVIDER_EVEN_INTEGERS - clock divisor is 2, 4, 6, 8, 10, etc.
+ * Formula is 2 * (value read from hardware + 1).
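+ *   E.g. a register value of 0 selects a divisor of 2, a value of 1
+ *   selects 4, and so on.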
*/
struct clk_divider {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
- u8 flags;
+ u16 flags;
const struct clk_div_table *table;
spinlock_t *lock;
};
@@ -697,6 +731,7 @@ struct clk_divider {
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
#define CLK_DIVIDER_BIG_ENDIAN BIT(7)
+#define CLK_DIVIDER_EVEN_INTEGERS BIT(8)
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
@@ -726,19 +761,21 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
/**
* clk_register_divider - register a divider clock with the clock framework
* @dev: device registering this clock
@@ -1123,6 +1160,9 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *
struct device_node *np, const char *name, const char *fw_name,
unsigned long flags, unsigned int mult, unsigned int div,
unsigned long acc);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
@@ -1320,6 +1360,32 @@ void clk_hw_unregister(struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as those registered with
+ * CLK_OF_DECLARE(), may not have provided a device pointer while
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
+ */
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw);
#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
#else
@@ -1346,7 +1412,6 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw);
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
bool clk_hw_is_prepared(const struct clk_hw *hw);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0fa56d672532..b607482ca77e 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -496,11 +496,13 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
- * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed)
+ * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
- * Returns success (0) or negative errno.
+ * Returns the number of clocks obtained (a positive value); the clock
+ * references are stored in the clk_bulk_data table pointed to by @clks.
+ * Returns 0 if there are none, and a negative errno if something failed.
*
* This helper function allows drivers to get all clocks of the
* consumer and enables them in one operation with management.
@@ -508,8 +510,8 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
* is unbound.
*/
-int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
- struct clk_bulk_data **clks);
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
@@ -641,6 +643,32 @@ struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
+ *
+ * The returned clk (if valid) is prepared and enabled, and its rate has
+ * been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
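+
+/*
+ * A usage sketch (hypothetical "ref" consumer ID and rate):
+ *
+ *	struct clk *clk;
+ *
+ *	clk = devm_clk_get_optional_enabled_with_rate(dev, "ref", 25000000);
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ */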
+
+/**
* devm_get_clk_from_child - lookup and obtain a managed reference to a
* clock producer from child node.
* @dev: device for clock "consumer"
@@ -982,6 +1010,13 @@ static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
return NULL;
}
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -1001,7 +1036,7 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
return 0;
}
-static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
+static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
struct clk_bulk_data **clks)
{
return 0;
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 7af499bdbecb..d60ce9708ea2 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -47,8 +47,6 @@
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
-#define AT91_PMC_PLL_ACR_DEFAULT_UPLL UL(0x12020010) /* Default PLL ACR value for UPLL */
-#define AT91_PMC_PLL_ACR_DEFAULT_PLLA UL(0x00020010) /* Default PLL ACR value for PLLA */
#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
index e1d37451e03f..787a81116b00 100644
--- a/include/linux/clk/davinci.h
+++ b/include/linux/clk/davinci.h
@@ -12,12 +12,6 @@
#include <linux/regmap.h>
/* function for registering clocks in early boot */
-
-#ifdef CONFIG_ARCH_DAVINCI_DA830
-int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 0ebbe2f0b45e..69d8159deee3 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -10,7 +10,9 @@
#ifndef __LINUX_CLK_RENESAS_H_
#define __LINUX_CLK_RENESAS_H_
+#include <linux/clk-provider.h>
#include <linux/types.h>
+#include <linux/units.h>
struct device;
struct device_node;
@@ -32,4 +34,147 @@ void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev);
#define cpg_mssr_attach_dev NULL
#define cpg_mssr_detach_dev NULL
#endif
+
+/**
+ * struct rzv2h_pll_limits - PLL parameter constraints
+ *
+ * This structure defines the minimum and maximum allowed values for
+ * various parameters used to configure a PLL. These limits ensure
+ * the PLL operates within valid and stable ranges.
+ *
+ * @fout: Output frequency range (in MHz)
+ * @fout.min: Minimum allowed output frequency
+ * @fout.max: Maximum allowed output frequency
+ *
+ * @fvco: PLL oscillation frequency range (in MHz)
+ * @fvco.min: Minimum allowed VCO frequency
+ * @fvco.max: Maximum allowed VCO frequency
+ *
+ * @m: Main-divider range
+ * @m.min: Minimum main-divider value
+ * @m.max: Maximum main-divider value
+ *
+ * @p: Pre-divider range
+ * @p.min: Minimum pre-divider value
+ * @p.max: Maximum pre-divider value
+ *
+ * @s: Divider range
+ * @s.min: Minimum divider value
+ * @s.max: Maximum divider value
+ *
+ * @k: Delta-sigma modulator range (signed)
+ * @k.min: Minimum delta-sigma value
+ * @k.max: Maximum delta-sigma value
+ */
+struct rzv2h_pll_limits {
+ struct {
+ u32 min;
+ u32 max;
+ } fout;
+
+ struct {
+ u32 min;
+ u32 max;
+ } fvco;
+
+ struct {
+ u16 min;
+ u16 max;
+ } m;
+
+ struct {
+ u8 min;
+ u8 max;
+ } p;
+
+ struct {
+ u8 min;
+ u8 max;
+ } s;
+
+ struct {
+ s16 min;
+ s16 max;
+ } k;
+};
+
+/**
+ * struct rzv2h_pll_pars - PLL configuration parameters
+ *
+ * This structure contains the configuration parameters for the
+ * Phase-Locked Loop (PLL), used to achieve a specific output frequency.
+ *
+ * @m: Main divider value
+ * @p: Pre-divider value
+ * @s: Output divider value
+ * @k: Delta-sigma modulation value
+ * @freq_millihz: Calculated PLL output frequency in millihertz
+ * @error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_pars {
+ u16 m;
+ u8 p;
+ u8 s;
+ s16 k;
+ u64 freq_millihz;
+ s64 error_millihz;
+};
+
+/**
+ * struct rzv2h_pll_div_pars - PLL parameters with post-divider
+ *
+ * This structure is used for PLLs that include an additional post-divider
+ * stage after the main PLL block. It contains both the PLL configuration
+ * parameters and the resulting frequency/error values after the divider.
+ *
+ * @pll: Main PLL configuration parameters (see struct rzv2h_pll_pars)
+ *
+ * @div: Post-divider configuration and result
+ * @div.divider_value: Divider applied to the PLL output
+ * @div.freq_millihz: Output frequency after divider in millihertz
+ * @div.error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_div_pars {
+ struct rzv2h_pll_pars pll;
+ struct {
+ u8 divider_value;
+ u64 freq_millihz;
+ s64 error_millihz;
+ } div;
+};
+
+#define RZV2H_CPG_PLL_DSI_LIMITS(name) \
+ static const struct rzv2h_pll_limits (name) = { \
+ .fout = { .min = 25 * MEGA, .max = 375 * MEGA }, \
+ .fvco = { .min = 1600 * MEGA, .max = 3200 * MEGA }, \
+ .m = { .min = 64, .max = 533 }, \
+ .p = { .min = 1, .max = 4 }, \
+ .s = { .min = 0, .max = 6 }, \
+ .k = { .min = -32768, .max = 32767 }, \
+ } \
+
+#ifdef CONFIG_CLK_RZV2H
+bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars, u64 freq_millihz);
+
+bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size, u64 freq_millihz);
+#else
+static inline bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars,
+ u64 freq_millihz)
+{
+ return false;
+}
+
+static inline bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size,
+ u64 freq_millihz)
+{
+ return false;
+}
+#endif
+
#endif
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index e656f63efdce..54a3fa370004 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -34,14 +34,14 @@ struct clk_omap_reg {
* @clk_ref: struct clk_hw pointer to the clock's reference clock input
* @control_reg: register containing the DPLL mode bitfield
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
- * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
- * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_determine_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_determine_rate()
* @last_rounded_m4xen: cache of the last M4X result of
- * omap4_dpll_regm4xen_round_rate()
+ * omap4_dpll_regm4xen_determine_rate()
* @last_rounded_lpmode: cache of the last lpmode result of
* omap4_dpll_lpmode_recalc()
* @max_multiplier: maximum valid non-bypass multiplier value (actual)
- * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @last_rounded_n: cache of the last N result of omap2_dpll_determine_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
* @max_divider: maximum valid non-bypass divider value (actual)
* @max_rate: maximum clock rate for the DPLL
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 9aac31d856f3..b0df28ddd394 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -12,7 +12,7 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
# include <linux/clocksource.h>
-# include <linux/cpumask.h>
+# include <linux/cpumask_types.h>
# include <linux/ktime.h>
# include <linux/notifier.h>
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 0ad8b550bb4b..65b7c41471c3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -21,6 +21,7 @@
#include <asm/div64.h>
#include <asm/io.h>
+struct clocksource_base;
struct clocksource;
struct module;
@@ -48,8 +49,10 @@ struct module;
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
+ * @max_raw_delta: Maximum safe delta value for negative motion detection
* @name: Pointer to clocksource name
* @list: List head for registration (internal)
+ * @freq_khz:		Clocksource frequency in kHz.
* @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
@@ -70,6 +73,8 @@ struct module;
* validate the clocksource from which the snapshot was
* taken.
* @flags: Flags describing special properties
+ * @base: Hardware abstraction for clock on which a clocksource
+ * is based
* @enable: Optional function to enable the clocksource
* @disable: Optional function to disable the clocksource
* @suspend: Optional suspend function for the clocksource
@@ -105,12 +110,15 @@ struct clocksource {
struct arch_clocksource_data archdata;
#endif
u64 max_cycles;
+ u64 max_raw_delta;
const char *name;
struct list_head list;
+ u32 freq_khz;
int rating;
enum clocksource_ids id;
enum vdso_clock_mode vdso_clock_mode;
unsigned long flags;
+ struct clocksource_base *base;
int (*enable)(struct clocksource *cs);
void (*disable)(struct clocksource *cs);
@@ -209,7 +217,6 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
@@ -306,4 +313,25 @@ static inline unsigned int clocksource_get_max_watchdog_retry(void)
void clocksource_verify_percpu(struct clocksource *cs);
+/**
+ * struct clocksource_base - hardware abstraction for clock on which a clocksource
+ * is based
+ * @id:		Defaults to CSID_GENERIC. The id value is used by conversion
+ *		and snapshot functions that require the current clocksource to
+ *		be based on a clocksource_base with a particular ID, so that
+ *		callers can validate the clocksource from which the snapshot
+ *		was taken.
+ * @freq_khz: Nominal frequency of the base clock in kHz
+ * @offset: Offset between the base clock and the clocksource
+ * @numerator: Numerator of the clock ratio between base clock and the clocksource
+ * @denominator: Denominator of the clock ratio between base clock and the clocksource
+ */
+struct clocksource_base {
+ enum clocksource_ids id;
+ u32 freq_khz;
+ u64 offset;
+ u32 numerator;
+ u32 denominator;
+};
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
index a4fa3436940c..c4ef4ae2eded 100644
--- a/include/linux/clocksource_ids.h
+++ b/include/linux/clocksource_ids.h
@@ -6,9 +6,11 @@
enum clocksource_ids {
CSID_GENERIC = 0,
CSID_ARM_ARCH_COUNTER,
+ CSID_S390_TOD,
CSID_X86_TSC_EARLY,
CSID_X86_TSC,
CSID_X86_KVM_CLK,
+ CSID_X86_ART,
CSID_MAX,
};
diff --git a/include/linux/closure.h b/include/linux/closure.h
index 99155df162d0..880fe85e35e9 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -159,6 +159,7 @@ struct closure {
#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+#define CLOSURE_MAGIC_STACK 0xc05451cc
unsigned int magic;
struct list_head all;
@@ -285,6 +286,21 @@ static inline void closure_get(struct closure *cl)
}
/**
+ * closure_get_not_zero - increment a closure's refcount unless it is zero
+ * @cl: closure to get a reference on
+ *
+ * Return: true if a reference was taken, false if the refcount was already
+ * zero.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+ unsigned old = atomic_read(&cl->remaining);
+ do {
+ if (!(old & CLOSURE_REMAINING_MASK))
+ return false;
+
+ } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+ return true;
+}
+
+/**
* closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
@@ -308,6 +324,18 @@ static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+static inline void closure_init_stack_release(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
}
/**
@@ -355,6 +383,8 @@ do { \
*/
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+void closure_return_sync(struct closure *cl);
+
/**
* continue_at_nobarrier - jump to another function without barrier
*
@@ -424,4 +454,39 @@ do { \
__closure_wait_event(waitlist, _cond); \
} while (0)
+#define __closure_wait_event_timeout(waitlist, _cond, _until) \
+({ \
+ struct closure cl; \
+ long _t; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) { \
+ _t = max_t(long, 1L, _until - jiffies); \
+ break; \
+ } \
+ _t = max_t(long, 0L, _until - jiffies); \
+ if (!_t) \
+ break; \
+ closure_sync_timeout(&cl, _t); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+ _t; \
+})
+
+/*
+ * Returns 0 if the timeout expired, or the remaining time in jiffies
+ * (at least 1) if the condition became true.
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout) \
+({ \
+ unsigned long _until = jiffies + _timeout; \
+ (_cond) \
+ ? max_t(long, 1L, _until - jiffies) \
+ : __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
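+
+/*
+ * A usage sketch (hypothetical @waitlist and @done flag): wait up to one
+ * second for @done to become true:
+ *
+ *	long t = closure_wait_event_timeout(&waitlist, READ_ONCE(done), HZ);
+ *	if (!t)
+ *		return -ETIMEDOUT;
+ */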
+
#endif /* _LINUX_CLOSURE_H */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 9db877506ea8..62d9c1cf6326 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -40,6 +40,9 @@ static inline int __init cma_declare_contiguous(phys_addr_t base,
return cma_declare_contiguous_nid(base, size, limit, alignment,
order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
+extern int __init cma_declare_contiguous_multi(phys_addr_t size,
+ phys_addr_t align, unsigned int order_per_bit,
+ const char *name, struct cma **res_cma, int nid);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
@@ -50,6 +53,28 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
+#endif
+
#endif
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index c2a579ccd455..8ea2a5f7c98a 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -13,13 +13,19 @@ struct codetag_module;
struct seq_buf;
struct module;
+#define CODETAG_SECTION_START_PREFIX "__start_"
+#define CODETAG_SECTION_STOP_PREFIX "__stop_"
+
+/* codetag flags */
+#define CODETAG_FLAG_INACCURATE (1 << 0)
+
/*
* An instance of this structure is created in a special ELF section at every
* code location being tagged. At runtime, the special section is treated as
* an array of these.
*/
struct codetag {
- unsigned int flags; /* used in later patches */
+ unsigned int flags;
unsigned int lineno;
const char *modname;
const char *function;
@@ -33,10 +39,17 @@ union codetag_ref {
struct codetag_type_desc {
const char *section;
size_t tag_size;
- void (*module_load)(struct codetag_type *cttype,
- struct codetag_module *cmod);
- bool (*module_unload)(struct codetag_type *cttype,
- struct codetag_module *cmod);
+ int (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+#ifdef CONFIG_MODULES
+ void (*module_replaced)(struct module *mod, struct module *new_mod);
+ bool (*needs_section_mem)(struct module *mod, unsigned long size);
+ void *(*alloc_section_mem)(struct module *mod, unsigned long size,
+ unsigned int prepend, unsigned long align);
+ void (*free_section_mem)(struct module *mod, bool used);
+#endif
};
struct codetag_iterator {
@@ -44,6 +57,7 @@ struct codetag_iterator {
struct codetag_module *cmod;
unsigned long mod_id;
struct codetag *ct;
+ unsigned long mod_seq;
};
#ifdef MODULE
@@ -71,11 +85,31 @@ struct codetag_type *
codetag_register_type(const struct codetag_type_desc *desc);
#if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES)
-void codetag_load_module(struct module *mod);
-bool codetag_unload_module(struct module *mod);
-#else
-static inline void codetag_load_module(struct module *mod) {}
-static inline bool codetag_unload_module(struct module *mod) { return true; }
-#endif
+
+bool codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size);
+void *codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align);
+void codetag_free_module_sections(struct module *mod);
+void codetag_module_replaced(struct module *mod, struct module *new_mod);
+int codetag_load_module(struct module *mod);
+void codetag_unload_module(struct module *mod);
+
+#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
+
+static inline bool
+codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size) { return false; }
+static inline void *
+codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align) { return NULL; }
+static inline void codetag_free_module_sections(struct module *mod) {}
+static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {}
+static inline int codetag_load_module(struct module *mod) { return 0; }
+static inline void codetag_unload_module(struct module *mod) {}
+
+#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
#endif /* _LINUX_CODETAG_H */
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index c08416a7364b..35fdc41845ce 100644
--- a/include/linux/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -15,6 +15,7 @@
#include <linux/spinlock_types.h>
#include <linux/rwsem.h>
#include <linux/kref.h>
+#include <linux/completion.h>
#include <linux/comedi.h>
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
@@ -234,16 +235,12 @@ struct comedi_buf_page {
*
* A COMEDI data buffer is allocated as individual pages, either in
* conventional memory or DMA coherent memory, depending on the attached,
- * low-level hardware device. (The buffer pages also get mapped into the
- * kernel's contiguous virtual address space pointed to by the 'prealloc_buf'
- * member of &struct comedi_async.)
+ * low-level hardware device.
*
* The buffer is normally freed when the COMEDI device is detached from the
* low-level driver (which may happen due to device removal), but if it happens
* to be mmapped at the time, the pages cannot be freed until the buffer has
- * been munmapped. That is what the reference counter is for. (The virtual
- * address space pointed by 'prealloc_buf' is freed when the COMEDI device is
- * detached.)
+ * been munmapped. That is what the reference counter is for.
*/
struct comedi_buf_map {
struct device *dma_hw_dev;
@@ -255,7 +252,6 @@ struct comedi_buf_map {
/**
* struct comedi_async - Control data for asynchronous COMEDI commands
- * @prealloc_buf: Kernel virtual address of allocated acquisition buffer.
* @prealloc_bufsz: Buffer size (in bytes).
* @buf_map: Map of buffer pages.
* @max_bufsize: Maximum allowed buffer size (in bytes).
@@ -277,6 +273,8 @@ struct comedi_buf_map {
* @events: Bit-vector of events that have occurred.
* @cmd: Details of comedi command in progress.
* @wait_head: Task wait queue for file reader or writer.
+ * @run_complete: "run complete" completion event.
+ * @run_active: "run active" reference counter.
* @cb_mask: Bit-vector of events that should wake waiting tasks.
* @inttrig: Software trigger function for command, or NULL.
*
@@ -344,7 +342,6 @@ struct comedi_buf_map {
* less than or equal to UINT_MAX).
*/
struct comedi_async {
- void *prealloc_buf;
unsigned int prealloc_bufsz;
struct comedi_buf_map *buf_map;
unsigned int max_bufsize;
@@ -363,6 +360,8 @@ struct comedi_async {
unsigned int events;
struct comedi_cmd cmd;
wait_queue_head_t wait_head;
+ struct completion run_complete;
+ refcount_t run_active;
unsigned int cb_mask;
int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int x);
@@ -590,6 +589,8 @@ struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
int comedi_dev_put(struct comedi_device *dev);
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+bool comedi_get_is_subdevice_running(struct comedi_subdevice *s);
+void comedi_put_is_subdevice_running(struct comedi_subdevice *s);
void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h
index 0223c9cd9215..1f2b22b383cc 100644
--- a/include/linux/comedi/comedilib.h
+++ b/include/linux/comedi/comedilib.h
@@ -10,8 +10,38 @@
#ifndef _LINUX_COMEDILIB_H
#define _LINUX_COMEDILIB_H
-struct comedi_device *comedi_open(const char *path);
-int comedi_close(struct comedi_device *dev);
+struct comedi_device *comedi_open_from(const char *path, int from);
+
+/**
+ * comedi_open() - Open a COMEDI device from the kernel
+ * @path: Fake pathname of the form "/dev/comediN".
+ *
+ * Converts @path to a COMEDI device number and "opens" it if it exists
+ * and is attached to a low-level COMEDI driver.
+ *
+ * Return: A pointer to the COMEDI device on success, or %NULL on
+ * failure.
+ */
+static inline struct comedi_device *comedi_open(const char *path)
+{
+ return comedi_open_from(path, -1);
+}
+
+int comedi_close_from(struct comedi_device *dev, int from);
+
+/**
+ * comedi_close() - Close a COMEDI device from the kernel
+ * @dev: COMEDI device.
+ *
+ * Closes a COMEDI device previously opened by comedi_open().
+ *
+ * Return: 0.
+ */
+static inline int comedi_close(struct comedi_device *dev)
+{
+ return comedi_close_from(dev, -1);
+}
+
int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int *io);
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index e94776496049..173d9c07a895 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -80,6 +80,11 @@ static inline unsigned long compact_gap(unsigned int order)
return 2UL << order;
}
+static inline int current_is_kcompactd(void)
+{
+ return current->flags & PF_KCOMPACTD;
+}
+
#ifdef CONFIG_COMPACTION
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
@@ -90,7 +95,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern bool compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx);
+ unsigned long watermark, int highest_zoneidx);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
@@ -108,7 +113,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
}
static inline bool compaction_suitable(struct zone *zone, int order,
- int highest_zoneidx)
+ unsigned long watermark,
+ int highest_zoneidx)
{
return false;
}
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 233f61ec8afc..56cebaff0c91 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -608,7 +608,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 4c1a39dcb624..107ce05bd16e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
+#error "Please do not include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
/* Compiler specific definitions for Clang compiler */
@@ -18,23 +18,42 @@
#define KASAN_ABI_VERSION 5
/*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Define these macros for older versions of clang so that this block is
+ * easy to remove once the minimum supported version of LLVM for building
+ * the kernel always defines them.
+ *
* Note: Checking __has_feature(*_sanitizer) is only true if the feature is
* enabled. Therefore it is not required to additionally check defined(CONFIG_*)
* to avoid adding redundant attributes in other configurations.
*/
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
+#define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
#else
#define __no_sanitize_address
#endif
-#if __has_feature(thread_sanitizer)
-/* emulate gcc's __SANITIZE_THREAD__ flag */
-#define __SANITIZE_THREAD__
+#ifdef __SANITIZE_THREAD__
#define __no_sanitize_thread \
__attribute__((no_sanitize("thread")))
#else
@@ -89,15 +108,13 @@
#define __no_sanitize_coverage
#endif
+/* Only Clang needs to disable the coverage sanitizer for kstack_erase. */
+#define __no_kstack_erase __no_sanitize_coverage
+
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#if __has_feature(kcfi)
-/* Disable CFI checking inside a function. */
-#define __nocfi __attribute__((__no_sanitize__("kcfi")))
-#endif
-
/*
* Turn individual warnings and errors on and off locally, depending
* on version.
@@ -128,3 +145,11 @@
*/
#define ASM_INPUT_G "ir"
#define ASM_INPUT_RM "r"
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index aff92b1d284f..5de824a0b3d7 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#error "Please do not include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
/*
@@ -35,10 +35,6 @@
(typeof(ptr)) (__ptr + (off)); \
})
-#ifdef CONFIG_MITIGATION_RETPOLINE
-#define __noretpoline __attribute__((__indirect_branch__("keep")))
-#endif
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -52,38 +48,6 @@
*/
#define barrier_before_unreachable() asm volatile("")
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- */
-#define unreachable() \
- do { \
- annotate_unreachable(); \
- barrier_before_unreachable(); \
- __builtin_unreachable(); \
- } while (0)
-
-/*
- * GCC 'asm goto' with outputs miscompiles certain code sequences:
- *
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
- *
- * Work around it via the same compiler barrier quirk that we used
- * to use for the old 'asm goto' workaround.
- *
- * Also, always mark such 'asm goto' statements as volatile: all
- * asm goto statements are supposed to be volatile as per the
- * documentation, but some versions of gcc didn't actually do
- * that for asms with outputs:
- *
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
- */
-#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
-#define asm_goto_output(x...) \
- do { asm volatile goto(x); asm (""); } while (0)
-#endif
-
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
@@ -100,7 +64,11 @@
#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
+#ifdef __SANITIZE_HWADDRESS__
+#define __no_sanitize_address __attribute__((__no_sanitize__("hwaddress")))
+#else
#define __no_sanitize_address __attribute__((__no_sanitize_address__))
+#endif
#if defined(__SANITIZE_THREAD__)
#define __no_sanitize_thread __attribute__((__no_sanitize_thread__))
@@ -155,6 +123,8 @@
#define __diag_GCC_8(s)
#endif
+#define __diag_GCC_all(s) __diag(s)
+
#define __diag_ignore_all(option, comment) \
__diag(__diag_GCC_ignore option)
@@ -165,3 +135,11 @@
#if GCC_VERSION < 90100
#undef __alloc_size__
#endif
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14)
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
index 573fa85b6c0c..ac1665a98a15 100644
--- a/include/linux/compiler-version.h
+++ b/include/linux/compiler-version.h
@@ -12,3 +12,33 @@
* and add dependency on include/config/CC_VERSION_TEXT, which is touched
* by Kconfig when the version string from the compiler changes.
*/
+
+/* Additional tree-wide dependencies start here. */
+
+/*
+ * If any of the GCC plugins change, we need to rebuild everything that
+ * was built with them, as they may have changed their behavior and those
+ * behaviors may need to be synchronized across all translation units.
+ */
+#ifdef GCC_PLUGINS
+#include <generated/gcc-plugins.h>
+#endif
+
+/*
+ * If the randstruct seed itself changes (whether for GCC plugins or
+ * Clang), the entire tree needs to be rebuilt since the randomization of
+ * structures may change between compilation units if not.
+ */
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#endif
+
+/*
+ * If any external changes affect Clang's integer wrapping sanitizer
+ * behavior, a full rebuild is needed as the coverage for wrapping types
+ * may have changed, which may impact the expected behaviors that should
+ * not differ between compilation units.
+ */
+#ifdef INTEGER_WRAP
+#include <generated/integer-wrap.h>
+#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8c252e073bd8..04487c9bd751 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -109,44 +109,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
/* Unreachable code */
#ifdef CONFIG_OBJTOOL
-/*
- * These macros help objtool understand GCC code flow for unreachable code.
- * The __COUNTER__ based labels are a hack to make each instance of the macros
- * unique, to convince GCC not to merge duplicate inline asm statements.
- */
-#define __stringify_label(n) #n
-
-#define __annotate_reachable(c) ({ \
- asm volatile(__stringify_label(c) ":\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t"); \
-})
-#define annotate_reachable() __annotate_reachable(__COUNTER__)
-
-#define __annotate_unreachable(c) ({ \
- asm volatile(__stringify_label(c) ":\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
-})
-#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
-
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
-
+#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
#else /* !CONFIG_OBJTOOL */
-#define annotate_reachable()
-#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */
-#ifndef unreachable
-# define unreachable() do { \
- annotate_unreachable(); \
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ */
+#define unreachable() do { \
+ barrier_before_unreachable(); \
__builtin_unreachable(); \
} while (0)
-#endif
/*
* KENTRY - kernel entry point
@@ -186,7 +163,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__asm__ ("" : "=r" (var) : "0" (var))
#endif
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+/* Format: __UNIQUE_ID_<name>_<__COUNTER__> */
+#define __UNIQUE_ID(name) \
+ __PASTE(__UNIQUE_ID_, \
+ __PASTE(name, \
+ __PASTE(_, __COUNTER__)))
/**
* data_race - mark an expression as containing intentional data races
@@ -194,33 +175,92 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* This data_race() macro is useful for situations in which data races
* should be forgiven. One example is diagnostic code that accesses
* shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
*
* This macro *does not* affect normal code generation, but is a hint
- * to tooling that data races here are to be ignored.
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
*/
#define data_race(expr) \
({ \
- __unqual_scalar_typeof(({ expr; })) __v = ({ \
- __kcsan_disable_current(); \
- expr; \
- }); \
+ __kcsan_disable_current(); \
+ auto __v = (expr); \
__kcsan_enable_current(); \
__v; \
})
+#ifdef __CHECKER__
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
+#else /* __CHECKER__ */
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+#endif /* __CHECKER__ */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __is_array(a) (!__same_type((a), &(a)[0]))
+#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_array(a), \
+ "must be array")
+
+#define __is_byte_array(a) (__is_array(a) && sizeof((a)[0]) == 1)
+#define __must_be_byte_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
+ "must be byte array")
+
+/*
+ * If the "nonstring" attribute isn't available, we have to return true
+ * so the __must_*() checks pass when "nonstring" isn't supported.
+ */
+#if __has_attribute(__nonstring__) && defined(__annotated)
+#define __is_cstr(a) (!__annotated(a, nonstring))
+#define __is_noncstr(a) (__annotated(a, nonstring))
+#else
+#define __is_cstr(a) (true)
+#define __is_noncstr(a) (true)
+#endif
+
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p), \
+ "must be C-string (NUL-terminated)")
+#define __must_be_noncstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
+ "must be non-C-string (not NUL-terminated)")
+
+/*
+ * Use __typeof_unqual__() when available.
+ *
+ * XXX: Remove test for __CHECKER__ once
+ * sparse learns about __typeof_unqual__().
+ */
+#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
+/*
+ * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
+ * operator when available, to return an unqualified type of the exp.
+ */
+#if defined(USE_TYPEOF_UNQUAL)
+# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
+#else
+# define TYPEOF_UNQUAL(exp) __typeof__(exp)
+#endif
+
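/*
 * Illustrative sketch (not part of this patch): TYPEOF_UNQUAL(*p) is
 * plain "int" when __typeof_unqual__ is available, so the local copy
 * drops the const/volatile qualifiers of the pointed-to object.
 */
static inline int snapshot(const volatile int *p)
{
	TYPEOF_UNQUAL(*p) v = *p;

	return v;
}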
#endif /* __KERNEL__ */
+#if defined(CONFIG_CFI) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
- * Force the compiler to emit 'sym' as a symbol, so that we can reference
- * it from inline assembler. Necessary in case 'sym' could be inlined
- * otherwise, or eliminated entirely due to lack of references that are
- * visible to the compiler.
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typid.
*/
-#define ___ADDRESSABLE(sym, __attrs) \
- static void * __used __attrs \
- __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
-#define __ADDRESSABLE(sym) \
- ___ADDRESSABLE(sym, __section(".discard.addressable"))
+#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
+#else
+#define KCFI_REFERENCE(sym)
+#endif
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
@@ -233,8 +273,18 @@ static inline void *offset_to_ptr(const int *off)
#endif /* __ASSEMBLY__ */
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+/*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
+ */
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(addressable_, sym)) = (void *)(uintptr_t)&sym;
+
+#define __ADDRESSABLE(sym) \
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
/*
* This returns a constant expression while determining if an argument is
@@ -291,6 +341,37 @@ static inline void *offset_to_ptr(const int *off)
#define is_unsigned_type(type) (!is_signed_type(type))
/*
+ * Useful shorthand for "is this condition known at compile-time?"
+ *
+ * Note that the condition may involve non-constant values,
+ * but the compiler may know enough about the details of the
+ * values to determine that the condition is statically true.
+ */
+#define statically_true(x) (__builtin_constant_p(x) && (x))
+
+/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile-time constants. Otherwise it would always return false, even
+ * for trivial cases like:
+ *
+ *   true || non_const_var
+ *
+ * By contrast, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ * !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 32284cd26d52..c16d4199bf92 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -95,19 +95,6 @@
#endif
/*
- * Optional: only supported since gcc >= 15
- * Optional: only supported since clang >= 18
- *
- * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
- * clang: https://github.com/llvm/llvm-project/pull/76348
- */
-#if __has_attribute(__counted_by__)
-# define __counted_by(member) __attribute__((__counted_by__(member)))
-#else
-# define __counted_by(member)
-#endif
-
-/*
* Optional: not supported by gcc
* Optional: only supported since clang >= 14.0
*
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 93600de3800b..1280693766b9 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -11,9 +11,26 @@
#define __has_builtin(x) (0)
#endif
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+
#ifndef __ASSEMBLY__
/*
+ * C23 introduces "auto" as a standard way to define type-inferred
+ * variables, but "auto" has been a (useless) keyword ever since K&R C,
+ * so it has always been "namespace reserved."
+ *
+ * Until some future time when we require C23 support, we need the gcc
+ * extension __auto_type; defining the mapping here means __auto_type
+ * never needs to appear elsewhere in the source code.
+ */
+#if __STDC_VERSION__ < 202311L
+# define auto __auto_type
+#endif
+
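/*
 * Illustrative sketch (not part of this patch): with the mapping above,
 * this compiles the same way under both C11 (__auto_type) and C23 (auto).
 */
static inline unsigned long demo_halve(unsigned long x)
{
	auto half = x / 2;	/* inferred as unsigned long */

	return half;
}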
+/*
* Skipped when running bindgen due to a libclang issue;
* see https://github.com/rust-lang/rust-bindgen/issues/2244.
*/
@@ -57,7 +74,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
-# define __percpu BTF_TYPE_TAG(percpu)
+# define __percpu __percpu_qual BTF_TYPE_TAG(percpu)
# define __rcu BTF_TYPE_TAG(rcu)
# define __chk_user_ptr(x) (void)0
@@ -79,10 +96,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
-
#ifdef __KERNEL__
/* Attributes */
@@ -143,6 +156,29 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
+#endif
+
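/*
 * Illustrative sketch (not part of this patch; names hypothetical): a
 * symbol consumed only by an external tool or inline asm. __used stops
 * the compiler from discarding it; __retain (SHF_GNU_RETAIN) stops the
 * linker doing the same during --gc-sections or LTO.
 */
static const char demo_tag[8] __used __retain = "demo";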
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -227,10 +263,9 @@ struct ftrace_likely_data {
/*
* GCC does not warn about unused static inline functions for -Wunused-function.
* Suppress the warning in clang as well by using __maybe_unused, but enable it
- * for W=1 build. This will allow clang to find unused functions. Remove the
- * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
+ * for W=2 build. This will allow clang to find unused functions.
*/
-#ifdef KBUILD_EXTRA_WARN1
+#ifdef KBUILD_EXTRA_WARN2
#define __inline_maybe_unused
#else
#define __inline_maybe_unused __maybe_unused
@@ -243,6 +278,12 @@ struct ftrace_likely_data {
#define noinline_for_stack noinline
/*
+ * Use noinline_for_tracing for functions that should not be inlined
+ * for tracing reasons.
+ */
+#define noinline_for_tracing noinline
+
+/*
* Sanitizer helper attributes: Because using __always_inline and
* __no_sanitize_* conflict, provide helper attributes that will either expand
* to __no_sanitize_* in compilation units where instrumentation is enabled
@@ -301,6 +342,60 @@ struct ftrace_likely_data {
#endif
/*
+ * The assume attribute is used to indicate that a certain condition is
+ * assumed to be true. If this condition is violated at runtime, the behavior
+ * is undefined. Compilers may or may not use this indication to generate
+ * optimized code.
+ *
+ * Note that the clang documentation states that optimizers may react
+ * differently to this attribute, and this may even have a negative
+ * performance impact. Therefore this attribute should be used with care.
+ *
+ * Optional: only supported since gcc >= 13
+ * Optional: only supported since clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#index-assume-statement-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id13
+ */
+#ifdef CONFIG_CC_HAS_ASSUME
+# define __assume(expr) __attribute__((__assume__(expr)))
+#else
+# define __assume(expr)
+#endif
+
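/*
 * Illustrative sketch (not part of this patch): promising the optimizer
 * an invariant. If the assumption is ever false at runtime the behavior
 * is undefined, so only encode conditions that truly hold.
 */
static inline unsigned int demo_div(unsigned int a, unsigned int b)
{
	__assume(b != 0);	/* lets the compiler drop div-by-zero paths */
	return a / b;
}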
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://github.com/llvm/llvm-project/pull/76348
+ *
+ * __bdos on clang < 19.1.2 can erroneously return 0:
+ * https://github.com/llvm/llvm-project/pull/110497
+ *
+ * __bdos on clang < 19.1.3 can be off by 4:
+ * https://github.com/llvm/llvm-project/pull/112636
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
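/*
 * Illustrative sketch (not part of this patch; struct name hypothetical):
 * tying a flexible array to its length member so FORTIFY_SOURCE and
 * __builtin_dynamic_object_size() can bound accesses at runtime.
 */
struct demo_pkt {
	unsigned int	len;
	unsigned char	data[] __counted_by(len);
};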
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: not supported by Clang
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=117178
+ */
+#ifdef CONFIG_CC_HAS_MULTIDIMENSIONAL_NONSTRING
+# define __nonstring_array __attribute__((__nonstring__))
+#else
+# define __nonstring_array
+#endif
+
+/*
* Apply __counted_by() when the Endianness matches to increase test coverage.
*/
#ifdef __LITTLE_ENDIAN
@@ -311,8 +406,23 @@ struct ftrace_likely_data {
#define __counted_by_be(member) __counted_by(member)
#endif
+/*
+ * This designates the minimum number of elements a passed array parameter must
+ * have. For example:
+ *
+ * void some_function(u8 param[at_least 7]);
+ *
+ * If a caller passes an array with fewer than 7 elements, the compiler will
+ * emit a warning.
+ */
+#ifndef __CHECKER__
+#define at_least static
+#else
+#define at_least
+#endif
+
/* Do not trap wrapping arithmetic within an annotated function. */
-#ifdef CONFIG_UBSAN_SIGNED_WRAP
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
#else
# define __signed_wrap
@@ -364,14 +474,26 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef __no_kstack_erase
+# define __no_kstack_erase
+#endif
+
#ifndef __noscs
# define __noscs
#endif
-#ifndef __nocfi
+#if defined(CONFIG_CFI)
+# define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#else
# define __nocfi
#endif
+#if defined(CONFIG_ARCH_USES_CFI_GENERIC_LLVM_PASS)
+# define __nocfi_generic __nocfi
+#else
+# define __nocfi_generic
+#endif
+
/*
* Any place that could be marked with the "alloc_size" attribute is also
* a place to be marked with the "malloc" attribute, except those that may
@@ -389,6 +511,11 @@ struct ftrace_likely_data {
/*
* When the size of an allocated object is needed, use the best available
* mechanism to find it. (For cases where sizeof() cannot be used.)
+ *
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#evaluating-object-size
*/
#if __has_builtin(__builtin_dynamic_object_size)
#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
@@ -399,6 +526,16 @@ struct ftrace_likely_data {
#endif
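/*
 * Illustrative sketch (not part of this patch): __struct_size() yields a
 * real bound when the compiler can track the allocation (for example a
 * __counted_by flexible array), and SIZE_MAX otherwise.
 */
static inline size_t demo_obj_bound(const void *p)
{
	return __struct_size(p);
}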
/*
+ * Determine if an attribute has been applied to a variable.
+ * Using __annotated needs to check for __annotated being available,
+ * or negative tests may fail when annotation cannot be checked. For
+ * example, see the definition of __is_cstr().
+ */
+#if __has_builtin(__builtin_has_attribute)
+#define __annotated(var, attr) __builtin_has_attribute(var, attr)
+#endif
+
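/*
 * Illustrative sketch (not part of this patch): given
 *
 *	static char name[16];			// C-string
 *	static char uuid[16] __nonstring;	// raw bytes, no NUL
 *
 * __annotated(uuid, nonstring) is 1 and __annotated(name, nonstring) is 0,
 * which is what __is_cstr()/__is_noncstr() build on.
 */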
+/*
* Some versions of gcc do not mark 'asm goto' volatile:
*
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
@@ -455,6 +592,12 @@ struct ftrace_likely_data {
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#ifdef __OPTIMIZE__
+/*
+ * #ifdef __OPTIMIZE__ is only a good approximation; for instance "make
+ * CFLAGS_foo.o=-Og" defines __OPTIMIZE__, does not elide the conditional code
+ * and can break compilation with wrong error message(s). Combine with
+ * -U__OPTIMIZE__ when needed.
+ */
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
/* \
@@ -468,7 +611,7 @@ struct ftrace_likely_data {
prefix ## suffix(); \
} while (0)
#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+# define __compiletime_assert(condition, msg, prefix, suffix) ((void)(condition))
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
diff --git a/include/linux/component.h b/include/linux/component.h
index df4aa75c9e7c..9d6c66401280 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -3,7 +3,7 @@
#define COMPONENT_H
#include <linux/stddef.h>
-
+#include <linux/types.h>
struct device;
@@ -90,6 +90,8 @@ int component_compare_dev_name(struct device *dev, void *data);
void component_master_del(struct device *,
const struct component_master_ops *);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops);
struct component_match;
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 2606711adb18..ef65c75beeaa 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,8 +64,8 @@ extern void config_item_put(struct config_item *);
struct config_item_type {
struct module *ct_owner;
- struct configfs_item_operations *ct_item_ops;
- struct configfs_group_operations *ct_group_ops;
+ const struct configfs_item_operations *ct_item_ops;
+ const struct configfs_group_operations *ct_group_ops;
struct configfs_attribute **ct_attrs;
struct configfs_bin_attribute **ct_bin_attrs;
};
@@ -120,15 +120,19 @@ struct configfs_attribute {
ssize_t (*store)(struct config_item *, const char *, size_t);
};
-#define CONFIGFS_ATTR(_pfx, _name) \
+#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
- .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_mode = _perm, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
+#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \
+ _pfx, _name, S_IRUGO | S_IWUSR \
+)
+
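/*
 * Illustrative sketch (not part of this patch; the "demo_" prefix and
 * names are hypothetical): an attribute can now pick its mode directly,
 *
 *	CONFIGFS_ATTR_PERM(demo_, secret, 0600);
 *
 * which still expects demo_secret_show()/demo_secret_store() callbacks.
 */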
#define CONFIGFS_ATTR_RO(_pfx, _name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
@@ -216,6 +220,9 @@ struct configfs_group_operations {
struct config_group *(*make_group)(struct config_group *group, const char *name);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
+ bool (*is_visible)(struct config_item *item, struct configfs_attribute *attr, int n);
+ bool (*is_bin_visible)(struct config_item *item, struct configfs_bin_attribute *attr,
+ int n);
};
struct configfs_subsystem {
diff --git a/include/linux/console.h b/include/linux/console.h
index 31a8f5b85f5d..fc9f5c5c1b04 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -16,7 +16,10 @@
#include <linux/atomic.h>
#include <linux/bits.h>
+#include <linux/irq_work.h>
#include <linux/rculist.h>
+#include <linux/rcuwait.h>
+#include <linux/smp.h>
#include <linux/types.h>
#include <linux/vesa.h>
@@ -183,6 +186,8 @@ static inline void con_debug_leave(void) { }
* printing callbacks must not be called.
* @CON_NBCON: Console can operate outside of the legacy style console_lock
* constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ * therefore only used by nbcon_atomic_flush_unsafe().
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -194,6 +199,7 @@ enum cons_flags {
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
CON_NBCON = BIT(8),
+ CON_NBCON_ATOMIC_UNSAFE = BIT(9),
};
/**
@@ -303,7 +309,7 @@ struct nbcon_write_context {
/**
* struct console - The console descriptor structure
* @name: The name of the console driver
- * @write: Write callback to output messages (Optional)
+ * @write: Legacy write callback to output messages (Optional)
* @read: Read callback for console input (Optional)
* @device: The underlying TTY device driver (Optional)
* @unblank: Callback to unblank the console (Optional)
@@ -320,10 +326,14 @@ struct nbcon_write_context {
* @data: Driver private data
* @node: hlist node for the console list
*
- * @write_atomic: Write callback for atomic context
* @nbcon_state: State for nbcon consoles
* @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
* @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
*/
struct console {
char name[16];
@@ -345,11 +355,121 @@ struct console {
struct hlist_node node;
/* nbcon console specific members */
- bool (*write_atomic)(struct console *con,
- struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_atomic:
+ *
+ * NBCON callback to write out text in any context. (Optional)
+ *
+ * This callback is called with the console already acquired. However,
+ * a higher priority context is allowed to take it over by default.
+ *
+ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
+ * around any code where the takeover is not safe, for example, when
+ * manipulating the serial port registers.
+ *
+ * nbcon_enter_unsafe() will fail if the context has lost the console
+ * ownership in the meantime. In this case, the callback is no longer
+ * allowed to go forward. It must back out immediately and carefully.
+ * The buffer content is also no longer trusted since it no longer
+ * belongs to the context.
+ *
+	 * The callback should allow the takeover whenever it is safe. This
+	 * increases the chance of seeing messages when the system is in
+	 * trouble. A minimal sketch of such a callback follows this
+	 * structure definition.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire_nobuf() can be used.
+ * However, on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
+ *
+ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
+ */
+ void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_thread:
+ *
+ * NBCON callback to write out text in task context.
+ *
+ * This callback must be called only in task context with both
+ * device_lock() and the nbcon console acquired with
+ * NBCON_PRIO_NORMAL.
+ *
+ * The same rules for console ownership verification and unsafe
+	 * sections handling apply as with write_atomic().
+ *
+ * The console ownership handling is necessary for synchronization
+ * against write_atomic() which is synchronized only via the context.
+ *
+ * The device_lock() provides the primary serialization for operations
+ * on the device. It might be as relaxed (mutex)[*] or as tight
+ * (disabled preemption and interrupts) as needed. It allows
+ * the kthread to operate in the least restrictive mode[**].
+ *
+	 *	[*] Standalone nbcon_context_try_acquire() is not safe with
+	 *	    preemption enabled, see nbcon_owner_matches(). But it
+	 *	    can be safe when always called in preemptible context
+	 *	    under the device_lock().
+ *
+ * [**] The device_lock() makes sure that nbcon_context_try_acquire()
+ * would never need to spin which is important especially with
+ * PREEMPT_RT.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @device_lock:
+ *
+ * NBCON callback to begin synchronization with driver code.
+ *
+ * Console drivers typically must deal with access to the hardware
+ * via user input/output (such as an interactive login shell) and
+ * output of kernel messages via printk() calls. This callback is
+ * called by the printk-subsystem whenever it needs to synchronize
+ * with hardware access by the driver. It should be implemented to
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
+ * The callback is always called from task context. It may use any
+ * synchronization method required by the driver.
+ *
+ * IMPORTANT: The callback MUST disable migration. The console driver
+ * may be using a synchronization mechanism that already takes
+ * care of this (such as spinlocks). Otherwise this function must
+ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
+ * driver does not need it.
+ */
+ void (*device_lock)(struct console *con, unsigned long *flags);
+
+ /**
+ * @device_unlock:
+ *
+ * NBCON callback to finish synchronization with driver code.
+ *
+ * It is the counterpart to device_lock().
+ *
+ * This callback is always called from task context. It must
+ * appropriately re-enable migration (depending on how device_lock()
+ * disabled migration).
+ *
+ * The flags argument is the value of the same variable that was
+ * passed to device_lock().
+ */
+ void (*device_unlock)(struct console *con, unsigned long flags);
+
atomic_t __private nbcon_state;
atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
};
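/*
 * Illustrative sketch (not part of this patch; the driver helper and the
 * exact wctxt field names are assumptions): the shape of a write_atomic()
 * callback honouring the ownership rules documented above.
 */
static void demo_write_atomic(struct console *con,
			      struct nbcon_write_context *wctxt)
{
	if (!nbcon_enter_unsafe(wctxt))
		return;		/* ownership lost: back out immediately */

	demo_uart_emit(wctxt->outbuf, wctxt->len);	/* hardware access */

	nbcon_exit_unsafe(wctxt);
}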
#ifdef CONFIG_LOCKDEP
@@ -378,28 +498,34 @@ extern void console_list_unlock(void) __releases(console_mutex);
extern struct hlist_head console_list;
/**
- * console_srcu_read_flags - Locklessly read the console flags
+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
+ * console
* @con: struct console pointer of console to read flags from
*
- * This function provides the necessary READ_ONCE() and data_race()
- * notation for locklessly reading the console flags. The READ_ONCE()
- * in this function matches the WRITE_ONCE() when @flags are modified
- * for registered consoles with console_srcu_write_flags().
+ * Locklessly reading @con->flags provides a consistent read value because
+ * there is at most one CPU modifying @con->flags and that CPU is using only
+ * read-modify-write operations to do so.
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
+ * and that no exit/cleanup routines will run if the console is currently
+ * undergoing unregistration.
*
- * Only use this function to read console flags when locklessly
- * iterating the console list via srcu.
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
*
* Context: Any context.
+ * Return: The current value of the @con->flags field.
*/
static inline short console_srcu_read_flags(const struct console *con)
{
WARN_ON_ONCE(!console_srcu_read_lock_is_held());
/*
- * Locklessly reading console->flags provides a consistent
- * read value because there is at most one CPU modifying
- * console->flags and that CPU is using only read-modify-write
- * operations to do so.
+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
*/
return data_race(READ_ONCE(con->flags));
}
@@ -477,13 +603,83 @@ static inline bool console_is_registered(const struct console *con)
hlist_for_each_entry(con, &console_list, node)
#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len);
extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
+extern bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt);
+extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
+
+/*
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
+ */
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ if (use_atomic) {
+ /* The write_atomic() callback is optional. */
+ if (!con->write_atomic)
+ return false;
+
+ /*
+ * An unsafe write_atomic() callback is only usable
+ * when unsafe takeovers are allowed.
+ */
+ if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+ return false;
+ }
+
+ /*
+ * For the !use_atomic case, @printk_kthreads_running is not
+ * checked because the write_thread() callback is also used
+ * via the legacy loop when the printer threads are not
+ * available.
+ */
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
+
#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len) { }
static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
+static inline bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
#endif
extern int console_set_on_cmdline;
@@ -505,8 +701,8 @@ extern void console_conditional_schedule(void);
extern void console_unblank(void);
extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
-extern void console_stop(struct console *);
-extern void console_start(struct console *);
+extern void console_suspend(struct console *);
+extern void console_resume(struct console *);
extern int is_console_locked(void);
extern int braille_register_console(struct console *, int index,
char *console_options, char *braille_options);
@@ -520,8 +716,8 @@ static inline void console_sysfs_notify(void)
extern bool console_suspend_enabled;
/* Suspend and resume console messages over PM events */
-extern void suspend_console(void);
-extern void resume_console(void);
+extern void console_suspend_all(void);
+extern void console_resume_all(void);
int mda_console_init(void);
@@ -538,6 +734,8 @@ void vcs_remove_sysfs(int index);
*/
extern atomic_t ignore_console_lock_warning;
+DEFINE_LOCK_GUARD_0(console_lock, console_lock(), console_unlock());
+
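/*
 * Illustrative sketch (not part of this patch): the guard enables
 * scope-based locking via <linux/cleanup.h>:
 *
 *	guard(console_lock)();
 *	...			// console_unlock() runs at scope exit
 */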
extern void console_init(void);
/* For deferred console takeover */
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 20f564e98552..13b35637bd5a 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -145,6 +145,7 @@ struct vc_data {
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
+ unsigned int vc_bracketed_paste : 1;
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
@@ -158,6 +159,9 @@ struct vc_data {
struct uni_pagedict *uni_pagedict;
struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
u32 **vc_uni_lines; /* unicode screen content */
+ u16 *vc_saved_screen;
+ unsigned int vc_saved_cols;
+ unsigned int vc_saved_rows;
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index c35db4896c37..6180b803795c 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -28,6 +28,10 @@ int conv_uni_to_pc(struct vc_data *conp, long ucs);
u32 conv_8bit_to_uni(unsigned char c);
int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
+bool ucs_is_double_width(uint32_t cp);
+bool ucs_is_zero_width(uint32_t cp);
+u32 ucs_recompose(u32 base, u32 mark);
+u32 ucs_get_fallback(u32 cp);
#else
static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
bool use_unicode)
@@ -57,6 +61,26 @@ static inline int conv_uni_to_8bit(u32 uni)
}
static inline void console_map_init(void) { }
+
+static inline bool ucs_is_double_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline bool ucs_is_zero_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline u32 ucs_recompose(u32 base, u32 mark)
+{
+ return 0;
+}
+
+static inline u32 ucs_get_fallback(u32 cp)
+{
+ return 0;
+}
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
index 713890c867be..1f6ebf27d962 100644
--- a/include/linux/container_of.h
+++ b/include/linux/container_of.h
@@ -14,6 +14,7 @@
* @member: the name of the member within the struct.
*
* WARNING: any const qualifier of @ptr is lost.
+ * Do not use container_of() in new code.
*/
#define container_of(ptr, type, member) ({ \
void *__mptr = (void *)(ptr); \
@@ -28,6 +29,8 @@
* @ptr: the pointer to the member
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
+ *
+ * Always prefer container_of_const() over container_of() in new code.
*/
#define container_of_const(ptr, type, member) \
_Generic(ptr, \
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 6e76b9dba00e..af9fe87a0922 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -26,26 +26,26 @@ extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- ct_user_enter(CONTEXT_USER);
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- ct_user_exit(CONTEXT_USER);
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_USER);
+ __ct_user_enter(CT_STATE_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_USER);
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
@@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void)
return 0;
prev_ctx = __ct_state();
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_exit(prev_ctx);
return prev_ctx;
@@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
{
if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
+ if (prev_ctx != CT_STATE_KERNEL)
ct_user_enter(prev_ctx);
}
}
@@ -75,15 +75,17 @@ static inline void exception_exit(enum ctx_state prev_ctx)
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
- __ct_user_enter(CONTEXT_GUEST);
+ __ct_user_enter(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
-static __always_inline void context_tracking_guest_exit(void)
+static __always_inline bool context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
- __ct_user_exit(CONTEXT_GUEST);
+ __ct_user_exit(CT_STATE_GUEST);
+
+ return context_tracking_enabled_this_cpu();
}
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
@@ -98,7 +100,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
-static __always_inline void context_tracking_guest_exit(void) { }
+static __always_inline bool context_tracking_guest_exit(void) { return false; }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
@@ -113,13 +115,17 @@ extern void ct_idle_enter(void);
extern void ct_idle_exit(void);
/*
- * Is the current CPU in an extended quiescent state?
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
*
* No ordering, as we are sampling CPU-local information.
*/
-static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
+static __always_inline bool rcu_is_watching_curr_cpu(void)
{
- return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
/*
@@ -140,9 +146,9 @@ static __always_inline bool warn_rcu_enter(void)
* lots of the actual reporting also relies on RCU.
*/
preempt_disable_notrace();
- if (rcu_dynticks_curr_cpu_in_eqs()) {
+ if (!rcu_is_watching_curr_cpu()) {
ret = true;
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
}
return ret;
@@ -151,7 +157,7 @@ static __always_inline bool warn_rcu_enter(void)
static __always_inline void warn_rcu_exit(bool rcu)
{
if (rcu)
- ct_state_inc(RCU_DYNTICKS_IDX);
+ ct_state_inc(CT_RCU_WATCHING);
preempt_enable_notrace();
}
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
index c50b5670c4a5..197916ee91a4 100644
--- a/include/linux/context_tracking_irq.h
+++ b/include/linux/context_tracking_irq.h
@@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void);
void ct_nmi_enter(void);
void ct_nmi_exit(void);
#else
-static inline void ct_irq_enter(void) { }
-static inline void ct_irq_exit(void) { }
+static __always_inline void ct_irq_enter(void) { }
+static __always_inline void ct_irq_exit(void) { }
static inline void ct_irq_enter_irqson(void) { }
static inline void ct_irq_exit_irqson(void) { }
-static inline void ct_nmi_enter(void) { }
-static inline void ct_nmi_exit(void) { }
+static __always_inline void ct_nmi_enter(void) { }
+static __always_inline void ct_nmi_exit(void) { }
#endif
#endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index bbff5f7f8803..0b81248aa03e 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -7,23 +7,17 @@
#include <linux/context_tracking_irq.h>
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
-#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_IDLE = 1,
- CONTEXT_USER = 2,
- CONTEXT_GUEST = 3,
- CONTEXT_MAX = 4,
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
};
-/* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CONTEXT_MAX
-
-#define CT_STATE_MASK (CONTEXT_MAX - 1)
-#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
-
struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
@@ -39,14 +33,50 @@ struct context_tracking {
atomic_t state;
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
- long dynticks_nesting; /* Track process nesting level. */
- long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
#endif
};
+/*
+ * We cram two different things within the same atomic variable:
+ *
+ *        CT_RCU_WATCHING_START             CT_STATE_START
+ *                         |                       |
+ *                         v                       v
+ *     MSB [ RCU watching counter ][ context_state ] LSB
+ *         ^                        ^
+ *         |                        |
+ *     CT_RCU_WATCHING_END          CT_STATE_END
+ *
+ * Bits are used from the LSB upwards, so unused bits (if any) will always be in
+ * upper bits of the variable.
+ */
#ifdef CONFIG_CONTEXT_TRACKING
+#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)
+
+#define CT_STATE_WIDTH bits_per(CT_STATE_MAX - 1)
+#define CT_STATE_START 0
+#define CT_STATE_END (CT_STATE_START + CT_STATE_WIDTH - 1)
+
+#define CT_RCU_WATCHING_MAX_WIDTH (CT_SIZE - CT_STATE_WIDTH)
+#define CT_RCU_WATCHING_WIDTH (IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ? 2 : CT_RCU_WATCHING_MAX_WIDTH)
+#define CT_RCU_WATCHING_START (CT_STATE_END + 1)
+#define CT_RCU_WATCHING_END (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1)
+#define CT_RCU_WATCHING BIT(CT_RCU_WATCHING_START)
+
+#define CT_STATE_MASK GENMASK(CT_STATE_END, CT_STATE_START)
+#define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START)
+
+#define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH)
+
+static_assert(CT_STATE_WIDTH +
+ CT_RCU_WATCHING_WIDTH +
+ CT_UNUSED_WIDTH ==
+ CT_SIZE);
+
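/*
 * Illustrative sketch (not part of this patch): unpacking the two fields
 * from the packed atomic.
 *
 *	int raw      = atomic_read(&ct->state);
 *	int state    = raw & CT_STATE_MASK;		// a CT_STATE_* value
 *	int watching = raw & CT_RCU_WATCHING_MASK;	// RCU counter bits
 */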
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-#endif
+#endif /* CONFIG_CONTEXT_TRACKING */
#ifdef CONFIG_CONTEXT_TRACKING_USER
static __always_inline int __ct_state(void)
@@ -56,47 +86,47 @@ static __always_inline int __ct_state(void)
#endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE
-static __always_inline int ct_dynticks(void)
+static __always_inline int ct_rcu_watching(void)
{
- return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu(int cpu)
+static __always_inline int ct_rcu_watching_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline int ct_dynticks_cpu_acquire(int cpu)
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
}
-static __always_inline long ct_dynticks_nesting(void)
+static __always_inline long ct_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nesting);
+ return __this_cpu_read(context_tracking.nesting);
}
-static __always_inline long ct_dynticks_nesting_cpu(int cpu)
+static __always_inline long ct_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nesting;
+ return ct->nesting;
}
-static __always_inline long ct_dynticks_nmi_nesting(void)
+static __always_inline long ct_nmi_nesting(void)
{
- return __this_cpu_read(context_tracking.dynticks_nmi_nesting);
+ return __this_cpu_read(context_tracking.nmi_nesting);
}
-static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu)
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
{
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
- return ct->dynticks_nmi_nesting;
+ return ct->nmi_nesting;
}
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
@@ -113,7 +143,7 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu)
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
-static inline bool context_tracking_enabled_this_cpu(void)
+static __always_inline bool context_tracking_enabled_this_cpu(void)
{
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
@@ -123,14 +153,14 @@ static inline bool context_tracking_enabled_this_cpu(void)
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
*/
static __always_inline int ct_state(void)
{
int ret;
if (!context_tracking_enabled())
- return CONTEXT_DISABLED;
+ return CT_STATE_DISABLED;
preempt_disable();
ret = __ct_state();
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 0904ba010341..68861da4cf7c 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,7 +10,7 @@
#ifdef CONFIG_COREDUMP
struct core_vma_metadata {
unsigned long start, end;
- unsigned long flags;
+ vm_flags_t flags;
unsigned long dump_size;
unsigned long pgoff;
struct file *file;
@@ -28,6 +28,7 @@ struct coredump_params {
int vma_count;
size_t vma_data_size;
struct core_vma_metadata *vma_meta;
+ struct pid *pid;
};
extern unsigned int core_file_note_size_limit;
@@ -42,9 +43,31 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
-extern void do_coredump(const kernel_siginfo_t *siginfo);
+extern void vfs_coredump(const kernel_siginfo_t *siginfo);
+
+/*
+ * Logging for the coredump code, ratelimited.
+ * The TGID and comm fields are added to the message.
+ */
+
+#define __COREDUMP_PRINTK(Level, Format, ...) \
+ do { \
+ char comm[TASK_COMM_LEN]; \
+ /* This will always be NUL terminated. */ \
+ memcpy(comm, current->comm, sizeof(comm)); \
+ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \
+ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \
+ } while (0) \
+
+#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__)
+#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
+
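/*
 * Illustrative sketch (not part of this patch): a failure path can log
 *
 *	coredump_report_failure("dump interrupted, %zd bytes written", n);
 *
 * which emits a ratelimited KERN_WARNING line prefixed with the dumping
 * task's TGID and comm.
 */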
#else
-static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
+static inline void vfs_coredump(const kernel_siginfo_t *siginfo) {}
+
+#define coredump_report(...)
+#define coredump_report_failure(...)
+
#endif
#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 51ac441a37c3..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
- * [59:08] - Unused (SBZ)
- * [63:60] - Version
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
-#define CS_AUX_HW_ID_VERSION_MASK	GENMASK_ULL(63, 60)
-#define CS_AUX_HW_ID_CURR_VERSION	0
+#define CS_AUX_HW_ID_SINK_ID_MASK	GENMASK_ULL(39, 8)
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK	GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
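/*
 * Illustrative sketch (not part of this patch): a decoder can unpack the
 * payload with FIELD_GET() from <linux/bitfield.h>:
 *
 *	u8  trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
 *	u32 sink_id  = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
 *	u8  major    = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);
 */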
#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f09ace92176e..2b48be97fcd0 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -71,7 +71,8 @@ enum coresight_dev_subtype_source {
enum coresight_dev_subtype_helper {
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
- CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_CTCU,
};
/**
@@ -172,6 +173,9 @@ struct coresight_desc {
* @dest_dev: a @coresight_device representation of the component
connected to @src_port. NULL until the device is created
* @link: Representation of the connection as a sysfs link.
+ * @filter_src_fwnode: filter source component's fwnode handle.
+ * @filter_src_dev: a @coresight_device representation of the component that
+ needs to be filtered.
*
* The full connection structure looks like this, where in_conns store
* references to same connection as the source device's out_conns.
@@ -200,8 +204,10 @@ struct coresight_connection {
struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
struct coresight_device *src_dev;
- atomic_t src_refcnt;
- atomic_t dest_refcnt;
+ struct fwnode_handle *filter_src_fwnode;
+ struct coresight_device *filter_src_dev;
+ int src_refcnt;
+ int dest_refcnt;
};
/**
@@ -218,6 +224,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
+/* architecturally we have 128 IDs, some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @cpu_map:	Per-CPU record of the trace ID currently assigned to each CPU.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ * @lock:	Protects the ID map against concurrent updates.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ raw_spinlock_t lock;
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -227,15 +251,11 @@ struct coresight_sysfs_link {
* by @coresight_ops.
* @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
- * @mode: This tracer's mode, i.e sysFS, Perf or disabled. This is
- * actually an 'enum cs_mode', but is stored in an atomic type.
- * This is always accessed through local_read() and local_set(),
- * but wherever it's done from within the Coresight device's lock,
- * a non-atomic read would also work. This is the main point of
- * synchronisation between code happening inside the sysfs mode's
- * coresight_mutex and outside when running in Perf mode. A compare
- * and exchange swap is done to atomically claim one mode or the
- * other.
+ * @mode:	The device mode, i.e. sysFS, Perf or disabled. This is
+ *		actually an 'enum cs_mode' but stored in an atomic type.
+ *		Access is always through atomic APIs, ensuring SMP-safe
+ *		synchronisation between racing accesses from sysFS and Perf
+ *		mode. A compare-and-exchange operation is done to atomically
+ *		claim one mode or the other.
* @refcnt: keep track of what is in use. Only access this outside of the
* device's spinlock when the coresight_mutex held and mode ==
* CS_MODE_SYSFS. Otherwise it must be accessed from inside the
@@ -264,20 +284,21 @@ struct coresight_device {
const struct coresight_ops *ops;
struct csdev_access access;
struct device dev;
- local_t mode;
+ atomic_t mode;
int refcnt;
bool orphan;
/* sink specific fields */
bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
+ struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
- spinlock_t cscfg_csdev_lock;
+ raw_spinlock_t cscfg_csdev_lock;
void *active_cscfg_ctxt;
};
@@ -305,17 +326,31 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+/**
+ * struct coresight_path - data needed by enable/disable path
+ * @path_list: path from source to sink.
+ * @trace_id: trace_id of the whole path.
+ * @handle: handle of the aux_event.
+ */
+struct coresight_path {
+ struct list_head path_list;
+ u8 trace_id;
+ struct perf_output_handle *handle;
+};
+
enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
CS_MODE_PERF,
};
+#define coresight_ops(csdev) csdev->ops
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
#define helper_ops(csdev) csdev->ops->helper_ops
#define ect_ops(csdev) csdev->ops->ect_ops
+#define panic_ops(csdev) csdev->ops->panic_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -328,7 +363,7 @@ enum cs_mode {
*/
struct coresight_ops_sink {
int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
- void *data);
+ struct coresight_path *path);
int (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev,
struct perf_event *event, void **pages,
@@ -361,13 +396,17 @@ struct coresight_ops_link {
* is associated to.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
+ * @resume_perf: resumes tracing for a source in perf session.
+ * @pause_perf: pauses tracing for a source in perf session.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
- enum cs_mode mode);
+ enum cs_mode mode, struct coresight_path *path);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
+ int (*resume_perf)(struct coresight_device *csdev);
+ void (*pause_perf)(struct coresight_device *csdev);
};
/**
@@ -381,15 +420,29 @@ struct coresight_ops_source {
*/
struct coresight_ops_helper {
int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
- void *data);
- int (*disable)(struct coresight_device *csdev, void *data);
+ struct coresight_path *path);
+ int (*disable)(struct coresight_device *csdev,
+ struct coresight_path *path);
+};
+
+
+/**
+ * struct coresight_ops_panic - Generic device ops for panic handling
+ *
+ * @sync : Sync the device register state/trace data
+ */
+struct coresight_ops_panic {
+ int (*sync)(struct coresight_device *csdev);
};
struct coresight_ops {
+ int (*trace_id)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
+ const struct coresight_ops_panic *panic_ops;
};
static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
@@ -420,32 +473,6 @@ static inline bool is_coresight_device(void __iomem *base)
return cid == CORESIGHT_CID;
}
-/*
- * Attempt to find and enable "APB clock" for the given device
- *
- * Returns:
- *
- * clk - Clock is found and enabled
- * NULL - clock is not found
- * ERROR - Clock is found but failed to enable
- */
-static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
-{
- struct clk *pclk;
- int ret;
-
- pclk = clk_get(dev, "apb_pclk");
- if (IS_ERR(pclk))
- return NULL;
-
- ret = clk_prepare_enable(pclk);
- if (ret) {
- clk_put(pclk);
- return ERR_PTR(ret);
- }
- return pclk;
-}
-
#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
static inline u32 coresight_get_pid(struct csdev_access *csa)
@@ -569,9 +596,14 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o
}
#endif /* CONFIG_64BIT */
+static inline bool coresight_is_device_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE);
+}
+
static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
{
- return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return csdev && coresight_is_device_source(csdev) &&
(csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
}
@@ -588,13 +620,14 @@ static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
static inline bool coresight_take_mode(struct coresight_device *csdev,
enum cs_mode new_mode)
{
- return local_cmpxchg(&csdev->mode, CS_MODE_DISABLED, new_mode) ==
- CS_MODE_DISABLED;
+ int curr = CS_MODE_DISABLED;
+
+ return atomic_try_cmpxchg_acquire(&csdev->mode, &curr, new_mode);
}
static inline enum cs_mode coresight_get_mode(struct coresight_device *csdev)
{
- return local_read(&csdev->mode);
+ return atomic_read_acquire(&csdev->mode);
}
static inline void coresight_set_mode(struct coresight_device *csdev,
@@ -610,26 +643,30 @@ static inline void coresight_set_mode(struct coresight_device *csdev,
WARN(new_mode != CS_MODE_DISABLED && current_mode != CS_MODE_DISABLED &&
current_mode != new_mode, "Device already in use\n");
- local_set(&csdev->mode, new_mode);
+ atomic_set_release(&csdev->mode, new_mode);
}
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable_sysfs(struct coresight_device *csdev);
-extern void coresight_disable_sysfs(struct coresight_device *csdev);
-extern int coresight_timeout(struct csdev_access *csa, u32 offset,
- int position, int value);
-
-extern int coresight_claim_device(struct coresight_device *csdev);
-extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
-
-extern void coresight_disclaim_device(struct coresight_device *csdev);
-extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
-extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
+struct coresight_device *coresight_register(struct coresight_desc *desc);
+void coresight_unregister(struct coresight_device *csdev);
+int coresight_enable_sysfs(struct coresight_device *csdev);
+void coresight_disable_sysfs(struct coresight_device *csdev);
+int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value);
+typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int);
+int coresight_timeout_action(struct csdev_access *csa, u32 offset, int position, int value,
+ coresight_timeout_cb_t cb);
+int coresight_claim_device(struct coresight_device *csdev);
+int coresight_claim_device_unlocked(struct coresight_device *csdev);
+
+void coresight_clear_self_claim_tag(struct csdev_access *csa);
+void coresight_clear_self_claim_tag_unlocked(struct csdev_access *csa);
+void coresight_disclaim_device(struct coresight_device *csdev);
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
+char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
-extern bool coresight_loses_context_with_cpu(struct device *dev);
+bool coresight_loses_context_with_cpu(struct device *dev);
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
u32 coresight_read32(struct coresight_device *csdev, u32 offset);
@@ -642,7 +679,8 @@ void coresight_relaxed_write64(struct coresight_device *csdev,
u64 val, u32 offset);
void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
-extern int coresight_get_cpu(struct device *dev);
+int coresight_get_cpu(struct device *dev);
+int coresight_get_static_trace_id(struct device *dev, u32 *id);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
struct coresight_connection *
@@ -660,8 +698,12 @@ coresight_find_output_type(struct coresight_platform_data *pdata,
union coresight_dev_subtype subtype);
int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
- struct platform_driver *pdev_drv);
+ struct platform_driver *pdev_drv, struct module *owner);
void coresight_remove_driver(struct amba_driver *amba_drv,
struct platform_driver *pdev_drv);
+int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
+int coresight_get_enable_clocks(struct device *dev, struct clk **pclk,
+ struct clk **atclk);
#endif /* _LINUX_CORESIGHT_H */
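
[Editor's note: for context, a minimal sketch — not part of the patch — of how a driver might use the new coresight_timeout_action() variant. The register offset, bit position, and callback body are illustrative assumptions; only the signatures come from the declarations above.]

	/* Illustrative only: poll bit 2 of a made-up status register until it
	 * reads 1, with a hook invoked during the wait (per the new typedef). */
	static void example_poll_hook(struct csdev_access *csa, u32 offset,
				      int position, int value)
	{
		/* e.g. nudge the hardware or log progress while polling */
	}

	static int example_wait_ready(struct coresight_device *csdev)
	{
		return coresight_timeout_action(&csdev->access, 0x300 /* made up */,
						2, 1, example_poll_hook);
	}
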
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 426b7d58a438..f208e867dd0f 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -580,6 +580,9 @@ struct counter_array {
#define COUNTER_COMP_CEILING(_read, _write) \
COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+#define COUNTER_COMP_COMPARE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("compare", _read, _write)
+
#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
{ \
.type = COUNTER_COMP_COUNT_MODE, \
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 265b0f8fc0b3..5b1236d8c65b 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -89,6 +89,10 @@ enum {
#define CPER_NOTIFY_DMAR \
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
/*
@@ -293,11 +297,11 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
-#define CPER_ARM_CACHE_ERROR 0
-#define CPER_ARM_TLB_ERROR 1
-#define CPER_ARM_BUS_ERROR 2
-#define CPER_ARM_VENDOR_ERROR 3
-#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+#define CPER_ARM_ERR_TYPE_MASK		GENMASK(4, 1)
+#define CPER_ARM_CACHE_ERROR BIT(1)
+#define CPER_ARM_TLB_ERROR BIT(2)
+#define CPER_ARM_BUS_ERROR BIT(3)
+#define CPER_ARM_VENDOR_ERROR BIT(4)
#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
@@ -584,6 +588,8 @@ const char *cper_mem_err_type_str(unsigned int);
const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
+int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
+ const char * const strs[], unsigned int strs_size);
void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
@@ -601,4 +607,8 @@ void cper_estatus_print(const char *pfx,
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+struct cxl_cper_sec_prot_err;
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
+
#endif
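
[Editor's note: a hedged sketch — not from the patch — of decoding the now multi-bit ARM error type with the new cper_bits_to_str() helper. It assumes cper_bits_to_str() maps bit N to strs[N], as cper_print_bits() does, and that FIELD_GET() from <linux/bitfield.h> is available; the strings array is illustrative.]

	/* Illustrative only: BIT(1)..BIT(4) are now independent type flags,
	 * so a single record may carry several error types at once. */
	static const char * const example_arm_err_strs[] = {
		"cache error", "TLB error", "bus error", "vendor-specific error",
	};

	static void example_print_arm_err_type(u16 type)
	{
		char buf[120];

		cper_bits_to_str(buf, sizeof(buf),
				 FIELD_GET(CPER_ARM_ERR_TYPE_MASK, type),
				 example_arm_err_strs,
				 ARRAY_SIZE(example_arm_err_strs));
		pr_info("ARM error type: %s\n", buf);
	}
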
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 861c3bfc5f17..487b3bf2e1ea 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,8 +16,8 @@
#include <linux/node.h>
#include <linux/compiler.h>
-#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpuhplock.h>
#include <linux/cpu_smt.h>
struct device;
@@ -77,6 +77,13 @@ extern ssize_t cpu_show_gds(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_old_microcode(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -115,6 +122,7 @@ extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
void bringup_nonboot_cpus(unsigned int max_cpus);
+int arch_cpu_rescan_dead_smt_siblings(void);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -129,41 +137,11 @@ static inline void cpu_maps_update_done(void)
static inline int add_cpu(unsigned int cpu) { return 0;}
+static inline int arch_cpu_rescan_dead_smt_siblings(void) { return 0; }
+
#endif /* CONFIG_SMP */
extern const struct bus_type cpu_subsys;
-extern int lockdep_is_cpus_held(void);
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern int cpus_read_trylock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
-void clear_tasks_mm_cpumask(int cpu);
-int remove_cpu(unsigned int cpu);
-int cpu_device_down(struct device *dev);
-extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
-
-#else /* CONFIG_HOTPLUG_CPU */
-
-static inline void cpus_write_lock(void) { }
-static inline void cpus_write_unlock(void) { }
-static inline void cpus_read_lock(void) { }
-static inline void cpus_read_unlock(void) { }
-static inline int cpus_read_trylock(void) { return true; }
-static inline void lockdep_assert_cpus_held(void) { }
-static inline void cpu_hotplug_disable(void) { }
-static inline void cpu_hotplug_enable(void) { }
-static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
-static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
-
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
@@ -179,7 +157,7 @@ static inline int suspend_disable_secondary_cpus(void)
}
static inline void suspend_enable_secondary_cpus(void)
{
- return thaw_secondary_cpus();
+ thaw_secondary_cpus();
}
#else /* !CONFIG_PM_SLEEP_SMP */
@@ -210,20 +188,31 @@ static inline void arch_cpu_finalize_init(void) { }
void play_idle_precise(u64 duration_ns, u64 latency_ns);
-static inline void play_idle(unsigned long duration_us)
-{
- play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
-}
-
#ifdef CONFIG_HOTPLUG_CPU
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpu_attack_vectors {
+ CPU_MITIGATE_USER_KERNEL,
+ CPU_MITIGATE_USER_USER,
+ CPU_MITIGATE_GUEST_HOST,
+ CPU_MITIGATE_GUEST_GUEST,
+ NR_CPU_ATTACK_VECTORS,
+};
+
+enum smt_mitigations {
+ SMT_MITIGATIONS_OFF,
+ SMT_MITIGATIONS_AUTO,
+ SMT_MITIGATIONS_ON,
+};
+
#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+extern bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v);
+extern enum smt_mitigations smt_mitigations;
#else
static inline bool cpu_mitigations_off(void)
{
@@ -233,6 +222,11 @@ static inline bool cpu_mitigations_auto_nosmt(void)
{
return false;
}
+static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
+{
+ return false;
+}
+#define smt_mitigations SMT_MITIGATIONS_OFF
#endif
#endif /* _LINUX_CPU_H_ */
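
[Editor's note: a sketch — not from the patch — of how arch mitigation code might consult the new attack-vector and SMT controls. The mitigation itself and the print statements are hypothetical; only the enum values and helpers come from the header above.]

	/* Illustrative only: enable a hypothetical mitigation when guest-to-host
	 * attacks are in scope, extending it across SMT siblings if requested. */
	static void example_select_mitigation(void)
	{
		if (!cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST))
			return;		/* vector not mitigated: leave it off */

		pr_info("example: mitigation enabled\n");

		if (smt_mitigations == SMT_MITIGATIONS_ON)
			pr_info("example: also covering SMT siblings\n");
	}
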
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index a3bdc8a98f2c..2c774fb3c091 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/thermal.h>
-#include <linux/cpumask.h>
struct cpufreq_policy;
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index cae324d10965..2fd7ba75362a 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -7,7 +7,7 @@
* Copyright 2011 Solarflare Communications Inc.
*/
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/kref.h>
@@ -32,6 +32,7 @@ struct cpu_rmap {
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+extern void cpu_rmap_get(struct cpu_rmap *rmap);
extern int cpu_rmap_put(struct cpu_rmap *rmap);
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 20f7e98ee8af..0465d1e6f72a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -26,12 +26,10 @@
*********************************************************************/
/*
* Frequency values here are CPU kHz
- *
- * Maximum transition latency is in nanoseconds - if it's unknown,
- * CPUFREQ_ETERNAL shall be used.
*/
-#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
+
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
@@ -144,6 +142,9 @@ struct cpufreq_policy {
/* Per policy boost enabled flag. */
bool boost_enabled;
+ /* Per policy boost supported flag. */
+ bool boost_supported;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
@@ -167,6 +168,12 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
+ down_write(&_T->rwsem), up_write(&_T->rwsem))
+
+DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
+ down_read(&_T->rwsem), up_read(&_T->rwsem))
+
/*
* Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
* callback for sanitization. That callback is only expected to modify the min
@@ -210,6 +217,9 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+/* Scope based cleanup macro for cpufreq_policy kobject reference counting */
+DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))
+
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
return cpumask_empty(policy->cpus);
@@ -229,9 +239,6 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
-void cpufreq_cpu_release(struct cpufreq_policy *policy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -389,14 +396,14 @@ struct cpufreq_driver {
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
- void (*update_limits)(unsigned int cpu);
+ void (*update_limits)(struct cpufreq_policy *policy);
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
- int (*exit)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -577,12 +584,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor.
- */
-#define LATENCY_MULTIPLIER (1000)
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
@@ -647,6 +648,15 @@ module_exit(__governor##_exit)
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+bool sugov_is_governor(struct cpufreq_policy *policy);
+#else
+static inline bool sugov_is_governor(struct cpufreq_policy *policy)
+{
+ return false;
+}
+#endif
+
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
@@ -768,26 +778,23 @@ struct cpufreq_frequency_table {
else
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
-int cpufreq_boost_trigger_state(int state);
-int cpufreq_boost_enabled(void);
-int cpufreq_enable_boost_support(void);
-bool policy_has_boost_freq(struct cpufreq_policy *policy);
+bool cpufreq_boost_enabled(void);
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
@@ -842,12 +849,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_al(policy, target_freq,
@@ -857,6 +864,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -910,12 +925,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ah(policy, target_freq,
@@ -925,6 +940,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
efficiencies);
}
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -995,12 +1018,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq,
- bool efficiencies)
+static inline int find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ac(policy, target_freq,
@@ -1010,7 +1033,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
efficiencies);
}
-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max,
+ int idx)
{
unsigned int freq;
@@ -1019,11 +1052,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
freq = policy->freq_table[idx].frequency;
- return freq == clamp_val(freq, policy->min, policy->max);
+ return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
+ unsigned int min,
+ unsigned int max,
unsigned int relation)
{
bool efficiencies = policy->efficiencies_available &&
@@ -1034,29 +1069,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
relation &= ~CPUFREQ_RELATION_E;
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
- return cpufreq_table_index_unsorted(policy, target_freq,
- relation);
+ return cpufreq_table_index_unsorted(policy, target_freq, min,
+ max, relation);
retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- idx = cpufreq_table_find_index_l(policy, target_freq,
- efficiencies);
+ idx = find_index_l(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_H:
- idx = cpufreq_table_find_index_h(policy, target_freq,
- efficiencies);
+ idx = find_index_h(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_C:
- idx = cpufreq_table_find_index_c(policy, target_freq,
- efficiencies);
+ idx = find_index_c(policy, target_freq, min, max, efficiencies);
break;
default:
WARN_ON_ONCE(1);
return 0;
}
- /* Limit frequency index to honor policy->min/max */
- if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+ /* Limit frequency index to honor min and max */
+ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
efficiencies = false;
goto retry;
}
@@ -1113,10 +1145,9 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
const char *cell_name,
struct of_phandle_args *args)
{
- struct device_node *cpu_np;
int ret;
- cpu_np = of_cpu_device_node_get(cpu);
+ struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
if (!cpu_np)
return -ENODEV;
@@ -1124,9 +1155,6 @@ static inline int parse_perf_domain(int cpu, const char *list_name,
args);
if (ret < 0)
return ret;
-
- of_node_put(cpu_np);
-
return 0;
}
@@ -1160,23 +1188,14 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
return 0;
}
#else
-static inline int cpufreq_boost_trigger_state(int state)
-{
- return 0;
-}
-static inline int cpufreq_boost_enabled(void)
-{
- return 0;
-}
-
-static inline int cpufreq_enable_boost_support(void)
+static inline bool cpufreq_boost_enabled(void)
{
- return -EINVAL;
+ return false;
}
-static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
- return false;
+ return -EOPNOTSUPP;
}
static inline int
@@ -1194,7 +1213,7 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
}
#endif
-extern unsigned int arch_freq_get_on_cpu(int cpu);
+extern int arch_freq_get_on_cpu(int cpu);
#ifndef arch_set_freq_scale
static __always_inline
@@ -1208,7 +1227,6 @@ void arch_set_freq_scale(const struct cpumask *cpus,
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
-extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
@@ -1216,6 +1234,8 @@ void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
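
[Editor's note: a sketch — not in the patch — showing the new scope-based cpufreq helpers together; it assumes the <linux/cleanup.h> semantics of __free() and guard(). Both the policy reference and the rwsem are released automatically on every return path.]

	/* Illustrative only: take a policy reference and its read lock for
	 * one scope, then read the current frequency. */
	static unsigned int example_get_cur_freq(unsigned int cpu)
	{
		struct cpufreq_policy *policy __free(put_cpufreq_policy) =
			cpufreq_cpu_get(cpu);

		if (!policy)
			return 0;

		guard(cpufreq_policy_read)(policy);	/* down_read(&policy->rwsem) */
		return policy->cur;
	}
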
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7a5785f405b6..62cd7b35a29c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -27,7 +27,7 @@
* startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
* during a CPU online operation. During a CPU offline operation the
* installed teardown callbacks are invoked in the reverse order from
- * CPU_ONLINE - 1 down to CPUHP_OFFLINE.
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
*
* The state space has three sections: PREPARE, STARTING and ONLINE.
*
@@ -60,7 +60,6 @@ enum cpuhp_state {
/* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
- CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_POWER,
@@ -91,7 +90,6 @@ enum cpuhp_state {
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
- CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
@@ -100,7 +98,6 @@ enum cpuhp_state {
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
- CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
@@ -117,11 +114,11 @@ enum cpuhp_state {
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
- CPUHP_MM_ZS_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
+ CPUHP_TMIGR_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
@@ -144,14 +141,16 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_ACLINT_SSWI_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
@@ -169,8 +168,10 @@ enum cpuhp_state {
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
+ CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_REALTEK_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
@@ -206,8 +207,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -226,6 +225,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
@@ -238,6 +238,7 @@ enum cpuhp_state {
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
+ CPUHP_AP_KTHREADS_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
new file mode 100644
index 000000000000..f7aa20f62b87
--- /dev/null
+++ b/include/linux/cpuhplock.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/cpuhplock.h - CPU hotplug locking
+ *
+ * Locking functions for CPU hotplug.
+ */
+#ifndef _LINUX_CPUHPLOCK_H_
+#define _LINUX_CPUHPLOCK_H_
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+
+struct device;
+
+extern int lockdep_is_cpus_held(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void cpus_write_lock(void);
+void cpus_write_unlock(void);
+void cpus_read_lock(void);
+void cpus_read_unlock(void);
+int cpus_read_trylock(void);
+void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable_offlining(void);
+void cpu_hotplug_disable(void);
+void cpu_hotplug_enable(void);
+void clear_tasks_mm_cpumask(int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable_offlining(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
+#endif /* _LINUX_CPUHPLOCK_H_ */
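
[Editor's note: a minimal sketch — not part of the patch — of the DEFINE_LOCK_GUARD_0() wrapper this new header provides; it assumes <linux/cpumask.h> is also included for the iterator.]

	/* Illustrative only: hold the CPU hotplug read lock for one scope;
	 * cpus_read_unlock() runs automatically at scope exit. */
	static void example_count_online(void)
	{
		unsigned int n = 0;
		int cpu;

		guard(cpus_read_lock)();

		for_each_online_cpu(cpu)
			n++;
		pr_info("example: %u CPUs online\n", n);
	}
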
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3183aeb7f5b4..4073690504a7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -61,7 +61,7 @@ struct cpuidle_state {
struct cpuidle_driver *drv,
int index);
- int (*enter_dead) (struct cpuidle_device *dev, int index);
+ void (*enter_dead) (struct cpuidle_device *dev, int index);
/*
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
@@ -248,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
@@ -256,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
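
[Editor's note: a sketch — not from the patch — of a caller updated for the extra latency argument to cpuidle_enter_s2idle(). Treating U64_MAX as "no limit" mirrors cpuidle_find_deepest_state()'s convention and is an assumption here.]

	/* Illustrative only: request s2idle entry with no latency constraint. */
	static int example_enter_s2idle(struct cpuidle_driver *drv,
					struct cpuidle_device *dev)
	{
		return cpuidle_enter_s2idle(drv, dev, U64_MAX);
	}
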
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 23686bed441d..80211900f373 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -7,26 +7,16 @@
* set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-#include <linux/cleanup.h>
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/bitmap.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/cleanup.h>
+#include <linux/cpumask_types.h>
#include <linux/gfp_types.h>
#include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
-/* Don't assign or return these: may not be this big! */
-typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-
-/**
- * cpumask_bits - get the bits in a cpumask
- * @maskp: the struct cpumask *
- *
- * You should only assume nr_cpu_ids bits of this mask are valid. This is
- * a macro so it's const-correct.
- */
-#define cpumask_bits(maskp) ((maskp)->bits)
+#include <asm/bug.h>
/**
* cpumask_pr_args - printf args to output a cpumask
@@ -42,7 +32,7 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern unsigned int nr_cpu_ids;
#endif
-static inline void set_nr_cpu_ids(unsigned int nr)
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
{
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
WARN_ON(nr != nr_cpu_ids);
@@ -93,6 +83,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
*
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
*
@@ -125,16 +116,19 @@ static inline void set_nr_cpu_ids(unsigned int nr)
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
+extern unsigned int __num_possible_cpus;
extern cpumask_t cpus_booted_once_mask;
@@ -158,7 +152,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
*
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -169,7 +163,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if all cpus are set.
*/
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -181,13 +175,26 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
+ * cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no such cpu found.
+ */
+static __always_inline
+unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
* cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
* @srcp1: the first input
* @srcp2: the second input
@@ -195,7 +202,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
*
* Return: >= nr_cpu_ids if no cpus set in all.
*/
-static inline
+static __always_inline
unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
const struct cpumask *srcp2,
const struct cpumask *srcp3)
@@ -210,7 +217,7 @@ unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
*
* Return: >= nr_cpumask_bits if no CPUs set.
*/
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -222,7 +229,7 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
@@ -238,7 +245,8 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
*
* Return: >= nr_cpu_ids if no further cpus unset.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -248,18 +256,21 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
#if NR_CPUS == 1
/* Uniprocessor: there is only one valid CPU */
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
-static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return cpumask_first_and(src1p, src2p);
}
-static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -278,9 +289,9 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
*
* Return: >= nr_cpu_ids if no further cpus set in both.
*/
-static inline
+static __always_inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
- const struct cpumask *src2p)
+ const struct cpumask *src2p)
{
/* -1 is a legal arg here. */
if (n != -1)
@@ -290,35 +301,83 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
}
/**
- * for_each_cpu - iterate over every cpu in a mask
- * @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask pointer
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
*
- * After the loop, cpu is >= nr_cpu_ids.
+ * Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
*/
-#define for_each_cpu(cpu, mask) \
- for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
+static __always_inline
+unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
-#if NR_CPUS == 1
-static inline
-unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+/**
+ * cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
+ * @n+1. If nothing found, wrap around and start from
+ * the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src1p & @src2p is empty.
+ */
+static __always_inline
+unsigned int cpumask_next_and_wrap(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- cpumask_check(start);
+ /* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
+ return find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
- /*
- * Return the first available CPU when wrapping, or when starting before cpu0,
- * since there is only one valid option.
- */
- if (wrap && n >= 0)
- return nr_cpumask_bits;
+/**
+ * cpumask_next_wrap - get the next cpu in *src, starting from @n+1. If nothing
+ * found, wrap around and start from the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src: cpumask pointer
+ *
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1);
+}
- return cpumask_first(mask);
+/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+ return find_random_bit(cpumask_bits(src), nr_cpu_ids);
}
-#else
-unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
-#endif
+
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu(cpu, mask) \
+ for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@ -396,19 +455,23 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
+ * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
+ * If @cpu == -1, the function is equivalent to cpumask_any().
* Return: >= nr_cpu_ids if no cpus set.
*/
-static inline
-unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
+static __always_inline
+unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
for_each_cpu(i, mask)
if (i != cpu)
break;
@@ -416,21 +479,25 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
/**
- * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one.
+ * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
* @mask1: the first input cpumask
* @mask2: the second input cpumask
* @cpu: the cpu to ignore
*
+ * If @cpu == -1, the function is equivalent to cpumask_any_and().
* Returns >= nr_cpu_ids if no cpus set.
*/
-static inline
+static __always_inline
unsigned int cpumask_any_and_but(const struct cpumask *mask1,
const struct cpumask *mask2,
- unsigned int cpu)
+ int cpu)
{
unsigned int i;
- cpumask_check(cpu);
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
i = cpumask_first_and(mask1, mask2);
if (i != cpu)
return i;
@@ -439,46 +506,58 @@ unsigned int cpumask_any_and_but(const struct cpumask *mask1,
}
/**
- * cpumask_nth - get the Nth cpu in a cpumask
- * @srcp: the cpumask pointer
- * @cpu: the Nth cpu to find, starting from 0
+ * cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
*
- * Return: >= nr_cpu_ids if such cpu doesn't exist.
+ * If @cpu == -1, the function returns the first matching cpu.
+ * Returns >= nr_cpu_ids if no cpus set.
*/
-static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
{
- return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_andnot(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_andnot(cpu, mask1, mask2);
}
/**
- * cpumask_nth_and - get the Nth cpu in 2 cpumasks
- * @srcp1: the cpumask pointer
- * @srcp2: the cpumask pointer
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
-unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
- return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
- small_cpumask_bits, cpumask_check(cpu));
+ return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
}
/**
- * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @cpu: the Nth cpu to find, starting from 0
*
* Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
-static inline
-unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
+static __always_inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
const struct cpumask *srcp2)
{
- return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
small_cpumask_bits, cpumask_check(cpu));
}
@@ -517,16 +596,30 @@ unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
+/**
+ * cpumask_clear_cpus - clear cpus in a cpumask
+ * @dstp: the cpumask pointer
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @ncpus: number of cpus to clear (< nr_cpu_ids)
+ */
+static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
+ unsigned int cpu, unsigned int ncpus)
+{
+ cpumask_check(cpu + ncpus - 1);
+ bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
+}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
@@ -544,29 +637,14 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
}
/**
- * cpumask_assign_cpu - assign a cpu in a cpumask
- * @cpu: cpu number (< nr_cpu_ids)
- * @dstp: the cpumask pointer
- * @bool: the value to assign
- */
-static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
-{
- assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
-}
-
-static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value)
-{
- __assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value);
-}
-
-/**
* cpumask_test_cpu - test for a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
* Return: true if @cpu is set in @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -580,7 +658,8 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -594,7 +673,8 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
*
* Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
-static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -603,7 +683,7 @@ static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
if (small_const_nbits(small_cpumask_bits)) {
cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
@@ -616,7 +696,7 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
@@ -629,9 +709,9 @@ static inline void cpumask_clear(struct cpumask *dstp)
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -643,22 +723,39 @@ static inline bool cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
}
/**
+ * cpumask_weighted_or - *dstp = *src1p | *src2p and return the weight of the result
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Return: The number of bits set in the resulting cpumask @dstp
+ */
+static __always_inline
+unsigned int cpumask_weighted_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_weighted_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
+}
+
+/**
* cpumask_xor - *dstp = *src1p ^ *src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -672,9 +769,9 @@ static inline void cpumask_xor(struct cpumask *dstp,
*
* Return: false if *@dstp is empty, else returns true
*/
-static inline bool cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
cpumask_bits(src2p), small_cpumask_bits);
@@ -687,8 +784,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
*
* Return: true if the cpumasks are equal, false if not
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -703,9 +800,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* Return: true if first cpumask ORed with second cpumask == third cpumask,
* otherwise false
*/
-static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
cpumask_bits(src3p), small_cpumask_bits);
@@ -719,8 +816,8 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* Return: true if first cpumask ANDed with second cpumask is non-empty,
* otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -733,8 +830,8 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
*
* Return: true if *@src1p is a subset of *@src2p, else returns false
*/
-static inline bool cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
small_cpumask_bits);
@@ -746,7 +843,7 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
*
* Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -757,7 +854,7 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
*
* Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -768,7 +865,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
*
* Return: count of bits set in *srcp
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
}
@@ -780,8 +877,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -793,8 +890,9 @@ static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
*
* Return: count of bits set in both *srcp1 and *srcp2
*/
-static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
- const struct cpumask *srcp2)
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
{
return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
@@ -805,8 +903,8 @@ static inline unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
small_cpumask_bits);
@@ -818,8 +916,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -830,14 +928,14 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
- * cpumask_any - pick a "random" cpu from *srcp
+ * cpumask_any - pick an arbitrary cpu from *srcp
* @srcp: the input cpumask
*
* Return: >= nr_cpu_ids if no cpus set.
@@ -845,7 +943,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
@@ -867,8 +965,8 @@ static inline void cpumask_copy(struct cpumask *dstp,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -881,8 +979,8 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -895,7 +993,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*
* Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -907,7 +1005,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
*
* Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -917,60 +1015,20 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
*
* Return: size to allocate for a &struct cpumask in bytes
*/
-static inline unsigned int cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
return bitmap_size(large_cpumask_bits);
}
-/*
- * cpumask_var_t: struct cpumask for stack usage.
- *
- * Oh, the wicked games we play! In order to make kernel coding a
- * little more difficult, we typedef cpumask_var_t to an array or a
- * pointer: doing &mask on an array is a noop, so it still works.
- *
- * i.e.
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * ... use 'tmpmask' like a normal struct cpumask * ...
- *
- * free_cpumask_var(tmpmask);
- *
- *
- * However, one notable exception is there. alloc_cpumask_var() allocates
- * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
- * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
- *
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * var = *tmpmask;
- *
- * This code makes NR_CPUS length memcopy and brings to a memory corruption.
- * cpumask_copy() provide safe copy functionality.
- *
- * Note that there is another evil here: If you define a cpumask_var_t
- * as a percpu variable then the way to obtain the address of the cpumask
- * structure differently influences what this_cpu_* operation needs to be
- * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
- * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
- * other type of cpumask_var_t implementation is configured.
- *
- * Please also note that __cpumask_var_read_mostly can be used to declare
- * a cpumask_var_t variable itself (not its content) as read mostly.
- */
#ifdef CONFIG_CPUMASK_OFFSTACK
-typedef struct cpumask *cpumask_var_t;
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly
+#define CPUMASK_VAR_NULL NULL
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-static inline
+static __always_inline
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
@@ -988,13 +1046,13 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
*
* Return: %true if allocation succeeded, %false if not
*/
-static inline
+static __always_inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
-static inline
+static __always_inline
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return alloc_cpumask_var(mask, flags | __GFP_ZERO);
@@ -1004,54 +1062,54 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
#else
-typedef struct cpumask cpumask_var_t[1];
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
+#define CPUMASK_VAR_NULL {}
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
@@ -1072,54 +1130,40 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
+#define for_each_online_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_online_mask, (start))
#endif
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
-void init_cpu_online(const struct cpumask *src);
-static inline void
-set_cpu_possible(unsigned int cpu, bool possible)
-{
- if (possible)
- cpumask_set_cpu(cpu, &__cpu_possible_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_possible_mask);
-}
+#define assign_cpu(cpu, mask, val) \
+ assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_present(unsigned int cpu, bool present)
-{
- if (present)
- cpumask_set_cpu(cpu, &__cpu_present_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_present_mask);
-}
+#define __assign_cpu(cpu, mask, val) \
+ __assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-void set_cpu_online(unsigned int cpu, bool online);
+#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
+#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
+#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
+#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
-static inline void
-set_cpu_active(unsigned int cpu, bool active)
-{
- if (active)
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_active_mask);
-}
-
-static inline void
-set_cpu_dying(unsigned int cpu, bool dying)
-{
- if (dying)
- cpumask_set_cpu(cpu, &__cpu_dying_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_dying_mask);
-}
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_possible(unsigned int cpu, bool possible);
/**
* to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
@@ -1135,7 +1179,7 @@ set_cpu_dying(unsigned int cpu, bool dying)
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -1150,7 +1194,7 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
@@ -1172,31 +1216,42 @@ static __always_inline unsigned int num_online_cpus(void)
{
return raw_atomic_read(&__num_online_cpus);
}
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+
+static __always_inline unsigned int num_possible_cpus(void)
+{
+ return __num_possible_cpus;
+}
+
+#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_enabled_mask);
+}
+
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
@@ -1205,30 +1260,36 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
+#define num_enabled_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
@@ -1262,7 +1323,7 @@ static inline bool cpu_dying(unsigned int cpu)
* Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
@@ -1285,9 +1346,9 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
@@ -1307,9 +1368,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
* Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
-static inline ssize_t
-cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
- loff_t off, size_t count)
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count) - 1;
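
A minimal sketch (not part of the patch) of how the new wrap iterators introduced above might be used; the function name and the pr_info() output are illustrative only:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void visit_online_cpus_from_here(void)
{
	unsigned int cpu;

	/*
	 * Iterate over all online CPUs, starting at the current CPU and
	 * wrapping past the highest-numbered CPU back to 0.
	 */
	for_each_online_cpu_wrap(cpu, raw_smp_processor_id())
		pr_info("visiting cpu %u\n", cpu);
}
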
diff --git a/include/linux/cpumask_types.h b/include/linux/cpumask_types.h
new file mode 100644
index 000000000000..461ed1b6bcdb
--- /dev/null
+++ b/include/linux/cpumask_types.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_CPUMASK_TYPES_H
+#define __LINUX_CPUMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/threads.h>
+
+/* Don't assign or return these: may not be this big! */
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+
+/**
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
+ *
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
+ */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * i.e.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ *
+ *
+ * However, there is one notable exception: alloc_cpumask_var() allocates
+ * only nr_cpumask_bits bits (a real cpumask_t, on the other hand, always
+ * has NR_CPUS bits). Therefore you must not dereference cpumask_var_t:
+ *
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * var = *tmpmask;
+ *
+ * This code performs an NR_CPUS-length memcpy and can corrupt memory.
+ * cpumask_copy() provides safe copy functionality instead.
+ *
+ * Note that there is another evil here: if you define a cpumask_var_t
+ * as a percpu variable, the cpumask_var_t implementation determines how
+ * the address of the cpumask structure is obtained, and hence which
+ * this_cpu_* operation must be used. Please use this_cpu_cpumask_var_t
+ * in those cases. The direct use
+ * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
+ * other type of cpumask_var_t implementation is configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+#else
+typedef struct cpumask cpumask_var_t[1];
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+#endif /* __LINUX_CPUMASK_TYPES_H */
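
A minimal sketch (not part of the patch) of the safe usage pattern described in the comment above; copy_online_mask_example() is a hypothetical caller:

#include <linux/cpumask.h>
#include <linux/gfp.h>

static int copy_online_mask_example(struct cpumask *dst)
{
	cpumask_var_t tmpmask;

	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	/* Bounded by nr_cpumask_bits; never "*dst = *tmpmask". */
	cpumask_copy(tmpmask, cpu_online_mask);
	cpumask_copy(dst, tmpmask);

	free_cpumask_var(tmpmask);
	return 0;
}
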
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index de4cf0ee96f7..a98d3330385c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -74,6 +74,7 @@ extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern bool cpuset_cpu_is_isolated(int cpu);
@@ -82,11 +83,11 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
+extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -99,6 +100,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
+#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
@@ -106,6 +108,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
+#else
+static inline void cpuset_memory_pressure_bump(void) { }
+#endif
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
@@ -113,7 +118,6 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
-extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
@@ -122,9 +126,11 @@ static inline int cpuset_do_page_mem_spread(void)
extern bool current_cpuset_is_being_rebound(void);
+extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void);
+extern void cpuset_reset_sched_domains(void);
/*
* read_mems_allowed_begin is required when making decisions involving
@@ -168,6 +174,7 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern bool cpuset_node_allowed(struct cgroup *cgroup, int nid);
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -189,10 +196,16 @@ static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+ struct cpumask *mask)
+{
+ cpumask_copy(mask, task_cpu_possible_mask(p));
+}
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, task_cpu_possible_mask(p));
+ cpuset_cpus_allowed_locked(p, mask);
}
static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
@@ -246,11 +259,6 @@ static inline int cpuset_mem_spread_node(void)
return 0;
}
-static inline int cpuset_slab_spread_node(void)
-{
- return 0;
-}
-
static inline int cpuset_do_page_mem_spread(void)
{
return 0;
@@ -261,11 +269,20 @@ static inline bool current_cpuset_is_being_rebound(void)
return false;
}
+static inline void dl_rebuild_rd_accounting(void)
+{
+}
+
static inline void rebuild_sched_domains(void)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void cpuset_reset_sched_domains(void)
+{
+ partition_sched_domains(1, NULL, NULL);
+}
+
static inline void cpuset_print_current_mems_allowed(void)
{
}
@@ -284,6 +301,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
+{
+ return true;
+}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 44305336314e..d35726d6a415 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -34,7 +34,12 @@ static inline void arch_kexec_protect_crashkres(void) { }
static inline void arch_kexec_unprotect_crashkres(void) { }
#endif
-
+#ifdef CONFIG_CRASH_DM_CRYPT
+int crash_load_dm_crypt_keys(struct kimage *image);
+ssize_t dm_crypt_keys_read(char *buf, size_t count, u64 *ppos);
+#else
+static inline int crash_load_dm_crypt_keys(struct kimage *image) { return 0; }
+#endif
#ifndef arch_crash_handle_hotplug_event
static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index acc55626afdc..dd6fc3b2133b 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -15,11 +15,15 @@
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+extern unsigned long long dm_crypt_keys_addr;
+
#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
+ unsigned long long paddr, unsigned long long size);
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
@@ -99,6 +103,12 @@ static inline void vmcore_unusable(void)
* indicated in the vmcore instead. For example, a ballooned page
* contains no data and reading from such a page will cause high
* load in the hypervisor.
+ * @get_device_ram: query RAM ranges that can only be detected by device
+ * drivers, such as the virtio-mem driver, so they can be included in
+ * the crash dump on architectures that allocate the elfcore hdr in the dump
+ * ("2nd") kernel. Indicated RAM ranges may contain holes to reduce the
+ * total number of ranges; such holes can be detected using the pfn_is_ram
+ * callback just like for other RAM.
* @next: List head to manage registered callbacks internally; initialized by
* register_vmcore_cb().
*
@@ -109,11 +119,44 @@ static inline void vmcore_unusable(void)
*/
struct vmcore_cb {
bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
struct list_head next;
};
extern void register_vmcore_cb(struct vmcore_cb *cb);
extern void unregister_vmcore_cb(struct vmcore_cb *cb);
+struct vmcore_range {
+ struct list_head list;
+ unsigned long long paddr;
+ unsigned long long size;
+ loff_t offset;
+};
+
+/* Allocate a vmcore range and add it to the list. */
+static inline int vmcore_alloc_add_range(struct list_head *list,
+ unsigned long long paddr, unsigned long long size)
+{
+ struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL);
+
+ if (!m)
+ return -ENOMEM;
+ m->paddr = paddr;
+ m->size = size;
+ list_add_tail(&m->list, list);
+ return 0;
+}
+
+/* Free a list of vmcore ranges. */
+static inline void vmcore_free_ranges(struct list_head *list)
+{
+ struct vmcore_range *m, *tmp;
+
+ list_for_each_entry_safe(m, tmp, list, list) {
+ list_del(&m->list);
+ kfree(m);
+ }
+}
+
#else /* !CONFIG_CRASH_DUMP */
static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
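
A hypothetical sketch (not part of the patch) of a ->get_device_ram callback built on the new helpers; the range values are invented for illustration:

#include <linux/crash_dump.h>

static int example_get_device_ram(struct vmcore_cb *cb, struct list_head *list)
{
	/*
	 * Report one device-managed RAM range; on error the caller can
	 * release any partially built list with vmcore_free_ranges().
	 */
	return vmcore_alloc_add_range(list, 0x100000000ULL, 0x10000000ULL);
}
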
diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h
index 5a9df944fb80..f0dc03d94ca2 100644
--- a/include/linux/crash_reserve.h
+++ b/include/linux/crash_reserve.h
@@ -13,12 +13,31 @@
*/
extern struct resource crashk_res;
extern struct resource crashk_low_res;
+extern struct range crashk_cma_ranges[];
+#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION)
+#define CRASHKERNEL_CMA
+#define CRASHKERNEL_CMA_RANGES_MAX 4
+extern int crashk_cma_cnt;
+#else
+#define crashk_cma_cnt 0
+#define CRASHKERNEL_CMA_RANGES_MAX 0
+#endif
+
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base,
- unsigned long long *low_size, bool *high);
+ unsigned long long *low_size, unsigned long long *cma_size,
+ bool *high);
+
+void __init reserve_crashkernel_cma(unsigned long long cma_size);
#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef arch_add_crash_res_to_iomem
+static inline bool arch_add_crash_res_to_iomem(void)
+{
+ return true;
+}
+#endif
#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
#endif
@@ -32,13 +51,12 @@ int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
#endif
-void __init reserve_crashkernel_generic(char *cmdline,
- unsigned long long crash_size,
- unsigned long long crash_base,
- unsigned long long crash_low_size,
- bool high);
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
#else
-static inline void __init reserve_crashkernel_generic(char *cmdline,
+static inline void __init reserve_crashkernel_generic(
unsigned long long crash_size,
unsigned long long crash_base,
unsigned long long crash_low_size,
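
A sketch (not part of the patch) of the updated arch-side call sequence implied by the new prototypes, with error handling trimmed; example_reserve_crashkernel() is hypothetical:

#include <linux/crash_reserve.h>

static void __init example_reserve_crashkernel(char *cmdline,
					       unsigned long long system_ram)
{
	unsigned long long crash_size, crash_base, low_size, cma_size;
	bool high;

	if (parse_crashkernel(cmdline, system_ram, &crash_size, &crash_base,
			      &low_size, &cma_size, &high))
		return;

	reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
	reserve_crashkernel_cma(cma_size);
}
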
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 6bb0c0bf357b..ecc8bc2dd7f4 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -4,13 +4,11 @@
#include <linux/types.h>
-#define CRC_T10DIF_DIGEST_SIZE 2
-#define CRC_T10DIF_BLOCK_SIZE 1
-#define CRC_T10DIF_STRING "crct10dif"
+u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len);
-extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
- size_t len);
-extern __u16 crc_t10dif(unsigned char const *, size_t);
-extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
+static inline u16 crc_t10dif(const u8 *p, size_t len)
+{
+ return crc_t10dif_update(0, p, len);
+}
#endif
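
Since crc_t10dif() is now just crc_t10dif_update() seeded with 0, incremental use follows directly; a small sketch (not part of the patch):

#include <linux/crc-t10dif.h>

static u16 example_crc_t10dif_two_parts(const u8 *a, size_t alen,
					const u8 *b, size_t blen)
{
	/* Equivalent to crc_t10dif() over the concatenation of a and b. */
	u16 crc = crc_t10dif_update(0, a, alen);

	return crc_t10dif_update(crc, b, blen);
}
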
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9fa74529b317..b861d969b161 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -15,14 +15,7 @@
#include <linux/types.h>
-extern u16 const crc16_table[256];
-
-extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
-
-static inline u16 crc16_byte(u16 crc, const u8 data)
-{
- return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
-}
+u16 crc16(u16 crc, const u8 *p, size_t len);
#endif /* __CRC16_H */
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 9e8a032c1788..da78b215ff2e 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -1,69 +1,100 @@
-/*
- * crc32.h
- * See linux/lib/crc32.c for license and changes
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H
#include <linux/types.h>
#include <linux/bitrev.h>
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
-
/**
- * crc32_le_combine - Combine two crc32 check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, crc32_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_le() - Compute least-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements the CRC variant that is often known as the IEEE CRC-32, or
+ * simply CRC-32, and is widely used in Ethernet and other applications:
+ *
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0xedb88320
*
- * @crc1: crc32 of the first block
- * @crc2: crc32 of the second block
- * @len2: length of the second block
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
*
- * Return: The crc32_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the crc32_le() value of seq_full, then crc_full ==
- * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
- * with the same initializer as crc1, and crc2 seed was 0. See
- * also crc32_combine_test().
+ * For new applications, prefer to use CRC-32C instead. See crc32c().
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
+u32 crc32_le(u32 crc, const void *p, size_t len);
-static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+/* This is just an alias for crc32_le(). */
+static inline u32 crc32(u32 crc, const void *p, size_t len)
{
- return crc32_le_shift(crc1, len2) ^ crc2;
+ return crc32_le(crc, p, len);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
-
/**
- * __crc32c_le_combine - Combine two crc32c check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, __crc32c_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_be() - Compute most-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
*
- * @crc1: crc32c of the first block
- * @crc2: crc32c of the second block
- * @len2: length of the second block
+ * crc32_be() is the same as crc32_le() except that crc32_be() computes the
+ * *most-significant-bit-first* variant of the CRC. I.e., within each byte, the
+ * most significant bit is processed first (treated as highest order polynomial
+ * coefficient). The same bit order is also used for the CRC value itself:
*
- * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the __crc32c_le() value of seq_full, then crc_full ==
- * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
- * seeded with the same initializer as crc1, and crc2 seed
- * was 0. See also crc32c_combine_test().
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Most-significant-bit-first
+ * - Polynomial in integer form: 0x04c11db7
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+u32 crc32_be(u32 crc, const void *p, size_t len);
-static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
-{
- return __crc32c_le_shift(crc1, len2) ^ crc2;
-}
+/**
+ * crc32c() - Compute CRC-32C
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements CRC-32C, i.e. the Castagnoli CRC. This is the recommended
+ * CRC variant to use in new applications that want a 32-bit CRC.
+ *
+ * - Polynomial: x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 +
+ * x^18 + x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0x82f63b78
+ *
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
+ *
+ * Context: Any context
+ * Return: The new CRC value
+ */
+u32 crc32c(u32 crc, const void *p, size_t len);
-#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
+/*
+ * crc32_optimizations() returns flags that indicate which CRC32 library
+ * functions are using architecture-specific optimizations. Unlike
+ * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32
+ * variants and also whether any needed CPU features are available at runtime.
+ */
+#define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */
+#define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */
+#define CRC32C_OPTIMIZATION BIT(2) /* crc32c() is optimized */
+#if IS_ENABLED(CONFIG_CRC32_ARCH)
+u32 crc32_optimizations(void);
+#else
+static inline u32 crc32_optimizations(void) { return 0; }
+#endif
/*
* Helpers for hash table generation of ethernet nics:
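
The new kerneldoc stresses that crc32_le() and crc32c() do not invert the CRC at either end; a minimal sketch (not part of the patch) of the recommended finalized form:

#include <linux/crc32.h>

static u32 example_finalized_crc32c(const void *p, size_t len)
{
	/* Seed with ~0 and invert the result, as the kerneldoc recommends. */
	return ~crc32c(~0U, p, len);
}
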
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index 357ae4611a45..b8cff2f4309a 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -2,11 +2,6 @@
#ifndef _LINUX_CRC32C_H
#define _LINUX_CRC32C_H
-#include <linux/types.h>
-
-extern u32 crc32c(u32 crc, const void *address, unsigned int length);
-
-/* This macro exists for backwards-compatibility. */
-#define crc32c_le crc32c
+#include <linux/crc32.h>
#endif /* _LINUX_CRC32C_H */
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
index 62c4b7790a28..ccab711295fa 100644
--- a/include/linux/crc32poly.h
+++ b/include/linux/crc32poly.h
@@ -2,19 +2,13 @@
#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
+/* The polynomial used by crc32_le(), in integer form. See crc32_le(). */
#define CRC32_POLY_LE 0xedb88320
+
+/* The polynomial used by crc32_be(), in integer form. See crc32_be(). */
#define CRC32_POLY_BE 0x04c11db7
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
+/* The polynomial used by crc32c(), in integer form. See crc32c(). */
+#define CRC32C_POLY_LE 0x82f63b78
#endif /* _LINUX_CRC32_POLY_H */
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
index e044c60d1e61..fc0c06ab1993 100644
--- a/include/linux/crc64.h
+++ b/include/linux/crc64.h
@@ -1,18 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * See lib/crc64.c for the related specification and polynomial arithmetic.
- */
#ifndef _LINUX_CRC64_H
#define _LINUX_CRC64_H
#include <linux/types.h>
-#define CRC64_ROCKSOFT_STRING "crc64-rocksoft"
-
-u64 __pure crc64_be(u64 crc, const void *p, size_t len);
-u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len);
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ * or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 crc64_be(u64 crc, const void *p, size_t len);
-u64 crc64_rocksoft(const unsigned char *buffer, size_t len);
-u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len);
+/**
+ * crc64_nvme - Calculate CRC64-NVME
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ *
+ * This computes the CRC64 defined in the NVME NVM Command Set Specification,
+ * *including the bitwise inversion at the beginning and end*.
+ */
+u64 crc64_nvme(u64 crc, const void *p, size_t len);
#endif /* _LINUX_CRC64_H */
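
Because crc64_nvme() performs the required inversions itself, chunked computation simply chains the previous value, per the kerneldoc above; a small sketch (not part of the patch):

#include <linux/crc64.h>

static u64 example_crc64_nvme_two_parts(const void *a, size_t alen,
					const void *b, size_t blen)
{
	/* A fresh computation starts from 0; chunks chain the running value. */
	u64 crc = crc64_nvme(0, a, alen);

	return crc64_nvme(crc, b, blen);
}
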
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
index b462842f3c32..61d34749e437 100644
--- a/include/linux/crc7.h
+++ b/include/linux/crc7.h
@@ -3,13 +3,6 @@
#define _LINUX_CRC7_H
#include <linux/types.h>
-extern const u8 crc7_be_syndrome_table[256];
-
-static inline u8 crc7_be_byte(u8 crc, u8 data)
-{
- return crc7_be_syndrome_table[crc ^ data];
-}
-
extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 2976f534a7a3..343a140a6ba2 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -20,6 +20,8 @@
struct cred;
struct inode;
+extern struct task_struct init_task;
+
/*
* COW Supplementary groups list
*/
@@ -148,16 +150,19 @@ struct cred {
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
-extern int copy_creds(struct task_struct *, unsigned long);
+extern int copy_creds(struct task_struct *, u64);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
-extern const struct cred *override_creds(const struct cred *);
-extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
+static inline const struct cred *kernel_cred(void)
+{
+ /* shut up sparse */
+ return rcu_dereference_raw(init_task.cred);
+}
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
@@ -172,32 +177,26 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
cred->cap_inheritable));
}
-/**
- * get_new_cred_many - Get references on a new set of credentials
- * @cred: The new credentials to reference
- * @nr: Number of references to acquire
- *
- * Get references on the specified set of new credentials. The caller must
- * release all acquired references.
- */
-static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
+static inline const struct cred *override_creds(const struct cred *override_cred)
{
- atomic_long_add(nr, &cred->usage);
- return cred;
+ return rcu_replace_pointer(current->cred, override_cred, 1);
}
-/**
- * get_new_cred - Get a reference on a new set of credentials
- * @cred: The new credentials to reference
- *
- * Get a reference on the specified set of new credentials. The caller must
- * release the reference.
- */
-static inline struct cred *get_new_cred(struct cred *cred)
+static inline const struct cred *revert_creds(const struct cred *revert_cred)
{
- return get_new_cred_many(cred, 1);
+ return rcu_replace_pointer(current->cred, revert_cred, 1);
}
+DEFINE_CLASS(override_creds,
+ const struct cred *,
+ revert_creds(_T),
+ override_creds(override_cred), const struct cred *override_cred)
+
+#define scoped_with_creds(cred) \
+ scoped_class(override_creds, __UNIQUE_ID(label), cred)
+
+#define scoped_with_kernel_creds() scoped_with_creds(kernel_cred())
+
/**
* get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
@@ -218,7 +217,8 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
if (!cred)
return cred;
nonconst_cred->non_rcu = 0;
- return get_new_cred_many(nonconst_cred, nr);
+ atomic_long_add(nr, &nonconst_cred->usage);
+ return cred;
}
/*
@@ -280,6 +280,13 @@ static inline void put_cred(const struct cred *cred)
put_cred_many(cred, 1);
}
+DEFINE_CLASS(prepare_creds,
+ struct cred *,
+ if (_T) put_cred(_T),
+ prepare_creds(), void)
+
+DEFINE_FREE(put_cred, struct cred *, if (!IS_ERR_OR_NULL(_T)) put_cred(_T))
+
/**
* current_cred - Access the current task's subjective credentials
*
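
A hypothetical sketch (assuming the usual scoped_class() guard semantics, which are not shown in this hunk) of the new credential scopes replacing manual override_creds()/revert_creds() pairs:

#include <linux/cred.h>

static void example_do_as_kernel(void)
{
	scoped_with_kernel_creds() {
		/* Runs with init_task's credentials; reverted on scope exit. */
	}
}
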
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b164da5e129e..a2137e19be7d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -13,7 +13,8 @@
#define _LINUX_CRYPTO_H
#include <linux/completion.h>
-#include <linux/refcount.h>
+#include <linux/errno.h>
+#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -22,7 +23,6 @@
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
@@ -50,6 +50,15 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
+ * Set if the algorithm data structure should be duplicated into
+ * kmalloc memory before registration. This is useful for hardware
+ * that can be disconnected at will. Do not use this if the data
+ * structure is embedded into a bigger one. Duplicate the overall
+ * data structure in the driver in that case.
+ */
+#define CRYPTO_ALG_DUP_FIRST 0x00000200
+
+/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -124,6 +133,14 @@
*/
#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+/* Set if the algorithm supports virtual addresses. */
+#define CRYPTO_ALG_REQ_VIRT 0x00040000
+
+/* Set if the algorithm cannot have a fallback (e.g., phmac). */
+#define CRYPTO_ALG_NO_FALLBACK 0x00080000
+
+/* The high bits 0xff000000 are reserved for type-specific flags. */
+
/*
* Transform masks and values (for crt_flags).
*/
@@ -133,6 +150,7 @@
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
+#define CRYPTO_TFM_REQ_ON_STACK 0x00000800
/*
* Miscellaneous stuff.
@@ -239,26 +257,7 @@ struct cipher_alg {
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
-/**
- * struct compress_alg - compression/decompression algorithm
- * @coa_compress: Compress a buffer of specified length, storing the resulting
- * data in the specified buffer. Return the length of the
- * compressed data in dlen.
- * @coa_decompress: Decompress the source buffer, storing the uncompressed
- * data in the specified buffer. The length of the data is
- * returned in dlen.
- *
- * All fields are mandatory.
- */
-struct compress_alg {
- int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
-};
-
#define cra_cipher cra_u.cipher
-#define cra_compress cra_u.compress
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -291,6 +290,7 @@ struct compress_alg {
* to the alignmask of the algorithm being used, in order to
* avoid the API having to realign them. Note: the alignmask is
* not supported for hash algorithms and is always 0 for them.
+ * @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -309,27 +309,16 @@ struct compress_alg {
* transformation types. There are multiple options, such as
* &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
- * callbacks. This is the case for: cipher, compress, shash.
+ * callbacks. This is the case for: cipher.
* @cra_u: Callbacks implementing the transformation. This is a union of
* multiple structures. Depending on the type of transformation selected
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
- * @cra_init: Initialize the cryptographic transformation object. This function
- * is used to initialize the cryptographic transformation object.
- * This function is called only once at the instantiation time, right
- * after the transformation context was allocated. In case the
- * cryptographic hardware has some special requirements which need to
- * be handled by software, this function shall check for the precise
- * requirement of the transformation and put any software fallbacks
- * in place.
- * @cra_exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @cra_init, used to remove various changes set in
- * @cra_init.
+ * @cra_init: Deprecated, do not use.
+ * @cra_exit: Deprecated, do not use.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
- * @cra_u.compress: Union member which contains a (de)compression algorithm.
- * See @struct @compress_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
@@ -348,6 +337,7 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+ unsigned int cra_reqsize;
int cra_priority;
refcount_t cra_refcnt;
@@ -359,7 +349,6 @@ struct crypto_alg {
union {
struct cipher_alg cipher;
- struct compress_alg compress;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -425,18 +414,16 @@ struct crypto_tfm {
u32 crt_flags;
int node;
-
+
+ struct crypto_tfm *fb;
+
void (*exit)(struct crypto_tfm *tfm);
-
+
struct crypto_alg *__crt_alg;
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_comp {
- struct crypto_tfm base;
-};
-
/*
* Transform user interface.
*/
@@ -472,6 +459,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
+static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_reqsize;
+}
+
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
@@ -493,52 +485,45 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
+static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
- return (struct crypto_comp *)tfm;
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
-static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
- u32 type, u32 mask)
+static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
+ return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
-static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
+static inline void crypto_request_set_callback(
+ struct crypto_async_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
{
- return &tfm->base;
-}
+ u32 keep = CRYPTO_TFM_REQ_ON_STACK;
-static inline void crypto_free_comp(struct crypto_comp *tfm)
-{
- crypto_free_tfm(crypto_comp_tfm(tfm));
+ req->complete = compl;
+ req->data = data;
+ req->flags &= keep;
+ req->flags |= flags & ~keep;
}
-static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
+static inline void crypto_request_set_tfm(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
+ req->tfm = tfm;
+ req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
-static inline const char *crypto_comp_name(struct crypto_comp *tfm)
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
+
+static inline void crypto_stack_request_init(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
+ req->flags = 0;
+ crypto_request_set_tfm(req, tfm);
+ req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
-int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
-int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
#endif /* _LINUX_CRYPTO_H */
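
A minimal sketch (not part of the patch) of initializing an on-stack request with the new helpers; note that crypto_request_set_callback() deliberately preserves CRYPTO_TFM_REQ_ON_STACK:

#include <linux/crypto.h>

static void example_init_stack_request(struct crypto_async_request *req,
				       struct crypto_tfm *tfm,
				       crypto_completion_t compl, void *data)
{
	/* Marks the request as stack-allocated via CRYPTO_TFM_REQ_ON_STACK. */
	crypto_stack_request_init(req, tfm);
	/* The ON_STACK bit survives; only the caller-supplied flags change. */
	crypto_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, compl, data);
}
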
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
deleted file mode 100644
index 60b25020281f..000000000000
--- a/include/linux/cxl-event.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2023 Intel Corporation. */
-#ifndef _LINUX_CXL_EVENT_H
-#define _LINUX_CXL_EVENT_H
-
-#include <linux/types.h>
-#include <linux/uuid.h>
-#include <linux/workqueue_types.h>
-
-/*
- * Common Event Record Format
- * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
- */
-struct cxl_event_record_hdr {
- u8 length;
- u8 flags[3];
- __le16 handle;
- __le16 related_handle;
- __le64 timestamp;
- u8 maint_op_class;
- u8 reserved[15];
-} __packed;
-
-#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
-struct cxl_event_generic {
- struct cxl_event_record_hdr hdr;
- u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
-} __packed;
-
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
-struct cxl_event_gen_media {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 device[3];
- u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
- u8 reserved[46];
-} __packed;
-
-/*
- * DRAM Event Record - DER
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
- */
-#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
-struct cxl_event_dram {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 nibble_mask[3];
- u8 bank_group;
- u8 bank;
- u8 row[3];
- u8 column[2];
- u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
- u8 reserved[0x17];
-} __packed;
-
-/*
- * Get Health Info Record
- * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
- */
-struct cxl_get_health_info {
- u8 health_status;
- u8 media_status;
- u8 add_status;
- u8 life_used;
- u8 device_temp[2];
- u8 dirty_shutdown_cnt[4];
- u8 cor_vol_err_cnt[4];
- u8 cor_per_err_cnt[4];
-} __packed;
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-struct cxl_event_mem_module {
- struct cxl_event_record_hdr hdr;
- u8 event_type;
- struct cxl_get_health_info info;
- u8 reserved[0x3d];
-} __packed;
-
-/*
- * General Media or DRAM Event Common Fields
- * - provides common access to phys_addr
- */
-struct cxl_event_common {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
-} __packed;
-
-union cxl_event {
- struct cxl_event_generic generic;
- struct cxl_event_gen_media gen_media;
- struct cxl_event_dram dram;
- struct cxl_event_mem_module mem_module;
- struct cxl_event_common common;
-} __packed;
-
-/*
- * Common Event Record Format; in event logs
- * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
- */
-struct cxl_event_record_raw {
- uuid_t id;
- union cxl_event event;
-} __packed;
-
-enum cxl_event_type {
- CXL_CPER_EVENT_GENERIC,
- CXL_CPER_EVENT_GEN_MEDIA,
- CXL_CPER_EVENT_DRAM,
- CXL_CPER_EVENT_MEM_MODULE,
-};
-
-#define CPER_CXL_DEVICE_ID_VALID BIT(0)
-#define CPER_CXL_DEVICE_SN_VALID BIT(1)
-#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
-struct cxl_cper_event_rec {
- struct {
- u32 length;
- u64 validation_bits;
- struct cper_cxl_event_devid {
- u16 vendor_id;
- u16 device_id;
- u8 func_num;
- u8 device_num;
- u8 bus_num;
- u16 segment_num;
- u16 slot_num; /* bits 2:0 reserved */
- u8 reserved;
- } __packed device_id;
- struct cper_cxl_event_sn {
- u32 lower_dw;
- u32 upper_dw;
- } __packed dev_serial_num;
- } __packed hdr;
-
- union cxl_event event;
-} __packed;
-
-struct cxl_cper_work_data {
- enum cxl_event_type event_type;
- struct cxl_cper_event_rec rec;
-};
-
-#ifdef CONFIG_ACPI_APEI_GHES
-int cxl_cper_register_work(struct work_struct *work);
-int cxl_cper_unregister_work(struct work_struct *work);
-int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
-#else
-static inline int cxl_cper_register_work(struct work_struct *work)
-{
- return 0;
-}
-
-static inline int cxl_cper_unregister_work(struct work_struct *work)
-{
- return 0;
-}
-static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
-{
- return 0;
-}
-#endif
-
-#endif /* _LINUX_CXL_EVENT_H */
diff --git a/include/linux/damon.h b/include/linux/damon.h
index f7da65e1ac04..3813373a9200 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -36,6 +36,16 @@ struct damon_addr_range {
};
/**
+ * struct damon_size_range - A size range [@min, @max] for filters to operate on.
+ * @min: Min size (inclusive).
+ * @max: Max size (inclusive).
+ */
+struct damon_size_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
* struct damon_region - Represents a monitoring target region.
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
@@ -81,17 +91,23 @@ struct damon_region {
* @nr_regions: Number of monitoring target regions of this target.
* @regions_list: Head of the monitoring target regions of this target.
* @list: List head for siblings.
+ * @obsolete: Whether the commit destination target is obsolete.
*
* Each monitoring context could have multiple targets. For example, a context
* for virtual memory address spaces could have multiple target processes. The
* @pid should be set for appropriate &struct damon_operations including the
* virtual address spaces monitoring operations.
+ *
+ * @obsolete is used only for damon_commit_targets() source targets, to specify
+ * the matching destination targets are obsolete. Read damon_commit_targets()
+ * to see how it is handled.
*/
struct damon_target {
struct pid *pid;
unsigned int nr_regions;
struct list_head regions_list;
struct list_head list;
+ bool obsolete;
};
/**
@@ -100,19 +116,21 @@ struct damon_target {
*
* @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
* @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
- * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_PAGEOUT: Reclaim the region.
* @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
* @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
* @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
* @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_MIGRATE_HOT: Migrate the regions prioritizing warmer regions.
+ * @DAMOS_MIGRATE_COLD: Migrate the regions prioritizing colder regions.
* @DAMOS_STAT: Do nothing but count the stat.
* @NR_DAMOS_ACTIONS: Total number of DAMOS actions
*
* The support of each action is up to running &struct damon_operations.
- * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR supports all actions except
- * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO. &enum DAMON_OPS_PADDR
- * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
- * DAMOS_LRU_DEPRIO, and &DAMOS_STAT.
+ * Refer to the 'Operation Action' section of Documentation/mm/damon/design.rst
+ * for the status of their support.
+ *
+ * Note that DAMOS_PAGEOUT doesn't trigger demotions.
*/
enum damos_action {
DAMOS_WILLNEED,
@@ -122,6 +140,8 @@ enum damos_action {
DAMOS_NOHUGEPAGE,
DAMOS_LRU_PRIO,
DAMOS_LRU_DEPRIO,
+ DAMOS_MIGRATE_HOT,
+ DAMOS_MIGRATE_COLD,
DAMOS_STAT, /* Do nothing but only record the stat */
NR_DAMOS_ACTIONS,
};
@@ -131,6 +151,10 @@ enum damos_action {
*
* @DAMOS_QUOTA_USER_INPUT: User-input value.
* @DAMOS_QUOTA_SOME_MEM_PSI_US: System level some memory PSI in us.
+ * @DAMOS_QUOTA_NODE_MEM_USED_BP: MemUsed ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
+ * @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
* @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
*
* Metrics equal to larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
@@ -138,6 +162,10 @@ enum damos_action {
enum damos_quota_goal_metric {
DAMOS_QUOTA_USER_INPUT,
DAMOS_QUOTA_SOME_MEM_PSI_US,
+ DAMOS_QUOTA_NODE_MEM_USED_BP,
+ DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
NR_DAMOS_QUOTA_GOAL_METRICS,
};
@@ -147,6 +175,8 @@ enum damos_quota_goal_metric {
* @target_value: Target value of @metric to achieve with the tuning.
* @current_value: Current value of @metric.
* @last_psi_total: Last measured total PSI
+ * @nid: Node id.
+ * @memcg_id: Memcg id.
* @list: List head for siblings.
*
* Data structure for getting the current score of the quota tuning goal. The
@@ -157,6 +187,12 @@ enum damos_quota_goal_metric {
* If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
* entered by the user, probably inside the kdamond callbacks. Otherwise,
* DAMON sets @current_value with self-measured value of @metric.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEM_{USED,FREE}_BP, @nid represents the node
+ * id of the target node to account the used/free memory.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEMCG_{USED,FREE}_BP, @nid and @memcg_id
+ * represent the node id and the cgroup to account the used/free memory for.
*/
struct damos_quota_goal {
enum damos_quota_goal_metric metric;
@@ -165,6 +201,10 @@ struct damos_quota_goal {
/* metric-dependent fields */
union {
u64 last_psi_total;
+ struct {
+ int nid;
+ unsigned short memcg_id;
+ };
};
struct list_head list;
};
@@ -189,11 +229,16 @@ struct damos_quota_goal {
* size quota is set, DAMON tries to apply the action only up to &sz bytes
* within &reset_interval.
*
- * Internally, the time quota is transformed to a size quota using estimated
- * throughput of the scheme's action. DAMON then compares it against &sz and
- * uses smaller one as the effective quota.
+ * To reconcile the different types of quotas and goals, DAMON internally
+ * converts them into a single size quota called the "effective quota",
+ * which DAMON uses internally as the only real quota. The conversion is
+ * made as follows.
+ *
+ * The time quota is transformed to a size quota using estimated throughput of
+ * the scheme's action. DAMON then compares it against &sz and uses smaller
+ * one as the effective quota.
*
- * If @goals is not empt, DAMON calculates yet another size quota based on the
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
* goals using its internal feedback loop algorithm, for every @reset_interval.
* Then, if the new size quota is smaller than the effective quota, it uses the
* new size quota as the effective quota.
@@ -229,7 +274,6 @@ struct damos_quota {
unsigned long charge_addr_from;
/* For prioritization */
- unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
/* For feedback loop */
@@ -283,21 +327,44 @@ struct damos_watermarks {
* @sz_tried: Total size of regions that the scheme is tried to be applied.
* @nr_applied: Total number of regions that the scheme is applied.
* @sz_applied: Total size of regions that the scheme is applied.
+ * @sz_ops_filter_passed:
+ * Total bytes that passed ops layer-handled DAMOS filters.
* @qt_exceeds: Total number of times the quota of the scheme has exceeded.
+ *
+ * "Tried an action to a region" in this context means the DAMOS core logic
+ * determined the region as eligible to apply the action. The access pattern
+ * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
+ * (&struct damos_watermarks) and filters (&struct damos_filter) handled by
+ * the core logic can affect this. The core logic asks the operation set
+ * (&struct damon_operations) to apply the action to the region.
+ *
+ * "Applied an action to a region" in this context means the operation set
+ * (&struct damon_operations) successfully applied the action to the region, at
+ * least to a part of the region. The filters (&struct damos_filter) handled
+ * at the operation set layer, the type of the action, and the pages of the
+ * region can affect this. For example, if a filter is set to exclude
+ * anonymous pages and the region has only anonymous pages, applying the
+ * action to the region will fail. If the action is &DAMOS_PAGEOUT and all
+ * pages of the region are already paged out, applying the action will
+ * likewise fail.
*/
struct damos_stat {
unsigned long nr_tried;
unsigned long sz_tried;
unsigned long nr_applied;
unsigned long sz_applied;
+ unsigned long sz_ops_filter_passed;
unsigned long qt_exceeds;
};
/**
* enum damos_filter_type - Type of memory for &struct damos_filter
* @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_ACTIVE: Active pages.
* @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
* @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages.
+ * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage.
+ * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages.
* @DAMOS_FILTER_TYPE_ADDR: Address range.
* @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
* @NR_DAMOS_FILTER_TYPES: Number of filter types.
@@ -315,8 +382,11 @@ struct damos_stat {
*/
enum damos_filter_type {
DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_ACTIVE,
DAMOS_FILTER_TYPE_MEMCG,
DAMOS_FILTER_TYPE_YOUNG,
+ DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ DAMOS_FILTER_TYPE_UNMAPPED,
DAMOS_FILTER_TYPE_ADDR,
DAMOS_FILTER_TYPE_TARGET,
NR_DAMOS_FILTER_TYPES,
@@ -324,31 +394,61 @@ enum damos_filter_type {
/**
* struct damos_filter - DAMOS action target memory filter.
- * @type: Type of the page.
- * @matching: If the matching page should filtered out or in.
+ * @type: Type of the target memory.
+ * @matching: Whether this is for @type-matching memory.
+ * @allow: Whether to include or exclude the @matching memory.
* @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
* @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
* @target_idx: Index of the &struct damon_target of
* &damon_ctx->adaptive_targets if @type is
* DAMOS_FILTER_TYPE_TARGET.
+ * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
* @list: List head for siblings.
*
* Before applying the &damos->action to a memory region, DAMOS checks if each
- * page of the region matches to this and avoid applying the action if so.
- * Support of each filter type depends on the running &struct damon_operations
- * and the type. Refer to &enum damos_filter_type for more detai.
+ * byte of the region matches the given condition and avoids applying the
+ * action if so. Support of each filter type depends on the running &struct
+ * damon_operations and the type. Refer to &enum damos_filter_type for more
+ * details.
*/
struct damos_filter {
enum damos_filter_type type;
bool matching;
+ bool allow;
union {
unsigned short memcg_id;
struct damon_addr_range addr_range;
int target_idx;
+ struct damon_size_range sz_range;
};
struct list_head list;
};
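A minimal sketch of constructing such a filter with the three-argument damos_new_filter() declared below in this header (the error handling is illustrative):

	/* reject anonymous pages: matching == true, allow == false */
	static int example_reject_anon(struct damos *scheme)
	{
		struct damos_filter *filter;

		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
		if (!filter)
			return -ENOMEM;
		damos_add_filter(scheme, filter);
		return 0;
	}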
+struct damon_ctx;
+struct damos;
+
+/**
+ * struct damos_walk_control - Control damos_walk().
+ *
+ * @walk_fn: Function to be called back for each region.
+ * @data: Data that will be passed to walk functions.
+ *
+ * Control damos_walk(), which requests a specific kdamond to invoke the
+ * given function for each region that is eligible for the actions of the
+ * kdamond's schemes. Refer to damos_walk() for more details.
+ */
+struct damos_walk_control {
+ void (*walk_fn)(void *data, struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *s, unsigned long sz_filter_passed);
+ void *data;
+/* private: internal use only */
+ /* informs if the kdamond finished handling the walk request */
+ struct completion completion;
+ /* informs if the walk is canceled. */
+ bool canceled;
+};
+
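A minimal usage sketch, assuming only the damos_walk() prototype declared later in this header; the callback sums the per-region bytes that passed ops-layer filters:

	static void example_walk_fn(void *data, struct damon_ctx *ctx,
			struct damon_target *t, struct damon_region *r,
			struct damos *s, unsigned long sz_filter_passed)
	{
		unsigned long *total = data;

		*total += sz_filter_passed;
	}

	static int example_walk(struct damon_ctx *ctx)
	{
		unsigned long total = 0;
		struct damos_walk_control control = {
			.walk_fn = example_walk_fn,
			.data = &total,
		};

		/* returns after the kdamond completed (or canceled) the walk */
		return damos_walk(ctx, &control);
	}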
/**
* struct damos_access_pattern - Target access pattern of the given scheme.
* @min_sz_region: Minimum size of target regions.
@@ -368,13 +468,33 @@ struct damos_access_pattern {
};
/**
+ * struct damos_migrate_dests - Migration destination nodes and their weights.
+ * @node_id_arr: Array of migration destination node ids.
+ * @weight_arr: Array of migration weights for @node_id_arr.
+ * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
+ *
+ * @node_id_arr is an array of the ids of migration destination nodes.
+ * @weight_arr is an array of the weights for those. The weights in
+ * @weight_arr are for the nodes in @node_id_arr at the same array index.
+ */
+struct damos_migrate_dests {
+ unsigned int *node_id_arr;
+ unsigned int *weight_arr;
+ size_t nr_dests;
+};
+
+/**
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @pattern: Access pattern of target regions.
- * @action: &damo_action to be applied to the target regions.
+ * @action: &damos_action to be applied to the target regions.
* @apply_interval_us: The time between applying the @action.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
- * @filters: Additional set of &struct damos_filter for &action.
+ * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
+ * @target_nid: Destination node if @action is "migrate_{hot,cold}".
+ * @core_filters: Additional set of &struct damos_filter for &action.
+ * @ops_filters: ops layer handling &struct damos_filter objects list.
+ * @last_applied: The last ops-managing entity that @action was applied to.
* @stat: Statistics of this scheme.
* @list: List head for siblings.
*
@@ -389,9 +509,25 @@ struct damos_access_pattern {
* monitoring context are inactive, DAMON stops monitoring either, and just
* repeatedly checks the watermarks.
*
+ * @migrate_dests specifies multiple migration target nodes with different
+ * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if
+ * this is set.
+ *
+ * @target_nid is used to set the migration target node for migrate_hot or
+ * migrate_cold actions when @migrate_dests is unset.
+ *
* Before applying the &action to a memory region, &struct damon_operations
* implementation could check pages of the region and skip &action to respect
- * &filters
+ * &core_filters.
+ *
+ * The minimum entity that @action can be applied to depends on the
+ * underlying &struct damon_operations. Since it may not be aligned with the
+ * core layer abstraction, namely &struct damon_region, &struct
+ * damon_operations could apply @action to the same entity multiple times.
+ * Large folios that underlie multiple &struct damon_region objects could be
+ * such examples. The &struct damon_operations can use @last_applied to
+ * avoid that. The DAMOS core logic unsets @last_applied when each walk of
+ * the regions for applying the scheme is finished.
*
* After applying the &action to each region, &stat_count and &stat_sz is
* updated to reflect the number of regions and total size of regions that the
@@ -407,10 +543,28 @@ struct damos {
* @action
*/
unsigned long next_apply_sis;
+ /* informs if ongoing DAMOS walk for this scheme is finished */
+ bool walk_completed;
+ /*
+ * Whether the current region in the filtering stage is allowed by the
+ * core layer-handled filters. If true, the operations layer allows it,
+ * too.
+ */
+ bool core_filters_allowed;
+ /* whether to reject regions unmatched by core/ops filters */
+ bool core_filters_default_reject;
+ bool ops_filters_default_reject;
/* public: */
struct damos_quota quota;
struct damos_watermarks wmarks;
- struct list_head filters;
+ union {
+ struct {
+ int target_nid;
+ struct damos_migrate_dests migrate_dests;
+ };
+ };
+ struct list_head core_filters;
+ struct list_head ops_filters;
+ void *last_applied;
struct damos_stat stat;
struct list_head list;
};
@@ -431,8 +585,6 @@ enum damon_ops_id {
NR_DAMON_OPS,
};
-struct damon_ctx;
-
/**
* struct damon_operations - Monitoring operations for given use cases.
*
@@ -441,10 +593,10 @@ struct damon_ctx;
* @update: Update operations-related data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
- * @reset_aggregated: Reset aggregated accesses monitoring results.
* @get_scheme_score: Get the score of a region for a scheme.
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
@@ -453,8 +605,7 @@ struct damon_ctx;
* (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
* the monitoring, @update after each &damon_attrs.ops_update_interval, and
* @check_accesses, @target_valid and @prepare_access_checks after each
- * &damon_attrs.sample_interval. Finally, @reset_aggregated is called after
- * each &damon_attrs.aggr_interval.
+ * &damon_attrs.sample_interval.
*
* Each &struct damon_operations instance having valid @id can be registered
* via damon_register_ops() and selected by damon_select_ops() later.
@@ -469,16 +620,16 @@ struct damon_ctx;
* last preparation and update the number of observed accesses of each region.
* It should also return the max number of observed accesses made as a result
* of its update. The value will be used for the regions adjustment threshold.
- * @reset_aggregated should reset the access monitoring results that aggregated
- * by @check_accesses.
* @get_scheme_score should return the priority score of a region for a scheme
* as an integer in [0, &DAMOS_MAX_SCORE].
* @apply_scheme is called from @kdamond when a region for user provided
* DAMON-based operation scheme is found. It should apply the scheme's action
* to the region and return bytes of the region that the action is successfully
- * applied.
+ * applied. It should also report how many bytes of the region have passed
+ * filters (&struct damos_filter) that are handled by itself.
* @target_valid should check whether the target is still valid for the
* monitoring.
+ * @cleanup_target is called just before the target is deallocated.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
@@ -487,58 +638,67 @@ struct damon_operations {
void (*update)(struct damon_ctx *context);
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
- void (*reset_aggregated)(struct damon_ctx *context);
int (*get_scheme_score)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme);
unsigned long (*apply_scheme)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
- struct damos *scheme);
+ struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
+/*
+ * struct damon_call_control - Control damon_call().
+ *
+ * @fn: Function to be called back.
+ * @data: Data that will be passed to @fn.
+ * @repeat: Repeat invocations.
+ * @return_code: Return code from @fn invocation.
+ * @dealloc_on_cancel: De-allocate when canceled.
+ *
+ * Control damon_call(), which requests a specific kdamond to invoke a given
+ * function. Refer to damon_call() for more details.
+ */
+struct damon_call_control {
+ int (*fn)(void *data);
+ void *data;
+ bool repeat;
+ int return_code;
+ bool dealloc_on_cancel;
+/* private: internal use only */
+ /* informs if the kdamond finished handling the request */
+ struct completion completion;
+ /* informs if the kdamond canceled the @fn invocation */
+ bool canceled;
+ /* List head for siblings. */
+ struct list_head list;
+};
+
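A minimal usage sketch, assuming only the damon_call() prototype declared later in this header; the hypothetical callback would run in the kdamond context, where monitoring results can be read without extra locking:

	static int example_read_results(void *data)
	{
		/* invoked from the kdamond; safe place to walk the results */
		return 0;
	}

	static int example_call(struct damon_ctx *ctx)
	{
		struct damon_call_control control = {
			.fn = example_read_results,
		};
		int err = damon_call(ctx, &control);

		return err ? err : control.return_code;
	}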
/**
- * struct damon_callback - Monitoring events notification callbacks.
- *
- * @before_start: Called before starting the monitoring.
- * @after_wmarks_check: Called after each schemes' watermarks check.
- * @after_sampling: Called after each sampling.
- * @after_aggregation: Called after each aggregation.
- * @before_damos_apply: Called before applying DAMOS action.
- * @before_terminate: Called before terminating the monitoring.
- * @private: User private data.
- *
- * The monitoring thread (&damon_ctx.kdamond) calls @before_start and
- * @before_terminate just before starting and finishing the monitoring,
- * respectively. Therefore, those are good places for installing and cleaning
- * @private.
- *
- * The monitoring thread calls @after_wmarks_check after each DAMON-based
- * operation schemes' watermarks check. If users need to make changes to the
- * attributes of the monitoring context while it's deactivated due to the
- * watermarks, this is the good place to do.
- *
- * The monitoring thread calls @after_sampling and @after_aggregation for each
- * of the sampling intervals and aggregation intervals, respectively.
- * Therefore, users can safely access the monitoring results without additional
- * protection. For the reason, users are recommended to use these callback for
- * the accesses to the results.
- *
- * If any callback returns non-zero, monitoring stops.
+ * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
+ *
+ * @access_bp: Access events observation ratio to achieve in bp.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
+ * @min_sample_us: Minimum resulting sampling interval in microseconds.
+ * @max_sample_us: Maximum resulting sampling interval in microseconds.
+ *
+ * DAMON automatically tunes &damon_attrs->sample_interval and
+ * &damon_attrs->aggr_interval, aiming for the ratio in bp (1/10,000) of
+ * DAMON-observed access events to the theoretical maximum amount within
+ * @aggrs aggregations to be the same as @access_bp. For each @aggrs
+ * aggregations, the logic increases &damon_attrs->aggr_interval and
+ * &damon_attrs->sample_interval in the same ratio if the current access
+ * events observation ratio is lower than the target, and vice versa.
+ *
+ * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
*/
-struct damon_callback {
- void *private;
-
- int (*before_start)(struct damon_ctx *context);
- int (*after_wmarks_check)(struct damon_ctx *context);
- int (*after_sampling)(struct damon_ctx *context);
- int (*after_aggregation)(struct damon_ctx *context);
- int (*before_damos_apply)(struct damon_ctx *context,
- struct damon_target *target,
- struct damon_region *region,
- struct damos *scheme);
- void (*before_terminate)(struct damon_ctx *context);
+struct damon_intervals_goal {
+ unsigned long access_bp;
+ unsigned long aggrs;
+ unsigned long min_sample_us;
+ unsigned long max_sample_us;
};
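For illustration, a goal that aims to observe about 4% (400 bp) of the theoretically possible access events per three aggregations, clamping the sampling interval to [5 ms, 10 s]; the numbers are arbitrary placeholders:

	static const struct damon_intervals_goal example_intervals_goal = {
		.access_bp = 400,		/* 4% in bp (1/10,000) */
		.aggrs = 3,
		.min_sample_us = 5000,		/* 5 ms */
		.max_sample_us = 10000000,	/* 10 s */
	};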
/**
@@ -547,6 +707,7 @@ struct damon_callback {
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @ops_update_interval: The time between monitoring operations updates.
+ * @intervals_goal: Intervals auto-tuning goal.
* @min_nr_regions: The minimum number of adaptive monitoring
* regions.
* @max_nr_regions: The maximum number of adaptive monitoring
@@ -560,14 +721,26 @@ struct damon_callback {
* ``mmap()`` calls from the application, in case of virtual memory monitoring)
* and applies the changes for each @ops_update_interval. All time intervals
* are in micro-seconds. Please refer to &struct damon_operations and &struct
- * damon_callback for more detail.
+ * damon_call_control for more detail.
*/
struct damon_attrs {
unsigned long sample_interval;
unsigned long aggr_interval;
unsigned long ops_update_interval;
+ struct damon_intervals_goal intervals_goal;
unsigned long min_nr_regions;
unsigned long max_nr_regions;
+/* private: internal use only */
+ /*
+ * @aggr_interval to @sample_interval ratio.
+ * Core-external components call damon_set_attrs() with &damon_attrs
+ * having this field unset. In that case, damon_set_attrs() sets this
+ * field of the resulting &damon_attrs. Core-internal components such
+ * as kdamond_tune_intervals() call damon_set_attrs() with &damon_attrs
+ * having this field set. In that case, damon_set_attrs() just keeps
+ * it.
+ */
+ unsigned long aggr_samples;
};
/**
@@ -595,8 +768,8 @@ struct damon_attrs {
* Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
- *
+ * @addr_unit: Scale factor for core to ops address conversion.
+ * @min_sz_region: Minimum region size.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
@@ -616,15 +789,30 @@ struct damon_ctx {
* update
*/
unsigned long next_ops_update_sis;
+ /*
+ * number of sample intervals that should be passed before next
+ * intervals tuning
+ */
+ unsigned long next_intervals_tune_sis;
/* for waiting until the execution of the kdamond_fn is started */
struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
+
+ /* lists of &struct damon_call_control */
+ struct list_head call_controls;
+ struct mutex call_controls_lock;
+
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
/* public: */
struct task_struct *kdamond;
struct mutex kdamond_lock;
struct damon_operations ops;
- struct damon_callback callback;
+ unsigned long addr_unit;
+ unsigned long min_sz_region;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -683,11 +871,17 @@ static inline unsigned long damon_sz_region(struct damon_region *r)
#define damos_for_each_quota_goal_safe(goal, next, quota) \
list_for_each_entry_safe(goal, next, &(quota)->goals, list)
-#define damos_for_each_filter(f, scheme) \
- list_for_each_entry(f, &(scheme)->filters, list)
+#define damos_for_each_core_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->core_filters, list)
-#define damos_for_each_filter_safe(f, next, scheme) \
- list_for_each_entry_safe(f, next, &(scheme)->filters, list)
+#define damos_for_each_core_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->core_filters, list)
+
+#define damos_for_each_ops_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->ops_filters, list)
+
+#define damos_for_each_ops_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
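A small sketch of iterating both lists with the split macros above, e.g. to count a scheme's filters:

	static unsigned int example_nr_filters(struct damos *scheme)
	{
		struct damos_filter *filter;
		unsigned int nr = 0;

		damos_for_each_core_filter(filter, scheme)
			nr++;
		damos_for_each_ops_filter(filter, scheme)
			nr++;
		return nr;
	}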
#ifdef CONFIG_DAMON
@@ -707,13 +901,14 @@ static inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges);
+ unsigned int nr_ranges, unsigned long min_sz_region);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
struct damos_filter *damos_new_filter(enum damos_filter_type type,
- bool matching);
+ bool matching, bool allow);
void damos_add_filter(struct damos *s, struct damos_filter *f);
+bool damos_filter_for_ops(enum damos_filter_type type);
void damos_destroy_filter(struct damos_filter *f);
struct damos_quota_goal *damos_new_quota_goal(
@@ -726,15 +921,17 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
enum damos_action action,
unsigned long apply_interval_us,
struct damos_quota *quota,
- struct damos_watermarks *wmarks);
+ struct damos_watermarks *wmarks,
+ int target_nid);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
+int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src);
struct damon_target *damon_new_target(void);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
@@ -742,6 +939,7 @@ void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
void damon_set_schemes(struct damon_ctx *ctx,
struct damos **schemes, ssize_t nr_schemes);
+int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx);
int damon_nr_running_ctxs(void);
bool damon_is_registered_ops(enum damon_ops_id id);
int damon_register_ops(struct damon_operations *ops);
@@ -760,11 +958,17 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
}
+bool damon_initialized(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
+
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
- unsigned long *start, unsigned long *end);
+ unsigned long *start, unsigned long *end,
+ unsigned long min_sz_region);
#endif /* CONFIG_DAMON */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9d3e3327af4c..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -26,13 +26,7 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- enum dax_access_mode, void **, pfn_t *);
- /*
- * Validate whether this device is usable as an fsdax backing
- * device.
- */
- bool (*dax_supported)(struct dax_device *, struct block_device *, int,
- sector_t, sector_t);
+ enum dax_access_mode, void **, unsigned long *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
/*
@@ -71,12 +65,13 @@ size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* Check if given mapping is supported by the file / underlying device.
*/
-static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
- struct dax_device *dax_dev)
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
{
- if (!(vma->vm_flags & VM_SYNC))
+ if (!(vm_flags & VM_SYNC))
return true;
- if (!IS_DAX(file_inode(vma->vm_file)))
+ if (!IS_DAX(inode))
return false;
return dax_synchronous(dax_dev);
}
@@ -116,10 +111,11 @@ static inline void set_dax_nomc(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
-static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
- struct dax_device *dax_dev)
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
{
- return !(vma->vm_flags & VM_SYNC);
+ return !(vm_flags & VM_SYNC);
}
static inline size_t dax_recovery_write(struct dax_device *dax_dev,
pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
@@ -213,6 +209,11 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
+static inline bool dax_page_is_idle(struct page *page)
+{
+ return page && page_ref_count(page) == 0;
+}
+
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
@@ -226,10 +227,23 @@ static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+ loff_t start, loff_t end, void (cb)(struct inode *))
+{
+ return 0;
+}
+
+static inline void dax_break_layout_final(struct inode *inode)
+{
+}
+#endif
+
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -243,12 +257,23 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
- pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+ unsigned long *pfnp, int *errp,
+ const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- unsigned int order, pfn_t pfn);
+ unsigned int order, unsigned long pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+ loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+ void (cb)(struct inode *))
+{
+ return dax_break_layout(inode, 0, LLONG_MAX, cb);
+}
+void dax_break_layout_final(struct inode *inode);
int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t destoff,
loff_t len, bool *is_same,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index bf53e3894aae..898c60d21c92 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -57,6 +57,8 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
+#define QSTR(n) QSTR_LEN(n, strlen(n))
extern const struct qstr empty_name;
extern const struct qstr slash_name;
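For illustration, QSTR() builds a struct qstr compound literal from a NUL-terminated string, so a name can be passed by address without a separate variable; a hypothetical use with d_same_name(), declared below in this header:

	if (d_same_name(dentry, parent, &QSTR("config")))
		/* dentry is named "config" */;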
@@ -68,16 +70,24 @@ extern const struct qstr dotdot_name;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 40 /* 192 bytes */
+# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
-# define DNAME_INLINE_LEN 44 /* 128 bytes */
+# define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif
+#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))
+
+union shortname_store {
+ unsigned char string[DNAME_INLINE_LEN];
+ unsigned long words[DNAME_INLINE_WORDS];
+};
+
#define d_lock d_lockref.lock
+#define d_iname d_shortname.string
struct dentry {
/* RCU lookup touched fields */
@@ -85,17 +95,25 @@ struct dentry {
seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
- struct qstr d_name;
+ union {
+ struct qstr __d_name; /* for use ONLY in fs/dcache.c */
+ const struct qstr d_name;
+ };
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
- unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ union shortname_store d_shortname;
+ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
- struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct lockref d_lockref; /* per-dentry lock and refcount
+ * keep separate from RCU lookup area if
+ * possible!
+ */
union {
struct list_head d_lru; /* LRU list */
@@ -131,7 +149,8 @@ enum d_real_type {
};
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -145,6 +164,8 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;
/*
@@ -156,65 +177,59 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH BIT(0)
-#define DCACHE_OP_COMPARE BIT(1)
-#define DCACHE_OP_REVALIDATE BIT(2)
-#define DCACHE_OP_DELETE BIT(3)
-#define DCACHE_OP_PRUNE BIT(4)
-
-#define DCACHE_DISCONNECTED BIT(5)
- /* This dentry is possibly not currently connected to the dcache tree, in
- * which case its parent will either be itself, or will have this flag as
- * well. nfsd will not use a dentry with this bit set, but will first
- * endeavour to clear the bit either by discovering that it is connected,
- * or by performing lookup operations. Any filesystem which supports
- * nfsd_operations MUST have a lookup function which, if it finds a
- * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
- * dentry into place and return that dentry rather than the passed one,
- * typically using d_splice_alias. */
-
-#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. */
-
-#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
-
-#define DCACHE_CANT_MOUNT BIT(8)
-#define DCACHE_GENOCIDE BIT(9)
-#define DCACHE_SHRINK_LIST BIT(10)
-
-#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
-
-#define DCACHE_NFSFS_RENAMED BIT(12)
- /* this dentry has been "silly renamed" and has to be deleted on the last
- * dput() */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14)
- /* Parent inode is watched by some fsnotify listener */
-
-#define DCACHE_DENTRY_KILLED BIT(15)
-
-#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */
+enum dentry_flags {
+ DCACHE_OP_HASH = BIT(0),
+ DCACHE_OP_COMPARE = BIT(1),
+ DCACHE_OP_REVALIDATE = BIT(2),
+ DCACHE_OP_DELETE = BIT(3),
+ DCACHE_OP_PRUNE = BIT(4),
+ /*
+ * This dentry is possibly not currently connected to the dcache tree,
+ * in which case its parent will either be itself, or will have this
+ * flag as well. nfsd will not use a dentry with this bit set, but will
+ * first endeavour to clear the bit either by discovering that it is
+ * connected, or by performing lookup operations. Any filesystem which
+ * supports nfsd_operations MUST have a lookup function which, if it
+ * finds a directory inode with a DCACHE_DISCONNECTED dentry, will
+ * d_move that dentry into place and return that dentry rather than the
+ * passed one, typically using d_splice_alias.
+ */
+ DCACHE_DISCONNECTED = BIT(5),
+ DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */
+ DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */
+ DCACHE_CANT_MOUNT = BIT(8),
+ DCACHE_SHRINK_LIST = BIT(10),
+ DCACHE_OP_WEAK_REVALIDATE = BIT(11),
+ /*
+ * this dentry has been "silly renamed" and has to be deleted on the
+ * last dput()
+ */
+ DCACHE_NFSFS_RENAMED = BIT(12),
+ DCACHE_FSNOTIFY_PARENT_WATCHED = BIT(13), /* Parent inode is watched by some fsnotify listener */
+ DCACHE_DENTRY_KILLED = BIT(14),
+ DCACHE_MOUNTED = BIT(15), /* is a mountpoint */
+ DCACHE_NEED_AUTOMOUNT = BIT(16), /* handle automount on this dir */
+ DCACHE_MANAGE_TRANSIT = BIT(17), /* manage transit from this dirent */
+ DCACHE_LRU_LIST = BIT(18),
+ DCACHE_ENTRY_TYPE = (7 << 19), /* bits 19..21 are for storing type: */
+ DCACHE_MISS_TYPE = (0 << 19), /* Negative dentry */
+ DCACHE_WHITEOUT_TYPE = (1 << 19), /* Whiteout dentry (stop pathwalk) */
+ DCACHE_DIRECTORY_TYPE = (2 << 19), /* Normal directory */
+ DCACHE_AUTODIR_TYPE = (3 << 19), /* Lookupless directory (presumed automount) */
+ DCACHE_REGULAR_TYPE = (4 << 19), /* Regular file type */
+ DCACHE_SPECIAL_TYPE = (5 << 19), /* Other file type */
+ DCACHE_SYMLINK_TYPE = (6 << 19), /* Symlink */
+ DCACHE_NOKEY_NAME = BIT(22), /* Encrypted name encoded without key */
+ DCACHE_OP_REAL = BIT(23),
+ DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */
+ DCACHE_DENTRY_CURSOR = BIT(25),
+ DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */
+ DCACHE_PERSISTENT = BIT(27)
+};
+
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST BIT(19)
-
-#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */
-#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */
-#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */
-#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */
-#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */
-#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */
-
-#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL BIT(26)
-
-#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR BIT(29)
-#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */
-
extern seqlock_t rename_lock;
/*
@@ -225,7 +240,6 @@ extern void d_instantiate_new(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
-extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
@@ -233,10 +247,12 @@ extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+/* weird procfs mess; *NOT* exported */
+extern struct dentry * d_splice_alias_ops(struct inode *, struct dentry *,
+ const struct dentry_operations *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
const struct qstr *name);
-extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
@@ -252,6 +268,8 @@ extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern void d_dispose_if_unused(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
extern struct dentry *d_find_alias_rcu(struct inode *);
@@ -271,13 +289,14 @@ extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
static inline unsigned d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}
+ino_t d_parent_ino(struct dentry *dentry);
+
/*
* helper function for dentry_operations.d_dname() members
*/
@@ -501,12 +520,7 @@ static inline int simple_positive(const struct dentry *dentry)
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern int sysctl_vfs_cache_pressure;
-
-static inline unsigned long vfs_pressure_ratio(unsigned long val)
-{
- return mult_frac(val, sysctl_vfs_cache_pressure, 100);
-}
+unsigned long vfs_pressure_ratio(unsigned long val);
/**
* d_inode - Get the actual inode of this dentry
@@ -582,7 +596,7 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
struct name_snapshot {
struct qstr name;
- unsigned char inline_name[DNAME_INLINE_LEN];
+ union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
@@ -597,4 +611,8 @@ static inline struct dentry *d_next_sibling(const struct dentry *dentry)
return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
}
+void set_default_d_op(struct super_block *, const struct dentry_operations *);
+struct dentry *d_make_persistent(struct dentry *, struct inode *);
+void d_make_discardable(struct dentry *dentry);
+
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 325af611909f..0b61b8b996d4 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -2,79 +2,8 @@
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H
-
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/uio.h>
-#include <linux/workqueue.h>
-
-#include <net/inet_connection_sock.h>
-#include <net/inet_sock.h>
-#include <net/inet_timewait_sock.h>
-#include <net/tcp_states.h>
#include <uapi/linux/dccp.h>
-enum dccp_state {
- DCCP_OPEN = TCP_ESTABLISHED,
- DCCP_REQUESTING = TCP_SYN_SENT,
- DCCP_LISTEN = TCP_LISTEN,
- DCCP_RESPOND = TCP_SYN_RECV,
- /*
- * States involved in closing a DCCP connection:
- * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
- *
- * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
- * a. Client has performed active-close, has sent a Close to the server
- * from state OPEN or PARTOPEN, and is waiting for the final Reset
- * (in this case, SOCK_DONE == 1).
- * b. Client is asked to perform passive-close, by receiving a CloseReq
- * in (PART)OPEN state. It sends a Close and waits for final Reset
- * (in this case, SOCK_DONE == 0).
- * c. Server performs an active-close as in (a), keeps TIMEWAIT state.
- *
- * 3) The following intermediate states are employed to give passively
- * closing nodes a chance to process their unread data:
- * - PASSIVE_CLOSE (from OPEN => CLOSED) and
- * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
- */
- DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
- DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */
- DCCP_CLOSING = TCP_CLOSING,
- DCCP_TIME_WAIT = TCP_TIME_WAIT,
- DCCP_CLOSED = TCP_CLOSE,
- DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
- DCCP_PARTOPEN = TCP_MAX_STATES,
- DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
- DCCP_MAX_STATES
-};
-
-enum {
- DCCPF_OPEN = TCPF_ESTABLISHED,
- DCCPF_REQUESTING = TCPF_SYN_SENT,
- DCCPF_LISTEN = TCPF_LISTEN,
- DCCPF_RESPOND = TCPF_SYN_RECV,
- DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
- DCCPF_CLOSING = TCPF_CLOSING,
- DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
- DCCPF_CLOSED = TCPF_CLOSE,
- DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
- DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
-};
-
-static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
-{
- return (struct dccp_hdr *)skb_transport_header(skb);
-}
-
-static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
-{
- skb_push(skb, headlen);
- skb_reset_transport_header(skb);
- return memset(skb_transport_header(skb), 0, headlen);
-}
-
static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
{
return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
@@ -85,12 +14,6 @@ static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
}
-static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
-{
- const struct dccp_hdr *dh = dccp_hdr(skb);
- return __dccp_basic_hdr_len(dh);
-}
-
static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
{
__u64 seq_nr = ntohs(dh->dccph_seq);
@@ -103,222 +26,10 @@ static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
return seq_nr;
}
-static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
-{
- return (struct dccp_hdr_request *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
-{
- return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
-{
- const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
- return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
-}
-
-static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
-{
- return (struct dccp_hdr_response *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
-{
- return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
{
return __dccp_basic_hdr_len(dh) +
dccp_packet_hdr_len(dh->dccph_type);
}
-static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
-{
- return __dccp_hdr_len(dccp_hdr(skb));
-}
-
-/**
- * struct dccp_request_sock - represent DCCP-specific connection request
- * @dreq_inet_rsk: structure inherited from
- * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
- * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
- * @dreq_isr: initial sequence number received in the first Request
- * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
- * @dreq_service: service code present on the Request (there is just one)
- * @dreq_featneg: feature negotiation options for this connection
- * The following two fields are analogous to the ones in dccp_sock:
- * @dreq_timestamp_echo: last received timestamp to echo (13.1)
- * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
- */
-struct dccp_request_sock {
- struct inet_request_sock dreq_inet_rsk;
- __u64 dreq_iss;
- __u64 dreq_gss;
- __u64 dreq_isr;
- __u64 dreq_gsr;
- __be32 dreq_service;
- spinlock_t dreq_lock;
- struct list_head dreq_featneg;
- __u32 dreq_timestamp_echo;
- __u32 dreq_timestamp_time;
-};
-
-static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
-{
- return (struct dccp_request_sock *)req;
-}
-
-extern struct inet_timewait_death_row dccp_death_row;
-
-extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
- struct sk_buff *skb);
-
-struct dccp_options_received {
- u64 dccpor_ndp:48;
- u32 dccpor_timestamp;
- u32 dccpor_timestamp_echo;
- u32 dccpor_elapsed_time;
-};
-
-struct ccid;
-
-enum dccp_role {
- DCCP_ROLE_UNDEFINED,
- DCCP_ROLE_LISTEN,
- DCCP_ROLE_CLIENT,
- DCCP_ROLE_SERVER,
-};
-
-struct dccp_service_list {
- __u32 dccpsl_nr;
- __be32 dccpsl_list[];
-};
-
-#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
-#define DCCP_SERVICE_CODE_IS_ABSENT 0
-
-static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
- const __be32 service)
-{
- if (likely(sl != NULL)) {
- u32 i = sl->dccpsl_nr;
- while (i--)
- if (sl->dccpsl_list[i] == service)
- return true;
- }
- return false;
-}
-
-struct dccp_ackvec;
-
-/**
- * struct dccp_sock - DCCP socket state
- *
- * @dccps_swl - sequence number window low
- * @dccps_swh - sequence number window high
- * @dccps_awl - acknowledgement number window low
- * @dccps_awh - acknowledgement number window high
- * @dccps_iss - initial sequence number sent
- * @dccps_isr - initial sequence number received
- * @dccps_osr - first OPEN sequence number received
- * @dccps_gss - greatest sequence number sent
- * @dccps_gsr - greatest valid sequence number received
- * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
- * @dccps_service - first (passive sock) or unique (active sock) service code
- * @dccps_service_list - second .. last service code on passive socket
- * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
- * @dccps_l_ack_ratio - feature-local Ack Ratio
- * @dccps_r_ack_ratio - feature-remote Ack Ratio
- * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
- * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
- * @dccps_pcslen - sender partial checksum coverage (via sockopt)
- * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
- * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
- * @dccps_ndp_count - number of Non Data Packets since last data packet
- * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
- * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
- * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
- * @dccps_hc_rx_ackvec - rx half connection ack vector
- * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
- * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
- * @dccps_options_received - parsed set of retrieved options
- * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
- * @dccps_tx_qlen - maximum length of the TX queue
- * @dccps_role - role of this sock, one of %dccp_role
- * @dccps_hc_rx_insert_options - receiver wants to add options when acking
- * @dccps_hc_tx_insert_options - sender wants to add options when sending
- * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
- * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
- * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
- * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
- */
-struct dccp_sock {
- /* inet_connection_sock has to be the first member of dccp_sock */
- struct inet_connection_sock dccps_inet_connection;
-#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime
- __u64 dccps_swl;
- __u64 dccps_swh;
- __u64 dccps_awl;
- __u64 dccps_awh;
- __u64 dccps_iss;
- __u64 dccps_isr;
- __u64 dccps_osr;
- __u64 dccps_gss;
- __u64 dccps_gsr;
- __u64 dccps_gar;
- __be32 dccps_service;
- __u32 dccps_mss_cache;
- struct dccp_service_list *dccps_service_list;
- __u32 dccps_timestamp_echo;
- __u32 dccps_timestamp_time;
- __u16 dccps_l_ack_ratio;
- __u16 dccps_r_ack_ratio;
- __u64 dccps_l_seq_win:48;
- __u64 dccps_r_seq_win:48;
- __u8 dccps_pcslen:4;
- __u8 dccps_pcrlen:4;
- __u8 dccps_send_ndp_count:1;
- __u64 dccps_ndp_count:48;
- unsigned long dccps_rate_last;
- struct list_head dccps_featneg;
- struct dccp_ackvec *dccps_hc_rx_ackvec;
- struct ccid *dccps_hc_rx_ccid;
- struct ccid *dccps_hc_tx_ccid;
- struct dccp_options_received dccps_options_received;
- __u8 dccps_qpolicy;
- __u32 dccps_tx_qlen;
- enum dccp_role dccps_role:2;
- __u8 dccps_hc_rx_insert_options:1;
- __u8 dccps_hc_tx_insert_options:1;
- __u8 dccps_server_timewait:1;
- __u8 dccps_sync_scheduled:1;
- struct tasklet_struct dccps_xmitlet;
- struct timer_list dccps_xmit_timer;
-};
-
-#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
- dccps_inet_connection.icsk_inet.sk)
-
-static inline const char *dccp_role(const struct sock *sk)
-{
- switch (dccp_sk(sk)->dccps_role) {
- case DCCP_ROLE_UNDEFINED: return "undefined";
- case DCCP_ROLE_LISTEN: return "listen";
- case DCCP_ROLE_SERVER: return "server";
- case DCCP_ROLE_CLIENT: return "client";
- }
- return NULL;
-}
-
-extern void dccp_syn_ack_timeout(const struct request_sock *req);
-
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c9c65b132c0f..7cecda29447e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -57,7 +57,6 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
- .llseek = no_llseek, \
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
@@ -68,13 +67,77 @@ static const struct file_operations __fops = { \
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+struct debugfs_short_fops {
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
+ loff_t (*llseek) (struct file *, loff_t, int);
+};
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
-struct dentry *debugfs_create_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct file_operations *fops);
+struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops);
+
+/**
+ * debugfs_create_file - create a file in the debugfs filesystem
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that
+ * should be used for this file.
+ *
+ * This is the basic "create a file" function for debugfs. It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you want
+ * to create a directory, use the debugfs_create_dir() function instead.)
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
+ * returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ *
+ * If fops points to a struct debugfs_short_fops, then simple_open() will be
+ * used for the open, and only read/write/llseek are supported and are proxied,
+ * so no module reference or release are needed.
+ *
+ * NOTE: it's expected that most callers should _ignore_ the errors returned
+ * by this function. Other debugfs functions handle the fact that the "dentry"
+ * passed to them could be an error and they don't crash in that case.
+ * Drivers should generally work fine even if debugfs fails to init anyway.
+ */
+#define debugfs_create_file(name, mode, parent, data, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, NULL, fops)
+
+#define debugfs_create_file_aux(name, mode, parent, data, aux, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, aux, fops)
+
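A sketch of the short-fops path, assuming simple_read_from_buffer() for the body; with a struct debugfs_short_fops pointer, the _Generic() dispatch above resolves to debugfs_create_file_short() and simple_open() is used implicitly:

	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t count, loff_t *ppos)
	{
		static const char msg[] = "hello\n";

		return simple_read_from_buffer(buf, count, ppos, msg,
					       sizeof(msg) - 1);
	}

	static const struct debugfs_short_fops example_fops = {
		.read = example_read,
	};

	/* debugfs_create_file("example", 0444, parent, NULL, &example_fops); */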
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -99,7 +162,7 @@ void debugfs_remove(struct dentry *dentry);
void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
-const struct file_operations *debugfs_real_fops(const struct file *filp);
+void *debugfs_get_aux(const struct file *file);
int debugfs_file_get(struct dentry *dentry);
void debugfs_file_put(struct dentry *dentry);
@@ -111,8 +174,7 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name);
+int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) __printf(2, 3);
void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
@@ -206,9 +268,17 @@ static inline struct dentry *debugfs_lookup(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_aux(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data, void *aux,
+ const void *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
- const struct file_operations *fops)
+ const void *fops)
{
return ERR_PTR(-ENODEV);
}
@@ -258,7 +328,7 @@ static inline void debugfs_lookup_and_remove(const char *name,
struct dentry *parent)
{ }
-const struct file_operations *debugfs_real_fops(const struct file *filp);
+void *debugfs_get_aux(const struct file *file);
static inline int debugfs_file_get(struct dentry *dentry)
{
@@ -288,10 +358,10 @@ static inline ssize_t debugfs_attr_write_signed(struct file *file,
return -ENODEV;
}
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, char *new_name)
+static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry,
+ const char *fmt, ...)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
}
static inline void debugfs_create_u8(const char *name, umode_t mode,
@@ -399,6 +469,11 @@ static inline ssize_t debugfs_read_file_str(struct file *file,
#endif
+#define debugfs_create_file_aux_num(name, mode, parent, data, n, fops) \
+ debugfs_create_file_aux(name, mode, parent, data, \
+ (void *)(unsigned long)n, fops)
+#define debugfs_get_aux_num(f) (unsigned long)debugfs_get_aux(f)
+
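A sketch of stashing a small integer as the aux value at creation time and reading it back in the handler (the fops wiring to example_aux_read is omitted for brevity):

	static ssize_t example_aux_read(struct file *file, char __user *buf,
					size_t count, loff_t *ppos)
	{
		unsigned long n = debugfs_get_aux_num(file);
		char tmp[24];
		int len = scnprintf(tmp, sizeof(tmp), "%lu\n", n);

		return simple_read_from_buffer(buf, count, ppos, tmp, len);
	}

	/* debugfs_create_file_aux_num("chan0", 0444, parent, NULL, 0, &fops); */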
/**
* debugfs_create_xul - create a debugfs file that is used to read and write an
* unsigned long value, formatted in hexadecimal
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 32444686b6ff..8b95545e7924 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -23,13 +23,17 @@ struct debug_obj_descr;
* @state: tracked object state
* @astate: current active state
* @object: pointer to the real object
+ * @batch_last: pointer to the last hlist node in a batch
* @descr: pointer to an object type specific debug description structure
*/
struct debug_obj {
- struct hlist_node node;
- enum debug_obj_state state;
- unsigned int astate;
- void *object;
+ struct hlist_node node;
+ enum debug_obj_state state;
+ unsigned int astate;
+ union {
+ void *object;
+ struct hlist_node *batch_last;
+ };
const struct debug_obj_descr *descr;
};
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/delay.h b/include/linux/delay.h
index ff9cda975e30..46412c00033a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -6,21 +6,12 @@
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Please note that ndelay(), udelay() and mdelay() may return early for
- * several reasons:
- * 1. computed loops_per_jiffy too low (due to the time taken to
- * execute the timer interrupt.)
- * 2. cache behaviour affecting the time it takes to execute the
- * loop function.
- * 3. CPU clock rate changes.
- *
- * Please see this thread:
- * https://lists.openwall.net/linux-kernel/2011/01/09/56
+ * Sleep routines using timer list timers or hrtimers.
*/
#include <linux/math.h>
#include <linux/sched.h>
+#include <linux/jiffies.h>
extern unsigned long loops_per_jiffy;
@@ -35,12 +26,21 @@ extern unsigned long loops_per_jiffy;
* The 2nd mdelay() definition ensures GCC will optimize away the
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
*/
-
#ifndef MAX_UDELAY_MS
#define MAX_UDELAY_MS 5
#endif
#ifndef mdelay
+/**
+ * mdelay - Inserting a delay based on milliseconds with busy waiting
+ * @n: requested delay in milliseconds
+ *
+ * See udelay() for basic information about mdelay() and its variants.
+ *
+ * Please double check whether mdelay() is the right way to go, or whether
+ * refactoring the code to be able to use msleep() instead is the better
+ * option.
+ */
#define mdelay(n) (\
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
@@ -63,30 +63,75 @@ unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range_state(unsigned long min, unsigned long max,
unsigned int state);
+/**
+ * usleep_range - Sleep for an approximate time
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep.
+ */
static inline void usleep_range(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
}
-static inline void usleep_idle_range(unsigned long min, unsigned long max)
+/**
+ * usleep_range_idle - Sleep for an approximate time with idle time accounting
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The sleeping task has the state TASK_IDLE during the sleep to prevent
+ * contribution to the load average.
+ */
+static inline void usleep_range_idle(unsigned long min, unsigned long max)
{
usleep_range_state(min, max, TASK_IDLE);
}
+/**
+ * ssleep - wrapper for seconds around msleep
+ * @seconds: Requested sleep duration in seconds
+ *
+ * Please refer to msleep() for detailed information.
+ */
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
-/* see Documentation/timers/timers-howto.rst for the thresholds */
+static const unsigned int max_slack_shift = 2;
+#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC)
+
+/**
+ * fsleep - flexible sleep which autoselects the best mechanism
+ * @usecs: requested sleep duration in microseconds
+ *
+ * fsleep() selects the best mechanism that will provide at most 25% slack
+ * on the requested sleep duration. Therefore it uses:
+ *
+ * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer
+ *   overhead for really short sleep durations.
+ * * usleep_range() for sleep durations where using msleep() would lead to a
+ *   slack larger than 25%. This depends on the granularity of jiffies.
+ * * msleep() for all other sleep durations.
+ *
+ * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with
+ * the granularity of jiffies and the slack might exceed 25% especially for
+ * short sleep durations.
+ */
static inline void fsleep(unsigned long usecs)
{
if (usecs <= 10)
udelay(usecs);
- else if (usecs <= 20000)
- usleep_range(usecs, 2 * usecs);
+ else if (usecs < USLEEP_RANGE_UPPER_BOUND)
+ usleep_range(usecs, usecs + (usecs >> max_slack_shift));
else
- msleep(DIV_ROUND_UP(usecs, 1000));
+ msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC));
}
#endif /* defined(_LINUX_DELAY_H) */
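A usage sketch for the reworked fsleep(): a register poll loop that simply states the desired pause and lets fsleep() pick the mechanism. wait_for_ready() and the register layout are hypothetical:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ktime.h>

static int wait_for_ready(void __iomem *status_reg, unsigned long timeout_us)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);

	while (!(readl(status_reg) & BIT(0))) {
		if (ktime_after(ktime_get(), deadline))
			return -ETIMEDOUT;
		/* 50us is above the 10us udelay() cutoff and below
		 * USLEEP_RANGE_UPPER_BOUND, so with the constants above this
		 * resolves to usleep_range(50, 62): at most 25% slack. */
		fsleep(50);
	}
	return 0;
}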
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 6639f48dac36..800dcc360db2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,25 +29,39 @@ struct task_delay_info {
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
u64 blkio_start;
+ u64 blkio_delay_max;
+ u64 blkio_delay_min;
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_start;
+ u64 swapin_delay_max;
+ u64 swapin_delay_min;
u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
u32 swapin_count; /* total count of swapin */
u64 freepages_start;
+ u64 freepages_delay_max;
+ u64 freepages_delay_min;
u64 freepages_delay; /* wait for memory reclaim */
u64 thrashing_start;
+ u64 thrashing_delay_max;
+ u64 thrashing_delay_min;
u64 thrashing_delay; /* wait for thrashing page */
u64 compact_start;
+ u64 compact_delay_max;
+ u64 compact_delay_min;
u64 compact_delay; /* wait for memory compact */
u64 wpcopy_start;
+ u64 wpcopy_delay_max;
+ u64 wpcopy_delay_min;
u64 wpcopy_delay; /* wait for write-protect copy */
+ u64 irq_delay_max;
+ u64 irq_delay_min;
u64 irq_delay; /* wait for IRQ/SOFTIRQ */
u32 freepages_count; /* total count of memory reclaim */
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index ae80a303c216..eb2094e43050 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -276,5 +276,14 @@ do { \
dev_driver_string(dev), dev_name(dev), ## arg)
__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...);
+
+/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */
+#define dev_err_ptr_probe(dev, ___err, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, ___err, fmt, ##__VA_ARGS__))
+
+/* Simple helper for dev_err_probe() when ERR_CAST() is to be returned. */
+#define dev_err_cast_probe(dev, ___err_ptr, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, PTR_ERR(___err_ptr), fmt, ##__VA_ARGS__))
#endif /* _DEVICE_PRINTK_H_ */
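A probe-path sketch of the new ERR_PTR-returning wrappers; get_bus_clock() and the clock name are hypothetical:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>

static struct clk *get_bus_clock(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "bus");

	if (IS_ERR(clk))
		/* Logs the error (silently for -EPROBE_DEFER), records the
		 * reason for deferral and hands the ERR_PTR back unchanged. */
		return dev_err_cast_probe(dev, clk,
					  "failed to get bus clock\n");

	return clk;
}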
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index c8f7eb6cc191..377892604ff4 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -12,6 +12,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT (HZ * 60 * 5)
+
/*
* _devcd_free_sgtable - free all the memory of the given scatterlist table
* (i.e. both pages and scatterlist instances)
@@ -50,16 +53,17 @@ static inline void _devcd_free_sgtable(struct scatterlist *table)
kfree(delete_iter);
}
-
#ifdef CONFIG_DEV_COREDUMP
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp);
-void dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data));
+void dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout);
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp);
@@ -73,11 +77,13 @@ static inline void dev_coredumpv(struct device *dev, void *data,
}
static inline void
-dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data))
+dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout)
{
free(data);
}
@@ -92,4 +98,29 @@ static inline void dev_coredump_put(struct device *dev)
}
#endif /* CONFIG_DEV_COREDUMP */
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+static inline void dev_coredumpm(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen),
+ void (*free)(void *data))
+{
+ dev_coredumpm_timeout(dev, owner, data, datalen, gfp, read, free,
+ DEVCD_TIMEOUT);
+}
+
#endif /* __DEVCOREDUMP_H */
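Sketch of a caller: fw_crash_dump() below is hypothetical. It hands a vmalloc()ed copy to the devcoredump core, which keeps it for DEVCD_TIMEOUT unless userspace reads it first; drivers needing a different window can call dev_coredumpm_timeout() directly.

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void fw_crash_dump(struct device *dev, const void *buf, size_t len)
{
	void *data = vmalloc(len);

	if (!data)
		return;
	memcpy(data, buf, len);
	/* Ownership passes to the core; it vfree()s the copy once it is
	 * read or the 5 minute DEVCD_TIMEOUT expires. */
	dev_coredumpv(dev, data, len, GFP_KERNEL);
}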
diff --git a/include/linux/devfreq-governor.h b/include/linux/devfreq-governor.h
new file mode 100644
index 000000000000..dfdd0160a29f
--- /dev/null
+++ b/include/linux/devfreq-governor.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This header is for devfreq governors
+ */
+
+#ifndef __LINUX_DEVFREQ_DEVFREQ_H__
+#define __LINUX_DEVFREQ_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
+#define DEVFREQ_MIN_FREQ 0
+#define DEVFREQ_MAX_FREQ ULONG_MAX
+
+/*
+ * Definition of the governor feature flags
+ * - DEVFREQ_GOV_FLAG_IMMUTABLE
+ * : This governor is never changeable to other governors.
+ * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
+ * : The devfreq won't schedule the work for this governor.
+ */
+#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
+#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
+
+/*
+ * Definition of governor attribute flags except for common sysfs attributes
+ * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
+ * : Indicate polling_interval sysfs attribute
+ * - DEVFREQ_GOV_ATTR_TIMER
+ * : Indicate timer sysfs attribute
+ */
+#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
+#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node: list node - contains registered devfreq governors
+ * @name: Governor's name
+ * @attrs: Governor's sysfs attribute flags
+ * @flags: Governor's feature flags
+ * @get_target_freq: Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * @event_handler: Callback for devfreq core framework to notify events
+ * to governors. Events include per device governor
+ * init and exit, opp changes out of devfreq, suspend
+ * and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ struct list_head node;
+
+ const char name[DEVFREQ_NAME_LEN];
+ const u64 attrs;
+ const u64 flags;
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*event_handler)(struct devfreq *devfreq,
+ unsigned int event, void *data);
+};
+
+void devfreq_monitor_start(struct devfreq *devfreq);
+void devfreq_monitor_stop(struct devfreq *devfreq);
+void devfreq_monitor_suspend(struct devfreq *devfreq);
+void devfreq_monitor_resume(struct devfreq *devfreq);
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
+
+int devfreq_add_governor(struct devfreq_governor *governor);
+int devfreq_remove_governor(struct devfreq_governor *governor);
+
+int devm_devfreq_add_governor(struct device *dev,
+ struct devfreq_governor *governor);
+
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
+void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
+ unsigned long *max_freq);
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ if (!df->profile->get_dev_status)
+ return -EINVAL;
+
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+#endif /* __LINUX_DEVFREQ_DEVFREQ_H__ */
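A minimal governor sketch against this interface (all example_* names are hypothetical; a real governor also reacts to the DEVFREQ_GOV_* events in its event handler):

#include <linux/devfreq-governor.h>

static int example_get_target(struct devfreq *df, unsigned long *freq)
{
	/* Always ask for the top; the core clamps to the OPP range. */
	*freq = DEVFREQ_MAX_FREQ;
	return 0;
}

static int example_event_handler(struct devfreq *devfreq,
				 unsigned int event, void *data)
{
	return 0;	/* nothing to do for this sketch */
}

static struct devfreq_governor example_governor = {
	.name = "example_max",
	.get_target_freq = example_get_target,
	.event_handler = example_event_handler,
};

/* devfreq_add_governor(&example_governor) from module init; pair with
 * devfreq_remove_governor() on exit, or use devm_devfreq_add_governor(). */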
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index d312ffbac4dd..dc1075dc3446 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -103,6 +103,8 @@ struct devfreq_dev_status {
*
* @is_cooling_device: A self-explanatory boolean giving the device a
* cooling effect property.
+ * @dev_groups: Optional device-specific sysfs attribute groups to be
+ * attached to the devfreq device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
@@ -119,6 +121,8 @@ struct devfreq_dev_profile {
unsigned int max_state;
bool is_cooling_device;
+
+ const struct attribute_group **dev_groups;
};
/**
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 82b2195efaca..38f625af6ab4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -93,7 +93,14 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+/*
+ * Called with *forward == true. If it remains true, the ioctl should be
+ * forwarded to bdev. If it is reset to false, the target already fully handled
+ * the ioctl and the return value is the return value for the whole ioctl.
+ */
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward);
#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
@@ -149,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode node, void **kaddr,
- pfn_t *pfn);
+ unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
@@ -180,6 +187,11 @@ int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
+ * Helper function for getting devices
+ */
+int dm_devt_from_path(const char *path, dev_t *dev_p);
+
+/*
* Information about a target type
*/
@@ -294,6 +306,9 @@ struct target_type {
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif
+#define DM_TARGET_ATOMIC_WRITES 0x00000400
+#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -358,22 +373,17 @@ struct dm_target {
bool discards_supported:1;
/*
- * Set if this target requires that discards be split on
- * 'max_discard_sectors' boundaries.
- */
- bool max_discard_granularity:1;
-
- /*
- * Set if this target requires that secure_erases be split on
- * 'max_secure_erase_sectors' boundaries.
+ * Automatically set by dm-core if this target supports
+ * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
+ * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
*/
- bool max_secure_erase_granularity:1;
+ bool zone_reset_all_supported:1;
/*
- * Set if this target requires that write_zeroes be split on
- * 'max_write_zeroes_sectors' boundaries.
+ * Set if this target requires that discards be split on
+ * 'max_discard_sectors' boundaries.
*/
- bool max_write_zeroes_granularity:1;
+ bool max_discard_granularity:1;
/*
* Set if we need to limit the number of in-flight bios when swapping.
@@ -397,6 +407,27 @@ struct dm_target {
* bio_set_dev(). NOTE: ideally a target should _not_ need this.
*/
bool needs_bio_set_dev:1;
+
+ /*
+ * Set if the target supports flush optimization. If all the targets in
+ * a table have flush_bypasses_map set, the dm core will not send
+ * flushes to the targets via a ->map method. It will iterate over
+ * dm_table->devices and send flushes to the devices directly. This
+ * optimization reduces the number of flushes being sent when multiple
+ * targets in a table use the same underlying device.
+ *
+ * This optimization may be enabled on targets that just pass the
+ * flushes to the underlying devices without performing any other
+ * actions on the flush request. Currently, dm-linear and dm-stripe
+ * support it.
+ */
+ bool flush_bypasses_map:1;
+
+ /*
+ * Set if the target calls bio_integrity_alloc on bios received
+ * in the map method.
+ */
+ bool mempool_needs_integrity:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
@@ -503,17 +534,22 @@ int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
struct dm_target *tgt;
+ struct gendisk *disk;
sector_t next_sector;
- void *orig_data;
- report_zones_cb orig_cb;
unsigned int zone_idx;
+ /* for block layer ->report_zones */
+ struct blk_report_zones_args *rep_args;
+
+ /* for internal users */
+ report_zones_cb cb;
+ void *data;
+
/* must be filled by ->report_zones before calling dm_report_zones_cb */
sector_t start;
};
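The widened dm_prepare_ioctl_fn above is easiest to see in a target sketch (example_prepare_ioctl() and its context struct are hypothetical): leave *forward true to pass the ioctl on to the returned bdev, or clear it when the target has fully handled the ioctl itself.

#include <linux/device-mapper.h>
#include <linux/fs.h>

struct example_ctx {
	struct dm_dev *dev;
};

static int example_prepare_ioctl(struct dm_target *ti,
				 struct block_device **bdev,
				 unsigned int cmd, unsigned long arg,
				 bool *forward)
{
	struct example_ctx *ctx = ti->private;

	if (cmd == BLKFLSBUF) {
		/* Handled here: suppress forwarding so this return value
		 * becomes the result of the whole ioctl. */
		*forward = false;
		return 0;
	}

	/* *forward stays true: the core forwards the ioctl to *bdev. */
	*bdev = ctx->dev->bdev;
	return 0;
}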
diff --git a/include/linux/device.h b/include/linux/device.h
index fc3bd7116ab9..0be95294b6e6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -26,9 +26,9 @@
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
-#include <linux/overflow.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
+#include <linux/device/devres.h>
#include <linux/device/driver.h>
#include <linux/cleanup.h>
#include <asm/device.h>
@@ -281,164 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
-/* device resource management */
-typedef void (*dr_release_t)(struct device *dev, void *res);
-typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-
-void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
-#define devres_alloc(release, size, gfp) \
- __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
-#define devres_alloc_node(release, size, gfp, nid) \
- __devres_alloc_node(release, size, gfp, nid, #release)
-
-void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-void devres_free(void *res);
-void devres_add(struct device *dev, void *res);
-void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-
-/* devres group */
-void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
-void devres_close_group(struct device *dev, void *id);
-void devres_remove_group(struct device *dev, void *id);
-int devres_release_group(struct device *dev, void *id);
-
-/* managed devm_k.alloc/kfree for device drivers */
-void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2);
-void *devm_krealloc(struct device *dev, void *ptr, size_t size,
- gfp_t gfp) __must_check __realloc_size(3);
-__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
- const char *fmt, va_list ap) __malloc;
-__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
- const char *fmt, ...) __malloc;
-static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
-{
- return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
-}
-static inline void *devm_kmalloc_array(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return devm_kmalloc(dev, bytes, flags);
-}
-static inline void *devm_kcalloc(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
-}
-static inline __realloc_size(3, 4) void * __must_check
-devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
- return NULL;
-
- return devm_krealloc(dev, p, bytes, flags);
-}
-
-void devm_kfree(struct device *dev, const void *p);
-char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
-void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
- __realloc_size(3);
-
-unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-void devm_free_pages(struct device *dev, unsigned long addr);
-
-#ifdef CONFIG_HAS_IOMEM
-void __iomem *devm_ioremap_resource(struct device *dev,
- const struct resource *res);
-void __iomem *devm_ioremap_resource_wc(struct device *dev,
- const struct resource *res);
-
-void __iomem *devm_of_iomap(struct device *dev,
- struct device_node *node, int index,
- resource_size_t *size);
-#else
-
-static inline
-void __iomem *devm_ioremap_resource(struct device *dev,
- const struct resource *res)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline
-void __iomem *devm_ioremap_resource_wc(struct device *dev,
- const struct resource *res)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline
-void __iomem *devm_of_iomap(struct device *dev,
- struct device_node *node, int index,
- resource_size_t *size)
-{
- return ERR_PTR(-EINVAL);
-}
-
-#endif
-
-/* allows to add/remove a custom action to devres stack */
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
-void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-
-int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(dev, action, data) \
- __devm_add_action(dev, action, data, #action)
-
-static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
- void *data, const char *name)
-{
- int ret;
-
- ret = __devm_add_action(dev, action, data, name);
- if (ret)
- action(data);
-
- return ret;
-}
-#define devm_add_action_or_reset(dev, action, data) \
- __devm_add_action_or_reset(dev, action, data, #action)
-
-/**
- * devm_alloc_percpu - Resource-managed alloc_percpu
- * @dev: Device to allocate per-cpu memory for
- * @type: Type to allocate per-cpu memory for
- *
- * Managed alloc_percpu. Per-cpu memory allocated with this function is
- * automatically freed on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-#define devm_alloc_percpu(dev, type) \
- ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
- __alignof__(type)))
-
-void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
- size_t align);
-void devm_free_percpu(struct device *dev, void __percpu *pdata);
-
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -707,6 +549,8 @@ struct device_physical_location {
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
* @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -748,7 +592,7 @@ struct device {
struct dev_pin_info *pins;
#endif
struct dev_msi_info msi;
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
@@ -822,6 +666,9 @@ struct device {
#ifdef CONFIG_DMA_NEED_SYNC
bool dma_skip_sync:1;
#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
/**
@@ -985,6 +832,9 @@ static inline bool device_pm_not_required(struct device *dev)
static inline void device_set_pm_not_required(struct device *dev)
{
dev->power.no_pm = true;
+#ifdef CONFIG_PM
+ dev->power.no_callbacks = true;
+#endif
}
static inline void dev_pm_syscore_device(struct device *dev, bool val)
@@ -1004,6 +854,42 @@ static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
return !!(dev->power.driver_flags & flags);
}
+static inline bool dev_pm_smart_suspend(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.smart_suspend;
+#else
+ return false;
+#endif
+}
+
+/*
+ * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag
+ * @dev: Target device.
+ * @val: New flag value.
+ *
+ * When set, power.strict_midlayer means that the middle layer power management
+ * code (typically, a bus type or a PM domain) does not expect its runtime PM
+ * suspend callback to be invoked at all during system-wide PM transitions and
+ * it does not expect its runtime PM resume callback to be invoked at any point
+ * when runtime PM is disabled for the device during system-wide PM transitions.
+ */
+static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+ dev->power.strict_midlayer = val;
+#endif
+}
+
+static inline bool dev_pm_strict_midlayer_is_set(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.strict_midlayer;
+#else
+ return false;
+#endif
+}
+
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
@@ -1031,13 +917,6 @@ static inline void device_lock_assert(struct device *dev)
lockdep_assert_held(&dev->mutex);
}
-static inline struct device_node *dev_of_node(struct device *dev)
-{
- if (!IS_ENABLED(CONFIG_OF) || !dev)
- return NULL;
- return dev->of_node;
-}
-
static inline bool dev_has_sync_state(struct device *dev)
{
if (!dev)
@@ -1049,6 +928,18 @@ static inline bool dev_has_sync_state(struct device *dev)
return false;
}
+static inline int dev_set_drv_sync_state(struct device *dev,
+ void (*fn)(struct device *dev))
+{
+ if (!dev || !dev->driver)
+ return 0;
+ if (dev->driver->sync_state && dev->driver->sync_state != fn)
+ return -EBUSY;
+ if (!dev->driver->sync_state)
+ dev->driver->sync_state = fn;
+ return 0;
+}
+
static inline void dev_set_removable(struct device *dev,
enum device_removable removable)
{
@@ -1076,15 +967,44 @@ void device_del(struct device *dev);
DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
-int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-struct device *device_find_child_by_name(struct device *parent,
- const char *name);
-struct device *device_find_any_child(struct device *parent);
+int device_for_each_child(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn);
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ return device_find_child(parent, name, device_match_name);
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, device_match_any);
+}
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
@@ -1144,10 +1064,21 @@ void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
+
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
-void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
+int device_add_of_node(struct device *dev, struct device_node *of_node);
+void device_remove_of_node(struct device *dev);
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode);
+
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
+ return NULL;
+ return dev->of_node;
+}
static inline int dev_num_vf(struct device *dev)
{
@@ -1176,12 +1107,12 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
-int __must_check device_driver_attach(struct device_driver *drv,
+int __must_check device_driver_attach(const struct device_driver *drv,
struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
-int __must_check driver_attach(struct device_driver *drv);
+int __must_check driver_attach(const struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);
@@ -1217,11 +1148,9 @@ static inline void device_remove_group(struct device *dev,
{
const struct attribute_group *groups[] = { grp, NULL };
- return device_remove_groups(dev, groups);
+ device_remove_groups(dev, groups);
}
-int __must_check devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
@@ -1257,6 +1186,11 @@ void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
void device_link_wait_removal(void);
+static inline bool device_link_test(const struct device_link *link, u32 flags)
+{
+ return !!(link->flags & flags);
+}
+
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 5ef4ec1c36c3..99b1002b3e31 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -48,6 +48,7 @@ struct fwnode_handle;
* will never get called until they do.
* @remove: Called when a device removed from this bus.
* @shutdown: Called at shut-down time to quiesce the device.
+ * @irq_get_affinity: Get IRQ affinity mask for the device on this bus.
*
* @online: Called to put the device back online (after offlining it).
* @offline: Called to put the device offline for hot-removal. May fail.
@@ -81,12 +82,14 @@ struct bus_type {
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
- int (*match)(struct device *dev, struct device_driver *drv);
+ int (*match)(struct device *dev, const struct device_driver *drv);
int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
+ const struct cpumask *(*irq_get_affinity)(struct device *dev,
+ unsigned int irq_vec);
int (*online)(struct device *dev);
int (*offline)(struct device *dev);
@@ -126,8 +129,12 @@ struct bus_attribute {
int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
+
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
+int device_match_type(struct device *dev, const void *type);
int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
@@ -135,12 +142,17 @@ int device_match_acpi_dev(struct device *dev, const void *adev);
int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
+/* Device iterating function type for various driver core for_each APIs */
+typedef int (*device_iter_t)(struct device *dev, void *data);
+
/* iterator helpers for buses */
-int bus_for_each_dev(const struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
+ void *data, device_iter_t fn);
struct device *bus_find_device(const struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
+ const void *data, device_match_t match);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match);
/**
* bus_find_device_by_name - device iterator for locating a particular device
* of a specific name.
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index c576b49c55c2..65880e60c720 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -82,20 +82,18 @@ bool class_is_registered(const struct class *class);
struct class_compat;
struct class_compat *class_compat_register(const char *name);
void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
+int class_compat_create_link(struct class_compat *cls, struct device *dev);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev);
void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
const struct device *start, const struct device_type *type);
struct device *class_dev_iter_next(struct class_dev_iter *iter);
void class_dev_iter_exit(struct class_dev_iter *iter);
-int class_for_each_device(const struct class *class, const struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
+int class_for_each_device(const struct class *class, const struct device *start,
+ void *data, device_iter_t fn);
struct device *class_find_device(const struct class *class, const struct device *start,
- const void *data, int (*match)(struct device *, const void *));
+ const void *data, device_match_t match);
/**
* class_find_device_by_name - device iterator for locating a particular device
@@ -195,7 +193,7 @@ static inline int __must_check class_create_file(const struct class *class,
static inline void class_remove_file(const struct class *class,
const struct class_attribute *attr)
{
- return class_remove_file_ns(class, attr, NULL);
+ class_remove_file_ns(class, attr, NULL);
}
/* Simple class attribute that is just a static string */
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
new file mode 100644
index 000000000000..9c1e3d643d69
--- /dev/null
+++ b/include/linux/device/devres.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DEVICE_DEVRES_H_
+#define _DEVICE_DEVRES_H_
+
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
+#include <linux/overflow.h>
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <asm/bug.h>
+#include <asm/percpu.h>
+
+struct device;
+struct device_node;
+struct resource;
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+void * __malloc
+__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name);
+#define devres_alloc(release, size, gfp) \
+ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+ __devres_alloc_node(release, size, gfp, nid, #release)
+
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+
+/* devres group */
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
+
+/* managed devm_k.alloc/kfree for device drivers */
+void * __alloc_size(2)
+devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+void * __must_check __realloc_size(3)
+devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return devm_kmalloc(dev, bytes, flags);
+}
+static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
+void devm_kfree(struct device *dev, const void *p);
+
+void * __realloc_size(3)
+devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp);
+static inline void *devm_kmemdup_array(struct device *dev, const void *src,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmemdup(dev, src, size_mul(size, n), flags);
+}
+
+char * __malloc
+devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+char * __printf(3, 0) __malloc
+devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap);
+char * __printf(3, 4) __malloc
+devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align);
+
+unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
+
+#ifdef CONFIG_HAS_IOMEM
+
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res);
+
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+#endif
+
+/* allows to add/remove a custom action to devres stack */
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __devm_add_action(dev, action, data, name);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
+
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data);
+
+#endif /* _DEVICE_DEVRES_H_ */
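A sketch of the action helpers in practice (the example_* names are hypothetical): on failure devm_add_action_or_reset() runs the action immediately, so the regulator never stays enabled without a matching devres entry.

#include <linux/device/devres.h>
#include <linux/regulator/consumer.h>

static void example_reg_disable(void *data)
{
	regulator_disable(data);
}

static int example_enable_vdd(struct device *dev, struct regulator *vdd)
{
	int ret = regulator_enable(vdd);

	if (ret)
		return ret;

	/* Disable on detach; on allocation failure the action fires now. */
	return devm_add_action_or_reset(dev, example_reg_disable, vdd);
}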
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 7738f458995f..cd8e0f0a634b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -146,18 +146,18 @@ struct driver_attribute {
#define DRIVER_ATTR_WO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-int __must_check driver_create_file(struct device_driver *driver,
+int __must_check driver_create_file(const struct device_driver *driver,
const struct driver_attribute *attr);
-void driver_remove_file(struct device_driver *driver,
+void driver_remove_file(const struct device_driver *driver,
const struct driver_attribute *attr);
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len);
int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
- void *data, int (*fn)(struct device *dev, void *));
-struct device *driver_find_device(struct device_driver *drv,
+ void *data, device_iter_t fn);
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
/**
* driver_find_device_by_name - device iterator for locating a particular device
@@ -165,7 +165,7 @@ struct device *driver_find_device(struct device_driver *drv,
* @drv: the driver we're iterating
* @name: name of the device to match
*/
-static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+static inline struct device *driver_find_device_by_name(const struct device_driver *drv,
const char *name)
{
return driver_find_device(drv, NULL, name, device_match_name);
@@ -178,7 +178,7 @@ static inline struct device *driver_find_device_by_name(struct device_driver *dr
* @np: of_node pointer to match.
*/
static inline struct device *
-driver_find_device_by_of_node(struct device_driver *drv,
+driver_find_device_by_of_node(const struct device_driver *drv,
const struct device_node *np)
{
return driver_find_device(drv, NULL, np, device_match_of_node);
@@ -203,13 +203,13 @@ driver_find_device_by_fwnode(struct device_driver *drv,
* @drv: the driver we're iterating
* @devt: devt pointer to match.
*/
-static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+static inline struct device *driver_find_device_by_devt(const struct device_driver *drv,
dev_t devt)
{
return driver_find_device(drv, NULL, &devt, device_match_devt);
}
-static inline struct device *driver_find_next_device(struct device_driver *drv,
+static inline struct device *driver_find_next_device(const struct device_driver *drv,
struct device *start)
{
return driver_find_device(drv, start, NULL, device_match_any);
@@ -223,14 +223,14 @@ static inline struct device *driver_find_next_device(struct device_driver *drv,
* @adev: ACPI_COMPANION device to match.
*/
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv,
+driver_find_device_by_acpi_dev(const struct device_driver *drv,
const struct acpi_device *adev)
{
return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+driver_find_device_by_acpi_dev(const struct device_driver *drv, const void *adev)
{
return NULL;
}
diff --git a/include/linux/device/faux.h b/include/linux/device/faux.h
new file mode 100644
index 000000000000..9f43c0e46aa4
--- /dev/null
+++ b/include/linux/device/faux.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#ifndef _FAUX_DEVICE_H_
+#define _FAUX_DEVICE_H_
+
+#include <linux/container_of.h>
+#include <linux/device.h>
+
+/**
+ * struct faux_device - a "faux" device
+ * @dev: internal struct device of the object
+ *
+ * A simple faux device that can be created/destroyed. To be used when a
+ * driver only needs to have a device to "hang" something off. This can be
+ * used for downloading firmware or other basic tasks. Use this instead of
+ * a struct platform_device if the device has no resources assigned to
+ * it at all.
+ */
+struct faux_device {
+ struct device dev;
+};
+#define to_faux_device(x) container_of_const((x), struct faux_device, dev)
+
+/**
+ * struct faux_device_ops - a set of callbacks for a struct faux_device
+ * @probe: called when a faux device is probed by the driver core
+ * before the device is fully bound to the internal faux bus
+ * code. If probe succeeds, return 0, otherwise return a
+ * negative error number to stop the probe sequence from
+ * succeeding.
+ * @remove: called when a faux device is removed from the system
+ *
+ * Both @probe and @remove are optional, if not needed, set to NULL.
+ */
+struct faux_device_ops {
+ int (*probe)(struct faux_device *faux_dev);
+ void (*remove)(struct faux_device *faux_dev);
+};
+
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops);
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups);
+void faux_device_destroy(struct faux_device *faux_dev);
+
+static inline void *faux_device_get_drvdata(const struct faux_device *faux_dev)
+{
+ return dev_get_drvdata(&faux_dev->dev);
+}
+
+static inline void faux_device_set_drvdata(struct faux_device *faux_dev, void *data)
+{
+ dev_set_drvdata(&faux_dev->dev, data);
+}
+
+#endif /* _FAUX_DEVICE_H_ */
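A hedged sketch of a faux device user ("hypothetical-fwload" and the fwload_* names are illustrative): one create call, one destroy call, no bus glue.

#include <linux/device/faux.h>
#include <linux/module.h>

static int fwload_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "bound, ready to load firmware\n");
	return 0;
}

static const struct faux_device_ops fwload_ops = {
	.probe = fwload_probe,	/* .remove left NULL: it is optional */
};

static struct faux_device *fwload_dev;

static int __init fwload_init(void)
{
	fwload_dev = faux_device_create("hypothetical-fwload", NULL,
					&fwload_ops);
	return fwload_dev ? 0 : -ENODEV;
}

static void __exit fwload_exit(void)
{
	faux_device_destroy(fwload_dev);
}

module_init(fwload_init);
module_exit(fwload_exit);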
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index d02f32b7514e..0864773a57e8 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -18,15 +18,16 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
+ if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
+ return 0;
+
if (likely(!inode->i_rdev))
return 0;
if (S_ISBLK(inode->i_mode))
type = DEVCG_DEV_BLOCK;
- else if (S_ISCHR(inode->i_mode))
+ else /* S_ISCHR by the test above */
type = DEVCG_DEV_CHAR;
- else
- return 0;
if (mask & MAY_WRITE)
access |= DEVCG_ACC_WRITE;
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
index 0a7a00a0ee7f..1f02db0c1897 100644
--- a/include/linux/dfl.h
+++ b/include/linux/dfl.h
@@ -71,7 +71,7 @@ struct dfl_driver {
};
#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
-#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+#define to_dfl_drv(d) container_of_const(d, struct dfl_driver, drv)
/*
* use a macro to avoid include chaining to get THIS_MODULE.
diff --git a/include/linux/dibs.h b/include/linux/dibs.h
new file mode 100644
index 000000000000..c75607f8a5cf
--- /dev/null
+++ b/include/linux/dibs.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Direct Internal Buffer Sharing
+ *
+ * Definitions for the DIBS module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#ifndef _DIBS_H
+#define _DIBS_H
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+/* DIBS - Direct Internal Buffer Sharing - concept
+ * -----------------------------------------------
+ * In the case of multiple systems sharing the same hardware, dibs fabrics can
+ * provide dibs devices to these systems. The systems use dibs devices of the
+ * same fabric to communicate via dmbs (Direct Memory Buffers). Each dmb has
+ * exactly one owning local dibs device and one remote using dibs device, which
+ * is authorized to write into this dmb. This access control is provided by the
+ * dibs fabric.
+ *
+ * Because the access to the dmb is based on access to physical memory, it is
+ * lossless and synchronous. The remote devices can directly access any offset
+ * of the dmb.
+ *
+ * Dibs fabrics, dibs devices and dmbs are identified by tokens and ids.
+ * A dibs fabric id is unique within the same hardware (with the exception of
+ * the dibs loopback fabric), a dmb token is unique within the same fabric, and
+ * dibs device gids are guaranteed to be unique within the same fabric and
+ * statistically likely to be globally unique. The exchange of these tokens and
+ * ids between the systems is not part of the dibs concept.
+ *
+ * The dibs layer provides an abstraction between dibs device drivers and dibs
+ * clients.
+ */
+
+/* DMB - Direct Memory Buffer
+ * --------------------------
+ * A dibs client provides a dmb as input buffer for a local receiving
+ * dibs device for exactly one (remote) sending dibs device. Only this
+ * sending device can send data into this dmb using move_data(). Sender
+ * and receiver can be the same device. A dmb belongs to exactly one client.
+ */
+struct dibs_dmb {
+ /* tok - Token for this dmb
+ * Used by remote and local devices and clients to address this dmb.
+ * Provided by dibs fabric. Unique per dibs fabric.
+ */
+ u64 dmb_tok;
+ /* rgid - GID of designated remote sending device */
+ uuid_t rgid;
+ /* cpu_addr - buffer address */
+ void *cpu_addr;
+ /* len - buffer length */
+ u32 dmb_len;
+ /* idx - Index of this DMB on this receiving device */
+ u32 idx;
+ /* VLAN support (deprecated)
+ * In order to write into a vlan-tagged dmb, the remote device needs
+	 * to belong to this vlan
+ */
+ u32 vlan_valid;
+ u32 vlan_id;
+ /* optional, used by device driver */
+ dma_addr_t dma_addr;
+};
+
+/* DIBS events
+ * -----------
+ * Dibs devices can optionally notify dibs clients about events that happened
+ * in the fabric or at the remote device or remote dmb.
+ */
+enum dibs_event_type {
+ /* Buffer event, e.g. a remote dmb was unregistered */
+ DIBS_BUF_EVENT,
+ /* Device event, e.g. a remote dibs device was disabled */
+ DIBS_DEV_EVENT,
+ /* Software event, a dibs client can send an event signal to a
+ * remote dibs device.
+ */
+ DIBS_SW_EVENT,
+	DIBS_OTHER_TYPE
+};
+
+enum dibs_event_subtype {
+ DIBS_BUF_UNREGISTERED,
+ DIBS_DEV_DISABLED,
+ DIBS_DEV_ERR_STATE,
+ DIBS_OTHER_SUBTYPE
+};
+
+struct dibs_event {
+ u32 type;
+ u32 subtype;
+ /* uuid_null if invalid */
+ uuid_t gid;
+ /* zero if invalid */
+ u64 buffer_tok;
+ u64 time;
+ /* additional data or zero */
+ u64 data;
+};
+
+struct dibs_dev;
+
+/* DIBS client
+ * -----------
+ */
+#define MAX_DIBS_CLIENTS 8
+#define NO_DIBS_CLIENT 0xff
+/* All dibs clients have access to all dibs devices.
+ * A dibs client provides the following functions to be called by dibs layer or
+ * dibs device drivers:
+ */
+struct dibs_client_ops {
+ /**
+ * add_dev() - add a dibs device
+ * @dev: device that was added
+ *
+ * Will be called during dibs_register_client() for all existing
+ * dibs devices and whenever a new dibs device is registered.
+ * dev is usable until dibs_client.remove() is called.
+ * *dev is protected by device refcounting.
+ */
+ void (*add_dev)(struct dibs_dev *dev);
+ /**
+ * del_dev() - remove a dibs device
+ * @dev: device to be removed
+ *
+ * Will be called whenever a dibs device is removed.
+ * Will be called during dibs_unregister_client() for all existing
+ * dibs devices and whenever a dibs device is unregistered.
+ * The device has already stopped initiative for this client:
+ * No new handlers will be started.
+ * The device is no longer usable by this client after this call.
+ */
+ void (*del_dev)(struct dibs_dev *dev);
+ /**
+ * handle_irq() - Handle signaling for a DMB
+ * @dev: device that owns the dmb
+ * @idx: Index of the dmb that got signalled
+ * @dmbemask: signaling mask of the dmb
+ *
+ * Handle signaling for a dmb that was registered by this client
+ * for this device.
+ * The dibs device can coalesce multiple signaling triggers into a
+ * single call of handle_irq(). dmbemask can be used to indicate
+ * different kinds of triggers.
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_irq)(struct dibs_dev *dev, unsigned int idx,
+ u16 dmbemask);
+ /**
+ * handle_event() - Handle control information sent by device
+ * @dev: device reporting the event
+ * @event: ism event structure
+ *
+	 * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_event)(struct dibs_dev *dev,
+ const struct dibs_event *event);
+};
+
+struct dibs_client {
+ /* client name for logging and debugging purposes */
+ const char *name;
+ const struct dibs_client_ops *ops;
+ /* client index - provided and used by dibs layer */
+ u8 id;
+};
+
+/* Functions to be called by dibs clients:
+ */
+/**
+ * dibs_register_client() - register a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->add_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_register_client(struct dibs_client *client);
+/**
+ * dibs_unregister_client() - unregister a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->del_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_unregister_client(struct dibs_client *client);
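A skeleton client against these entry points (all example_* names are hypothetical); note that dibs_register_client() immediately replays add_dev() for every existing device:

#include <linux/dibs.h>

static void example_add_dev(struct dibs_dev *dev)
{
	/* Device is usable from here until del_dev(). */
}

static void example_del_dev(struct dibs_dev *dev)
{
}

static void example_handle_irq(struct dibs_dev *dev, unsigned int idx,
			       u16 dmbemask)
{
	/* IRQ context: dmb number idx owned by this client was signalled. */
}

static void example_handle_event(struct dibs_dev *dev,
				 const struct dibs_event *event)
{
}

static const struct dibs_client_ops example_ops = {
	.add_dev = example_add_dev,
	.del_dev = example_del_dev,
	.handle_irq = example_handle_irq,
	.handle_event = example_handle_event,
};

static struct dibs_client example_client = {
	.name = "example",
	.ops = &example_ops,
};

/* Registration from module init: dibs_register_client(&example_client);
 * pair with dibs_unregister_client() on exit. */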
+
+/* dibs clients can call dibs device ops. */
+
+/* DIBS devices
+ * ------------
+ */
+
+/* Defined fabric id / CHID for all loopback devices:
+ * All dibs loopback devices report this fabric id. In this case devices with
+ * the same fabric id can NOT communicate via dibs. Only loopback devices with
+ * the same dibs device gid can communicate (=same device with itself).
+ */
+#define DIBS_LOOPBACK_FABRIC 0xFFFF
+
+/* A dibs device provides the following functions to be called by dibs clients.
+ * They are mandatory, unless marked 'optional'.
+ */
+struct dibs_dev_ops {
+ /**
+ * get_fabric_id()
+ * @dev: local dibs device
+ *
+ * Only devices on the same dibs fabric can communicate. Fabric_id is
+ * unique inside the same HW system. Use fabric_id for fast negative
+ * checks, but only query_remote_gid() can give a reliable positive
+ * answer:
+ * Different fabric_id: dibs is not possible
+ * Same fabric_id: dibs may be possible or not
+ * (e.g. different HW systems)
+ * EXCEPTION: DIBS_LOOPBACK_FABRIC denotes an ism_loopback device
+ * that can only communicate with itself. Use dibs_dev.gid
+ * or query_remote_gid() to determine whether sender and
+ * receiver use the same ism_loopback device.
+ * Return: 2 byte dibs fabric id
+ */
+ u16 (*get_fabric_id)(struct dibs_dev *dev);
+ /**
+ * query_remote_gid()
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @vid_valid: if zero, vid will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vid: VLAN id; deprecated, ignored if device does not support vlan
+ *
+ * Query whether a remote dibs device is reachable via this local device
+ * and this vlan id.
+ * Return: 0 if remote gid is reachable.
+ */
+ int (*query_remote_gid)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 vid_valid, u32 vid);
+ /**
+ * max_dmbs()
+ * Return: Max number of DMBs that can be registered for this kind of
+ * dibs_dev
+ */
+ int (*max_dmbs)(void);
+ /**
+ * register_dmb() - allocate and register a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be registered
+ * @client: dibs client
+ *
+ * The following fields of dmb must provide valid input:
+ * @rgid: gid of remote user device
+ * @dmb_len: buffer length
+ * @idx: optionally the requested idx (if non-zero)
+ * @vlan_valid: if zero, vlan_id will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vlan_id: deprecated, ignored if device does not support vlan
+ * In addition, the following fields will be valid upon return:
+ * @dmb_tok: for usage by remote and local devices and clients
+ * @cpu_addr: allocated buffer
+ * @idx: dmb index, unique per dibs device
+ * @dma_addr: to be used by device driver, if applicable
+ *
+ * Allocate a dmb buffer and register it with this device and for this
+ * client.
+ * Return: zero on success
+ */
+ int (*register_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb,
+ struct dibs_client *client);
+ /**
+ * unregister_dmb() - unregister and free a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be unregistered
+ * The following fields of dmb must provide valid input:
+ * @dmb_tok
+ * @cpu_addr
+ * @idx
+ *
+ * Free dmb.cpu_addr and unregister the dmb from this device.
+ * Return: zero on success
+ */
+ int (*unregister_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * move_data() - write into a remote dmb
+ * @dev: Local sending dibs device
+ * @dmb_tok: Token of the remote dmb
+ * @idx: signaling index in dmbemask
+ * @sf: signaling flag;
+ * if true, bit idx will be set in the target dmbemask
+ * and the target device will be signaled.
+ * @offset: offset within target dmb
+ * @data: pointer to data to be sent
+ * @size: length of data to be sent, can be zero.
+ *
+ * Use dev to write data of size at offset into a remote dmb
+ * identified by dmb_tok. Data is moved synchronously, *data can
+ * be freed when this function returns.
+ *
+ * If the signaling flag (sf) is true, bit number idx will be set
+ * in the dmbemask mask when handle_irq() is called at the remote
+ * dibs client that owns the target dmb. The target device may choose
+ * to coalesce the signaling triggers of multiple move_data() calls
+ * to the same target dmb into a single handle_irq() call.
+ * Return: zero on success
+ */
+ int (*move_data)(struct dibs_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+ /**
+ * add_vlan_id() - add dibs device to vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ *
+ * In order to write into a vlan-tagged dmb, the remote device needs
+ * to belong to this vlan. A device can belong to more than 1 vlan.
+ * Any device can access an untagged dmb.
+ * Deprecated, only supported for backwards compatibility.
+ * Return: zero on success
+ */
+ int (*add_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * del_vlan_id() - remove dibs device from vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ * Return: zero on success
+ */
+ int (*del_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * signal_event() - trigger an event at a remote dibs device (optional)
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @trigger_irq: zero: notification may be coalesced with other events
+ * non-zero: notify immediately
+ * @event_code: 4 byte event code, meaning is defined by dibs client
+ * @info: 8 bytes of additional information,
+ * meaning is defined by dibs client
+ *
+ * dibs devices can offer support for sending a control event of type
+ * EVENT_SWR to a remote dibs device.
+ * NOTE: handle_event() will be called for all registered dibs clients
+ * at the remote device.
+ * Return: zero on success
+ */
+ int (*signal_event)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
+ /**
+ * support_mmapped_rdmb() - can this device provide memory mapped
+ * remote dmbs? (optional)
+ * @dev: dibs device
+ *
+ * A dibs device can provide a kernel address + length, that represent
+ * a remote target dmb (like MMIO). As an alternative to calling
+ * move_data(), a dibs client can write into such a ghost-send-buffer
+ * (= to this kernel address) and the data will immediately appear
+ * in the target dmb, without any call to move_data().
+ *
+ * Either all 3 function pointers for support_mmapped_rdmb(),
+ * attach_dmb() and detach_dmb() are defined, or all of them must
+ * be NULL.
+ *
+ * Return: non-zero, if memory mapped remote dmbs are supported.
+ */
+ int (*support_mmapped_rdmb)(struct dibs_dev *dev);
+ /**
+ * attach_dmb() - attach local memory to a remote dmb
+ * @dev: local sending dibs device
+ * @dmb: all other parameters are passed in the form of a
+ * dmb struct
+ * TODO: (THIS IS CONFUSING, should be changed)
+ * dmb_tok: (in) Token of the remote dmb, we want to attach to
+ * cpu_addr: (out) MMIO address
+ * dma_addr: (out) MMIO address (if applicable, invalid otherwise)
+ * dmb_len: (out) length of local MMIO region,
+ * equal to length of remote DMB.
+ * sba_idx: (out) index of remote dmb (NOT HELPFUL, should be removed)
+ *
+ * Provides a memory address to the sender that can be used to
+ * directly write into the remote dmb.
+ * Memory is available until detach_dmb() is called.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*attach_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * detach_dmb() - Detach the ghost buffer from a remote dmb
+ * @dev: dibs device
+ * @token: dmb token of the remote dmb
+ *
+ * No need to free cpu_addr.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*detach_dmb)(struct dibs_dev *dev, u64 token);
+};
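Taken together, the receive and send paths of a client look roughly like the sketch below. Field names follow the kernel-doc above (assuming dmb.rgid is a uuid_t, as elsewhere in this API); error handling is abbreviated and remote_gid/buf/len are placeholders.

	/* receiver: allocate + register a dmb for a remote peer */
	struct dibs_dmb dmb = {};

	uuid_copy(&dmb.rgid, remote_gid);
	dmb.dmb_len = 16384;
	err = dev->ops->register_dmb(dev, &dmb, &my_client);
	/* communicate dmb.dmb_tok to the sender out of band */

	/* sender: write into the remote dmb and signal bit 0 */
	err = dev->ops->move_data(dev, dmb_tok, 0, true, 0, buf, len);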
+
+struct dibs_dev {
+ struct list_head list;
+ struct device dev;
+ /* To be filled by device driver, before calling dibs_dev_add(): */
+ const struct dibs_dev_ops *ops;
+ uuid_t gid;
+ /* priv pointer for device driver */
+ void *drv_priv;
+
+ /* priv pointer per client; for client usage only */
+ void *priv[MAX_DIBS_CLIENTS];
+
+ /* get this lock before accessing any of the fields below */
+ spinlock_t lock;
+ /* array of client ids indexed by dmb idx;
+ * can be used as indices into priv and subs arrays
+ */
+ u8 *dmb_clientid_arr;
+ /* Sparse array of all dibs clients */
+ struct dibs_client *subs[MAX_DIBS_CLIENTS];
+};
+
+static inline void dibs_set_priv(struct dibs_dev *dev,
+ struct dibs_client *client, void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+static inline void *dibs_get_priv(struct dibs_dev *dev,
+ struct dibs_client *client)
+{
+ return dev->priv[client->id];
+}
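Extending the add_dev() sketch above, a typical pattern is to allocate per-device client state in add_dev() and look it up again in the IRQ handler (my_state is a placeholder):

	static void my_add_dev(struct dibs_dev *dev)
	{
		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

		if (st)
			dibs_set_priv(dev, &my_client, st);
	}

	static void my_handle_irq(struct dibs_dev *dev, unsigned int idx,
				  u16 dmbemask)
	{
		struct my_state *st = dibs_get_priv(dev, &my_client);

		/* use st */
	}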
+
+/* ------- End of client-only functions ----------- */
+
+/* Functions to be called by dibs device drivers:
+ */
+/**
+ * dibs_dev_alloc() - allocate and reference device structure
+ *
+ * The following fields will be valid upon successful return: dev
+ * NOTE: Use put_device(dibs_get_dev(@dibs)) to give up your reference
+ * instead of freeing @dibs directly once you have successfully called
+ * this function.
+ * Return: Pointer to dibs device structure
+ */
+struct dibs_dev *dibs_dev_alloc(void);
+/**
+ * dibs_dev_add() - register with dibs layer and all clients
+ * @dibs: dibs device
+ *
+ * The following fields must be valid upon entry: dev, ops, drv_priv
+ * All fields will be valid upon successful return.
+ * Return: zero on success
+ */
+int dibs_dev_add(struct dibs_dev *dibs);
+/**
+ * dibs_dev_del() - unregister from dibs layer and all clients
+ * @dibs: dibs device
+ */
+void dibs_dev_del(struct dibs_dev *dibs);
+
+#endif /* _DIBS_H */
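On the driver side the lifecycle is: allocate, fill in the mandatory fields, add, and eventually delete and drop the reference. A sketch with ops/gid setup abbreviated (the doc above uses dibs_get_dev() for the embedded struct device; &dibs->dev is used here for brevity):

	struct dibs_dev *dibs;

	dibs = dibs_dev_alloc();
	if (!dibs)
		return -ENOMEM;
	dibs->ops = &my_dibs_dev_ops;
	dibs->drv_priv = my_priv;
	uuid_gen(&dibs->gid);

	err = dibs_dev_add(dibs);
	if (err) {
		put_device(&dibs->dev);
		return err;
	}

	/* ... on teardown: */
	dibs_dev_del(dibs);
	put_device(&dibs->dev);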
diff --git a/include/linux/dim.h b/include/linux/dim.h
index f343bc9aa2ec..06543fd40fcc 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -10,6 +10,15 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct net_device;
+
+/* Number of DIM profiles and period mode. */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
@@ -38,12 +47,45 @@
* @pkts: CQ packet counter suggestion (by DIM)
* @comps: Completion counter
* @cq_period_mode: CQ period count mode (from CQE/EQE)
+ * @rcu: for asynchronous kfree_rcu
*/
struct dim_cq_moder {
u16 usec;
u16 pkts;
u16 comps;
u8 cq_period_mode;
+ struct rcu_head rcu;
+};
+
+#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */
+#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */
+
+#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */
+#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */
+#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */
+
+/**
+ * struct dim_irq_moder - Structure for irq moderation information.
+ * Used to collect irq moderation related information.
+ *
+ * @profile_flags: DIM_PROFILE_*
+ * @coal_flags: DIM_COALESCE_* for Rx and Tx
+ * @dim_rx_mode: Rx DIM period count mode: CQE or EQE
+ * @dim_tx_mode: Tx DIM period count mode: CQE or EQE
+ * @rx_profile: DIM profile list for Rx
+ * @tx_profile: DIM profile list for Tx
+ * @rx_dim_work: Rx DIM worker scheduled by net_dim()
+ * @tx_dim_work: Tx DIM worker scheduled by net_dim()
+ */
+struct dim_irq_moder {
+ u8 profile_flags;
+ u8 coal_flags;
+ u8 dim_rx_mode;
+ u8 dim_tx_mode;
+ struct dim_cq_moder __rcu *rx_profile;
+ struct dim_cq_moder __rcu *tx_profile;
+ void (*rx_dim_work)(struct work_struct *work);
+ void (*tx_dim_work)(struct work_struct *work);
};
/**
@@ -192,6 +234,77 @@ enum dim_step_result {
};
/**
+ * net_dim_init_irq_moder - collect information to initialize irq moderation
+ * @dev: target network device
+ * @profile_flags: Rx or Tx profile modification capability
+ * @coal_flags: irq moderation params flags
+ * @rx_mode: CQ period mode for Rx
+ * @tx_mode: CQ period mode for Tx
+ * @rx_dim_work: Rx worker called after dim decision
+ * @tx_dim_work: Tx worker called after dim decision
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work));
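A driver would typically call this once at probe time; a sketch, assuming EQE-based start modes and driver-provided workers my_rx_dim_work()/my_tx_dim_work():

	err = net_dim_init_irq_moder(netdev,
				     DIM_PROFILE_RX | DIM_PROFILE_TX,
				     DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				     my_rx_dim_work, my_tx_dim_work);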
+
+/**
+ * net_dim_free_irq_moder - free fields for irq moderation
+ * @dev: target network device
+ */
+void net_dim_free_irq_moder(struct net_device *dev);
+
+/**
+ * net_dim_setting - initialize DIM's cq mode and schedule worker
+ * @dev: target network device
+ * @dim: DIM context
+ * @is_tx: true indicates the tx direction, false indicates the rx direction
+ */
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx);
+
+/**
+ * net_dim_work_cancel - synchronously cancel dim's worker
+ * @dim: DIM context
+ */
+void net_dim_work_cancel(struct dim *dim);
+
+/**
+ * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_set_rx_mode - set DIM rx cq mode
+ * @dev: target network device
+ * @rx_mode: target rx cq mode
+ */
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode);
+
+/**
+ * net_dim_set_tx_mode - set DIM tx cq mode
+ * @dev: target network device
+ * @tx_mode: target tx cq mode
+ */
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode);
+
+/**
* dim_on_top - check if current state is a good place to stop (top location)
* @dim: DIM context
*
@@ -238,7 +351,8 @@ void dim_park_tired(struct dim *dim);
* Takes into consideration counter wrap-around.
* Returned boolean indicates whether curr_stats are reliable.
*/
-bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(const struct dim_sample *start,
+ const struct dim_sample *end,
struct dim_stats *curr_stats);
/**
@@ -311,7 +425,7 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
* This is the main logic of the algorithm, where data is processed in order
* to decide on next required action.
*/
-void net_dim(struct dim *dim, struct dim_sample end_sample);
+void net_dim(struct dim *dim, const struct dim_sample *end_sample);
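With the constified signature, callers build a sample on the stack and pass it by reference, e.g. (sketch; the ring counters are illustrative):

	struct dim_sample sample = {};

	dim_update_sample(ring->event_ctr, ring->packets, ring->bytes,
			  &sample);
	net_dim(&ring->dim, &sample);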
/* RDMA DIM */
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 2b5923909f96..464331c4c4a7 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -93,7 +93,7 @@ struct dio_driver {
struct device_driver driver;
};
-#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver)
+#define to_dio_driver(drv) container_of_const(drv, struct dio_driver, driver)
/* DIO/DIO-II boards all have the following 8bit registers.
* These are offsets from the base of the device.
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index c58c4f790c04..7e7b45b0d097 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -35,6 +35,9 @@ struct dlm_lockspace_ops {
int num_slots, int our_slot, uint32_t generation);
};
+/* only relevant for kernel lockspaces, will be removed in future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
/*
* dlm_new_lockspace
*
@@ -55,6 +58,11 @@ struct dlm_lockspace_ops {
* used to select the directory node. Must be the same on all nodes.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ * dlm request callbacks (ast, bast) are softirq safe. This flag should
+ * be preferred by users and will become the default in the future.
+ * If set, the strongest context for the ast, bast callbacks is softirq,
+ * which avoids an additional context switch.
*
* lvblen: length of lvb in bytes. Must be multiple of 8.
* dlm_new_lockspace() returns an error if this does not match
@@ -80,12 +88,43 @@ int dlm_new_lockspace(const char *name, const char *cluster,
int *ops_result, dlm_lockspace_t **lockspace);
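For example, a kernel user opting in to softirq-safe callbacks could create its lockspace like this (sketch; names and lvblen are illustrative):

	dlm_lockspace_t *ls;
	int ops_result, error;

	error = dlm_new_lockspace("my_ls", "my_cluster",
				  DLM_LSFL_SOFTIRQ, 64,
				  &my_ls_ops, my_arg, &ops_result, &ls);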
/*
+ * dlm_release_lockspace() release_option values:
+ *
+ * DLM_RELEASE_NO_LOCKS returns -EBUSY if any locks (lkb's)
+ * exist in the local lockspace.
+ *
+ * DLM_RELEASE_UNUSED previous value that is no longer used.
+ *
+ * DLM_RELEASE_NORMAL releases the lockspace regardless of any
+ * locks managed in the local lockspace.
+ *
+ * DLM_RELEASE_NO_EVENT releases the lockspace regardless of any
+ * locks managed in the local lockspace, and does not submit
+ * a leave event to the cluster manager, so other nodes will
+ * not be notified that the node should be removed from the
+ * list of lockspace members.
+ *
+ * DLM_RELEASE_RECOVER like DLM_RELEASE_NORMAL, but the remaining
+ * nodes will handle the removal of the node as if the node
+ * had failed, e.g. the recover_slot() callback would be used.
+ */
+#define DLM_RELEASE_NO_LOCKS 0
+#define DLM_RELEASE_UNUSED 1
+#define DLM_RELEASE_NORMAL 2
+#define DLM_RELEASE_NO_EVENT 3
+#define DLM_RELEASE_RECOVER 4
+#define __DLM_RELEASE_MAX DLM_RELEASE_RECOVER
+
+/*
* dlm_release_lockspace
*
* Stop a lockspace.
+ *
+ * release_option: see DLM_RELEASE values above.
*/
-int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
+int dlm_release_lockspace(dlm_lockspace_t *lockspace,
+ unsigned int release_option);
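Releasing then names one of the options above explicitly, e.g. (sketch):

	/* refuse if local locks still exist ... */
	error = dlm_release_lockspace(ls, DLM_RELEASE_NO_LOCKS);
	if (error == -EBUSY) {
		/* ... or force the release regardless */
		error = dlm_release_lockspace(ls, DLM_RELEASE_NORMAL);
	}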
/*
* dlm_lock
@@ -121,7 +160,14 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
* call.
*
* AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If kernel users of
+ * dlm_new_lockspace() pass DLM_LSFL_SOFTIRQ, the ast and bast
+ * callbacks can be processed in softirq context. Note that some
+ * callback contexts are the same context as the DLM lock request
+ * API; users must not hold a lock while calling the dlm lock request
+ * API and then try to acquire that same lock in the callback again,
+ * as this ends in lock recursion. New implementations should use
+ * DLM_LSFL_SOFTIRQ.
*/
int dlm_lock(dlm_lockspace_t *lockspace,
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
new file mode 100644
index 000000000000..a3c0ce2d3a42
--- /dev/null
+++ b/include/linux/dma-buf-mapping.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA BUF Mapping Helpers
+ *
+ */
+#ifndef __DMA_BUF_MAPPING_H__
+#define __DMA_BUF_MAPPING_H__
+#include <linux/dma-buf.h>
+
+struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
+ struct p2pdma_provider *provider,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size,
+ enum dma_data_direction dir);
+void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
+ enum dma_data_direction dir);
+#endif
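An importer could drive these helpers roughly as below. A sketch under assumptions: the attachment and p2pdma provider come from the exporter's setup, and the helper returns an ERR_PTR on failure.

	struct dma_buf_phys_vec vec = {
		.paddr = chunk_phys,	/* illustrative */
		.len = chunk_len,
	};
	struct sg_table *sgt;

	sgt = dma_buf_phys_vec_to_sgt(attach, provider, &vec, 1,
				      chunk_len, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);
	/* ... issue DMA ... */
	dma_buf_free_sgt(attach, sgt, DMA_BIDIRECTIONAL);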
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bd..0bc492090237 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
+#include <linux/pci-p2pdma.h>
struct device;
struct dma_buf;
@@ -35,15 +36,6 @@ struct dma_buf_attachment;
*/
struct dma_buf_ops {
/**
- * @cache_sgt_mapping:
- *
- * If true the framework will cache the first mapping made for each
- * attachment. This avoids creating mappings for attachments multiple
- * times.
- */
- bool cache_sgt_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -370,10 +362,8 @@ struct dma_buf {
*/
struct module *owner;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
-#endif
/** @priv: exporter specific private data for this buffer object. */
void *priv;
@@ -493,8 +483,6 @@ struct dma_buf_attach_ops {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
* @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
* @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +502,6 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
- struct sg_table *sgt;
- enum dma_data_direction dir;
bool peer2peer;
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
@@ -546,6 +532,16 @@ struct dma_buf_export_info {
};
/**
+ * struct dma_buf_phys_vec - describe continuous chunk of memory
+ * @paddr: physical address of that chunk
+ * @len: Length of this chunk
+ */
+struct dma_buf_phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
+/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
@@ -583,20 +579,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
return !!dmabuf->ops->pin;
}
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
- return !!attach->importer_ops;
-}
-
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
@@ -636,4 +618,6 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+struct dma_buf *dma_buf_iter_begin(void);
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf);
#endif /* __DMA_BUF_H__ */
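The new iterator pair allows walking all exported buffers, e.g. (sketch; it is assumed each call returns a referenced buffer and dma_buf_iter_next() releases the one passed in):

	struct dma_buf *buf;

	for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf)) {
		/* inspect buf under its own reference */
	}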
diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h
new file mode 100644
index 000000000000..e751479e21e7
--- /dev/null
+++ b/include/linux/dma-buf/heaps/cma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DMA_BUF_HEAP_CMA_H_
+#define DMA_BUF_HEAP_CMA_H_
+
+struct cma;
+
+#ifdef CONFIG_DMABUF_HEAPS_CMA
+int dma_heap_cma_register_heap(struct cma *cma);
+#else
+static inline int dma_heap_cma_register_heap(struct cma *cma)
+{
+ return 0;
+}
+#endif // CONFIG_DMABUF_HEAPS_CMA
+
+#endif // DMA_BUF_HEAP_CMA_H_
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index edbe13d00776..c249912456f9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -12,7 +12,7 @@
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
/*
* Record the mapping of CPU physical to DMA addresses for a given region.
@@ -78,14 +78,18 @@ static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
#define phys_to_dma_unencrypted phys_to_dma
#endif
#else
-static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
- phys_addr_t paddr)
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (dev->dma_range_map)
return translate_phys_to_dma(dev, paddr);
return paddr;
}
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
+{
+ return dma_addr_unencrypted(__phys_to_dma(dev, paddr));
+}
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
* bit in the DMA address, and dma_to_phys will clear it.
@@ -94,19 +98,20 @@ static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
*/
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return __sme_set(phys_to_dma_unencrypted(dev, paddr));
+ return dma_addr_encrypted(__phys_to_dma(dev, paddr));
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
phys_addr_t paddr;
+ dma_addr = dma_addr_canonical(dma_addr);
if (dev->dma_range_map)
paddr = translate_dma_to_phys(dev, dma_addr);
else
paddr = dma_addr;
- return __sme_clr(paddr);
+ return paddr;
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
@@ -144,7 +149,5 @@ void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index ec7f25def392..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -33,6 +33,7 @@ struct dma_fence_array_cb {
* @num_pending: fences in the array still pending
* @fences: array of the fences
* @work: internal irq_work function
+ * @callbacks: array of callback helpers
*/
struct dma_fence_array {
struct dma_fence base;
@@ -43,6 +44,8 @@ struct dma_fence_array {
struct dma_fence **fences;
struct irq_work work;
+
+ struct dma_fence_array_cb callbacks[] __counted_by(num_fences);
};
/**
@@ -76,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence)
for (index = 0, fence = dma_fence_array_first(head); fence; \
++(index), fence = dma_fence_array_next(head, index))
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index ad9e2506c2f4..68c3c1e41014 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -85,6 +85,10 @@ dma_fence_chain_contained(struct dma_fence *fence)
* dma_fence_chain_alloc
*
* Returns a new struct dma_fence_chain object or NULL on failure.
+ *
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
*/
#define dma_fence_chain_alloc() \
((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL))
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
index 66b1e56fbb81..62df222fe0f1 100644
--- a/include/linux/dma-fence-unwrap.h
+++ b/include/linux/dma-fence-unwrap.h
@@ -52,6 +52,8 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *cursors);
+int dma_fence_dedup_array(struct dma_fence **array, int num_fences);
+
/**
* dma_fence_unwrap_merge - unwrap and merge fences
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e06bad467f55..64639e104110 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -26,6 +26,7 @@
struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;
+struct seq_file;
/**
* struct dma_fence - software synchronization primitive
@@ -97,6 +98,7 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -125,14 +127,6 @@ struct dma_fence_cb {
*/
struct dma_fence_ops {
/**
- * @use_64bit_seqno:
- *
- * True if this dma_fence implementation uses 64bit seqno, false
- * otherwise.
- */
- bool use_64bit_seqno;
-
- /**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
@@ -169,8 +163,8 @@ struct dma_fence_ops {
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
+ * This is called with irqs disabled, so only spinlocks which disable
+ * IRQs can be used in the code outside of this callback.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
@@ -239,27 +233,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fence_value_str:
- *
- * Callback to fill in free-form debug info specific to this fence, like
- * the sequence number.
- *
- * This callback is optional.
- */
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
- /**
- * @timeline_value_str:
- *
- * Fills in the current value of the timeline as a string, like the
- * sequence number. Note that the specific fence passed to this function
- * should not matter, drivers should only use it to look up the
- * corresponding timeline structures.
- */
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
-
- /**
* @set_deadline:
*
* Callback to allow a fence waiter to inform the fence signaler of
@@ -283,6 +256,9 @@ struct dma_fence_ops {
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno);
+void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno);
+
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
@@ -403,6 +379,29 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
+ * DOC: Safe external access to driver provided object members
+ *
+ * All data not stored directly in the dma-fence object, such as the
+ * &dma_fence.lock and memory potentially accessed by functions in the
+ * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled
+ * because after that point drivers are allowed to free it.
+ *
+ * All code accessing that data via the dma-fence API (or directly, which is
+ * discouraged), MUST make sure to contain the complete access within a
+ * &rcu_read_lock and &rcu_read_unlock pair.
+ *
+ * Some dma-fence APIs handle this automatically, while others, for
+ * example &dma_fence_driver_name and &dma_fence_timeline_name, leave
+ * that responsibility to the caller.
+ *
+ * To enable this scheme to work drivers MUST ensure an RCU grace
+ * period elapses between signalling the fence and freeing said data.
+ *
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
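A caller of these two helpers therefore brackets the access with RCU, along these lines (sketch; handling of the __rcu annotation is elided for brevity):

	rcu_read_lock();
	pr_info("fence %s/%s\n",
		dma_fence_driver_name(fence),
		dma_fence_timeline_name(fence));
	rcu_read_unlock();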
+
+/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
* @fence: the fence to check
@@ -462,21 +461,20 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @fence: fence in whose context to do the comparison
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
- * @ops: dma_fence_ops associated with the seqno
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u64 f1, u64 f2,
- const struct dma_fence_ops *ops)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
{
/* This is for backward compatibility with drivers which can only handle
* 32bit sequence numbers. Use a 64bit compare when the driver says to
* do so.
*/
- if (ops->use_64bit_seqno)
+ if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -496,7 +494,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
}
/**
@@ -574,6 +572,12 @@ int dma_fence_get_status(struct dma_fence *fence);
* rather than success. This must be set before signaling (so that the value
* is visible before any waiters on the signal callback are woken). This
* helper exists to help catching erroneous setting of #dma_fence.error.
+ *
+ * Examples of error codes which drivers should use:
+ *
+ * * %-ENODATA This operation produced no data; no other operation affected.
+ * * %-ECANCELED All operations from the same context have been canceled.
+ * * %-ETIME Operation caused a timeout and potentially device reset.
*/
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
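For example, a driver cancelling pending work would record the error before signalling (sketch):

	dma_fence_set_error(fence, -ECANCELED);
	dma_fence_signal(fence);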
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 0c05561cad6e..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,22 +9,21 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
struct dma_buf *(*allocate)(struct dma_heap *heap,
unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags);
+ u32 fd_flags,
+ u64 heap_flags);
};
/**
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 02a1c825896b..4809204c674c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -13,20 +13,7 @@
struct cma;
struct iommu_ops;
-/*
- * Values for struct dma_map_ops.flags:
- *
- * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
- * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
- * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
- * coherent and it's not an SWIOTLB buffer.
- */
-#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0)
-#define DMA_F_CAN_SKIP_SYNC (1 << 1)
-
struct dma_map_ops {
- unsigned int flags;
-
void *(*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
unsigned long attrs);
@@ -37,11 +24,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
- enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs);
- void (*free_noncontiguous)(struct device *dev, size_t size,
- struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -49,10 +31,10 @@ struct dma_map_ops {
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs);
/*
@@ -64,12 +46,6 @@ struct dma_map_ops {
enum dma_data_direction dir, unsigned long attrs);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir);
void (*sync_single_for_device)(struct device *dev,
@@ -88,7 +64,7 @@ struct dma_map_ops {
unsigned long (*get_merge_boundary)(struct device *dev);
};
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -103,7 +79,7 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return NULL;
@@ -112,7 +88,7 @@ static inline void set_dma_ops(struct device *dev,
const struct dma_map_ops *dma_ops)
{
}
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;
@@ -171,6 +147,9 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
{
__free_pages(page, get_order(size));
}
+static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+}
#endif /* CONFIG_DMA_CMA*/
#ifdef CONFIG_DMA_DECLARE_COHERENT
@@ -219,20 +198,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
}
#endif /* CONFIG_DMA_GLOBAL_POOL */
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
- struct sg_table sgt;
- struct page **pages;
-};
-#define sgt_handle(sgt) \
- container_of((sgt), struct dma_sgt_handle, sgt)
-
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
@@ -274,7 +239,7 @@ static inline bool dev_is_dma_coherent(struct device *dev)
{
return true;
}
-#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
+#endif
static inline void dma_reset_need_sync(struct device *dev)
{
@@ -424,15 +389,15 @@ void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
#else
-#define arch_dma_map_page_direct(d, a) (false)
-#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_phys_direct(d, a) (false)
+#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
#endif
@@ -466,58 +431,4 @@ static inline void debug_dma_dump_mappings(struct device *dev)
#endif /* CONFIG_DMA_API_DEBUG */
extern const struct dma_map_ops dma_dummy_ops;
-
-enum pci_p2pdma_map_type {
- /*
- * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
- * type hasn't been calculated yet. Functions that return this enum
- * never return this value.
- */
- PCI_P2PDMA_MAP_UNKNOWN = 0,
-
- /*
- * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
- * traverse the host bridge and the host bridge is not in the
- * allowlist. DMA Mapping routines should return an error when
- * this is returned.
- */
- PCI_P2PDMA_MAP_NOT_SUPPORTED,
-
- /*
- * PCI_P2PDMA_BUS_ADDR: Indicates that two devices can talk to
- * each other directly through a PCI switch and the transaction will
- * not traverse the host bridge. Such a mapping should program
- * the DMA engine with PCI bus addresses.
- */
- PCI_P2PDMA_MAP_BUS_ADDR,
-
- /*
- * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
- * to each other, but the transaction traverses a host bridge on the
- * allowlist. In this case, a normal mapping either with CPU physical
- * addresses (in the case of dma-direct) or IOVA addresses (in the
- * case of IOMMUs) should be used to program the DMA engine.
- */
- PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
-};
-
-struct pci_p2pdma_map_state {
- struct dev_pagemap *pgmap;
- int map;
- u64 bus_off;
-};
-
-#ifdef CONFIG_PCI_P2PDMA
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg);
-#else /* CONFIG_PCI_P2PDMA */
-static inline enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
-{
- return PCI_P2PDMA_MAP_NOT_SUPPORTED;
-}
-#endif /* CONFIG_PCI_P2PDMA */
-
#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f693aafe221f..2ceda49c609f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -2,15 +2,11 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
-#include <linux/cache.h>
-#include <linux/sizes.h>
-#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
-#include <linux/mem_encrypt.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
@@ -63,6 +59,26 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
+ * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
+ *
+ * This attribute indicates the physical address is not normal system
+ * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
+ * functions, it may not be cacheable, and access using CPU load/store
+ * instructions may not be allowed.
+ *
+ * Usually this will be used to describe MMIO addresses, or other non-cacheable
+ * register addresses. When DMA mapping this sort of address we call
+ * the operation Peer to Peer, as one device is DMA'ing to another device.
+ * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
+ * is appropriate.
+ *
+ * For architectures that require cache flushing for DMA coherence
+ * DMA_ATTR_MMIO will not perform any cache flushing. The address
+ * provided must never be mapped cacheable into the CPU.
+ */
+#define DMA_ATTR_MMIO (1UL << 10)
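This attribute pairs with the dma_map_phys()/dma_unmap_phys() entry points declared further down in this header; mapping a peer device's MMIO region might look like this (sketch; bar_phys and len are illustrative):

	dma_addr_t addr;

	addr = dma_map_phys(dev, bar_phys, len, DMA_TO_DEVICE,
			    DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, addr))
		return -EIO;
	/* ... DMA ... */
	dma_unmap_phys(dev, addr, len, DMA_TO_DEVICE, DMA_ATTR_MMIO);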
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
@@ -74,7 +90,23 @@
*/
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
-#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_BIT_MASK(n) GENMASK_ULL((n) - 1, 0)
+
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
+
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
+{
+ /* Casting is needed for 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
+}
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
@@ -106,6 +138,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -160,6 +196,15 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
+static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
@@ -281,6 +326,70 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
}
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfers uses the dma_iova_*() calls or %false if
+ * they can't be used.
+ */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return state->__size != 0;
+}
+
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
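The intended flow is: try to allocate an IOVA range, link physical ranges into it, sync, use state.addr for the transfer, and destroy when done. A sketch with a single range and abbreviated error handling:

	struct dma_iova_state state = {};
	int ret;

	if (!dma_iova_try_alloc(dev, &state, phys, size))
		return -EOPNOTSUPP;	/* fall back to other mapping APIs */

	ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
	if (!ret)
		ret = dma_iova_sync(dev, &state, 0, size);
	if (ret) {
		dma_iova_destroy(dev, &state, 0, DMA_TO_DEVICE, 0);
		return ret;
	}
	/* ... DMA using state.addr ..., then: */
	dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);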
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return false;
+}
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ return false;
+}
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
+{
+}
+static inline void dma_iova_destroy(struct device *dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_iova_sync(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int dma_iova_link(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
+
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
@@ -330,6 +439,7 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
+bool dma_need_unmap(struct device *dev);
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
@@ -355,6 +465,10 @@ static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
return false;
}
+static inline bool dma_need_unmap(struct device *dev)
+{
+ return false;
+}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
struct page *dma_alloc_pages(struct device *dev, size_t size,
@@ -524,13 +638,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -559,13 +671,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -575,13 +685,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
return 0;
}
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
- return -EIO;
+ return;
dev->dma_parms->min_align_mask = min_align_mask;
- return 0;
}
#ifndef dma_get_cache_alignment
@@ -638,10 +747,14 @@ static inline int dma_mmap_wc(struct device *dev,
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
-#define dma_unmap_addr(PTR, ADDR_NAME) (0)
-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define dma_unmap_len(PTR, LEN_NAME) (0)
-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#define dma_unmap_addr(PTR, ADDR_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
+#define dma_unmap_len(PTR, LEN_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
#endif /* _LINUX_DMA_MAPPING_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 8d0e34dad446..c5ab6fd9ebe8 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -105,10 +105,10 @@ enum dma_resv_usage {
* This should be used by submissions which don't want to participate in
* any implicit synchronization.
*
- * The most common case are preemption fences, page table updates, TLB
- * flushes as well as explicit synced user submissions.
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes as well as explicitly synced user submissions.
*
- * Explicit synced user user submissions can be promoted to
+ * Explicitly synced user submissions can be promoted to
* DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
* dma_buf_import_sync_file() when implicit synchronization should
* become necessary after initial adding of the fence.
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 6969391580d2..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index 1e491c5dcac2..5d43881e6fb7 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -136,12 +136,9 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
-void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
- u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma),
- bool skip_fdq);
+ void (*cleanup)(void *data, dma_addr_t desc_dma));
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 752dbde4cec1..99efe2b9b4ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -84,7 +84,7 @@ enum dma_transfer_direction {
DMA_TRANS_NONE,
};
-/**
+/*
* Interleaved Transfer Request
* ----------------------------
* A chunk is collection of contiguous bytes to be transferred.
@@ -161,6 +161,16 @@ struct dma_interleaved_template {
};
/**
+ * struct dma_vec - DMA vector
+ * @addr: Bus address of the start of the vector
+ * @len: Length in bytes of the DMA vector
+ */
+struct dma_vec {
+ dma_addr_t addr;
+ size_t len;
+};
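A peripheral driver fills an array of these and hands it to the device_prep_peripheral_dma_vec() callback, normally through the dmaengine_prep_peripheral_dma_vec() wrapper whose kernel-doc begins at the end of this section (sketch; buffer addresses and lengths are illustrative):

	struct dma_vec vecs[2] = {
		{ .addr = buf0_dma, .len = len0 },
		{ .addr = buf1_dma, .len = len1 },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);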
+
+/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
@@ -213,7 +223,7 @@ enum sum_check_bits {
};
/**
- * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
* @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
@@ -276,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* pointer to the engine's metadata area
* 4. Read out the metadata from the pointer
*
- * Note: the two mode is not compatible and clients must use one mode for a
+ * Warning: the two modes are not compatible and clients must use one mode for a
* descriptor.
*/
enum dma_desc_metadata_mode {
@@ -584,9 +594,13 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
- * descriptor pending. To be pushed on .issue_pending() call
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @desc_free: driver's callback function to free a reusable descriptor
+ * after completion
* @callback: routine to call after this operation is complete
+ * @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
+ * @unmap: hook for generic DMA unmap data
* @desc_metadata_mode: core managed metadata mode to protect mixed use of
* DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
* DESC_METADATA_NONE
@@ -817,12 +831,14 @@ struct dma_filter {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
+ * where the address and size of each segment is located in one entry of
+ * the dma_vec array.
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
* @device_caps: May be used to override the generic DMA slave capabilities
* with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
@@ -910,6 +926,10 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
+ struct dma_chan *chan, const struct dma_vec *vecs,
+ size_t nents, enum dma_transfer_direction direction,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -921,9 +941,6 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
- struct dma_chan *chan, dma_addr_t dst, u64 data,
- unsigned long flags);
void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
@@ -973,6 +990,25 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
dir, flags, NULL);
}
+/**
+ * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
+ * @chan: The channel to be used for this descriptor
+ * @vecs: The array of DMA vectors that should be transferred
+ * @nents: The number of DMA vectors in the array
+ * @dir: Specifies the direction of the data transfer
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
+ struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
+ return NULL;
+
+ return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
+ dir, flags);
+}
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
@@ -1488,6 +1524,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
@@ -1524,6 +1561,12 @@ static inline struct dma_chan *dma_request_chan_by_mask(
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void dma_release_channel(struct dma_chan *chan)
{
}
@@ -1575,7 +1618,8 @@ int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan);
+ struct dma_chan *chan,
+ const char *name);
void dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
@@ -1598,14 +1642,14 @@ static inline struct dma_chan
{
struct dma_chan *chan;
- chan = dma_request_slave_channel(dev, name);
- if (chan)
+ chan = dma_request_chan(dev, name);
+ if (!IS_ERR(chan))
return chan;
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(&mask, fn, fn_param, NULL);
+ return dma_request_channel(mask, fn, fn_param);
}
static inline char *
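The new vector API avoids building a scatterlist when the client already holds bus address/length pairs. A hedged sketch of a consumer; the channel name "rx", the buffer addresses and the lengths are invented for illustration:

#include <linux/dmaengine.h>

/* Sketch: submit two pre-mapped buffers as one peripheral transfer. */
static int my_submit(struct device *dev, dma_addr_t a, dma_addr_t b)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct dma_vec vecs[2] = {
		{ .addr = a, .len = 512 },
		{ .addr = b, .len = 256 },
	};

	chan = devm_dma_request_chan(dev, "rx");	/* auto-released */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;	/* provider may not implement the vec op */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}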
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb4238..7d40b51933d1 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,7 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/nodemask_types.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
@@ -18,8 +19,8 @@ struct device;
#ifdef CONFIG_HAS_DMA
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation);
+struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary, int node);
void dma_pool_destroy(struct dma_pool *pool);
@@ -35,9 +36,12 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
void dmam_pool_destroy(struct dma_pool *pool);
#else /* !CONFIG_HAS_DMA */
-static inline struct dma_pool *dma_pool_create(const char *name,
- struct device *dev, size_t size, size_t align, size_t allocation)
-{ return NULL; }
+static inline struct dma_pool *dma_pool_create_node(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary,
+ int node)
+{
+ return NULL;
+}
static inline void dma_pool_destroy(struct dma_pool *pool) { }
static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle) { return NULL; }
@@ -49,6 +53,21 @@ static inline struct dma_pool *dmam_pool_create(const char *name,
static inline void dmam_pool_destroy(struct dma_pool *pool) { }
#endif /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary)
+{
+ return dma_pool_create_node(name, dev, size, align, boundary,
+ NUMA_NO_NODE);
+}
+
+/**
+ * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory.
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * Same as dma_pool_alloc(), but the returned memory is zeroed.
+ */
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
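dma_pool_create() keeps its old behaviour by forwarding NUMA_NO_NODE, while node-aware drivers can now place pool pages on a specific node. A short sketch, with the pool name and sizes made up:

#include <linux/dmapool.h>

/* Sketch: a 64-byte, 64-aligned descriptor pool on the device's node. */
static struct dma_pool *my_create_pool(struct device *dev)
{
	/* dma_pool_create("my-desc", dev, 64, 64, 0) is now shorthand
	 * for the same call with NUMA_NO_NODE as the node argument */
	return dma_pool_create_node("my-desc", dev, 64, 64, 0,
				    dev_to_node(dev));
}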
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 499bb2c63483..692b2b445761 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -292,7 +292,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
-extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index d275736230b3..562f520b23c2 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -15,6 +15,7 @@
struct dpll_device;
struct dpll_pin;
+struct dpll_pin_esync;
struct dpll_device_ops {
int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
@@ -25,6 +26,24 @@ struct dpll_device_ops {
struct netlink_ext_ack *extack);
int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
s32 *temp, struct netlink_ext_ack *extack);
+ int (*clock_quality_level_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ unsigned long *qls,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_set)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_set)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_get)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
@@ -83,6 +102,23 @@ struct dpll_pin_ops {
int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *ffo, struct netlink_ext_ack *extack);
+ int (*esync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack);
+ int (*esync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_frequency {
@@ -111,6 +147,13 @@ struct dpll_pin_phase_adjust_range {
s32 max;
};
+struct dpll_pin_esync {
+ u64 freq;
+ const struct dpll_pin_frequency *range;
+ u8 range_num;
+ u8 pulse;
+};
+
struct dpll_pin_properties {
const char *board_label;
const char *panel_label;
@@ -120,6 +163,7 @@ struct dpll_pin_properties {
u32 freq_supported_num;
struct dpll_pin_frequency *freq_supported;
struct dpll_pin_phase_adjust_range phase_range;
+ u32 phase_gran;
};
#if IS_ENABLED(CONFIG_DPLL)
@@ -175,6 +219,9 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv);
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin);
+
int dpll_device_change_ntf(struct dpll_device *dpll);
int dpll_pin_change_ntf(struct dpll_pin *pin);
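A driver backs the new embedded-sync ops with its hardware state; below is a hedged sketch of an esync_get handler filling struct dpll_pin_esync. The single-entry frequency table and the pulse value are invented:

#include <linux/dpll.h>

static const struct dpll_pin_frequency my_esync_range[] = {
	{ .min = 1, .max = 1 },			/* 1 PPS only */
};

/* Sketch: report the currently configured embedded sync signal. */
static int my_esync_get(const struct dpll_pin *pin, void *pin_priv,
			const struct dpll_device *dpll, void *dpll_priv,
			struct dpll_pin_esync *esync,
			struct netlink_ext_ack *extack)
{
	esync->freq = 1;			/* as read from hardware */
	esync->range = my_esync_range;
	esync->range_num = ARRAY_SIZE(my_esync_range);
	esync->pulse = 25;			/* hw-reported pulse value */
	return 0;
}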
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index f3664ee12170..d13aabdeb4b2 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -8,12 +8,18 @@
#include <net/dsa.h>
#include <linux/types.h>
+/* VBID is limited to three bits and zero is reserved, so only
+ * 7 bridges can be enumerated.
+ */
+#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7
+
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
void dsa_tag_8021q_unregister(struct dsa_switch *ds);
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_bridge bridge);
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
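The join helper now reports whether TX forwarding offload was set up and can extack failures, e.g. once all VBIDs are taken. A hedged sketch of a switch driver forwarding its port op to the tag_8021q core:

#include <linux/dsa/8021q.h>

/* Sketch: DSA driver bridge-join op built on the tag_8021q core.
 * With a 3-bit VBID and zero reserved, joins beyond
 * DSA_TAG_8021Q_MAX_NUM_BRIDGES bridges are expected to fail. */
static int my_port_bridge_join(struct dsa_switch *ds, int port,
			       struct dsa_bridge bridge,
			       bool *tx_fwd_offload,
			       struct netlink_ext_ack *extack)
{
	return dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
					 extack);
}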
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index b4f22112ba75..3ce7cbcc37a3 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -5,8 +5,8 @@ struct lan9303;
struct lan9303_phy_ops {
 /* PHY 1 and 2 access */
- int (*phy_read)(struct lan9303 *chip, int port, int regnum);
- int (*phy_write)(struct lan9303 *chip, int port,
+ int (*phy_read)(struct lan9303 *chip, int addr, int regnum);
+ int (*phy_write)(struct lan9303 *chip, int addr,
int regnum, u16 val);
};
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index dca2969015d8..620a3260fc08 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -5,6 +5,8 @@
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/kthread.h>
#include <linux/packing.h>
#include <linux/skbuff.h>
@@ -13,6 +15,7 @@
struct ocelot_skb_cb {
struct sk_buff *clone;
unsigned int ptp_class; /* valid only for clones */
+ unsigned long ptp_tx_time; /* valid only for clones */
u32 tstamp_lo;
u8 ptp_cmd;
u8 ts_id;
@@ -273,4 +276,49 @@ static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
return rew_op;
}
+/**
+ * ocelot_xmit_get_vlan_info() - Determine VLAN_TCI and TAG_TYPE for injected frame
+ * @skb: Pointer to socket buffer
+ * @br: Pointer to bridge device that the port is under, if any
+ * @vlan_tci: Pointer where the classified VLAN TCI is returned
+ * @tag_type: Pointer where the IFH tag type (IFH_TAG_TYPE_C or IFH_TAG_TYPE_S) is returned
+ *
+ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
+ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
+ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
+ * and source address learning is not performed for packets injected from the
+ * CPU anyway, so it doesn't matter that the VID is "wrong".
+ */
+static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
+ struct net_device *br,
+ u64 *vlan_tci, u64 *tag_type)
+{
+ struct vlan_ethhdr *hdr;
+ u16 proto, tci;
+
+ if (!br || !br_vlan_enabled(br)) {
+ *vlan_tci = 0;
+ *tag_type = IFH_TAG_TYPE_C;
+ return;
+ }
+
+ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ br_vlan_get_proto(br, &proto);
+
+ if (ntohs(hdr->h_vlan_proto) == proto) {
+ vlan_remove_tag(skb, &tci);
+ *vlan_tci = tci;
+ } else {
+ rcu_read_lock();
+ br_vlan_get_pvid_rcu(br, &tci);
+ rcu_read_unlock();
+ *vlan_tci = tci;
+ }
+
+ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
+}
+
#endif
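A tagger's xmit path would call the helper before packing the injection frame header; a sketch with the IFH packing left out, since that part is device-specific:

#include <linux/dsa/ocelot.h>

/* Sketch: derive the VLAN fields destined for the injection header. */
static void my_fill_ifh_vlan(struct sk_buff *skb, struct net_device *br)
{
	u64 vlan_tci, tag_type;

	ocelot_xmit_get_vlan_info(skb, br, &vlan_tci, &tag_type);
	/* pack vlan_tci and tag_type into the IFH here (not shown) */
}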
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 82ebf9223948..f8811c46b89e 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -34,9 +34,6 @@ struct dw_apb_clocksource {
};
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced);
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index ff44ec346162..05743900a116 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -38,11 +38,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
+#define _DPRINTK_FLAGS_INCL_STACK (1<<6)
#define _DPRINTK_FLAGS_INCL_ANY \
(_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
_DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
- _DPRINTK_FLAGS_INCL_SOURCENAME)
+ _DPRINTK_FLAGS_INCL_SOURCENAME | _DPRINTK_FLAGS_INCL_STACK)
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
@@ -160,6 +161,12 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
const struct ib_device *ibdev,
const char *fmt, ...);
+#define __dynamic_dump_stack(desc) \
+{ \
+ if (desc.flags & _DPRINTK_FLAGS_INCL_STACK) \
+ dump_stack(); \
+}
+
#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
__section("__dyndbg") name = { \
@@ -220,8 +227,10 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
*/
#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
func(&id, ##__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
#define __dynamic_func_call(id, fmt, func, ...) \
__dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
@@ -229,8 +238,10 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
func(__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
__dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
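Any dynamically controlled debug callsite can now be asked to emit a stack trace after its message; nothing changes at the callsite itself. A sketch (the flag is toggled at runtime through the dynamic-debug control file; the exact flag letter is not shown in this header):

#include <linux/device.h>
#include <linux/printk.h>

/* Sketch: an ordinary pr_debug() site; when its descriptor carries
 * _DPRINTK_FLAGS_INCL_STACK, the print is followed by dump_stack(). */
static void my_probe_step(struct device *dev)
{
	pr_debug("probing %s\n", dev_name(dev));
}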
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 281298e77a15..808b1a5102e7 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -127,7 +127,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
return;
- dql->last_obj_cnt = count;
+ WRITE_ONCE(dql->last_obj_cnt, count);
	/* We want to force a write first, so that CPUs do not attempt
	 * to get the cache line containing last_obj_cnt, num_queued, adj_limit
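Marking the store lets lockless readers pair with it; a sketch of such a reader, assuming it runs concurrently with dql_queued() on another CPU:

#include <linux/dynamic_queue_limits.h>

/* Sketch: READ_ONCE() pairs with the WRITE_ONCE() in dql_queued(). */
static unsigned int my_peek_last_batch(const struct dql *dql)
{
	return READ_ONCE(dql->last_obj_cnt);
}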
diff --git a/include/linux/edac.h b/include/linux/edac.h
index b4ee8961e623..fa32f2aca22f 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -661,4 +661,226 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
return mci->dimms[index];
}
+
+#define EDAC_FEAT_NAME_LEN 128
+
+/* RAS feature type */
+enum edac_dev_feat {
+ RAS_FEAT_SCRUB,
+ RAS_FEAT_ECS,
+ RAS_FEAT_MEM_REPAIR,
+ RAS_FEAT_MAX
+};
+
+/**
+ * struct edac_scrub_ops - scrub device operations (all elements optional)
+ * @read_addr: read base address of scrubbing range.
+ * @read_size: read size of the scrubbing range.
+ * @write_addr: set base address of the scrubbing range.
+ * @write_size: set size of the scrubbing range.
+ * @get_enabled_bg: check if currently performing background scrub.
+ * @set_enabled_bg: start or stop a bg-scrub.
+ * @get_min_cycle: get minimum supported scrub cycle duration in seconds.
+ * @get_max_cycle: get maximum supported scrub cycle duration in seconds.
+ * @get_cycle_duration: get current scrub cycle duration in seconds.
+ * @set_cycle_duration: set current scrub cycle duration in seconds.
+ */
+struct edac_scrub_ops {
+ int (*read_addr)(struct device *dev, void *drv_data, u64 *base);
+ int (*read_size)(struct device *dev, void *drv_data, u64 *size);
+ int (*write_addr)(struct device *dev, void *drv_data, u64 base);
+ int (*write_size)(struct device *dev, void *drv_data, u64 size);
+ int (*get_enabled_bg)(struct device *dev, void *drv_data, bool *enable);
+ int (*set_enabled_bg)(struct device *dev, void *drv_data, bool enable);
+ int (*get_min_cycle)(struct device *dev, void *drv_data, u32 *min);
+ int (*get_max_cycle)(struct device *dev, void *drv_data, u32 *max);
+ int (*get_cycle_duration)(struct device *dev, void *drv_data, u32 *cycle);
+ int (*set_cycle_duration)(struct device *dev, void *drv_data, u32 cycle);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_SCRUB)
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_SCRUB */
+
+/**
+ * struct edac_ecs_ops - ECS device operations (all elements optional)
+ * @get_log_entry_type: read the log entry type value.
+ * @set_log_entry_type: set the log entry type value.
+ * @get_mode: read the mode value.
+ * @set_mode: set the mode value.
+ * @reset: reset the ECS counter.
+ * @get_threshold: read the threshold count per gigabit of memory cells.
+ * @set_threshold: set the threshold count per gigabit of memory cells.
+ */
+struct edac_ecs_ops {
+ int (*get_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_mode)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_mode)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*reset)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_threshold)(struct device *dev, void *drv_data, int fru_id, u32 *threshold);
+ int (*set_threshold)(struct device *dev, void *drv_data, int fru_id, u32 threshold);
+};
+
+struct edac_ecs_ex_info {
+ u16 num_media_frus;
+};
+
+#if IS_ENABLED(CONFIG_EDAC_ECS)
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus);
+#else
+static inline int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_ECS */
+
+enum edac_mem_repair_type {
+ EDAC_REPAIR_PPR,
+ EDAC_REPAIR_CACHELINE_SPARING,
+ EDAC_REPAIR_ROW_SPARING,
+ EDAC_REPAIR_BANK_SPARING,
+ EDAC_REPAIR_RANK_SPARING,
+ EDAC_REPAIR_MAX
+};
+
+extern const char * const edac_repair_type[];
+
+enum edac_mem_repair_cmd {
+ EDAC_DO_MEM_REPAIR = 1,
+};
+
+/**
+ * struct edac_mem_repair_ops - memory repair operations
+ * (all elements are optional except do_repair and at least one of set_hpa/set_dpa)
+ * @get_repair_type: get the memory repair type, listed in
+ * enum edac_mem_repair_type.
+ * @get_persist_mode: get the current persist mode.
+ * false - Soft repair type (temporary repair).
+ * true - Hard memory repair type (permanent repair).
+ * @set_persist_mode: set the persist mode of the memory repair instance.
+ * @get_repair_safe_when_in_use: get whether memory media is accessible and
+ * data is retained during repair operation.
+ * @get_hpa: get current host physical address (HPA) of memory to repair.
+ * @set_hpa: set host physical address (HPA) of memory to repair.
+ * @get_min_hpa: get the minimum supported host physical address (HPA).
+ * @get_max_hpa: get the maximum supported host physical address (HPA).
+ * @get_dpa: get current device physical address (DPA) of memory to repair.
+ * @set_dpa: set device physical address (DPA) of memory to repair.
+ * In some states of system configuration (e.g. before address decoders
+ * have been configured), memory devices (e.g. CXL) may not have an active
+ * mapping in the host physical address map. As such, the memory
+ * to repair must be identified by a device specific physical addressing
+ * scheme using a device physical address (DPA). The DPA and other control
+ * attributes to use for the repair operations will be presented in related
+ * error records.
+ * @get_min_dpa: get the minimum supported device physical address (DPA).
+ * @get_max_dpa: get the maximum supported device physical address (DPA).
+ * @get_nibble_mask: get current nibble mask of memory to repair.
+ * @set_nibble_mask: set nibble mask of memory to repair.
+ * @get_bank_group: get current bank group of memory to repair.
+ * @set_bank_group: set bank group of memory to repair.
+ * @get_bank: get current bank of memory to repair.
+ * @set_bank: set bank of memory to repair.
+ * @get_rank: get current rank of memory to repair.
+ * @set_rank: set rank of memory to repair.
+ * @get_row: get current row of memory to repair.
+ * @set_row: set row of memory to repair.
+ * @get_column: get current column of memory to repair.
+ * @set_column: set column of memory to repair.
+ * @get_channel: get current channel of memory to repair.
+ * @set_channel: set channel of memory to repair.
+ * @get_sub_channel: get current subchannel of memory to repair.
+ * @set_sub_channel: set subchannel of memory to repair.
+ * @do_repair: Issue memory repair operation for the HPA/DPA and
+ * other control attributes set for the memory to repair.
+ *
+ * All elements are optional except do_repair and at least one of set_hpa/set_dpa.
+ */
+struct edac_mem_repair_ops {
+ int (*get_repair_type)(struct device *dev, void *drv_data, const char **type);
+ int (*get_persist_mode)(struct device *dev, void *drv_data, bool *persist);
+ int (*set_persist_mode)(struct device *dev, void *drv_data, bool persist);
+ int (*get_repair_safe_when_in_use)(struct device *dev, void *drv_data, bool *safe);
+ int (*get_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*set_hpa)(struct device *dev, void *drv_data, u64 hpa);
+ int (*get_min_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_max_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*set_dpa)(struct device *dev, void *drv_data, u64 dpa);
+ int (*get_min_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_max_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_nibble_mask)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_nibble_mask)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank_group)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank_group)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_rank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_rank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_row)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_row)(struct device *dev, void *drv_data, u32 val);
+ int (*get_column)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_column)(struct device *dev, void *drv_data, u32 val);
+ int (*get_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*get_sub_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_sub_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*do_repair)(struct device *dev, void *drv_data, u32 val);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_MEM_REPAIR)
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_MEM_REPAIR */
+
+/* EDAC device feature information structure */
+struct edac_dev_data {
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ u8 instance;
+ void *private;
+};
+
+struct edac_dev_feat_ctx {
+ struct device dev;
+ void *private;
+ struct edac_dev_data *scrub;
+ struct edac_dev_data ecs;
+ struct edac_dev_data *mem_repair;
+};
+
+struct edac_dev_feature {
+ enum edac_dev_feat ft_type;
+ u8 instance;
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ void *ctx;
+ struct edac_ecs_ex_info ecs_info;
+};
+
+int edac_dev_register(struct device *parent, char *dev_name,
+ void *parent_pvt_data, int num_features,
+ const struct edac_dev_feature *ras_features);
#endif /* _LINUX_EDAC_H_ */
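A platform driver exposes these controls by filling one struct edac_dev_feature per feature and registering them in a single call. A hedged sketch for one scrub instance; the op bodies and names are invented and most optional ops are omitted:

#include <linux/edac.h>

static int my_get_enabled_bg(struct device *dev, void *drv_data, bool *enable)
{
	*enable = true;		/* read the real state from hardware */
	return 0;
}

static const struct edac_scrub_ops my_scrub_ops = {
	.get_enabled_bg	= my_get_enabled_bg,
};

/* Sketch: register a single background-scrub control instance. */
static int my_ras_register(struct device *parent, void *drv_data)
{
	struct edac_dev_feature feat = {
		.ft_type	= RAS_FEAT_SCRUB,
		.instance	= 0,
		.scrub_ops	= &my_scrub_ops,
		.ctx		= drv_data,
	};

	return edac_dev_register(parent, "my_ras", drv_data, 1, &feat);
}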
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c860c72a921d..3a485cc0e0fa 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -11,6 +11,8 @@
Supported chipsets: 93c46, 93c56 and 93c66.
*/
+#include <linux/bits.h>
+
/*
* EEPROM operation defines.
*/
@@ -34,6 +36,7 @@
* @register_write(struct eeprom_93cx6 *eeprom): handler to
* write to the eeprom register by using all reg_* fields.
* @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @quirks: eeprom or controller quirks
* @drive_data: Set if we're driving the data line.
* @reg_data_in: register field to indicate data input
* @reg_data_out: register field to indicate data output
@@ -50,6 +53,9 @@ struct eeprom_93cx6 {
void (*register_write)(struct eeprom_93cx6 *eeprom);
int width;
+ unsigned int quirks;
+/* Some EEPROMs require an extra clock cycle before reading */
+#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0)
char drive_data;
char reg_data_in;
@@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
u8 addr, u16 data);
+
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom)
+{
+ return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
+}
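A caller flags the chip once and the core is expected to honour it on every read; the helper keeps the test in one place. A short sketch with an invented wrapper:

#include <linux/eeprom_93cx6.h>

/* Sketch: mark the chip as needing the extra cycle, then read a word. */
static void my_read_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 *val)
{
	eeprom->quirks |= PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
	eeprom_93cx6_read(eeprom, addr, val);
}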
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
deleted file mode 100644
index 34c2175e6a1e..000000000000
--- a/include/linux/eeprom_93xx46.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Module: eeprom_93xx46
- * platform description for 93xx46 EEPROMs.
- */
-#include <linux/gpio/consumer.h>
-
-struct eeprom_93xx46_platform_data {
- unsigned char flags;
-#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
-#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
-#define EE_READONLY 0x08 /* forbid writing */
-#define EE_SIZE1K 0x10 /* 1 kb of data, that is a 93xx46 */
-#define EE_SIZE2K 0x20 /* 2 kb of data, that is a 93xx56 */
-#define EE_SIZE4K 0x40 /* 4 kb of data, that is a 93xx66 */
-
- unsigned int quirks;
-/* Single word read transfers only; no sequential read. */
-#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
-/* Instructions such as EWEN are (addrlen + 2) in length. */
-#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
-/* Add extra cycle after address during a read */
-#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
-
- /*
- * optional hooks to control additional logic
- * before and after spi transfer.
- */
- void (*prepare)(void *);
- void (*finish)(void *);
- struct gpio_desc *select;
-};
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 418e555459da..2a43094e23f7 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -74,10 +74,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+#define EFI_GUID(a, b, c, d...) ((efi_guid_t){ { \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, d } }
+ (c) & 0xff, ((c) >> 8) & 0xff, d } })
/*
* Generic EFI table header
@@ -114,21 +114,22 @@ typedef struct {
#define EFI_MAX_MEMORY_TYPE 16
/* Attribute values: */
-#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
-#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
-#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
-#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
-#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
-#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
-#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
-#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
-#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
-#define EFI_MEMORY_MORE_RELIABLE \
- ((u64)0x0000000000010000ULL) /* higher reliability */
-#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
-#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
-#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
-#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
+#define EFI_MEMORY_UC BIT_ULL(0) /* uncached */
+#define EFI_MEMORY_WC BIT_ULL(1) /* write-coalescing */
+#define EFI_MEMORY_WT BIT_ULL(2) /* write-through */
+#define EFI_MEMORY_WB BIT_ULL(3) /* write-back */
+#define EFI_MEMORY_UCE BIT_ULL(4) /* uncached, exported */
+#define EFI_MEMORY_WP BIT_ULL(12) /* write-protect */
+#define EFI_MEMORY_RP BIT_ULL(13) /* read-protect */
+#define EFI_MEMORY_XP BIT_ULL(14) /* execute-protect */
+#define EFI_MEMORY_NV BIT_ULL(15) /* non-volatile */
+#define EFI_MEMORY_MORE_RELIABLE BIT_ULL(16) /* higher reliability */
+#define EFI_MEMORY_RO BIT_ULL(17) /* read-only */
+#define EFI_MEMORY_SP BIT_ULL(18) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO BIT_ULL(19) /* supports encryption */
+#define EFI_MEMORY_HOT_PLUGGABLE BIT_ULL(20) /* supports unplugging at runtime */
+#define EFI_MEMORY_RUNTIME BIT_ULL(63) /* range requires runtime mapping */
+
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
@@ -289,7 +290,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor,
unsigned long *data_size, void *data);
typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
efi_guid_t *vendor);
-typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
@@ -363,7 +364,6 @@ void efi_native_runtime_setup(void);
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
@@ -373,18 +373,19 @@ void efi_native_runtime_setup(void);
#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
+#define EFI_EDID_DISCOVERED_PROTOCOL_GUID EFI_GUID(0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa)
+#define EFI_EDID_ACTIVE_PROTOCOL_GUID EFI_GUID(0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define APPLE_SET_OS_PROTOCOL_GUID EFI_GUID(0xc5c5da95, 0x7d5c, 0x45e6, 0xb2, 0xf1, 0x3f, 0xd5, 0x2b, 0xb1, 0x00, 0x77)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
@@ -440,6 +441,7 @@ void efi_native_runtime_setup(void);
/* OVMF protocol GUIDs */
#define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
+#define OVMF_MEMORY_LOG_TABLE_GUID EFI_GUID(0x95305139, 0xb20f, 0x4723, 0x84, 0x25, 0x62, 0x7c, 0x88, 0x8f, 0xf1, 0x21)
typedef struct {
efi_guid_t guid;
@@ -580,15 +582,6 @@ struct efi_mem_range {
};
typedef struct {
- u32 version;
- u32 length;
- u64 memory_protection_attribute;
-} efi_properties_table_t;
-
-#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
-#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
-
-typedef struct {
u16 version;
u16 length;
u32 runtime_services_supported;
@@ -607,7 +600,11 @@ typedef struct {
u32 num_entries;
u32 desc_size;
u32 flags;
- efi_memory_desc_t entry[0];
+ /*
+ * There are @num_entries following, each of size @desc_size bytes,
+ * including an efi_memory_desc_t header. See efi_memdesc_ptr().
+ */
+ efi_memory_desc_t entry[];
} efi_memory_attributes_table_t;
typedef struct {
@@ -648,6 +645,7 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long tpm_log; /* TPM2 Event Log table */
unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long ovmf_debug_log;
unsigned long mokvar_table; /* MOK variable config table */
unsigned long coco_secret; /* Confidential computing secret table */
unsigned long unaccepted; /* Unaccepted memory table */
@@ -759,8 +757,6 @@ extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
@@ -778,12 +774,12 @@ extern unsigned long efi_mem_attr_table;
*/
typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
-extern int efi_memattr_init(void);
+extern void efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
/*
- * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * efi_memdesc_ptr - get the n-th EFI memmap descriptor
* @map: the start of efi memmap
* @desc_size: the size of space for each EFI memmap descriptor
* @n: the index of efi memmap descriptor
@@ -801,7 +797,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
* during bootup since for_each_efi_memory_desc_xxx() is available after the
* kernel initializes the EFI subsystem to set up struct efi_memory_map.
*/
-#define efi_early_memdesc_ptr(map, desc_size, n) \
+#define efi_memdesc_ptr(map, desc_size, n) \
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
/* Iterate through an efi_memory_map */
@@ -868,10 +864,9 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
-#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
-#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
-#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
-#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
+#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -1133,6 +1128,8 @@ static inline bool efi_runtime_disabled(void) { return true; }
extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
extern unsigned long efi_call_virt_save_flags(void);
+void efi_runtime_assert_lock_held(void);
+
enum efi_secureboot_mode {
efi_secureboot_mode_unset,
efi_secureboot_mode_unknown,
@@ -1294,8 +1291,6 @@ struct linux_efi_memreserve {
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
-char *efi_systab_show_arch(char *str);
-
/*
* The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
* to the kernel by an EFI boot loader. The table contains a packed
@@ -1345,7 +1340,7 @@ struct linux_efi_initrd {
bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
-static inline
+static __always_inline
bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
{
if (!IS_ENABLED(CONFIG_XEN_EFI))
@@ -1355,6 +1350,8 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+int ovmf_log_probe(unsigned long ovmf_debug_log_table);
+
/*
* efivar ops event type
*/
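The renamed efi_memdesc_ptr() keeps the old stride-by-desc_size semantics, so raw-map walkers change in name only. A sketch, assuming map, desc_size and nr_descs came from the boot-time memory map data:

#include <linux/efi.h>

/* Sketch: count write-back-capable pages in a raw EFI memory map. */
static u64 my_count_wb_pages(void *map, unsigned long desc_size,
			     unsigned long nr_descs)
{
	u64 pages = 0;
	unsigned long i;

	for (i = 0; i < nr_descs; i++) {
		efi_memory_desc_t *md = efi_memdesc_ptr(map, desc_size, i);

		if (md->attribute & EFI_MEMORY_WB)
			pages += md->num_pages;
	}
	return pages;
}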
diff --git a/include/linux/ehl_pse_io_aux.h b/include/linux/ehl_pse_io_aux.h
new file mode 100644
index 000000000000..afb8587ee5fb
--- /dev/null
+++ b/include/linux/ehl_pse_io_aux.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Elkhart Lake PSE I/O Auxiliary Device
+ *
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * Author: Raag Jadav <raag.jadav@intel.com>
+ */
+
+#ifndef _EHL_PSE_IO_AUX_H_
+#define _EHL_PSE_IO_AUX_H_
+
+#include <linux/ioport.h>
+
+#define EHL_PSE_IO_NAME "ehl_pse_io"
+#define EHL_PSE_GPIO_NAME "gpio"
+#define EHL_PSE_TIO_NAME "pps_tio"
+
+struct ehl_pse_io_data {
+ struct resource mem;
+ int irq;
+};
+
+#endif /* _EHL_PSE_IO_AUX_H_ */
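The parent PSE driver presumably hands this structure to its GPIO and TIO children over the auxiliary bus; the container type below is an assumption for illustration, and only struct ehl_pse_io_data comes from this header:

#include <linux/auxiliary_bus.h>
#include <linux/ehl_pse_io_aux.h>

struct my_ehl_adev {				/* hypothetical container */
	struct auxiliary_device adev;
	struct ehl_pse_io_data data;
};

/* Sketch: a child probe reading its MMIO slice and interrupt. */
static int my_child_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct my_ehl_adev *p = container_of(adev, struct my_ehl_adev, adev);

	/* p->data.mem and p->data.irq describe this child's resources */
	return 0;
}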
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index b012e30afebd..cf55630b595b 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -28,6 +28,9 @@
#define EISA_CONFIG_ENABLED 1
#define EISA_CONFIG_FORCED 2
+/* Chosen to hold the longest string in eisa.ids. */
+#define EISA_DEVICE_INFO_NAME_SIZE 74
+
/* There is not much we can say about an EISA device, apart from
* signature, slot number, and base address. dma_mask is set by
 * default to parent device mask. */
@@ -41,7 +44,7 @@ struct eisa_device {
u64 dma_mask;
struct device dev; /* generic device */
#ifdef CONFIG_EISA_NAMES
- char pretty_name[50];
+ char pretty_name[EISA_DEVICE_INFO_NAME_SIZE];
#endif
};
@@ -60,12 +63,12 @@ struct eisa_driver {
struct device_driver driver;
};
-#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver)
+#define to_eisa_driver(drv) container_of_const(drv, struct eisa_driver, driver)
/* These external functions are only available when EISA support is enabled. */
#ifdef CONFIG_EISA
-extern struct bus_type eisa_bus_type;
+extern const struct bus_type eisa_bus_type;
int eisa_driver_register (struct eisa_driver *edrv);
void eisa_driver_unregister (struct eisa_driver *edrv);
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 69b136e4dd2b..bb3dcded055f 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -60,23 +60,21 @@
#else /* !__ASSEMBLER__ */
#include <uapi/linux/elf.h>
+#include <linux/compiler.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
* type of name and desc depend on the macro arguments. "name" must
- * be a literal string, and "desc" must be passed by value. You may
- * only define one note per line, since __LINE__ is used to generate
- * unique symbols.
+ * be a literal string, and "desc" must be passed by value.
*/
-#define _ELFNOTE_PASTE(a,b) a##b
-#define _ELFNOTE(size, name, unique, type, desc) \
+#define ELFNOTE(size, name, type, desc) \
static const struct { \
struct elf##size##_note _nhdr; \
unsigned char _name[sizeof(name)] \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
typeof(desc) _desc \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
- } _ELFNOTE_PASTE(_note_, unique) \
+ } __UNIQUE_ID(note) \
__used \
__attribute__((section(".note." name), \
aligned(sizeof(Elf##size##_Word)), \
@@ -89,11 +87,10 @@
name, \
desc \
}
-#define ELFNOTE(size, name, type, desc) \
- _ELFNOTE(size, name, __LINE__, type, desc)
#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
+
#endif /* __ASSEMBLER__ */
#endif /* _LINUX_ELFNOTE_H */
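With __UNIQUE_ID() generating the symbol, two notes may now share a source line, and usage is otherwise unchanged; a minimal file-scope sketch with an invented vendor name, type and payload:

#include <linux/elfnote.h>

/* Sketch: emits a note into the ".note.MyVendor" section. */
ELFNOTE32("MyVendor", 1, 0x2a);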
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 1ff52020cf75..43aa6153dc57 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -54,7 +54,11 @@ struct em_perf_table {
/**
* struct em_perf_domain - Performance domain
* @em_table: Pointer to the runtime modifiable em_perf_table
+ * @node: node in em_pd_list (in energy_model.c)
+ * @id: A unique ID number for each performance domain
* @nr_perf_states: Number of performance states
+ * @min_perf_state: Minimum allowed Performance State index
+ * @max_perf_state: Maximum allowed Performance State index
* @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
@@ -69,7 +73,11 @@ struct em_perf_table {
*/
struct em_perf_domain {
struct em_perf_table __rcu *em_table;
+ struct list_head node;
+ int id;
int nr_perf_states;
+ int min_perf_state;
+ int max_perf_state;
unsigned long flags;
unsigned long cpus[];
};
@@ -163,23 +171,29 @@ struct em_data_callback {
struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
- struct em_perf_table __rcu *new_table);
+ struct em_perf_table *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span,
- bool microwatts);
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
-struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
-void em_table_free(struct em_perf_table __rcu *table);
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
+void em_table_free(struct em_perf_table *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states);
int em_dev_update_chip_binning(struct device *dev);
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_adjust_cpu_capacity(unsigned int cpu);
+void em_rebuild_sched_domains(void);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
* @table: List of performance states, in ascending order
- * @nr_perf_states: Number of performance states
+ * @pd: Performance domain supplying the allowed state range and flags
* @max_util: Max utilization to map with the EM
- * @pd_flags: Performance Domain flags
*
* It is called from the scheduler code quite frequently and as a consequence
* doesn't implement any check.
@@ -188,13 +202,16 @@ int em_dev_update_chip_binning(struct device *dev);
* requirement.
*/
static inline int
-em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
- unsigned long max_util, unsigned long pd_flags)
+em_pd_get_efficient_state(struct em_perf_state *table,
+ struct em_perf_domain *pd, unsigned long max_util)
{
+ unsigned long pd_flags = pd->flags;
+ int min_ps = pd->min_perf_state;
+ int max_ps = pd->max_perf_state;
struct em_perf_state *ps;
int i;
- for (i = 0; i < nr_perf_states; i++) {
+ for (i = min_ps; i <= max_ps; i++) {
ps = &table[i];
if (ps->performance >= max_util) {
if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
@@ -204,7 +221,7 @@ em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
}
}
- return nr_perf_states - 1;
+ return max_ps;
}
/**
@@ -231,9 +248,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
struct em_perf_state *ps;
int i;
-#ifdef CONFIG_SCHED_DEBUG
WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
-#endif
if (!sum_util)
return 0;
@@ -253,8 +268,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* requested performance.
*/
em_table = rcu_dereference(pd->em_table);
- i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
- max_util, pd->flags);
+ i = em_pd_get_efficient_state(em_table->state, pd, max_util);
ps = &em_table->state[i];
/*
@@ -338,8 +352,15 @@ struct em_data_callback {};
static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span,
- bool microwatts)
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
+ return -EINVAL;
+}
+static inline
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
{
return -EINVAL;
}
@@ -365,14 +386,14 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
return 0;
}
static inline
-struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
{
return NULL;
}
-static inline void em_table_free(struct em_perf_table __rcu *table) {}
+static inline void em_table_free(struct em_perf_table *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
- struct em_perf_table __rcu *new_table)
+ struct em_perf_table *new_table)
{
return -EINVAL;
}
@@ -391,6 +412,14 @@ static inline int em_dev_update_chip_binning(struct device *dev)
{
return -EINVAL;
}
+static inline
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz)
+{
+ return -EINVAL;
+}
+static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
+static inline void em_rebuild_sched_domains(void) {}
#endif
#endif
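The new limit setters narrow the [min_perf_state, max_perf_state] window that em_pd_get_efficient_state() scans. A hedged sketch of a caller; the frequency bounds are illustrative:

#include <linux/energy_model.h>

/* Sketch: clamp a CPU's EM search window to 800 MHz..2 GHz. */
static int my_clamp_cpu(int cpu)
{
	struct em_perf_domain *pd = em_cpu_get(cpu);

	if (!pd)
		return -ENODEV;

	return em_update_performance_limits(pd, 800000, 2000000);
}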
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index b0fb775a600d..87efb38b7081 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,26 +2,15 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
-#include <linux/static_call_types.h>
+#include <linux/irq-entry-common.h>
+#include <linux/livepatch.h>
#include <linux/ptrace.h>
-#include <linux/syscalls.h>
+#include <linux/resume_user_mode.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
-#include <linux/context_tracking.h>
-#include <linux/livepatch.h>
-#include <linux/resume_user_mode.h>
-#include <linux/tick.h>
-#include <linux/kmsan.h>
#include <asm/entry-common.h>
-
-/*
- * Define dummy _TIF work flags if not defined by the architecture or for
- * disabled functionality.
- */
-#ifndef _TIF_PATCH_PENDING
-# define _TIF_PATCH_PENDING (0)
-#endif
+#include <asm/syscall.h>
#ifndef _TIF_UPROBE
# define _TIF_UPROBE (0)
@@ -48,6 +37,7 @@
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
ARCH_SYSCALL_WORK_ENTER)
+
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -55,87 +45,7 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-/*
- * TIF flags handled in exit_to_user_mode_loop()
- */
-#ifndef ARCH_EXIT_TO_USER_MODE_WORK
-# define ARCH_EXIT_TO_USER_MODE_WORK (0)
-#endif
-
-#define EXIT_TO_USER_MODE_WORK \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
- ARCH_EXIT_TO_USER_MODE_WORK)
-
-/**
- * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
- * @regs: Pointer to currents pt_regs
- *
- * Defaults to an empty implementation. Can be replaced by architecture
- * specific code.
- *
- * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
- * section. Use __always_inline so the compiler cannot push it out of line
- * and make it instrumentable.
- */
-static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
-
-#ifndef arch_enter_from_user_mode
-static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
-#endif
-
-/**
- * enter_from_user_mode - Establish state when coming from user mode
- *
- * Syscall/interrupt entry disables interrupts, but user mode is traced as
- * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- *
- * Invoked from architecture specific syscall entry code with interrupts
- * disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and interrupts are still
- * disabled. The subsequent functions can be instrumented.
- *
- * This is invoked when there is architecture specific functionality to be
- * done between establishing state and enabling interrupts. The caller must
- * enable interrupts before invoking syscall_enter_from_user_mode_work().
- */
-static __always_inline void enter_from_user_mode(struct pt_regs *regs)
-{
- arch_enter_from_user_mode(regs);
- lockdep_hardirqs_off(CALLER_ADDR0);
-
- CT_WARN_ON(__ct_state() != CONTEXT_USER);
- user_exit_irqoff();
-
- instrumentation_begin();
- kmsan_unpoison_entry_regs(regs);
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-
-/**
- * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from architecture specific syscall entry code with interrupts
- * disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct, interrupts are enabled and the
- * subsequent functions can be instrumented.
- *
- * This handles lockdep, RCU (context tracking) and tracing state, i.e.
- * the functionality provided by enter_from_user_mode().
- *
- * This is invoked when there is extra architecture specific functionality
- * to be done between establishing state and handling user mode entry work.
- */
-void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
-
-long syscall_trace_enter(struct pt_regs *regs, long syscall,
- unsigned long work);
+long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -144,8 +54,8 @@ long syscall_trace_enter(struct pt_regs *regs, long syscall,
* @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
- * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
- * architecture specific work.
+ * enabled after invoking enter_from_user_mode(), enabling interrupts and
+ * extra architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -181,8 +91,9 @@ static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *re
* function returns all state is correct, interrupts are enabled and the
* subsequent functions can be instrumented.
*
- * This is combination of syscall_enter_from_user_mode_prepare() and
- * syscall_enter_from_user_mode_work().
+ * This is the combination of enter_from_user_mode() and
+ * syscall_enter_from_user_mode_work() to be used when there is no
+ * architecture specific work to be done between the two.
*
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
@@ -202,168 +113,13 @@ static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, l
}
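
For orientation, the split entry API is meant to be used roughly as below when an architecture has extra work to do between establishing state and running the generic syscall work; a hedged sketch, assuming the moved enter_from_user_mode() helper stays available to arch code, where do_arch_entry_work() and the dispatch step are hypothetical:

static void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	long syscall;

	enter_from_user_mode(regs);		/* lockdep, RCU, tracing state */
	local_irq_enable();
	do_arch_entry_work(regs);		/* hypothetical extra arch step */
	syscall = syscall_enter_from_user_mode_work(regs, nr);
	/* ... dispatch 'syscall' to the syscall table ... */
}
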
/**
- * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Defaults to local_irq_enable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
-
-#ifndef local_irq_enable_exit_to_user
-static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
-{
- local_irq_enable();
-}
-#endif
-
-/**
- * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
- *
- * Defaults to local_irq_disable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_disable_exit_to_user(void);
-
-#ifndef local_irq_disable_exit_to_user
-static inline void local_irq_disable_exit_to_user(void)
-{
- local_irq_disable();
-}
-#endif
-
-/**
- * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
- * to user mode.
- * @regs: Pointer to currents pt_regs
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Invoked from exit_to_user_mode_loop() with interrupt enabled
- *
- * Defaults to NOOP. Can be supplied by architecture specific code.
- */
-static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
- unsigned long ti_work);
-
-#ifndef arch_exit_to_user_mode_work
-static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
- unsigned long ti_work)
-{
-}
-#endif
-
-/**
- * arch_exit_to_user_mode_prepare - Architecture specific preparation for
- * exit to user mode.
- * @regs: Pointer to currents pt_regs
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Invoked from exit_to_user_mode_prepare() with interrupt disabled as the last
- * function before return. Defaults to NOOP.
- */
-static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
- unsigned long ti_work);
-
-#ifndef arch_exit_to_user_mode_prepare
-static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
- unsigned long ti_work)
-{
-}
-#endif
-
-/**
- * arch_exit_to_user_mode - Architecture specific final work before
- * exit to user mode.
- *
- * Invoked from exit_to_user_mode() with interrupt disabled as the last
- * function before return. Defaults to NOOP.
- *
- * This needs to be __always_inline because it is non-instrumentable code
- * invoked after context tracking switched to user mode.
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current's pt_regs
+ * @work: Current thread syscall work
*
- * An architecture implementation must not do anything complex, no locking
- * etc. The main purpose is for speculation mitigations.
+ * Do one-time syscall specific work.
*/
-static __always_inline void arch_exit_to_user_mode(void);
-
-#ifndef arch_exit_to_user_mode
-static __always_inline void arch_exit_to_user_mode(void) { }
-#endif
-
-/**
- * arch_do_signal_or_restart - Architecture specific signal delivery function
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from exit_to_user_mode_loop().
- */
-void arch_do_signal_or_restart(struct pt_regs *regs);
-
-/**
- * exit_to_user_mode_loop - do any pending work before leaving to user space
- */
-unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
- unsigned long ti_work);
-
-/**
- * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
- * @regs: Pointer to pt_regs on entry stack
- *
- * 1) check that interrupts are disabled
- * 2) call tick_nohz_user_enter_prepare()
- * 3) call exit_to_user_mode_loop() if any flags from
- * EXIT_TO_USER_MODE_WORK are set
- * 4) check that interrupts are still disabled
- */
-static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
-{
- unsigned long ti_work;
-
- lockdep_assert_irqs_disabled();
-
- /* Flush pending rcuog wakeup before the last need_resched() check */
- tick_nohz_user_enter_prepare();
-
- ti_work = read_thread_flags();
- if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
- ti_work = exit_to_user_mode_loop(regs, ti_work);
-
- arch_exit_to_user_mode_prepare(regs, ti_work);
-
- /* Ensure that kernel state is sane for a return to userspace */
- kmap_assert_nomap();
- lockdep_assert_irqs_disabled();
- lockdep_sys_exit();
-}
-
-/**
- * exit_to_user_mode - Fixup state when exiting to user mode
- *
- * Syscall/interrupt exit enables interrupts, but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about it.
- *
- * 1) Trace interrupts on state
- * 2) Invoke context tracking if enabled to adjust RCU state
- * 3) Invoke architecture specific last minute exit code, e.g. speculation
- * mitigations, etc.: arch_exit_to_user_mode()
- * 4) Tell lockdep that interrupts are enabled
- *
- * Invoked from architecture specific code when syscall_exit_to_user_mode()
- * is not suitable as the last step before returning to userspace. Must be
- * invoked with interrupts disabled and the caller must be
- * non-instrumentable.
- * The caller has to invoke syscall_exit_to_user_mode_work() before this.
- */
-static __always_inline void exit_to_user_mode(void)
-{
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare();
- instrumentation_end();
-
- user_enter_irqoff();
- arch_exit_to_user_mode();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
@@ -378,7 +134,30 @@ static __always_inline void exit_to_user_mode(void)
* make the final state transitions. Interrupts must stay disabled between
* return from this function and the invocation of exit_to_user_mode().
*/
-void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+ unsigned long nr = syscall_get_nr(current, regs);
+
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+ local_irq_enable();
+ }
+
+ rseq_debug_syscall_return(regs);
+
+ /*
+ * Do one-time syscall specific work. If these work items are
+ * enabled, we want to run them exactly once per syscall exit with
+ * interrupts enabled.
+ */
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
+}
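
A matching sketch for the exit side under the same assumptions (arch_prepare_return() is hypothetical): syscall_exit_to_user_mode_work() returns with interrupts disabled, and exit_to_user_mode() must be the last step before returning to user space.

static __always_inline void arch_syscall_exit(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_work(regs);	/* returns with IRQs disabled */
	arch_prepare_return(regs);		/* hypothetical extra arch work */
	instrumentation_end();
	exit_to_user_mode();			/* final noinstr transition */
}
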
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -409,147 +188,12 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
* exit_to_user_mode(). This function is preferred unless there is a
* compelling architectural reason to use the separate functions.
*/
-void syscall_exit_to_user_mode(struct pt_regs *regs);
-
-/**
- * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from architecture specific entry code with interrupts disabled.
- * Can only be called when the interrupt entry came from user mode. The
- * calling code must be non-instrumentable. When the function returns all
- * state is correct and the subsequent functions can be instrumented.
- *
- * The function establishes state (lockdep, RCU (context tracking), tracing)
- */
-void irqentry_enter_from_user_mode(struct pt_regs *regs);
-
-/**
- * irqentry_exit_to_user_mode - Interrupt exit work
- * @regs: Pointer to current's pt_regs
- *
- * Invoked with interrupts disabled and fully valid regs. Returns with all
- * work handled, interrupts disabled such that the caller can immediately
- * switch to user mode. Called from architecture specific interrupt
- * handling code.
- *
- * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
- * Interrupt exit is not invoking #1 which is the syscall specific one time
- * work.
- */
-void irqentry_exit_to_user_mode(struct pt_regs *regs);
-
-#ifndef irqentry_state
-/**
- * struct irqentry_state - Opaque object for exception state storage
- * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
- * exit path has to invoke ct_irq_exit().
- * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
- * lockdep state is restored correctly on exit from nmi.
- *
- * This opaque object is filled in by the irqentry_*_enter() functions and
- * must be passed back into the corresponding irqentry_*_exit() functions
- * when the exception is complete.
- *
- * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
- * and all members private. Descriptions of the members are provided to aid in
- * the maintenance of the irqentry_*() functions.
- */
-typedef struct irqentry_state {
- union {
- bool exit_rcu;
- bool lockdep;
- };
-} irqentry_state_t;
-#endif
-
-/**
- * irqentry_enter - Handle state tracking on ordinary interrupt entries
- * @regs: Pointer to pt_regs of interrupted context
- *
- * Invokes:
- * - lockdep irqflag state tracking as low level ASM entry disabled
- * interrupts.
- *
- * - Context tracking if the exception hit user mode.
- *
- * - The hardirq tracer to keep the state consistent as low level ASM
- * entry disabled interrupts.
- *
- * As a precondition, this requires that the entry came from user mode,
- * idle, or a kernel context in which RCU is watching.
- *
- * For kernel mode entries RCU handling is done conditional. If RCU is
- * watching then the only RCU requirement is to check whether the tick has
- * to be restarted. If RCU is not watching then ct_irq_enter() has to be
- * invoked on entry and ct_irq_exit() on exit.
- *
- * Avoiding the ct_irq_enter/exit() calls is an optimization but also
- * solves the problem of kernel mode pagefaults which can schedule, which
- * is not possible after invoking ct_irq_enter() without undoing it.
- *
- * For user mode entries irqentry_enter_from_user_mode() is invoked to
- * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
- * would not be possible.
- *
- * Returns: An opaque object that must be passed to idtentry_exit()
- */
-irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
-
-/**
- * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
- *
- * Conditional reschedule with additional sanity checks.
- */
-void raw_irqentry_exit_cond_resched(void);
-#ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
-#define irqentry_exit_cond_resched_dynamic_disabled NULL
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
-#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_irqentry_exit_cond_resched(void);
-#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
-#endif
-#else /* CONFIG_PREEMPT_DYNAMIC */
-#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
-#endif /* CONFIG_PREEMPT_DYNAMIC */
-
-/**
- * irqentry_exit - Handle return from exception that used irqentry_enter()
- * @regs: Pointer to pt_regs (exception entry regs)
- * @state: Return value from matching call to irqentry_enter()
- *
- * Depending on the return target (kernel/user) this runs the necessary
- * preemption and work checks if possible and required and returns to
- * the caller with interrupts disabled and no further work pending.
- *
- * This is the last action before returning to the low level ASM code which
- * just needs to return to the appropriate context.
- *
- * Counterpart to irqentry_enter().
- */
-void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
-
-/**
- * irqentry_nmi_enter - Handle NMI entry
- * @regs: Pointer to currents pt_regs
- *
- * Similar to irqentry_enter() but taking care of the NMI constraints.
- */
-irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
-
-/**
- * irqentry_nmi_exit - Handle return from NMI handling
- * @regs: Pointer to pt_regs (NMI entry regs)
- * @irq_state: Return value from matching call to irqentry_nmi_enter()
- *
- * Last action before returning to the low level assembly code.
- *
- * Counterpart to irqentry_nmi_enter().
- */
-void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ syscall_exit_to_user_mode_work(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
#endif
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-virt.h
index 6813171afccb..bfa767702d9a 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-virt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ENTRYKVM_H
-#define __LINUX_ENTRYKVM_H
+#ifndef __LINUX_ENTRYVIRT_H
+#define __LINUX_ENTRYVIRT_H
#include <linux/static_call_types.h>
#include <linux/resume_user_mode.h>
@@ -10,17 +10,16 @@
#include <linux/tick.h>
/* Transfer to guest mode work */
-#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
#endif
#define XFER_TO_GUEST_MODE_WORK \
- (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
-
-struct kvm_vcpu;
+ (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
+ ARCH_XFER_TO_GUEST_MODE_WORK)
/**
* arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
@@ -31,12 +30,10 @@ struct kvm_vcpu;
* Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
* replaced by architecture specific code.
*/
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work);
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
-#ifndef arch_xfer_to_guest_mode_work
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work)
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
{
return 0;
}
@@ -45,11 +42,10 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
/**
* xfer_to_guest_mode_handle_work - Check and handle pending work which needs
* to be handled before going to guest mode
- * @vcpu: Pointer to current's VCPU data
*
* Returns: 0 or an error code
*/
-int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+int xfer_to_guest_mode_handle_work(void);
/**
* xfer_to_guest_mode_prepare - Perform last minute preparation work that
@@ -94,6 +90,6 @@ static inline bool xfer_to_guest_mode_work_pending(void)
lockdep_assert_irqs_disabled();
return __xfer_to_guest_mode_work_pending();
}
-#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
#endif
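
The vcpu argument is gone from the transfer-work API; a minimal sketch of how a virtualization run loop would consume the renamed helpers (run_vcpu_once() is hypothetical and the guest-entry step is elided):

static int run_vcpu_once(void)
{
	int ret = 0;

	if (__xfer_to_guest_mode_work_pending())
		ret = xfer_to_guest_mode_handle_work();	/* no vcpu argument now */
	/* if ret == 0, enter guest mode here */
	return ret;
}
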
diff --git a/include/linux/err.h b/include/linux/err.h
index b5d9bb2a2349..8c37be0620ab 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -42,6 +42,20 @@ static inline void * __must_check ERR_PTR(long error)
}
/**
+ * INIT_ERR_PTR - Init a const error pointer.
+ * @error: A negative error code.
+ *
+ * Like ERR_PTR(), but usable to initialize static variables.
+ */
+#define INIT_ERR_PTR(error) ((void *)(error))
+
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
+/* Cast an error pointer to __iomem. */
+#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error)
+
+/**
* PTR_ERR - Extract the error code from an error pointer.
* @ptr: An error pointer.
* Return: The error code within @ptr.
@@ -51,6 +65,9 @@ static inline long __must_check PTR_ERR(__force const void *ptr)
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
/**
* IS_ERR - Detect an error pointer.
* @ptr: The pointer to check.
@@ -61,6 +78,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Detect an error pointer in the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
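
The __percpu variants mirror ERR_PTR()/IS_ERR()/PTR_ERR() while keeping the sparse address-space annotation intact; a minimal usage sketch (the allocator wrapper is hypothetical):

static u64 __percpu *alloc_counters(void)	/* hypothetical helper */
{
	u64 __percpu *p = alloc_percpu(u64);

	return p ? p : ERR_PTR_PCPU(-ENOMEM);
}

static int counters_init(u64 __percpu **out)
{
	u64 __percpu *p = alloc_counters();

	if (IS_ERR_PCPU(p))
		return PTR_ERR_PCPU(p);
	*out = p;
	return 0;
}
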
/**
* IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
* @ptr: The pointer to check.
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2ad1ffa4ccb9..9a1eacf35d37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -21,7 +21,7 @@
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
@@ -81,7 +81,7 @@ static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
*
* Please note: addr must be aligned to u16.
@@ -104,7 +104,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is all zeroes.
+ * Return: true if the address is all zeroes.
*
* Please note: addr must be aligned to u16.
*/
@@ -123,7 +123,7 @@ static inline bool is_zero_ether_addr(const u8 *addr)
* is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a multicast address.
+ * Return: true if the address is a multicast address.
* By definition the broadcast address is also a multicast address.
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
@@ -157,7 +157,7 @@ static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
* is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a local address.
+ * Return: true if the address is a local address.
*/
static inline bool is_local_ether_addr(const u8 *addr)
{
@@ -168,7 +168,7 @@ static inline bool is_local_ether_addr(const u8 *addr)
* is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is the broadcast address.
+ * Return: true if the address is the broadcast address.
*
* Please note: addr must be aligned to u16.
*/
@@ -183,7 +183,7 @@ static inline bool is_broadcast_ether_addr(const u8 *addr)
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a unicast address.
+ * Return: true if the address is a unicast address.
*/
static inline bool is_unicast_ether_addr(const u8 *addr)
{
@@ -197,7 +197,7 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
* Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
- * Return true if the address is valid.
+ * Return: true if the address is valid.
*
* Please note: addr must be aligned to u16.
*/
@@ -214,7 +214,7 @@ static inline bool is_valid_ether_addr(const u8 *addr)
*
* Check that the value from the Ethertype/length field is a valid Ethertype.
*
- * Return true if the valid is an 802.3 supported Ethertype.
+ * Return: true if the value is a valid 802.3 Ethertype.
*/
static inline bool eth_proto_is_802_3(__be16 proto)
{
@@ -458,7 +458,7 @@ static inline bool ether_addr_is_ip_mcast(const u8 *addr)
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return a u64 value of the address
+ * Return: a u64 value of the address
*/
static inline u64 ether_addr_to_u64(const u8 *addr)
{
@@ -636,8 +636,16 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
}
}
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
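
eth_skb_pull_mac() returns the Ethernet header and advances skb->data past it; a sketch of the intended receive-path use (foo_classify_rx() is hypothetical, and the MAC header is assumed to sit at skb->data):

static void foo_classify_rx(struct sk_buff *skb)
{
	struct ethhdr *eth = eth_skb_pull_mac(skb);	/* skb->data now at payload */

	if (is_multicast_ether_addr(eth->h_dest))
		skb->pkt_type = PACKET_MULTICAST;
}
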
/**
- * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
* An Ethernet frame should have a minimum size of 60 bytes. This function
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 6fd9107d3cc0..5c9162193d26 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -17,7 +17,13 @@
#include <linux/compat.h>
#include <linux/if_ether.h>
#include <linux/netlink.h>
+#include <linux/timer_types.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <uapi/linux/net_tstamp.h>
+
+#define ETHTOOL_MM_MAX_VERIFY_TIME_MS 128
+#define ETHTOOL_MM_MAX_VERIFY_RETRIES 3
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
@@ -77,6 +83,9 @@ enum {
* @cqe_size: Size of TX/RX completion queue event
* @tx_push_buf_len: Size of TX push buffer
* @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ * @hds_thresh: Packet size threshold for header data split (HDS)
+ * @hds_thresh_max: Maximum supported setting for @hds_thresh
+ *
*/
struct kernel_ethtool_ringparam {
u32 rx_buf_len;
@@ -86,6 +95,8 @@ struct kernel_ethtool_ringparam {
u32 cqe_size;
u32 tx_push_buf_len;
u32 tx_push_buf_max_len;
+ u32 hds_thresh;
+ u32 hds_thresh_max;
};
/**
@@ -96,6 +107,7 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
* @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
* @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
+ * @ETHTOOL_RING_USE_HDS_THRS: capture for setting header-data-split-thresh
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
@@ -104,6 +116,7 @@ enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
+ ETHTOOL_RING_USE_HDS_THRS = BIT(6),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -159,6 +172,57 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
+/**
+ * struct ethtool_rxfh_context - a custom RSS context configuration
+ * @indir_size: Number of u32 entries in indirection table
+ * @key_size: Size of hash key, in bytes
+ * @priv_size: Size of driver private data, in bytes
+ * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @indir_configured: indir has been specified (at create time or subsequently)
+ * @key_configured: hkey has been specified (at create time or subsequently)
+ */
+struct ethtool_rxfh_context {
+ u32 indir_size;
+ u32 key_size;
+ u16 priv_size;
+ u8 hfunc;
+ u8 input_xfrm;
+ u8 indir_configured:1;
+ u8 key_configured:1;
+ /* private: driver private data, indirection table, and hash key are
+ * stored sequentially in @data area. Use below helpers to access.
+ */
+ u32 key_off;
+ u8 data[] __aligned(sizeof(void *));
+};
+
+static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx)
+{
+ return ctx->data;
+}
+
+static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
+{
+ return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32)));
+}
+
+static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
+{
+ return &ctx->data[ctx->key_off];
+}
+
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
+
+struct link_mode_info {
+ int speed;
+ u8 lanes;
+ u8 duplex;
+};
+
+extern const struct link_mode_info link_mode_params[];
+
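
Driver private data, the indirection table, and the hash key live back-to-back in @data, so drivers should only reach them through the accessors above; a hedged sketch of a driver's create_rxfh_context() implementation (all foo_* names are hypothetical):

static int foo_create_rxfh_context(struct net_device *dev,
				   struct ethtool_rxfh_context *ctx,
				   const struct ethtool_rxfh_param *rxfh,
				   struct netlink_ext_ack *extack)
{
	struct foo_rss *state = ethtool_rxfh_context_priv(ctx);

	/*
	 * Program hardware from rxfh->indir / rxfh->key; when rxfh->indir
	 * is NULL, write the defaults into the context storage instead so
	 * the core reports the table actually in use.
	 */
	if (!rxfh->indir)
		foo_fill_default_indir(dev, ethtool_rxfh_context_indir(ctx));
	state->hw_id = rxfh->rss_context;
	return 0;
}
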
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -213,7 +277,7 @@ struct ethtool_link_ksettings {
* @mode : one of the ETHTOOL_LINK_MODE_*_BIT
* (not atomic, no bound checking)
*
- * Returns true/false.
+ * Returns: true/false.
*/
#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
@@ -284,7 +348,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0)
+#define ETHTOOL_COALESCE_RX_PROFILE BIT(27)
+#define ETHTOOL_COALESCE_TX_PROFILE BIT(28)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -366,6 +432,29 @@ struct ethtool_eth_phy_stats {
);
};
+/**
+ * struct ethtool_phy_stats - PHY-level statistics counters
+ * @rx_packets: Total successfully received frames
+ * @rx_bytes: Total successfully received bytes
+ * @rx_errors: Total received frames with errors (e.g., CRC errors)
+ * @tx_packets: Total successfully transmitted frames
+ * @tx_bytes: Total successfully transmitted bytes
+ * @tx_errors: Total transmitted frames with errors
+ *
+ * This structure provides a standardized interface for reporting
+ * PHY-level statistics counters. It is designed to expose statistics
+ * commonly provided by PHYs but not explicitly defined in the IEEE
+ * 802.3 standard.
+ */
+struct ethtool_phy_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
+};
+
/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
* via a more targeted API.
*/
@@ -403,7 +492,29 @@ struct ethtool_pause_stats {
};
#define ETHTOOL_MAX_LANES 8
+/*
+ * IEEE 802.3ck/df defines 16 bins for the FEC histogram, plus one more
+ * for the end-of-list marker: 17 items in total.
+ */
+#define ETHTOOL_FEC_HIST_MAX 17
+/**
+ * struct ethtool_fec_hist_range - error bits range for FEC histogram
+ * statistics
+ * @low: low bound of the bin (inclusive)
+ * @high: high bound of the bin (inclusive)
+ */
+struct ethtool_fec_hist_range {
+ u16 low;
+ u16 high;
+};
+struct ethtool_fec_hist {
+ struct ethtool_fec_hist_value {
+ u64 sum;
+ u64 per_lane[ETHTOOL_MAX_LANES];
+ } values[ETHTOOL_FEC_HIST_MAX];
+ const struct ethtool_fec_hist_range *ranges;
+};
/**
* struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
* @corrected_blocks: number of received blocks corrected by FEC
@@ -447,7 +558,7 @@ struct ethtool_rmon_hist_range {
u16 high;
};
-#define ETHTOOL_RMON_HIST_MAX 10
+#define ETHTOOL_RMON_HIST_MAX 11
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
@@ -483,6 +594,12 @@ struct ethtool_rmon_stats {
/**
* struct ethtool_ts_stats - HW timestamping statistics
* @pkts: Number of packets successfully timestamped by the hardware.
+ * @onestep_pkts_unconfirmed: Number of PTP packets with one-step TX
+ * timestamping that were sent, but for which the
+ * device offers no confirmation whether they made
+ * it onto the wire and the timestamp was inserted
+ * in the originTimestamp or correctionField, or
+ * not.
* @lost: Number of hardware timestamping requests where the timestamping
* information from the hardware never arrived for submission with
* the skb.
@@ -495,6 +612,7 @@ struct ethtool_rmon_stats {
struct ethtool_ts_stats {
struct_group(tx_stats,
u64 pkts;
+ u64 onestep_pkts_unconfirmed;
u64 lost;
u64 err;
);
@@ -504,17 +622,16 @@ struct ethtool_ts_stats {
#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
/**
- * struct ethtool_module_eeprom - EEPROM dump from specified page
- * @offset: Offset within the specified EEPROM page to begin read, in bytes.
- * @length: Number of bytes to read.
- * @page: Page number to read from.
- * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters
+ * @offset: When @offset is 0-127, it is used as an address to the Lower Memory
+ * (@page must be 0). Otherwise, it is used as an address to the
+ * Upper Memory.
+ * @length: Number of bytes to read / write.
+ * @page: Page number.
+ * @bank: Bank number, if supported by EEPROM spec.
* @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
* EEPROMs use 0x50 or 0x51.
* @data: Pointer to buffer with EEPROM data of @length size.
- *
- * This can be used to manage pages during EEPROM dump in ethtool and pass
- * required information to the driver.
*/
struct ethtool_module_eeprom {
u32 offset;
@@ -628,6 +745,75 @@ struct ethtool_mm_stats {
u64 MACMergeHoldCount;
};
+enum ethtool_mmsv_event {
+ ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET,
+};
+
+/* MAC Merge verification mPacket type */
+enum ethtool_mpacket {
+ ETHTOOL_MPACKET_VERIFY,
+ ETHTOOL_MPACKET_RESPONSE,
+};
+
+struct ethtool_mmsv;
+
+/**
+ * struct ethtool_mmsv_ops - Operations for MAC Merge Software Verification
+ * @configure_tx: Driver callback for the event where the preemptible TX
+ * becomes active or inactive. Preemptible traffic
+ * classes must be committed to hardware only while
+ * preemptible TX is active.
+ * @configure_pmac: Driver callback for the event where the pMAC state
+ * changes as result of an administrative setting
+ * (ethtool) or a call to ethtool_mmsv_link_state_handle().
+ * @send_mpacket: Driver-provided method for sending a Verify or a Response
+ * mPacket.
+ */
+struct ethtool_mmsv_ops {
+ void (*configure_tx)(struct ethtool_mmsv *mmsv, bool tx_active);
+ void (*configure_pmac)(struct ethtool_mmsv *mmsv, bool pmac_enabled);
+ void (*send_mpacket)(struct ethtool_mmsv *mmsv, enum ethtool_mpacket mpacket);
+};
+
+/**
+ * struct ethtool_mmsv - MAC Merge Software Verification
+ * @ops: operations for MAC Merge Software Verification
+ * @dev: pointer to net_device structure
+ * @lock: serialize access to MAC Merge state between
+ * ethtool requests and link state updates.
+ * @status: current verification FSM state
+ * @verify_timer: timer for verification in local TX direction
+ * @verify_enabled: indicates if verification is enabled
+ * @verify_retries: number of retries for verification
+ * @pmac_enabled: indicates if the preemptible MAC is enabled
+ * @verify_time: time for verification in milliseconds
+ * @tx_enabled: indicates if transmission is enabled
+ */
+struct ethtool_mmsv {
+ const struct ethtool_mmsv_ops *ops;
+ struct net_device *dev;
+ spinlock_t lock;
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
+void ethtool_mmsv_stop(struct ethtool_mmsv *mmsv);
+void ethtool_mmsv_link_state_handle(struct ethtool_mmsv *mmsv, bool up);
+void ethtool_mmsv_event_handle(struct ethtool_mmsv *mmsv,
+ enum ethtool_mmsv_event event);
+void ethtool_mmsv_get_mm(struct ethtool_mmsv *mmsv,
+ struct ethtool_mm_state *state);
+void ethtool_mmsv_set_mm(struct ethtool_mmsv *mmsv, struct ethtool_mm_cfg *cfg);
+void ethtool_mmsv_init(struct ethtool_mmsv *mmsv, struct net_device *dev,
+ const struct ethtool_mmsv_ops *ops);
+
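
A hedged sketch of how a driver wires these helpers up (all foo_* callbacks are hypothetical); ethtool_mmsv_event_handle() would then be called from the driver's mPacket interrupt handler and ethtool_mmsv_link_state_handle() from its link notifier:

static const struct ethtool_mmsv_ops foo_mmsv_ops = {
	.configure_tx	= foo_mmsv_configure_tx,	/* toggle preemptible TX */
	.configure_pmac	= foo_mmsv_configure_pmac,	/* toggle the pMAC */
	.send_mpacket	= foo_mmsv_send_mpacket,	/* emit Verify/Response */
};

static void foo_mm_setup(struct foo_priv *priv)
{
	ethtool_mmsv_init(&priv->mmsv, priv->netdev, &foo_mmsv_ops);
}
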
/**
* struct ethtool_rxfh_param - RXFH (RSS) parameters
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
@@ -662,15 +848,66 @@ struct ethtool_rxfh_param {
};
/**
+ * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config
+ * @data: which header fields are used for hashing, bitmask of RXH_* defines
+ * @flow_type: L2-L4 network traffic flow type
+ * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is
+ * set in struct ethtool_ops
+ */
+struct ethtool_rxfh_fields {
+ u32 data;
+ u32 flow_type;
+ u32 rss_context;
+};
+
+/**
+ * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @phc_qualifier: qualifier of the associated PHC
+ * @phc_source: source device of the associated PHC
+ * @phc_phyindex: index of PHY device source of the associated PHC
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ */
+struct kernel_ethtool_ts_info {
+ u32 cmd;
+ u32 so_timestamping;
+ int phc_index;
+ enum hwtstamp_provider_qualifier phc_qualifier;
+ enum hwtstamp_source phc_source;
+ int phc_phyindex;
+ u32 tx_types;
+ u32 rx_filters;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
+ * @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*.
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
- * @cap_rss_ctx_supported: indicates if the driver supports RSS
- * contexts.
- * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
- * RSS.
+ * @rxfh_per_ctx_fields: device supports selecting different header fields
+ * for Rx hash calculation and RSS for each additional context.
+ * @rxfh_per_ctx_key: device supports setting different RSS key for each
+ * additional context. Netlink API should report hfunc, key, and input_xfrm
+ * for every context, not just context 0.
+ * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with
+ * %FLOW_RSS flag; the queue ID from the filter is added to the value from
+ * the indirection table to determine the delivery queue.
+ * @rxfh_indir_space: max size of RSS indirection tables, if indirection table
+ * size as returned by @get_rxfh_indir_size may change during lifetime
+ * of the device. Leave as 0 if the table size is constant.
+ * @rxfh_key_space: same as @rxfh_indir_space, but for the key.
+ * @rxfh_priv_size: size of the driver private data area the core should
+ * allocate for an RSS context (in &struct ethtool_rxfh_context).
+ * @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID.
+ * If this is zero then the core may choose any (nonzero) ID, otherwise
+ * the core will only use IDs strictly less than this value, as the
+ * @rss_context argument to @create_rxfh_context and friends.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
+ * @supported_hwtstamp_qualifiers: bitfield of supported hwtstamp qualifiers.
* @get_drvinfo: Report driver/device information. Modern drivers no
* longer have to implement this callback. Most fields are
* correctly filled in by the core using system information, or
@@ -753,6 +990,7 @@ struct ethtool_rxfh_param {
* @reset: Reset (part of) the device, as specified by a bitmask of
* flags from &enum ethtool_reset_flags. Returns a negative
* error code or zero.
+ * @get_rx_ring_count: Return the number of RX rings
* @get_rxfh_key_size: Get the size of the RX flow hash key.
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
@@ -765,6 +1003,34 @@ struct ethtool_rxfh_param {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @get_rxfh_fields: Get header fields used for flow hashing.
+ * @set_rxfh_fields: Set header fields used for flow hashing.
+ * @create_rxfh_context: Create a new RSS context with the specified RX flow
+ * hash indirection table, hash key, and hash function.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that the indir table, hkey and hfunc are not yet populated as
+ * of this call. The driver does not need to update these; the core
+ * will do so if this op succeeds.
+ * However, if @rxfh.indir is set to %NULL, the driver must update the
+ * indir table in @ctx with the (default or inherited) table actually in
+ * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is
+ * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE,
+ * the driver should update the corresponding information in @ctx.
+ * If the driver provides this method, it must also provide
+ * @modify_rxfh_context and @remove_rxfh_context.
+ * Returns a negative error code or zero.
+ * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting
+ * the contents of the RX flow hash indirection table, hash key, and/or
+ * hash function associated with the given context.
+ * Parameters which are set to %NULL or zero will remain unchanged.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that it will still contain the *old* settings. The driver does
+ * not need to update these; the core will do so if this op succeeds.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
+ * @remove_rxfh_context: Remove the specified RSS context.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx.
+ * Returns a negative error code or zero.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -775,10 +1041,11 @@ struct ethtool_rxfh_param {
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
* It may be called with RCU, or rtnl or reference on the device.
* Drivers supporting transmit time stamps in software should set this to
- * ethtool_op_get_ts_info(). Drivers must not zero statistics which they
- * don't report. The stats structure is initialized to ETHTOOL_STAT_NOT_SET
- * indicating driver does not report statistics.
- * @get_ts_stats: Query the device hardware timestamping statistics.
+ * ethtool_op_get_ts_info().
+ * @get_ts_stats: Query the device hardware timestamping statistics. Drivers
+ * must not zero statistics which they don't report. The stats structure
+ * is initialized to ETHTOOL_STAT_NOT_SET indicating driver does not
+ * report statistics.
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
@@ -822,6 +1089,8 @@ struct ethtool_rxfh_param {
* @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
* specified page. Returns a negative error code or the amount of bytes
* read.
+ * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM,
+ * from kernel space only. Returns a negative error code or zero.
* @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
* @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
* @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
@@ -849,11 +1118,18 @@ struct ethtool_rxfh_param {
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 supported_input_xfrm:8;
u32 cap_link_lanes_supported:1;
- u32 cap_rss_ctx_supported:1;
- u32 cap_rss_sym_xor_supported:1;
+ u32 rxfh_per_ctx_fields:1;
+ u32 rxfh_per_ctx_key:1;
+ u32 cap_rss_rxnfc_adds:1;
+ u32 rxfh_indir_space;
+ u16 rxfh_key_space;
+ u16 rxfh_priv_size;
+ u32 rxfh_max_num_contexts;
u32 supported_coalesce_params;
u32 supported_ring_params;
+ u32 supported_hwtstamp_qualifiers;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -909,18 +1185,36 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rx_ring_count)(struct net_device *dev);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
struct netlink_ext_ack *extack);
+ int (*get_rxfh_fields)(struct net_device *,
+ struct ethtool_rxfh_fields *);
+ int (*set_rxfh_fields)(struct net_device *,
+ const struct ethtool_rxfh_fields *,
+ struct netlink_ext_ack *extack);
+ int (*create_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*modify_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*remove_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
int (*get_dump_data)(struct net_device *,
struct ethtool_dump *, void *);
int (*set_dump)(struct net_device *, struct ethtool_dump *);
- int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *);
void (*get_ts_stats)(struct net_device *dev,
struct ethtool_ts_stats *ts_stats);
int (*get_module_info)(struct net_device *,
@@ -942,7 +1236,8 @@ struct ethtool_ops {
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
void (*get_fec_stats)(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
int (*get_fecparam)(struct net_device *,
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
@@ -956,6 +1251,9 @@ struct ethtool_ops {
int (*get_module_eeprom_by_page)(struct net_device *dev,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+ int (*set_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
void (*get_eth_phy_stats)(struct net_device *dev,
struct ethtool_eth_phy_stats *phy_stats);
void (*get_eth_mac_stats)(struct net_device *dev,
@@ -998,6 +1296,21 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd,
u32 *dev_speed, u8 *dev_duplex);
+/**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
+ * @rss_ctx: XArray of custom RSS contexts
+ * @rss_lock: Protects entries in @rss_ctx. May be taken from
+ * within RTNL.
+ * @wol_enabled: Wake-on-LAN is enabled
+ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
+ */
+struct ethtool_netdev_state {
+ struct xarray rss_ctx;
+ struct mutex rss_lock;
+ unsigned wol_enabled:1;
+ unsigned module_fw_flash_in_progress:1;
+};
+
struct phy_device;
struct phy_tdr_config;
struct phy_plca_cfg;
@@ -1057,13 +1370,14 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
* @dev: pointer to net_device structure
* @vclock_index: pointer to pointer of vclock index
*
- * Return number of phc vclocks
+ * Return: number of phc vclocks
*/
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *eti);
/**
* ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
@@ -1110,9 +1424,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
* ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
* @dev: pointer to net_device structure
* @info: buffer to hold the result
- * Returns zero on success, non-zero otherwise.
+ * Returns: zero on success, non-zero otherwise.
*/
-int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info);
+int ethtool_get_ts_info_by_layer(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
/**
* ethtool_sprintf - Write formatted string to ethtool string data
@@ -1137,6 +1452,17 @@ extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
*/
extern void ethtool_puts(u8 **data, const char *str);
+/**
+ * ethtool_cpy - Write possibly-not-NUL-terminated string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to write into
+ * @str: NUL-byte padded char array of size ETH_GSTRING_LEN to copy from
+ */
+#define ethtool_cpy(data, str) do { \
+ BUILD_BUG_ON(sizeof(str) != ETH_GSTRING_LEN); \
+ memcpy(*(data), str, ETH_GSTRING_LEN); \
+ *(data) += ETH_GSTRING_LEN; \
+} while (0)
+
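
Unlike ethtool_puts(), which expects a NUL-terminated string, ethtool_cpy() copies a fixed ETH_GSTRING_LEN array and the BUILD_BUG_ON() rejects anything else at compile time; a usage sketch (foo_gstrings and foo_get_strings are hypothetical):

static const char foo_gstrings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_gstrings); i++)
		ethtool_cpy(&data, foo_gstrings[i]);	/* advances 'data' */
}
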
/* Link mode to forced speed capabilities maps */
struct ethtool_forced_speed_map {
u32 speed;
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index fae0dfb9a9c8..39254b2726c0 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -23,8 +23,10 @@ struct phy_device;
int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
void ethnl_cable_test_free(struct phy_device *phydev);
void ethnl_cable_test_finished(struct phy_device *phydev);
-int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result);
-int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm);
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src);
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src);
int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
@@ -41,6 +43,8 @@ void ethtool_aggregate_rmon_stats(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats);
bool ethtool_dev_mm_supported(struct net_device *dev);
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -54,14 +58,14 @@ static inline void ethnl_cable_test_free(struct phy_device *phydev)
static inline void ethnl_cable_test_finished(struct phy_device *phydev)
{
}
-static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
- u8 result)
+static inline int ethnl_cable_test_result_with_src(struct phy_device *phydev,
+ u8 pair, u8 result, u32 src)
{
return -EOPNOTSUPP;
}
-static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
- u8 pair, u32 cm)
+static inline int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev,
+ u8 pair, u32 cm, u32 src)
{
return -EOPNOTSUPP;
}
@@ -118,5 +122,25 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev)
return false;
}
+static inline void ethnl_pse_send_ntf(struct net_device *netdev,
+ unsigned long notif)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
+
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return ethnl_cable_test_result_with_src(phydev, pair, result,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return ethnl_cable_test_fault_length_with_src(phydev, pair, cm,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 3337745d81bd..ccb478eb174b 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -25,6 +25,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long t
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
+/* Copy ready events to userspace */
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents);
+
/*
* This is called from inside fs/file_table.c:__fput() to unlink files
* from the eventpoll interface. We need to have this facility to cleanup
@@ -42,7 +46,7 @@ static inline void eventpoll_release(struct file *file)
 * because the file is on the way to be removed and nobody (but
 * eventpoll) still has a reference to this file.
*/
- if (likely(!file->f_ep))
+ if (likely(!READ_ONCE(file->f_ep)))
return;
/*
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index 32cef1144117..7de229134e30 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/moduleloader.h>
+#include <linux/cleanup.h>
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
@@ -46,11 +47,41 @@ enum execmem_type {
/**
* enum execmem_range_flags - options for executable memory allocations
* @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ * @EXECMEM_ROX_CACHE: allocations should use ROX cache of huge pages
*/
enum execmem_range_flags {
EXECMEM_KASAN_SHADOW = (1 << 0),
+ EXECMEM_ROX_CACHE = (1 << 1),
};
+#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
+/**
+ * execmem_fill_trapping_insns - set memory to contain instructions that
+ * will trap
+ * @ptr: pointer to memory to fill
+ * @size: size of the range to fill
+ *
+ * A hook for architectures to fill execmem ranges with invalid instructions.
+ * Architectures that use EXECMEM_ROX_CACHE must implement this.
+ */
+void execmem_fill_trapping_insns(void *ptr, size_t size);
+
+/**
+ * execmem_restore_rox - restore read-only-execute permissions
+ * @ptr: address of the region to remap
+ * @size: size of the region to remap
+ *
+ * Restores read-only-execute permissions on a range [@ptr, @ptr + @size)
+ * after it was temporarily remapped as writable. Relies on architecture
+ * implementation of set_memory_rox() to restore mapping using large pages.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int execmem_restore_rox(void *ptr, size_t size);
+#else
+static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
+#endif
+
/**
* struct execmem_range - definition of an address space suitable for code and
* related data allocations
@@ -118,11 +149,55 @@ struct execmem_info *execmem_arch_setup(void);
void *execmem_alloc(enum execmem_type type, size_t size);
/**
+ * execmem_alloc_rw - allocate writable executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain either executable code, generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * Forces writable permissions on the allocated memory; the caller is
+ * responsible for managing the permissions afterwards.
+ *
+ * For architectures that use ROX cache the permissions will be set to R+W.
+ * For architectures that don't use ROX cache the default permissions for @type
+ * will be used as they must be writable.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc_rw(enum execmem_type type, size_t size);
+
+/**
* execmem_free - free executable memory
* @ptr: pointer to the memory that should be freed
*/
void execmem_free(void *ptr);
+DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
+
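
The DEFINE_FREE() hook enables scope-based cleanup via <linux/cleanup.h>; a hedged sketch combining it with the RW-then-ROX flow (foo_prepare_code() is hypothetical and the image copy is purely illustrative):

static void *foo_prepare_code(const void *image, size_t size)
{
	void *code __free(execmem) = execmem_alloc_rw(EXECMEM_BPF, size);

	if (!code)
		return ERR_PTR(-ENOMEM);
	memcpy(code, image, size);		/* still writable here */
	if (execmem_restore_rox(code, size))	/* back to read-only-execute */
		return ERR_PTR(-EINVAL);
	return no_free_ptr(code);		/* transfer ownership, skip auto-free */
}
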
+#ifdef CONFIG_MMU
+/**
+ * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
+ * @size: size of the virtual mapping in bytes
+ *
+ * Maps virtually contiguous area in the range suitable for EXECMEM_MODULE_DATA.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
+ */
+struct vm_struct *execmem_vmap(size_t size);
+#endif
+
+/**
+ * execmem_is_rox - check if execmem is read-only
+ * @type: the execmem type to check
+ *
+ * Return: %true if the @type is read-only, %false if it's writable
+ */
+bool execmem_is_rox(enum execmem_type type);
+
#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
void execmem_init(void);
#else
diff --git a/include/linux/export.h b/include/linux/export.h
index 0bbd02fd351d..a686fd0ba406 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -24,11 +24,17 @@
.long sym
#endif
-#define ___EXPORT_SYMBOL(sym, license, ns) \
+/*
+ * LLVM integrated assembler can merge adjacent string literals (like
+ * C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on:
+ *
+ * .asciz "MODULE_" "kvm" ;
+ */
+#define ___EXPORT_SYMBOL(sym, license, ns...) \
.section ".export_symbol","a" ASM_NL \
__export_symbol_##sym: ASM_NL \
.asciz license ASM_NL \
- .asciz ns ASM_NL \
+ .ascii ns "\0" ASM_NL \
__EXPORT_SYMBOL_REF(sym) ASM_NL \
.previous
@@ -52,22 +58,39 @@
#else
+#ifdef CONFIG_GENDWARFKSYMS
+/*
+ * With CONFIG_GENDWARFKSYMS, ensure the compiler emits debugging
+ * information for all exported symbols, including those defined in
+ * different TUs, by adding a __gendwarfksyms_ptr_<symbol> pointer
+ * that's discarded during the final link.
+ */
+#define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
+#else
+#define __GENDWARFKSYMS_EXPORT(sym)
+#endif
+
#define __EXPORT_SYMBOL(sym, license, ns) \
extern typeof(sym) sym; \
__ADDRESSABLE(sym) \
+ __GENDWARFKSYMS_EXPORT(sym) \
asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE)
#else
#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns)
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns)
+
+#define EXPORT_SYMBOL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
#endif /* _LINUX_EXPORT_H */
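
With __stringify() gone, the namespace argument is now a plain string literal, which also enables the new module-restricted form; a usage sketch (the foo_* symbols are hypothetical):

int foo_do_thing(void);
int foo_internal(void);

EXPORT_SYMBOL_NS_GPL(foo_do_thing, "FOO_NS");	/* string literal, not a bare FOO_NS */
EXPORT_SYMBOL_FOR_MODULES(foo_internal, "kvm");	/* GPL-only, importable by kvm.ko */
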
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index bb37ad5cc954..f0cf2714ec52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -3,6 +3,7 @@
#define LINUX_EXPORTFS_H 1
#include <linux/types.h>
+#include <linux/path.h>
struct dentry;
struct iattr;
@@ -122,6 +123,12 @@ enum fid_type {
FILEID_BCACHEFS_WITH_PARENT = 0xb2,
/*
+ *
+ * 64 bit namespace identifier, 32 bit namespace type, 32 bit inode number.
+ */
+ FILEID_NSFS = 0xf1,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -156,8 +163,33 @@ struct fid {
};
};
+enum handle_to_path_flags {
+ HANDLE_CHECK_PERMS = (1 << 0),
+ HANDLE_CHECK_SUBTREE = (1 << 1),
+};
+
+struct handle_to_path_ctx {
+ struct path root;
+ enum handle_to_path_flags flags;
+ unsigned int fh_flags;
+};
+
#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
+
+/*
+ * Filesystems use only lower 8 bits of file_handle type for fid_type.
+ * name_to_handle_at() uses upper 16 bits of type as user flags to be
+ * interpreted by open_by_handle_at().
+ */
+#define FILEID_USER_FLAGS_MASK 0xffff0000
+#define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK)
+
+/* Flags supported in encoded handle_type that is exported to user */
+#define FILEID_IS_CONNECTABLE 0x10000
+#define FILEID_IS_DIR 0x20000
+#define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR)
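
As a sketch of how these masks are meant to compose (the helper below is illustrative, not part of the header): the low bits of handle_type carry the filesystem's fid_type, while the upper 16 bits carry user flags that open_by_handle_at() must validate.

/* Hypothetical validation helper, assuming only the masks above. */
static int split_handle_type(int handle_type, int *fid_type)
{
	int uflags = FILEID_USER_FLAGS(handle_type);

	if (uflags & ~FILEID_VALID_USER_FLAGS)
		return -EINVAL;		/* reject unknown user flag bits */
	*fid_type = handle_type & ~FILEID_USER_FLAGS_MASK;
	return 0;
}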
/**
* struct export_operations - for nfsd to communicate with file systems
@@ -204,18 +236,24 @@ struct fid {
* directory. The name should be stored in the @name (with the
* understanding that it is already pointing to a %NAME_MAX+1 sized
 * buffer). get_name() should return %0 on success or a negative
- * error code. @get_name will be called without @parent->i_mutex held.
+ * error code. @get_name will be called without @parent->i_rwsem held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
+ * permission:
+ * Allow filesystems to specify a custom permission function.
+ *
+ * open:
+ * Allow filesystems to specify a custom open function.
+ *
* commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
* Locking rules:
- * get_parent is called with child->d_inode->i_mutex down
+ * get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
*/
@@ -237,6 +275,8 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+ int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags);
+ struct file * (*open)(const struct path *path, unsigned int oflags);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
@@ -245,21 +285,20 @@ struct export_operations {
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
-#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
+#define EXPORT_OP_NOLOCKS (0x40) /* no file locking support */
unsigned long flags;
};
/**
- * exportfs_lock_op_is_async() - export op supports async lock operation
+ * exportfs_cannot_lock() - check if export implements file locking
* @export_ops: the nfs export operations to check
*
- * Returns true if the nfs export_operations structure has
- * EXPORT_OP_ASYNC_LOCK in their flags set
+ * Returns true if the export does not support file locking.
*/
static inline bool
-exportfs_lock_op_is_async(const struct export_operations *export_ops)
+exportfs_cannot_lock(const struct export_operations *export_ops)
{
- return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+ return export_ops->flags & EXPORT_OP_NOLOCKS;
}
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
@@ -288,6 +327,17 @@ static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
if (fh_flags & EXPORT_FH_FID)
return exportfs_can_encode_fid(nop);
+ /* Normal file handles cannot be created without export ops */
+ if (!nop)
+ return false;
+
+ /*
+	 * If a connectable file handle was requested, we need to make sure that
+	 * the filesystem can also decode connected file handles.
+ */
+ if ((fh_flags & EXPORT_FH_CONNECTABLE) && !nop->fh_to_parent)
+ return false;
+
/*
* If a decodeable file handle was requested, we need to make sure that
* filesystem can also decode file handles.
@@ -305,6 +355,7 @@ static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
struct fid *fid, int fh_len,
int fileid_type,
+ unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 41d1d71c36ff..a7880787cad3 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -17,17 +17,19 @@
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
+#define F2FS_SUM_BLKSIZE 4096 /* only support 4096 byte sum block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
-#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
-#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLKSIZE_MASK (F2FS_BLKSIZE - 1)
+#define F2FS_BYTES_TO_BLK(bytes) ((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)
#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
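
With 4 KiB pages (F2FS_BLKSIZE_BITS == 12), the reworked conversion macros behave as in this illustrative compile-time check; the widening to unsigned long long keeps byte offsets from overflowing on 32-bit builds:

/* Illustrative values, assuming F2FS_BLKSIZE_BITS == 12 (4 KiB blocks). */
static_assert(F2FS_BYTES_TO_BLK(8192) == 2);	/* 8192 >> 12 */
static_assert(F2FS_BLK_TO_BYTES(3) == 12288);	/* 3 << 12 */
static_assert(F2FS_BLK_ALIGN(4097) == 2);	/* rounds up to the next block */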
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -77,6 +79,8 @@ enum stop_cp_reason {
STOP_CP_REASON_UPDATE_INODE,
STOP_CP_REASON_FLUSH_FAIL,
STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_CORRUPTED_FREE_BITMAP,
+ STOP_CP_REASON_CORRUPTED_NID,
STOP_CP_REASON_MAX,
};
@@ -259,15 +263,14 @@ struct node_footer {
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
+#define ADDRS_PER_INODE(inode) addrs_per_page(inode, true)
/* Address Pointers in a Direct Block */
#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
+#define ADDRS_PER_BLOCK(inode) addrs_per_page(inode, false)
/* Node IDs in an Indirect Block */
#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
+#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(folio)))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -279,7 +282,7 @@ struct node_footer {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
@@ -439,7 +442,7 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
+#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
@@ -465,7 +468,7 @@ struct summary_footer {
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index f3f0b97b1675..7c38c6b76b60 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,12 +25,19 @@ struct space_resv {
#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
-#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
- FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_COLLAPSE_RANGE | \
- FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE | \
- FALLOC_FL_UNSHARE_RANGE)
+/*
+ * Mask of all supported fallocate modes. Only one can be set at a time.
+ *
+ * In addition to the mode bit, the mode argument can also encode flags.
+ * FALLOC_FL_KEEP_SIZE is the only supported flag so far.
+ */
+#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE | \
+ FALLOC_FL_WRITE_ZEROES)
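
A minimal sketch of the validation this mask implies (the function name is illustrative; the real checks live in the fallocate entry point): exactly one mode may be set, optionally combined with FALLOC_FL_KEEP_SIZE.

/* Sketch, not the in-tree implementation: validate a fallocate mode. */
static bool falloc_mode_valid(int mode)
{
	int m = mode & ~FALLOC_FL_KEEP_SIZE;	/* strip the only flag */

	if (m & ~FALLOC_FL_MODE_MASK)
		return false;			/* unknown bits */
	return m == 0 || is_power_of_2(m);	/* at most one mode bit */
}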
/* on ia32 l_start is on a 32-bit boundary */
#if defined(CONFIG_X86_64)
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 4f1c4f603118..879cff5eccd4 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -25,7 +25,7 @@
#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
-#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD | FAN_REPORT_MNT)
/*
* fanotify_init() flags that require CAP_SYS_ADMIN.
@@ -36,6 +36,7 @@
#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
FAN_REPORT_TID | \
FAN_REPORT_PIDFD | \
+ FAN_REPORT_FD_ERROR | \
FAN_UNLIMITED_QUEUE | \
FAN_UNLIMITED_MARKS)
@@ -46,7 +47,7 @@
* so one of the flags for reporting file handles is required.
*/
#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
- FANOTIFY_FID_BITS | \
+ FANOTIFY_FID_BITS | FAN_REPORT_MNT | \
FAN_CLOEXEC | FAN_NONBLOCK)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
@@ -57,7 +58,7 @@
#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
- FAN_MARK_FILESYSTEM)
+ FAN_MARK_FILESYSTEM | FAN_MARK_MNTNS)
#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
FAN_MARK_FLUSH)
@@ -88,6 +89,16 @@
#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
FAN_RENAME)
+/* Content events can be used to inspect file content */
+#define FANOTIFY_CONTENT_PERM_EVENTS (FAN_OPEN_PERM | FAN_OPEN_EXEC_PERM | \
+ FAN_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FANOTIFY_PRE_CONTENT_EVENTS (FAN_PRE_ACCESS)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FANOTIFY_CONTENT_PERM_EVENTS | \
+ FANOTIFY_PRE_CONTENT_EVENTS)
+
/* Events that can be reported with event->fd */
#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
@@ -98,14 +109,13 @@
/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+#define FANOTIFY_MOUNT_EVENTS (FAN_MNT_ATTACH | FAN_MNT_DETACH)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
FANOTIFY_INODE_EVENTS | \
- FANOTIFY_ERROR_EVENTS)
-
-/* Events that require a permission response from user */
-#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
- FAN_OPEN_EXEC_PERM)
+ FANOTIFY_ERROR_EVENTS | \
+ FANOTIFY_MOUNT_EVENTS)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
@@ -125,7 +135,9 @@
/* These masks check for invalid bits in permission responses. */
#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
-#define FANOTIFY_RESPONSE_VALID_MASK (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS)
+#define FANOTIFY_RESPONSE_VALID_MASK \
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS | \
+ (FAN_ERRNO_MASK << FAN_ERRNO_SHIFT))
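
For illustration, a userspace responder could now pack an errno into the response; a hedged sketch assuming the FAN_ERRNO_SHIFT/FAN_ERRNO_MASK uapi definitions referenced above:

/* Hypothetical userspace sketch: deny a pre-content event with EIO. */
struct fanotify_response response = {
	.fd = metadata->fd,	/* fd from the event just read */
	.response = FAN_DENY | ((EIO & FAN_ERRNO_MASK) << FAN_ERRNO_SHIFT),
};
if (write(fanotify_fd, &response, sizeof(response)) < 0)
	perror("fanotify response");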
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 6d5edef09d45..58fd14c82270 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -2,13 +2,21 @@
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
+#include <linux/atomic.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
-#include <linux/atomic.h>
/*
* For explanation of the elements of this struct, see
@@ -32,10 +40,6 @@ struct fault_attr {
struct dentry *dname;
};
-enum fault_flags {
- FAULT_NOWARN = 1 << 0,
-};
-
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
@@ -51,6 +55,28 @@ int setup_fault_attr(struct fault_attr *attr, char *str);
bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -87,26 +113,19 @@ static inline void fault_config_init(struct fault_config *config,
#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
-#endif /* CONFIG_FAULT_INJECTION */
-
-struct kmem_cache;
-
-bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
-
#ifdef CONFIG_FAIL_PAGE_ALLOC
-bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
#else
-static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return false;
}
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
-extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 0f0834939b6a..05cc251035da 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -21,6 +21,7 @@ struct fb_info;
struct file;
struct i2c_adapter;
struct inode;
+struct lcd_device;
struct module;
struct notifier_block;
struct page;
@@ -128,18 +129,12 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
-#define FB_EVENT_MODE_CHANGE 0x01
-
#ifdef CONFIG_GUMSTIX_AM200EPD
/* only used by mach-pxa/am200epd.c */
#define FB_EVENT_FB_REGISTERED 0x05
#define FB_EVENT_FB_UNREGISTERED 0x06
#endif
-/* A display blank is requested */
-#define FB_EVENT_BLANK 0x09
-
struct fb_event {
struct fb_info *info;
void *data;
@@ -224,7 +219,9 @@ struct fb_deferred_io {
int open_count; /* number of opened files; protected by fb_info lock */
struct mutex lock; /* mutex that protects the pageref list */
struct list_head pagereflist; /* list of pagerefs for touched pages */
+ struct address_space *mapping; /* page cache object for fb device */
/* callback */
+ struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
@@ -469,6 +466,8 @@ struct fb_info {
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
+ int blank; /* current blanking; see FB_BLANK_ constants */
+
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
/* set before framebuffer registration,
@@ -479,6 +478,13 @@ struct fb_info {
struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
+
+ /*
+ * Assigned LCD device; set before framebuffer
+ * registration, remove after unregister
+ */
+ struct lcd_device *lcd_dev;
+
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
unsigned long npagerefs;
@@ -509,6 +515,7 @@ struct fb_info {
void *par;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
/* This will go away
@@ -600,6 +607,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
+extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -744,13 +752,22 @@ extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max)
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
struct backlight_device *fb_bl_device(struct fb_info *info);
+void fb_bl_notify_blank(struct fb_info *info, int old_blank);
#else
static inline struct backlight_device *fb_bl_device(struct fb_info *info)
{
return NULL;
}
+
+static inline void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{ }
#endif
+static inline struct lcd_device *fb_lcd_device(struct fb_info *info)
+{
+ return info->lcd_dev;
+}
+
/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index 2382dec6d6ab..f206370060e1 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_FBCON_H
#define _LINUX_FBCON_H
+#include <linux/compiler_types.h>
+
+struct fb_blit_caps;
+struct fb_info;
+struct fb_var_screeninfo;
+struct fb_videomode;
+
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
void __init fb_console_init(void);
void __exit fb_console_exit(void);
@@ -11,6 +18,7 @@ void fbcon_suspended(struct fb_info *info);
void fbcon_resumed(struct fb_info *info);
int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode);
+void fbcon_delete_modelist(struct list_head *head);
void fbcon_new_modelist(struct fb_info *info);
void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps);
@@ -31,6 +39,7 @@ static inline void fbcon_suspended(struct fb_info *info) {}
static inline void fbcon_resumed(struct fb_info *info) {}
static inline int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode) { return 0; }
+static inline void fbcon_delete_modelist(struct list_head *head) {}
static inline void fbcon_new_modelist(struct fb_info *info) {}
static inline void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps) {}
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 2944d4aa413b..c45306a9f007 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -22,7 +22,6 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
-#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -93,10 +92,6 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-struct file *lookup_fdget_rcu(unsigned int fd);
-struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
-
static inline bool close_on_exec(unsigned int fd, const struct files_struct *files)
{
return test_bit(fd, files_fdtable(files)->close_on_exec);
@@ -106,17 +101,17 @@ struct task_struct;
void put_files_struct(struct files_struct *fs);
int unshare_files(void);
-struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
+struct fd_range {
+ unsigned int from, to;
+};
+struct files_struct *dup_fd(struct files_struct *, struct fd_range *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
extern int close_fd(unsigned int fd);
-extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
extern struct file *file_close_fd(unsigned int fd);
-extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
- struct files_struct **new_fdp);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index c50882f19235..966092ffa89a 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -5,12 +5,18 @@
#include <uapi/linux/fiemap.h>
#include <linux/fs.h>
+/**
+ * struct fiemap_extent_info - fiemap request to a filesystem
+ * @fi_flags: Flags as passed from user
+ * @fi_extents_mapped: Number of mapped extents
+ * @fi_extents_max: Size of fiemap_extent array
+ * @fi_extents_start: Start of fiemap_extent array
+ */
struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent __user *fi_extents_start;
};
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/include/linux/file.h b/include/linux/file.h
index 45d0f4800abd..cf389fde9bc2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -11,6 +11,7 @@
#include <linux/posix_types.h>
#include <linux/errno.h>
#include <linux/cleanup.h>
+#include <linux/err.h>
struct file;
@@ -29,62 +30,59 @@ extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);
-static inline void fput_light(struct file *file, int fput_needed)
-{
- if (fput_needed)
- fput(file);
-}
-
+/*
+ * Either a reference to struct file + flags (cloned vs. borrowed,
+ * pos locked), with the flags stored in the lower bits of the value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_task(struct task_struct *task, unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (unlikely(fd.word & FDPUT_FPUT))
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
+DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd)
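
With the packed representation, a typical caller reads as in this sketch (frob_file() is a hypothetical callee); the cleanup class guarantees the fdput():

/* Sketch of the post-conversion calling convention. */
static int do_frob(unsigned int fd)
{
	CLASS(fd, f)(fd);		/* fdput(f) runs on scope exit */

	if (fd_empty(f))
		return -EBADF;
	return frob_file(fd_file(f));	/* hypothetical helper */
}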
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
@@ -96,6 +94,27 @@ extern void put_unused_fd(unsigned int fd);
DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
get_unused_fd_flags(flags), unsigned flags)
+DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))
+
+/*
+ * take_fd() will take care to set @fd to -EBADF ensuring that
+ * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it
+ * easier to rely on CLASS(get_unused_fd):
+ *
+ * struct file *f;
+ *
+ * CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ * if (fd < 0)
+ * return fd;
+ *
+ * f = dentry_open(&path, O_RDONLY, current_cred());
+ * if (IS_ERR(f))
+ * return PTR_ERR(f);
+ *
+ * fd_install(fd, f);
+ * return take_fd(fd);
+ */
+#define take_fd(fd) __get_and_null(fd, -EBADF)
extern void fd_install(unsigned int fd, struct file *file);
@@ -108,4 +127,130 @@ extern void __fput_sync(struct file *);
extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+/*
+ * fd_prepare: Combined fd + file allocation cleanup class.
+ * @err: Error code to indicate if allocation succeeded.
+ * @__fd: Allocated fd (may not be accessed directly)
+ * @__file: Allocated struct file pointer (may not be accessed directly)
+ *
+ * Allocates an fd and a file together. On error paths, automatically cleans
+ * up whichever resource was successfully allocated. Allows flexible file
+ * allocation with different functions per usage.
+ *
+ * Do not use directly.
+ */
+struct fd_prepare {
+ s32 err;
+ s32 __fd; /* do not access directly */
+ struct file *__file; /* do not access directly */
+};
+
+/* Typedef for fd_prepare cleanup guards. */
+typedef struct fd_prepare class_fd_prepare_t;
+
+/*
+ * Accessors for fd_prepare class members.
+ * _Generic() is used for zero-cost type safety.
+ */
+#define fd_prepare_fd(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__fd))
+
+#define fd_prepare_file(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__file))
+
+/* Do not use directly. */
+static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err)) {
+ if (likely(fdf->__fd >= 0))
+ put_unused_fd(fdf->__fd);
+ if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
+ fput(fdf->__file);
+ }
+}
+
+/* Do not use directly. */
+static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err))
+ return fdf->err;
+ if (unlikely(fdf->__fd < 0))
+ return fdf->__fd;
+ if (unlikely(IS_ERR(fdf->__file)))
+ return PTR_ERR(fdf->__file);
+ if (unlikely(!fdf->__file))
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * __FD_PREPARE_INIT - Helper to initialize fd_prepare class.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: expression that returns struct file *
+ *
+ * Returns a struct fd_prepare with fd, file, and err set.
+ * If fd allocation fails, fd will be negative and err will be set. If the
+ * fd succeeds but the _file_owned expression fails, file will be an
+ * ERR_PTR and err will be set. The err field is the single source of
+ * truth for error checking.
+ */
+#define __FD_PREPARE_INIT(_fd_flags, _file_owned) \
+ ({ \
+ struct fd_prepare fdf = { \
+ .__fd = get_unused_fd_flags((_fd_flags)), \
+ }; \
+ if (likely(fdf.__fd >= 0)) \
+ fdf.__file = (_file_owned); \
+ fdf.err = ACQUIRE_ERR(fd_prepare, &fdf); \
+ fdf; \
+ })
+
+/*
+ * FD_PREPARE - Macro to declare and initialize an fd_prepare variable.
+ *
+ * Declares and initializes an fd_prepare variable with automatic
+ * cleanup. No separate scope required - cleanup happens when variable
+ * goes out of scope.
+ *
+ * @_fdf: name of struct fd_prepare variable to define
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of (can be expression)
+ */
+#define FD_PREPARE(_fdf, _fd_flags, _file_owned) \
+ CLASS_INIT(fd_prepare, _fdf, __FD_PREPARE_INIT(_fd_flags, _file_owned))
+
+/*
+ * fd_publish - Publish prepared fd and file to the fd table.
+ * @_fdf: struct fd_prepare variable
+ */
+#define fd_publish(_fdf) \
+ ({ \
+ struct fd_prepare *fdp = &(_fdf); \
+ VFS_WARN_ON_ONCE(fdp->err); \
+ VFS_WARN_ON_ONCE(fdp->__fd < 0); \
+ VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \
+ fd_install(fdp->__fd, fdp->__file); \
+ fdp->__fd; \
+ })
+
+/* Do not use directly. */
+#define __FD_ADD(_fdf, _fd_flags, _file_owned) \
+ ({ \
+ FD_PREPARE(_fdf, _fd_flags, _file_owned); \
+ s32 ret = _fdf.err; \
+ if (likely(!ret)) \
+ ret = fd_publish(_fdf); \
+ ret; \
+ })
+
+/*
+ * FD_ADD - Allocate and install an fd and file in one step.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of
+ *
+ * Returns the allocated fd number, or negative error code on failure.
+ */
+#define FD_ADD(_fd_flags, _file_owned) \
+ __FD_ADD(__UNIQUE_ID(fd_prepare), _fd_flags, _file_owned)
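
Taken together, FD_ADD() collapses the usual get_unused_fd_flags()/fd_install() dance into one expression. A hedged sketch, with dentry_open() standing in for any file-producing expression:

/* Sketch: allocate an fd, open a file, publish both or clean up. */
static int open_thing(const struct path *path)
{
	return FD_ADD(O_CLOEXEC,
		      dentry_open(path, O_RDONLY, current_cred()));
}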
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
new file mode 100644
index 000000000000..31551e4cb8f3
--- /dev/null
+++ b/include/linux/file_ref.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_FILE_REF_H
+#define _LINUX_FILE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+/*
+ * file_ref is a reference count implementation specifically for use by
+ * files. It takes inspiration from rcuref but differs in key aspects
+ * such as support for SLAB_TYPESAFE_BY_RCU type caches.
+ *
+ * FILE_REF_ONEREF FILE_REF_MAXREF
+ * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL
+ * <-------------------valid ------------------->
+ *
+ * FILE_REF_SATURATED
+ * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
+ * <-----------------------saturation zone---------------------->
+ *
+ * FILE_REF_RELEASED FILE_REF_DEAD
+ * 0xC000000000000000UL 0xE000000000000000UL
+ * <-------------------dead zone------------------->
+ *
+ * FILE_REF_NOREF
+ * 0xFFFFFFFFFFFFFFFFUL
+ */
+
+#ifdef CONFIG_64BIT
+#define FILE_REF_ONEREF 0x0000000000000000UL
+#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL
+#define FILE_REF_SATURATED 0xA000000000000000UL
+#define FILE_REF_RELEASED 0xC000000000000000UL
+#define FILE_REF_DEAD 0xE000000000000000UL
+#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL
+#else
+#define FILE_REF_ONEREF 0x00000000U
+#define FILE_REF_MAXREF 0x7FFFFFFFU
+#define FILE_REF_SATURATED 0xA0000000U
+#define FILE_REF_RELEASED 0xC0000000U
+#define FILE_REF_DEAD 0xE0000000U
+#define FILE_REF_NOREF 0xFFFFFFFFU
+#endif
+
+typedef struct {
+#ifdef CONFIG_64BIT
+ atomic64_t refcnt;
+#else
+ atomic_t refcnt;
+#endif
+} file_ref_t;
+
+/**
+ * file_ref_init - Initialize a file reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count typically '1'
+ */
+static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
+{
+ atomic_long_set(&ref->refcnt, cnt - 1);
+}
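
Note the bias: the stored counter is one less than the logical count, so a single reference is FILE_REF_ONEREF (zero). A worked example of the scheme, assuming the helpers defined later in this header:

file_ref_t ref;

file_ref_init(&ref, 1);			/* stores 0 == FILE_REF_ONEREF */
/* file_ref_read() compensates and reports c + 1, i.e. 1 here */
WARN_ON(file_ref_read(&ref) != 1);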
+
+bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
+
+/**
+ * file_ref_get - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
+ *
+ * Provides full memory ordering.
+ *
+ * Return: False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already. True if a reference
+ * was successfully acquired.
+ */
+static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count with full
+ * ordering. The saturation and dead zones provide enough
+ * tolerance for this.
+ *
+	 * If this returns negative, the file in question may already
+	 * have been freed and immediately reused due to
+	 * SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering the
+	 * reference count on failure, e.g. to reset it back to the
+	 * middle of the dead zone, risks marking someone else's file
+	 * as dead behind their back.
+ *
+ * It would be possible to do a careful:
+ *
+ * cnt = atomic_long_inc_return();
+ * if (likely(cnt >= 0))
+ * return true;
+ *
+ * and then something like:
+ *
+	 * if (cnt >= FILE_REF_RELEASED)
+ * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD),
+ *
+	 * to set the value back to the middle of the dead zone. But it's
+	 * practically impossible to go from FILE_REF_DEAD to
+	 * FILE_REF_ONEREF: it would take 2305843009213693952 (2^61)
+ * file_ref_get()s to resurrect such a dead file.
+ */
+ return !atomic_long_add_negative(1, &ref->refcnt);
+}
+
+/**
+ * file_ref_inc - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Acquire an additional reference on a file. Warns if the caller didn't
+ * already hold a reference.
+ */
+static __always_inline void file_ref_inc(file_ref_t *ref)
+{
+ long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
+ WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
+}
+
+/**
+ * file_ref_put - Release a file reference
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores
+ * are done before, and provides an acquire ordering on success such
+ * that free() must come after.
+ *
+ * Return: True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release
+ * the object which is protected by the reference counter.
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
+{
+ long cnt;
+
+ /*
+ * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
+	 * calls don't risk UAFs when a file is recycled, it is still
+	 * vulnerable to UAFs caused by freeing the whole slab page once
+	 * it becomes unused. Preventing file_ref_put() from being
+	 * preempted protects against this.
+ */
+ guard(preempt)();
+ /*
+ * Unconditionally decrease the reference count. The saturation
+ * and dead zones provide enough tolerance for this. If this
+ * fails then we need to handle the last reference drop and
+ * cases inside the saturation and dead zones.
+ */
+ cnt = atomic_long_dec_return(&ref->refcnt);
+ if (cnt >= 0)
+ return false;
+ return __file_ref_put(ref, cnt);
+}
+
+/**
+ * file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF
+ * @ref: Pointer to the reference count
+ *
+ * Semantically it is equivalent to calling file_ref_put(), but it trades
+ * lower performance in the face of other CPUs also modifying the refcount
+ * for higher performance when this happens to be the last reference.
+ *
+ * For the last reference, file_ref_put() issues two atomics: one to drop
+ * the reference and another to transition it to FILE_REF_DEAD. This
+ * routine does the work in one step, but to do so it has to pre-read the
+ * variable, which decreases scalability.
+ *
+ * Use with close() et al, stick to file_ref_put() by default.
+ */
+static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
+{
+ long old;
+
+ old = atomic_long_read(&ref->refcnt);
+ if (likely(old == FILE_REF_ONEREF)) {
+ if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+ return true;
+ }
+ return file_ref_put(ref);
+}
+
+/**
+ * file_ref_read - Read the number of file references
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned long file_ref_read(file_ref_t *ref)
+{
+ unsigned long c = atomic_long_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= FILE_REF_RELEASED ? 0 : c + 1;
+}
+
+/*
+ * __file_ref_read_raw - Return the value stored in ref->refcnt
+ * @ref: Pointer to the reference count
+ *
+ * Return: The raw value found in the counter
+ *
+ * A hack for file_needs_f_pos_lock(); you probably want to use
+ * file_ref_read() instead.
+ */
+static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
+{
+ return atomic_long_read(&ref->refcnt);
+}
+
+#endif
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
index 47c05a9851d0..f89dcfad3f8f 100644
--- a/include/linux/fileattr.h
+++ b/include/linux/fileattr.h
@@ -14,13 +14,33 @@
FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
FS_XFLAG_PROJINHERIT)
+/* Read-only inode flags */
+#define FS_XFLAG_RDONLY_MASK \
+ (FS_XFLAG_PREALLOC | FS_XFLAG_HASATTR)
+
+/* Flags to indicate valid value of fsx_ fields */
+#define FS_XFLAG_VALUES_MASK \
+ (FS_XFLAG_EXTSIZE | FS_XFLAG_COWEXTSIZE)
+
+/* Flags for directories */
+#define FS_XFLAG_DIRONLY_MASK \
+ (FS_XFLAG_RTINHERIT | FS_XFLAG_NOSYMLINKS | FS_XFLAG_EXTSZINHERIT)
+
+/* Misc settable flags */
+#define FS_XFLAG_MISC_MASK \
+ (FS_XFLAG_REALTIME | FS_XFLAG_NODEFRAG | FS_XFLAG_FILESTREAM)
+
+#define FS_XFLAGS_MASK \
+ (FS_XFLAG_COMMON | FS_XFLAG_RDONLY_MASK | FS_XFLAG_VALUES_MASK | \
+ FS_XFLAG_DIRONLY_MASK | FS_XFLAG_MISC_MASK)
+
/*
* Merged interface for miscellaneous file attributes. 'flags' originates from
* ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
* is handled by the VFS helpers, so filesystems are free to implement just one
* or both of these sub-interfaces.
*/
-struct fileattr {
+struct file_kattr {
u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
/* struct fsxattr: */
u32 fsx_xflags; /* xflags field value (get/set) */
@@ -33,10 +53,10 @@ struct fileattr {
bool fsx_valid:1;
};
-int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);
+int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa);
-void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
-void fileattr_fill_flags(struct fileattr *fa, u32 flags);
+void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags);
+void fileattr_fill_flags(struct file_kattr *fa, u32 flags);
/**
* fileattr_has_fsx - check for extended flags/attributes
@@ -45,15 +65,19 @@ void fileattr_fill_flags(struct fileattr *fa, u32 flags);
* Return: true if any attributes are present that are not represented in
* ->flags.
*/
-static inline bool fileattr_has_fsx(const struct fileattr *fa)
+static inline bool fileattr_has_fsx(const struct file_kattr *fa)
{
return fa->fsx_valid &&
((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
}
-int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
- struct fileattr *fa);
+ struct file_kattr *fa);
+int ioctl_getflags(struct file *file, unsigned int __user *argp);
+int ioctl_setflags(struct file *file, unsigned int __user *argp);
+int ioctl_fsgetxattr(struct file *file, void __user *argp);
+int ioctl_fssetxattr(struct file *file, void __user *argp);
#endif /* _LINUX_FILEATTR_H */
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index daee999d05f3..54b824c05299 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -159,6 +159,8 @@ int fcntl_setlk64(unsigned int, struct file *, unsigned int,
int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
int fcntl_getlease(struct file *filp);
+int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg);
+int fcntl_getdeleg(struct file *filp, struct delegation *deleg);
static inline bool lock_is_unlock(struct file_lock *fl)
{
@@ -175,9 +177,19 @@ static inline bool lock_is_write(struct file_lock *fl)
return fl->c.flc_type == F_WRLCK;
}
+static inline void locks_wake_up_waiter(struct file_lock_core *flc)
+{
+ wake_up(&flc->flc_wait);
+}
+
static inline void locks_wake_up(struct file_lock *fl)
{
- wake_up(&fl->c.flc_wait);
+ locks_wake_up_waiter(&fl->c);
+}
+
+static inline bool locks_can_async_lock(const struct file_operations *fops)
+{
+ return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK;
}
/* fs/locks.c */
@@ -202,7 +214,14 @@ int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
void locks_init_lease(struct file_lease *);
void locks_free_lease(struct file_lease *fl);
struct file_lease *locks_alloc_lease(void);
-int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+
+#define LEASE_BREAK_LEASE BIT(0) // break leases and delegations
+#define LEASE_BREAK_DELEG BIT(1) // break delegations only
+#define LEASE_BREAK_LAYOUT BIT(2) // break layouts only
+#define LEASE_BREAK_NONBLOCK BIT(3) // non-blocking break
+#define LEASE_BREAK_OPEN_RDONLY BIT(4) // readonly open event
+
+int __break_lease(struct inode *inode, unsigned int flags);
void lease_get_mtime(struct inode *, struct timespec64 *time);
int generic_setlease(struct file *, int, struct file_lease **, void **priv);
int kernel_setlease(struct file *, int, struct file_lease **, void **);
@@ -261,6 +280,16 @@ static inline int fcntl_getlease(struct file *filp)
return F_UNLCK;
}
+static inline int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
static inline bool lock_is_unlock(struct file_lock *fl)
{
return false;
@@ -357,7 +386,7 @@ static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *f
return -ENOLCK;
}
-static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+static inline int __break_lease(struct inode *inode, unsigned int flags)
{
return 0;
}
@@ -418,83 +447,128 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
}
#ifdef CONFIG_FILE_LOCKING
+static inline unsigned int openmode_to_lease_flags(unsigned int mode)
+{
+ unsigned int flags = 0;
+
+ if ((mode & O_ACCMODE) == O_RDONLY)
+ flags |= LEASE_BREAK_OPEN_RDONLY;
+ if (mode & O_NONBLOCK)
+ flags |= LEASE_BREAK_NONBLOCK;
+ return flags;
+}
+
static inline int break_lease(struct inode *inode, unsigned int mode)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_LEASE);
+ if (!list_empty_careful(&flctx->flc_lease))
+ return __break_lease(inode, LEASE_BREAK_LEASE | openmode_to_lease_flags(mode));
return 0;
}
-static inline int break_deleg(struct inode *inode, unsigned int mode)
+static inline int break_deleg(struct inode *inode, unsigned int flags)
{
+ struct file_lock_context *flctx;
+
/*
* Since this check is lockless, we must ensure that any refcounts
* taken are done before checking i_flctx->flc_lease. Otherwise, we
* could end up racing with tasks trying to set a new lease on this
* file.
*/
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_DELEG);
+ if (!list_empty_careful(&flctx->flc_lease)) {
+ flags |= LEASE_BREAK_DELEG;
+ return __break_lease(inode, flags);
+ }
return 0;
}
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+struct delegated_inode {
+ struct inode *di_inode;
+};
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return di->di_inode;
+}
+
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *di)
{
int ret;
- ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
- if (ret == -EWOULDBLOCK && delegated_inode) {
- *delegated_inode = inode;
+ ret = break_deleg(inode, LEASE_BREAK_NONBLOCK);
+ if (ret == -EWOULDBLOCK && di) {
+ di->di_inode = inode;
ihold(inode);
}
return ret;
}
-static inline int break_deleg_wait(struct inode **delegated_inode)
+static inline int break_deleg_wait(struct delegated_inode *di)
{
int ret;
- ret = break_deleg(*delegated_inode, O_WRONLY);
- iput(*delegated_inode);
- *delegated_inode = NULL;
+ ret = break_deleg(di->di_inode, 0);
+ iput(di->di_inode);
+ di->di_inode = NULL;
return ret;
}
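
The intended caller pattern is unchanged from the old bare-inode interface, only the wrapper type is new; a sketch modeled on the classic vfs_unlink()-style retry loop (vfs_do_op() is hypothetical):

/* Sketch of the canonical retry loop around a delegation break. */
struct delegated_inode delegated = {};
int err;

retry:
	err = vfs_do_op(dir, dentry, &delegated);	/* hypothetical */
	if (is_delegated(&delegated)) {
		err = break_deleg_wait(&delegated);
		if (!err)
			goto retry;
	}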
static inline int break_layout(struct inode *inode, bool wait)
{
smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode,
- wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
- FL_LAYOUT);
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ unsigned int flags = LEASE_BREAK_LAYOUT;
+
+ if (!wait)
+ flags |= LEASE_BREAK_NONBLOCK;
+
+ return __break_lease(inode, flags);
+ }
return 0;
}
#else /* !CONFIG_FILE_LOCKING */
-static inline int break_lease(struct inode *inode, unsigned int mode)
+struct delegated_inode { };
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return false;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
{
return 0;
}
-static inline int break_deleg(struct inode *inode, unsigned int mode)
+static inline int break_deleg(struct inode *inode, unsigned int flags)
{
return 0;
}
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *delegated_inode)
{
return 0;
}
-static inline int break_deleg_wait(struct inode **delegated_inode)
+static inline int break_deleg_wait(struct delegated_inode *delegated_inode)
{
BUG();
return 0;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0f12cf01070e..fd54fed8f95f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -78,11 +78,14 @@ struct ctl_table_header;
/* unused opcode to mark special atomic instruction */
#define BPF_PROBE_ATOMIC 0xe0
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
/* unused opcode to mark speculation barrier for mitigating
- * Speculative Store Bypass
+ * Spectre v1 and v4
*/
#define BPF_NOSPEC 0xc0
@@ -364,6 +367,8 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
* BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
* BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
* BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ * BPF_LOAD_ACQ dst_reg = smp_load_acquire(src_reg + off16)
+ * BPF_STORE_REL smp_store_release(dst_reg + off16, src_reg)
*/
#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
@@ -437,6 +442,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
/* Relative call */
#define BPF_CALL_REL(TGT) \
@@ -459,6 +474,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = 0, \
.imm = BPF_CALL_IMM(FUNC) })
+/* Kfunc call */
+
+#define BPF_CALL_KFUNC(OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_KFUNC_CALL, \
+ .off = OFF, \
+ .imm = IMM })
+
/* Raw code statement block */
#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
@@ -649,6 +674,11 @@ struct bpf_prog_stats {
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
+struct bpf_timed_may_goto {
+ u64 count;
+ u64 timestamp;
+};
+
struct sk_filter {
refcount_t refcnt;
struct rcu_head rcu;
@@ -682,11 +712,13 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
duration = sched_clock() - start;
- stats = this_cpu_ptr(prog->stats);
- flags = u64_stats_update_begin_irqsave(&stats->syncp);
- u64_stats_inc(&stats->cnt);
- u64_stats_add(&stats->nsecs, duration);
- u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ if (likely(prog->stats)) {
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, duration);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
}
@@ -733,21 +765,128 @@ struct bpf_nh_params {
};
};
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+#define BPF_RI_F_RI_INIT BIT(1)
+#define BPF_RI_F_CPU_MAP_INIT BIT(2)
+#define BPF_RI_F_DEV_MAP_INIT BIT(3)
+#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
struct bpf_redirect_info {
u64 tgt_index;
void *tgt_value;
struct bpf_map *map;
u32 flags;
- u32 kern_flags;
u32 map_id;
enum bpf_map_type map_type;
struct bpf_nh_params nh;
+ u32 kern_flags;
};
-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+struct bpf_net_context {
+ struct bpf_redirect_info ri;
+ struct list_head cpu_map_flush_list;
+ struct list_head dev_map_flush_list;
+ struct list_head xskmap_map_flush_list;
+};
-/* flags for bpf_redirect_info kern_flags */
-#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->bpf_net_context != NULL)
+ return NULL;
+ bpf_net_ctx->ri.kern_flags = 0;
+
+ tsk->bpf_net_context = bpf_net_ctx;
+ return bpf_net_ctx;
+}
+
+static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
+{
+ if (bpf_net_ctx)
+ current->bpf_net_context = NULL;
+}
+
+static inline struct bpf_net_context *bpf_net_ctx_get(void)
+{
+ return current->bpf_net_context;
+}
+
+static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
+ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
+ }
+
+ return &bpf_net_ctx->ri;
+}
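
Callers in the network stack are expected to scope a context around each program run; a hedged sketch of the pattern (driver-side details illustrative):

/* Sketch: give BPF redirect helpers a per-task context for this run. */
struct bpf_net_context __ctx, *ctx;
u32 act;

ctx = bpf_net_ctx_set(&__ctx);	/* NULL if a context is already active */
act = bpf_prog_run_xdp(prog, &xdp);
/* ... drivers consult bpf_net_ctx_get_ri() for the redirect target ... */
bpf_net_ctx_clear(ctx);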
+
+static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->cpu_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->dev_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->xskmap_map_flush_list;
+}
+
+static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
+ struct list_head **lh_dev,
+ struct list_head **lh_xsk)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
+ struct list_head *lh;
+
+ *lh_map = *lh_dev = *lh_xsk = NULL;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return;
+
+ lh = &bpf_net_ctx->dev_map_flush_list;
+ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
+ *lh_dev = lh;
+
+ lh = &bpf_net_ctx->cpu_map_flush_list;
+ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
+ *lh_map = lh;
+
+ lh = &bpf_net_ctx->xskmap_map_flush_list;
+ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
+ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
+ *lh_xsk = lh;
+}
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
@@ -764,6 +903,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+static inline int bpf_prog_run_data_pointers(
+ const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+ void *save_data_meta, *save_data_end;
+ int res;
+
+ save_data_meta = cb->data_meta;
+ save_data_end = cb->data_end;
+
+ bpf_compute_data_pointers(skb);
+ res = bpf_prog_run(prog, skb);
+
+ cb->data_meta = save_data_meta;
+ cb->data_end = save_data_end;
+
+ return res;
+}
+
/* Similar to bpf_compute_data_pointers(), except that it saves the original
* data in cb->data and cb->meta_data for restore.
*/
@@ -863,12 +1022,6 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
-{
- return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
-}
-
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -939,10 +1092,20 @@ bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
+ enum skb_drop_reason *reason);
+
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- return sk_filter_trim_cap(sk, skb, 1);
+ enum skb_drop_reason ignore_reason;
+
+ return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
+}
+
+static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
+{
+ return sk_filter_trim_cap(sk, skb, 1, reason);
}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
@@ -1002,9 +1165,13 @@ bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+bool bpf_jit_supports_private_stack(void);
+bool bpf_jit_supports_timed_may_goto(void);
u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
-bool bpf_helper_changes_pkt_data(void *func);
+u64 arch_bpf_timed_may_goto(void);
+u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *);
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
@@ -1018,25 +1185,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
-void bpf_clear_redirect_map(struct bpf_map *map);
-
static inline bool xdp_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_set_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_clear_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}
@@ -1063,17 +1228,18 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *prog);
+ struct xdp_buff *xdp, const struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
struct xdp_buff *xdp,
struct xdp_frame *xdpf,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
void xdp_do_flush(void);
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
@@ -1129,8 +1295,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
struct bpf_binary_header **rw_hdr,
u8 **rw_image,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
-int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
- struct bpf_binary_header *ro_header,
+int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header);
@@ -1142,13 +1307,15 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
+const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen,
proglen, pass, image, current->comm, task_pid_nr(current));
if (image)
@@ -1208,18 +1375,18 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- const char *ret = __bpf_address_lookup(addr, size, off, sym);
+ int ret = __bpf_address_lookup(addr, size, off, sym);
if (ret && modname)
*modname = NULL;
@@ -1263,11 +1430,11 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-static inline const char *
+static inline int
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- return NULL;
+ return 0;
}
static inline bool is_bpf_text_address(unsigned long addr)
@@ -1286,11 +1453,11 @@ static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
return NULL;
}
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
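With the return type changed from const char * to int, the BPF lookup helpers now follow the kallsyms convention of returning nonzero when a symbol was found (the stubs above return 0). A small sketch of the new calling pattern, for illustration only:

	char sym[KSYM_NAME_LEN];
	unsigned long size, offset;

	if (__bpf_address_lookup(addr, &size, &offset, sym))
		pr_info("%lx is %s+%#lx/%#lx\n", addr, sym, offset, size);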
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -1370,7 +1537,7 @@ static inline int bpf_tell_extensions(void)
struct bpf_sock_addr_kern {
struct sock *sk;
- struct sockaddr *uaddr;
+ struct sockaddr_unsized *uaddr;
/* Temporary "register" to make indirect stores to nested structures
* defined above. We need three registers to make such a store, but
* only two (src and dst) are available at convert_ctx_access time
@@ -1392,6 +1559,7 @@ struct bpf_sock_ops_kern {
void *skb_data_end;
u8 op;
u8 is_fullsock;
+ u8 is_locked_tcp_sock;
u8 remaining_opt_len;
u64 temp; /* temp and everything after is not
* initialized to 0 before calling
@@ -1406,7 +1574,7 @@ struct bpf_sock_ops_kern {
struct bpf_sysctl_kern {
struct ctl_table_header *head;
- struct ctl_table *table;
+ const struct ctl_table *table;
void *cur_val;
size_t cur_len;
void *new_val;
@@ -1512,7 +1680,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
_all_pass || _selected_sk ? SK_PASS : SK_DROP; \
})
-static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
const int ifindex, struct sock **psk)
@@ -1549,7 +1717,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
@@ -1592,7 +1760,7 @@ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 inde
u64 flags, const u64 flag_mask,
void *lookup_elem(struct bpf_map *map, u32 key))
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
/* Lower bits of the flags are used as return code on lookup failure */
@@ -1635,6 +1803,9 @@ int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
void *buf, unsigned long len, bool flush);
+int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags);
+void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset);
#else /* CONFIG_NET */
static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
void *to, u32 len)
@@ -1669,6 +1840,18 @@ static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, voi
unsigned long len, bool flush)
{
}
+
+static inline int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
#endif /* CONFIG_NET */
#endif /* __LINUX_FILTER_H__ */
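The !CONFIG_NET stubs suggest the calling convention for the new skb metadata helpers: an errno for the store and an ERR_PTR for the pointer lookup. A hedged sketch, assuming the CONFIG_NET implementations follow the same convention:

	void *meta = bpf_skb_meta_pointer(skb, offset);

	if (IS_ERR(meta))
		return PTR_ERR(meta);
	/* read or write metadata bytes at 'meta' ... */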
diff --git a/include/linux/find.h b/include/linux/find.h
index 5dfca4225fef..9d720ad92bc1 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign
unsigned long n);
extern unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size);
unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
const unsigned long *addr3, unsigned long size);
extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
@@ -42,6 +44,8 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
@@ -52,7 +56,7 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -81,7 +85,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -112,7 +116,7 @@ unsigned long find_next_and_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -142,7 +146,7 @@ unsigned long find_next_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_or_bit(const unsigned long *addr1,
const unsigned long *addr2, unsigned long size,
unsigned long offset)
@@ -171,7 +175,7 @@ unsigned long find_next_or_bit(const unsigned long *addr1,
* Returns the bit number of the next zero bit
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
@@ -198,7 +202,7 @@ unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
* Returns the bit number of the first set bit.
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -224,7 +228,7 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number of the N'th set bit.
* If no such, returns >= @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
if (n >= size)
@@ -249,7 +253,7 @@ unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsign
* Returns the bit number of the N'th set bit.
* If no such, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
@@ -266,33 +270,6 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
}
/**
- * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
- * flipping bits in 2nd region
- * @addr1: The 1st address to start the search at
- * @addr2: The 2nd address to start the search at
- * @size: The maximum number of bits to search
- * @n: The number of set bit, which position is needed, counting from 0
- *
- * Returns the bit number of the N'th set bit.
- * If no such, returns @size.
- */
-static inline
-unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
- unsigned long size, unsigned long n)
-{
- if (n >= size)
- return size;
-
- if (small_const_nbits(size)) {
- unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
-
- return val ? fns(val, n) : size;
- }
-
- return __find_nth_andnot_bit(addr1, addr2, size, n);
-}
-
-/**
* find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
* excluding those set in 3rd region
* @addr1: The 1st address to start the search at
@@ -332,7 +309,7 @@ unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
* Returns the bit number for the next set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
@@ -348,6 +325,29 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
#endif
/**
+ * find_first_andnot_bit - find the first bit set in 1st memory region and unset in 2nd
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit
+ * If no bits are set, returns >= @size.
+ */
+static __always_inline
+unsigned long find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_andnot_bit(addr1, addr2, size);
+}
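A short usage sketch for the new helper; 'candidates', 'claimed' and 'nbits' are placeholder names:

	/* First bit set in 'candidates' but clear in 'claimed'. */
	unsigned long bit = find_first_andnot_bit(candidates, claimed, nbits);

	if (bit >= nbits)
		return -ENOSPC;	/* nothing available */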
+
+/**
* find_first_and_and_bit - find the first set bit in 3 memory regions
* @addr1: The first address to base the search on
* @addr2: The second address to base the search on
@@ -357,7 +357,7 @@ unsigned long find_first_and_bit(const unsigned long *addr1,
* Returns the bit number for the first set bit
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_and_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
@@ -381,7 +381,7 @@ unsigned long find_first_and_and_bit(const unsigned long *addr1,
* Returns the bit number of the first cleared bit.
* If no bits are zero, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -402,7 +402,7 @@ unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
*
* Returns the bit number of the last set bit, or size.
*/
-static inline
+static __always_inline
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -425,7 +425,7 @@ unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size, unsigned long offset)
@@ -448,7 +448,7 @@ unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
* Returns the bit number for the next set bit, or first set bit up to @offset
* If no bits are set, returns @size.
*/
-static inline
+static __always_inline
unsigned long find_next_bit_wrap(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
@@ -465,7 +465,7 @@ unsigned long find_next_bit_wrap(const unsigned long *addr,
 * Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
* before using it alone.
*/
-static inline
+static __always_inline
unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
unsigned long start, unsigned long n)
{
@@ -506,20 +506,20 @@ extern unsigned long find_next_clump8(unsigned long *clump,
#if defined(__LITTLE_ENDIAN)
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_zero_bit(addr, size, offset);
}
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
{
return find_next_bit(addr, size, offset);
}
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
return find_first_zero_bit(addr, size);
}
@@ -527,7 +527,7 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
#elif defined(__BIG_ENDIAN)
#ifndef find_next_zero_bit_le
-static inline
+static __always_inline
unsigned long find_next_zero_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
@@ -546,7 +546,7 @@ unsigned long find_next_zero_bit_le(const void *addr, unsigned
#endif
#ifndef find_first_zero_bit_le
-static inline
+static __always_inline
unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
{
if (small_const_nbits(size)) {
@@ -560,7 +560,7 @@ unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
#endif
#ifndef find_next_bit_le
-static inline
+static __always_inline
unsigned long find_next_bit_le(const void *addr, unsigned
long size, unsigned long offset)
{
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 00abe0e5d602..6143b7d28eac 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -88,23 +88,30 @@ struct fw_card {
int node_id;
int generation;
- int current_tlabel;
- u64 tlabel_mask;
- struct list_head transaction_list;
u64 reset_jiffies;
- u32 split_timeout_hi;
- u32 split_timeout_lo;
- unsigned int split_timeout_cycles;
- unsigned int split_timeout_jiffies;
+ struct {
+ int current_tlabel;
+ u64 tlabel_mask;
+ struct list_head list;
+ spinlock_t lock;
+ } transactions;
+
+ struct {
+ u32 hi;
+ u32 lo;
+ unsigned int cycles;
+ unsigned int jiffies;
+ spinlock_t lock;
+ } split_timeout;
unsigned long long guid;
unsigned max_receive;
int link_speed;
int config_rom_generation;
- spinlock_t lock; /* Take this lock when handling the lists in
- * this struct. */
+ spinlock_t lock;
+
struct fw_node *local_node;
struct fw_node *root_node;
struct fw_node *irm_node;
@@ -115,8 +122,6 @@ struct fw_card {
int index;
struct list_head link;
- struct list_head phy_receiver_list;
-
struct delayed_work br_work; /* bus reset job */
bool br_short;
@@ -131,9 +136,16 @@ struct fw_card {
bool broadcast_channel_allocated;
u32 broadcast_channel;
- __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ struct {
+ __be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+ spinlock_t lock;
+ } topology_map;
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
+ struct workqueue_struct *async_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -158,6 +170,20 @@ struct fw_attribute_group {
struct attribute *attrs[13];
};
+enum fw_device_quirk {
+ // See afa1282a35d3 ("firewire: core: check for 1394a compliant IRM, fix inaccessibility of Sony camcorder").
+ FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY = BIT(0),
+
+ // See a509e43ff338 ("firewire: core: fix unstable I/O with Canon camcorder").
+ FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER = BIT(1),
+
+	// MOTU Audio Express transfers an acknowledge packet with 0x10 for the pending state.
+ FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE = BIT(2),
+
+	// TASCAM FW-1082/FW-1804/FW-1884 often freeze when receiving S400 packets.
+ FW_DEVICE_QUIRK_UNSTABLE_AT_S400 = BIT(3),
+};
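A hedged sketch of how a quirk bit might be consulted once fw_device::quirks is populated; the speed-capping policy shown is illustrative, not taken from the drivers:

	/* Cap the speed for devices known to be unstable at S400. */
	if (device->quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400)
		max_speed = min(max_speed, (int)SCODE_200);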
+
enum fw_device_state {
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING,
@@ -191,6 +217,9 @@ struct fw_device {
struct fw_card *card;
struct device device;
+ // A set of enum fw_device_quirk.
+ int quirks;
+
struct mutex client_list_mutex;
struct list_head client_list;
@@ -305,8 +334,7 @@ struct fw_packet {
* For successful transmission, the status code is the ack received
* from the destination. Otherwise it is one of the juju-specific
* rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
- * The callback can be called from tasklet context and thus
- * must never block.
+ * The callback can be called from a workqueue and thus must never block.
*/
fw_packet_callback_t callback;
int ack;
@@ -339,7 +367,11 @@ struct fw_address_handler {
u64 length;
fw_address_callback_t address_callback;
void *callback_data;
+
+ // Only for core functions.
struct list_head link;
+ struct kref kref;
+ struct completion done;
};
struct fw_address_region {
@@ -379,6 +411,10 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
*
* A variation of __fw_send_request() to generate callback for response subaction without time
* stamp.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the
+ * current context instead.
*/
static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed,
@@ -408,6 +444,10 @@ static inline void fw_send_request(struct fw_card *card, struct fw_transaction *
* @callback_data: data to be passed to the transaction completion callback
*
* A variation of __fw_send_request() to generate callback for response subaction with time stamp.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the current
+ * context instead.
*/
static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed, unsigned long long offset,
@@ -462,9 +502,8 @@ struct fw_iso_packet {
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
- u32 header_length:8; /* Length of immediate header */
- /* tx: Top of 1394 isoch. data_block */
- u32 header[] __counted_by(header_length);
+ u32 header_length:8; /* Size of immediate header */
+ u32 header[]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
@@ -510,6 +549,7 @@ union fw_iso_callback {
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
@@ -529,6 +569,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full. If the
+ * context needs to be processed in the current context instead, fw_iso_context_flush_completions()
+ * is available.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
+
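Since the helper is callable from any context, a driver interrupt handler can defer completion processing to the context's work item. A hypothetical sketch:

	static irqreturn_t example_isr(int irq, void *data)
	{
		struct fw_iso_context *ctx = data;

		/* Defer processing to the card's isochronous workqueue. */
		fw_iso_context_schedule_flush_completions(ctx);
		return IRQ_HANDLED;
	}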
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index f026f8926d79..aae1b85ffc10 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -98,6 +98,10 @@ static inline bool firmware_request_builtin(struct firmware *fw,
#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_platform(const struct firmware **fw, const char *name,
@@ -123,6 +127,14 @@ static inline int request_firmware(const struct firmware **fw,
return -EINVAL;
}
+static inline int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return -EINVAL;
+}
+
static inline int firmware_request_nowarn(const struct firmware **fw,
const char *name,
struct device *device)
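firmware_request_nowait_nowarn() combines the asynchronous request with the quiet failure mode of the _nowarn variants. A hedged usage sketch; the firmware path and callback names are placeholders:

	static void example_fw_done(const struct firmware *fw, void *context)
	{
		if (!fw)
			return;	/* not found; no warning was logged */
		/* consume fw->data / fw->size, then release: */
		release_firmware(fw);
	}

	err = firmware_request_nowait_nowarn(THIS_MODULE, "vendor/example.bin",
					     dev, GFP_KERNEL, NULL, example_fw_done);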
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 82687e07a7c2..0ec1cdc5585d 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -42,6 +42,16 @@
#define CS_DSP_ACKED_CTL_MIN_VALUE 0
#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+/*
+ * Write sequence operation codes
+ */
+#define CS_DSP_WSEQ_FULL 0x00
+#define CS_DSP_WSEQ_ADDR8 0x02
+#define CS_DSP_WSEQ_L16 0x04
+#define CS_DSP_WSEQ_H16 0x05
+#define CS_DSP_WSEQ_UNLOCK 0xFD
+#define CS_DSP_WSEQ_END 0xFF
+
/**
* struct cs_dsp_region - Describes a logical memory region in DSP address space
* @type: Memory region type
@@ -54,14 +64,12 @@ struct cs_dsp_region {
/**
* struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
- * @list: List node for internal use
* @alg: Algorithm id
* @ver: Expected algorithm version
* @type: Memory region type
* @base: Address of region
*/
struct cs_dsp_alg_region {
- struct list_head list;
unsigned int alg;
unsigned int ver;
int type;
@@ -94,7 +102,7 @@ struct cs_dsp_coeff_ctl {
const char *subname;
unsigned int subname_len;
unsigned int offset;
- size_t len;
+ unsigned int len;
unsigned int type;
unsigned int flags;
unsigned int set:1;
@@ -167,7 +175,7 @@ struct cs_dsp {
const struct cs_dsp_region *mem;
int num_mems;
- int fw_ver;
+ int wmfw_ver;
bool booted;
bool running;
@@ -180,8 +188,8 @@ struct cs_dsp {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
- char *wmfw_file_name;
- char *bin_file_name;
+ const char *wmfw_file_name;
+ const char *bin_file_name;
#endif
};
@@ -213,13 +221,13 @@ int cs_dsp_adsp2_init(struct cs_dsp *dsp);
int cs_dsp_halo_init(struct cs_dsp *dsp);
int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
- const struct firmware *wmfw_firmware, char *wmfw_filename,
- const struct firmware *coeff_firmware, char *coeff_filename,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
const char *fw_name);
void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
int cs_dsp_power_up(struct cs_dsp *dsp,
- const struct firmware *wmfw_firmware, char *wmfw_filename,
- const struct firmware *coeff_firmware, char *coeff_filename,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
const char *fw_name);
void cs_dsp_power_down(struct cs_dsp *dsp);
int cs_dsp_run(struct cs_dsp *dsp);
@@ -259,6 +267,23 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
const char *cs_dsp_mem_region_name(unsigned int type);
/**
+ * struct cs_dsp_wseq - Describes a write sequence
+ * @ctl: Write sequence cs_dsp control
+ * @ops: Operations contained within
+ */
+struct cs_dsp_wseq {
+ struct cs_dsp_coeff_ctl *ctl;
+ struct list_head ops;
+};
+
+int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs);
+int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, u32 addr, u32 data,
+ u8 op_code, bool update);
+int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ const struct reg_sequence *reg_seq, int num_regs,
+ u8 op_code, bool update);
+
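A hedged sketch of the write-sequence API; the register address and value are placeholders, and how 'dsp' and the sequence control are obtained is assumed:

	struct cs_dsp_wseq wseq;	/* bound to its control before use */
	int ret;

	ret = cs_dsp_wseq_init(dsp, &wseq, 1);
	if (!ret)
		ret = cs_dsp_wseq_write(dsp, &wseq, 0x2000, 0x1234,
					CS_DSP_WSEQ_FULL, true);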
+/**
* struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
* @data: Pointer to underlying buffer memory
* @max: Pointer to end of the buffer memory
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..1f97764fdfd7
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test: Pointer to struct kunit instance.
+ * @dsp: Pointer to struct cs_dsp instance.
+ * @local: Private data for each test suite.
+ */
+struct cs_dsp_test {
+ struct kunit *test;
+ struct cs_dsp *dsp;
+
+ struct cs_dsp_test_local *local;
+
+ /* private: Following members are private */
+ bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id: Algorithm ID.
+ * @ver: Algorithm version.
+ * @xm_base_words: XM base address in DSP words.
+ * @xm_size_words: XM size in DSP words.
+ * @ym_base_words: YM base address in DSP words.
+ * @ym_size_words: YM size in DSP words.
+ * @zm_base_words: ZM base address in DSP words.
+ * @zm_size_words: ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+ unsigned int id;
+ unsigned int ver;
+ unsigned int xm_base_words;
+ unsigned int xm_size_words;
+ unsigned int ym_base_words;
+ unsigned int ym_size_words;
+ unsigned int zm_base_words;
+ unsigned int zm_size_words;
+};
+
+struct cs_dsp_mock_coeff_def {
+ const char *shortname;
+ const char *fullname;
+ const char *description;
+ u16 type;
+ u16 flags;
+ u16 mem_type;
+ unsigned int offset_dsp_words;
+ unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv: Pointer to the struct cs_dsp_test.
+ * @blob_data: Pointer to the created blob data.
+ * @blob_size_bytes: Size of the data at blob_data.
+ */
+struct cs_dsp_mock_xm_header {
+ struct cs_dsp_test *test_priv;
+ void *blob_data;
+ size_t blob_size_bytes;
+};
+
+struct cs_dsp_mock_wmfw_builder;
+struct cs_dsp_mock_bin_builder;
+
+extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase;
+extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase;
+extern const unsigned int cs_dsp_mock_halo_core_base;
+extern const unsigned int cs_dsp_mock_halo_sysinfo_base;
+
+extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[];
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes);
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type);
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type);
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv);
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type);
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words);
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type);
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs);
+
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv);
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg);
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs);
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes);
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv);
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs);
+
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version);
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info);
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name);
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
+
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version);
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info);
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description);
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def);
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder);
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder);
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder);
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
new file mode 100644
index 000000000000..a33b45027356
--- /dev/null
+++ b/include/linux/firmware/imx/sm.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _SCMI_IMX_H
+#define _SCMI_IMX_H
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/scmi_imx_protocol.h>
+#include <linux/types.h>
+
+#define SCMI_IMX95_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */
+#define SCMI_IMX95_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */
+#define SCMI_IMX95_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */
+#define SCMI_IMX95_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */
+#define SCMI_IMX95_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */
+#define SCMI_IMX95_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */
+
+#define SCMI_IMX94_CTRL_PDM_CLK_SEL	0U	/* AON PDM clock sel */
+#define SCMI_IMX94_CTRL_MQS1_SETTINGS	1U	/* AON MQS settings */
+#define SCMI_IMX94_CTRL_MQS2_SETTINGS	2U	/* WAKE MQS settings */
+#define SCMI_IMX94_CTRL_SAI1_MCLK	3U	/* AON SAI1 MCLK */
+#define SCMI_IMX94_CTRL_SAI2_MCLK	4U	/* WAKE SAI2 MCLK */
+#define SCMI_IMX94_CTRL_SAI3_MCLK	5U	/* WAKE SAI3 MCLK */
+#define SCMI_IMX94_CTRL_SAI4_MCLK	6U	/* WAKE SAI4 MCLK */
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+#else
+static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_CPU_DRV)
+int scmi_imx_cpu_start(u32 cpuid, bool start);
+int scmi_imx_cpu_started(u32 cpuid, bool *started);
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume);
+#else
+static inline int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+enum scmi_imx_lmm_op {
+ SCMI_IMX_LMM_BOOT,
+ SCMI_IMX_LMM_POWER_ON,
+ SCMI_IMX_LMM_SHUTDOWN,
+};
+
+/* For shutdown operation */
+#define SCMI_IMX_LMM_OP_FORCEFUL 0
+#define SCMI_IMX_LMM_OP_GRACEFUL BIT(0)
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_LMM_DRV)
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags);
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info);
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector);
+#else
+static inline int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
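A minimal sketch of the LMM interface; the logical machine ID is a placeholder:

	/* Gracefully shut down logical machine 1. */
	int err = scmi_imx_lmm_operation(1, SCMI_IMX_LMM_SHUTDOWN,
					 SCMI_IMX_LMM_OP_GRACEFUL);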
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index ee80ca4bb0d0..935dba3633b5 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#ifndef __STRATIX10_SMC_H
@@ -47,6 +48,10 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_SIP, (func_num))
+#define INTEL_SIP_SMC_ASYNC_VAL(func_name) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_name))
+
/**
* Return values in INTEL_SIP_SMC_* call
*
@@ -620,4 +625,110 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+/**
+ * Request INTEL_SIP_SMC_HWMON_READTEMP
+ * Sync call to request temperature
+ *
+ * Call register usage:
+ * a0 Temperature Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Temperature Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READTEMP 32
+#define INTEL_SIP_SMC_HWMON_READTEMP \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READTEMP)
+
+/**
+ * Request INTEL_SIP_SMC_HWMON_READVOLT
+ * Sync call to request voltage
+ *
+ * Call register usage:
+ * a0 Voltage Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Voltage Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READVOLT 33
+#define INTEL_SIP_SMC_HWMON_READVOLT \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READVOLT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_POLL
+ * Async call used by service driver at EL1 to query mailbox response from SDM.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_POLL
+ * a1 transaction job id
+ * a2-a17 will be used to return the response data
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1-a17 will contain the response values from mailbox for the previous send
+ * transaction
+ * Or
+ * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL (0xC8)
+#define INTEL_SIP_SMC_ASYNC_POLL \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * Async call to get RSU SPT from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT (0xEA)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_SPT \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * Async call to get RSU error status from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS (0xEB)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * Async call to send NOTIFY value to SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * a1 transaction job id
+ * a2 notify value
+ * a3-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY (0xEC)
+#define INTEL_SIP_SMC_ASYNC_RSU_NOTIFY \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY)
#endif
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 60ed82112680..d290060f4c73 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#ifndef __STRATIX10_SVC_CLIENT_H
@@ -11,10 +12,12 @@
*
* fpga: for FPGA configuration
* rsu: for remote status update
+ * hwmon: for hardware monitoring (voltage and temperature)
*/
#define SVC_CLIENT_FPGA "fpga"
#define SVC_CLIENT_RSU "rsu"
#define SVC_CLIENT_FCS "fcs"
+#define SVC_CLIENT_HWMON "hwmon"
/*
* Status of the sent command, in bit number
@@ -70,6 +73,7 @@
#define SVC_RSU_REQUEST_TIMEOUT_MS 300
#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
#define SVC_COMPLETED_TIMEOUT_MS 30000
+#define SVC_HWMON_REQUEST_TIMEOUT_MS 300
struct stratix10_svc_chan;
@@ -124,6 +128,9 @@ struct stratix10_svc_chan;
* @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
* return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
+ * @COMMAND_RSU_GET_SPT_TABLE: query firmware for SPT table
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
* @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
* return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM
*
@@ -141,6 +148,12 @@ struct stratix10_svc_chan;
*
* @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
* is SVC_STATUS_OK, SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READTEMP: query the temperature from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READVOLT: query the voltage from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
/* for FPGA */
@@ -158,6 +171,7 @@ enum stratix10_svc_command_code {
COMMAND_RSU_DCMF_VERSION,
COMMAND_RSU_DCMF_STATUS,
COMMAND_FIRMWARE_VERSION,
+ COMMAND_RSU_GET_SPT_TABLE,
/* for FCS */
COMMAND_FCS_REQUEST_SERVICE = 20,
COMMAND_FCS_SEND_CERTIFICATE,
@@ -171,6 +185,9 @@ enum stratix10_svc_command_code {
COMMAND_MBOX_SEND_CMD = 100,
/* Non-mailbox SMC Call */
COMMAND_SMC_SVC_VERSION = 200,
+ /* for HWMON */
+ COMMAND_HWMON_READTEMP,
+ COMMAND_HWMON_READVOLT
};
/**
@@ -284,5 +301,92 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
* request process.
*/
void stratix10_svc_done(struct stratix10_svc_chan *chan);
+
+/**
+ * typedef async_callback_t - A type definition for an asynchronous callback function.
+ *
+ * This type defines a function pointer for an asynchronous callback.
+ * The callback function takes a single argument, which is a pointer to
+ * user-defined data.
+ *
+ * @cb_arg: Argument to be passed to the callback function.
+ */
+typedef void (*async_callback_t)(void *cb_arg);
+
+/**
+ * stratix10_svc_add_async_client - Add an asynchronous client to a Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID.
+ *
+ * This function registers an asynchronous client with the specified Stratix 10
+ * service channel. If the use_unique_clientid flag is set to true, a unique client
+ * ID will be assigned to the client.
+ *
+ * Return: 0 on success, or a negative error code on failure:
+ * -EINVAL if the channel is NULL or the async controller is not initialized.
+ * -EALREADY if the async channel is already allocated.
+ * -ENOMEM if memory allocation fails.
+ *	   Other negative values if ID allocation fails.
+ */
+int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, bool use_unique_clientid);
+
+/**
+ * stratix10_svc_remove_async_client - Remove an asynchronous client from the Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ *
+ * This function removes an asynchronous client from the specified Stratix 10 service channel.
+ * It is typically used to clean up and release resources associated with the client.
+ *
+ * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid.
+ */
+int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_async_send - Send an asynchronous message to the SDM mailbox
+ * in EL3 secure firmware.
+ * @chan: Pointer to the service channel structure.
+ * @msg: Pointer to the message to be sent.
+ * @handler: Pointer to the handler object used by caller to track the transaction.
+ * @cb: Callback function to be called upon completion.
+ * @cb_arg: Argument to be passed to the callback function.
+ *
+ * This function sends a message asynchronously to the SDM mailbox in EL3 secure firmware
+ * and registers a callback function to be invoked when the operation completes.
+ *
+ * Return: 0 on success, and negative error codes on failure.
+ */
+int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler,
+ async_callback_t cb, void *cb_arg);
+
+/**
+ * stratix10_svc_async_poll - Polls the status of an asynchronous service request.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being polled.
+ * @data: Pointer to the callback data structure to be filled with the result.
+ *
+ * This function checks the status of an asynchronous service request
+ * and fills the provided callback data structure with the result.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter is invalid or if the
+ * async controller is not initialized, -EAGAIN if the transaction is
+ * still in progress, or other negative error codes on failure.
+ */
+int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle,
+ struct stratix10_svc_cb_data *data);
+
+/**
+ * stratix10_svc_async_done - Complete an asynchronous transaction
+ * @chan: Pointer to the service channel structure
+ * @tx_handle: Pointer to the transaction handle
+ *
+ * This function completes an asynchronous transaction by removing the
+ * transaction from the hash table and deallocating the associated resources.
+ *
+ * Return: 0 on success, -EINVAL on invalid input or errors.
+ */
+int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle);
+
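Taken together, the asynchronous API is used in a register/send/poll/done sequence. A hedged sketch ('msg', 'example_cb' and 'cb_ctx' are placeholders; error handling trimmed):

	void *tx_handle;
	struct stratix10_svc_cb_data data;
	int ret;

	ret = stratix10_svc_add_async_client(chan, true);
	if (ret)
		return ret;

	ret = stratix10_svc_async_send(chan, &msg, &tx_handle, example_cb, cb_ctx);
	if (!ret) {
		while ((ret = stratix10_svc_async_poll(chan, tx_handle, &data)) == -EAGAIN)
			usleep_range(1000, 2000);
		stratix10_svc_async_done(chan, tx_handle);
	}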
#endif
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
index 5b1d16fa3f56..6e86799a7dc4 100644
--- a/include/linux/firmware/mediatek/mtk-adsp-ipc.h
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -40,7 +40,7 @@ struct mtk_adsp_chan {
struct mtk_adsp_ipc {
struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
struct device *dev;
- struct mtk_adsp_ipc_ops *ops;
+ const struct mtk_adsp_ipc_ops *ops;
void *private_data;
};
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
index 366243ee9609..3387897bf368 100644
--- a/include/linux/firmware/qcom/qcom_qseecom.h
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -26,56 +26,11 @@ struct qseecom_client {
};
/**
- * qseecom_scm_dev() - Get the SCM device associated with the QSEECOM client.
- * @client: The QSEECOM client device.
- *
- * Returns the SCM device under which the provided QSEECOM client device
- * operates. This function is intended to be used for DMA allocations.
- */
-static inline struct device *qseecom_scm_dev(struct qseecom_client *client)
-{
- return client->aux_dev.dev.parent->parent;
-}
-
-/**
- * qseecom_dma_alloc() - Allocate DMA memory for a QSEECOM client.
- * @client: The QSEECOM client to allocate the memory for.
- * @size: The number of bytes to allocate.
- * @dma_handle: Pointer to where the DMA address should be stored.
- * @gfp: Allocation flags.
- *
- * Wrapper function for dma_alloc_coherent(), allocating DMA memory usable for
- * TZ/QSEECOM communication. Refer to dma_alloc_coherent() for details.
- */
-static inline void *qseecom_dma_alloc(struct qseecom_client *client, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- return dma_alloc_coherent(qseecom_scm_dev(client), size, dma_handle, gfp);
-}
-
-/**
- * dma_free_coherent() - Free QSEECOM DMA memory.
- * @client: The QSEECOM client for which the memory has been allocated.
- * @size: The number of bytes allocated.
- * @cpu_addr: Virtual memory address to free.
- * @dma_handle: DMA memory address to free.
- *
- * Wrapper function for dma_free_coherent(), freeing memory previously
- * allocated with qseecom_dma_alloc(). Refer to dma_free_coherent() for
- * details.
- */
-static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- return dma_free_coherent(qseecom_scm_dev(client), size, cpu_addr, dma_handle);
-}
-
-/**
* qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @client: The QSEECOM client associated with the target app.
- * @req: DMA address of the request buffer sent to the app.
+ * @req: Request buffer sent to the app (must be TZ memory).
* @req_size: Size of the request buffer.
- * @rsp: DMA address of the response buffer, written to by the app.
+ * @rsp: Response buffer, written to by the app (must be TZ memory).
* @rsp_size: Size of the response buffer.
*
 * Sends a request to the QSEE app associated with the given client and reads
@@ -90,8 +45,8 @@ static inline void qseecom_dma_free(struct qseecom_client *client, size_t size,
* Return: Zero on success, nonzero on failure.
*/
static inline int qcom_qseecom_app_send(struct qseecom_client *client,
- dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
}
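With the switch from DMA addresses to TZ memory, the request and response buffers are expected to come from a TZ-capable allocator such as the qcom_tzmem pool introduced later in this diff. A hedged sketch ('pool' is assumed to exist):

	void *req = qcom_tzmem_alloc(pool, req_size, GFP_KERNEL);
	void *rsp = qcom_tzmem_alloc(pool, rsp_size, GFP_KERNEL);
	int ret = -ENOMEM;

	if (req && rsp)
		ret = qcom_qseecom_app_send(client, req, req_size, rsp, rsp_size);
	if (rsp)
		qcom_tzmem_free(rsp);
	if (req)
		qcom_tzmem_free(req);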
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index aaa19f93ac43..a55ca771286b 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -85,6 +85,8 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
bool qcom_scm_restore_sec_cfg_available(void);
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank);
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void);
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
@@ -103,6 +105,14 @@ bool qcom_scm_ice_available(void);
int qcom_scm_ice_invalidate_key(u32 index);
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+bool qcom_scm_has_wrapped_key_support(void);
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size);
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size);
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size);
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size);
bool qcom_scm_hdcp_available(void);
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
@@ -115,11 +125,39 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
int qcom_scm_lmh_profile_change(u32 profile_id);
bool qcom_scm_lmh_dcvsh_available(void);
+/*
+ * Request TZ to program set of access controlled registers necessary
+ * irrespective of any features
+ */
+#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0)
+/*
+ * Request TZ to program BCL id to access controlled register when BCL is
+ * enabled
+ */
+#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1)
+/*
+ * Request TZ to program set of access controlled register for CLX feature
+ * when enabled
+ */
+#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2)
+/*
+ * Request TZ to program tsense ids to access controlled registers for reading
+ * gpu temperature sensors
+ */
+#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3)
+
+int qcom_scm_gpu_init_regs(u32 gpu_req);
+
+int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle);
+int qcom_scm_shm_bridge_delete(u64 handle);
+
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
-int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size);
#else /* CONFIG_QCOM_QSEECOM */
@@ -129,12 +167,18 @@ static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
}
static inline int qcom_scm_qseecom_app_send(u32 app_id,
- dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
return -EINVAL;
}
#endif /* CONFIG_QCOM_QSEECOM */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type);
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type);
+
#endif
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..23173e0c3ddd
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_H
+#define __QCOM_TZMEM_H
+
+#include <linux/cleanup.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+struct device;
+struct qcom_tzmem_pool;
+
+/**
+ * enum qcom_tzmem_policy - Policy for pool growth.
+ */
+enum qcom_tzmem_policy {
+ /**
+ * @QCOM_TZMEM_POLICY_STATIC: Static pool,
+ * never grow above initial size.
+ */
+ QCOM_TZMEM_POLICY_STATIC = 1,
+ /**
+ * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory,
+ * add increment * current size of memory.
+ */
+ QCOM_TZMEM_POLICY_MULTIPLIER,
+ /**
+ * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory
+ * add as much as is needed until max_size.
+ */
+ QCOM_TZMEM_POLICY_ON_DEMAND,
+};
+
+/**
+ * struct qcom_tzmem_pool_config - TZ memory pool configuration.
+ * @initial_size: Number of bytes to allocate for the pool during its creation.
+ * @policy: Pool size growth policy.
+ * @increment: Used with policies that allow pool growth.
+ * @max_size: Size above which the pool will never grow.
+ */
+struct qcom_tzmem_pool_config {
+ size_t initial_size;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+};
+
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config);
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool);
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config);
+
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp);
+void qcom_tzmem_free(void *ptr);
+
+DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
+
+phys_addr_t qcom_tzmem_to_phys(void *ptr);
+
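A hedged example of creating a growable pool, assuming the devm variant follows the usual ERR_PTR convention; the sizes are illustrative:

	struct qcom_tzmem_pool_config cfg = {
		.initial_size = SZ_64K,
		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
		.max_size = SZ_256K,
	};
	struct qcom_tzmem_pool *pool = devm_qcom_tzmem_pool_new(dev, &cfg);

	if (IS_ERR(pool))
		return PTR_ERR(pool);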
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle);
+void qcom_tzmem_shm_bridge_delete(u64 handle);
+#else
+static inline int qcom_tzmem_shm_bridge_create(phys_addr_t paddr,
+ size_t size, u64 *handle)
+{
+ return 0;
+}
+
+static inline void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+}
+#endif
+
+#endif /* __QCOM_TZMEM */
diff --git a/include/linux/firmware/samsung/exynos-acpm-protocol.h b/include/linux/firmware/samsung/exynos-acpm-protocol.h
new file mode 100644
index 000000000000..2091da965a5a
--- /dev/null
+++ b/include/linux/firmware/samsung/exynos-acpm-protocol.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef __EXYNOS_ACPM_PROTOCOL_H
+#define __EXYNOS_ACPM_PROTOCOL_H
+
+#include <linux/types.h>
+
+struct acpm_handle;
+struct device_node;
+
+struct acpm_dvfs_ops {
+ int (*set_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id,
+ unsigned long rate);
+ unsigned long (*get_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id,
+ unsigned int clk_id);
+};
+
+struct acpm_pmic_ops {
+ int (*read_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+ int (*bulk_read)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+ int (*write_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+ int (*bulk_write)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+ int (*update_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+};
+
+struct acpm_ops {
+ struct acpm_dvfs_ops dvfs_ops;
+ struct acpm_pmic_ops pmic_ops;
+};
+
+/**
+ * struct acpm_handle - Reference to an initialized protocol instance
+ * @ops: The DVFS and PMIC operations supported by this handle.
+ */
+struct acpm_handle {
+ struct acpm_ops ops;
+};
+
+struct device;
+
+#if IS_ENABLED(CONFIG_EXYNOS_ACPM_PROTOCOL)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np);
+#else
+
+static inline const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __EXYNOS_ACPM_PROTOCOL_H */
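
A hedged sketch of a consumer, purely illustrative: the ACPM channel id, PMIC type and register are made-up values, and the NULL return is taken from the stub above (protocol support compiled out):

static int example_read_pmic_reg(struct device *dev, struct device_node *np)
{
	const struct acpm_handle *acpm;
	u8 val;
	int ret;

	acpm = devm_acpm_get_by_node(dev, np);
	if (IS_ERR(acpm))
		return PTR_ERR(acpm);
	if (!acpm)
		return -ENODEV;	/* protocol support compiled out */

	/* Read register 0x00 of PMIC type 0, channel 0, via ACPM channel 2. */
	ret = acpm->ops.pmic_ops.read_reg(acpm, 2, 0, 0x00, 0, &val);
	if (ret)
		return ret;

	dev_dbg(dev, "PMIC reg 0x00 = 0x%02x\n", val);
	return 0;
}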
diff --git a/include/linux/firmware/thead/thead,th1520-aon.h b/include/linux/firmware/thead/thead,th1520-aon.h
new file mode 100644
index 000000000000..dae132b66873
--- /dev/null
+++ b/include/linux/firmware/thead/thead,th1520-aon.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ */
+
+#ifndef _THEAD_AON_H
+#define _THEAD_AON_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define AON_RPC_MSG_MAGIC (0xef)
+#define TH1520_AON_RPC_VERSION 2
+#define TH1520_AON_RPC_MSG_NUM 7
+
+struct th1520_aon_chan;
+
+enum th1520_aon_rpc_svc {
+ TH1520_AON_RPC_SVC_UNKNOWN = 0,
+ TH1520_AON_RPC_SVC_PM = 1,
+ TH1520_AON_RPC_SVC_MISC = 2,
+ TH1520_AON_RPC_SVC_AVFS = 3,
+ TH1520_AON_RPC_SVC_SYS = 4,
+ TH1520_AON_RPC_SVC_WDG = 5,
+ TH1520_AON_RPC_SVC_LPM = 6,
+ TH1520_AON_RPC_SVC_MAX = 0x3F,
+};
+
+enum th1520_aon_misc_func {
+ TH1520_AON_MISC_FUNC_UNKNOWN = 0,
+ TH1520_AON_MISC_FUNC_SET_CONTROL = 1,
+ TH1520_AON_MISC_FUNC_GET_CONTROL = 2,
+ TH1520_AON_MISC_FUNC_REGDUMP_CFG = 3,
+};
+
+enum th1520_aon_wdg_func {
+ TH1520_AON_WDG_FUNC_UNKNOWN = 0,
+ TH1520_AON_WDG_FUNC_START = 1,
+ TH1520_AON_WDG_FUNC_STOP = 2,
+ TH1520_AON_WDG_FUNC_PING = 3,
+ TH1520_AON_WDG_FUNC_TIMEOUTSET = 4,
+ TH1520_AON_WDG_FUNC_RESTART = 5,
+ TH1520_AON_WDG_FUNC_GET_STATE = 6,
+ TH1520_AON_WDG_FUNC_POWER_OFF = 7,
+ TH1520_AON_WDG_FUNC_AON_WDT_ON = 8,
+ TH1520_AON_WDG_FUNC_AON_WDT_OFF = 9,
+};
+
+enum th1520_aon_sys_func {
+ TH1520_AON_SYS_FUNC_UNKNOWN = 0,
+ TH1520_AON_SYS_FUNC_AON_RESERVE_MEM = 1,
+};
+
+enum th1520_aon_lpm_func {
+ TH1520_AON_LPM_FUNC_UNKNOWN = 0,
+ TH1520_AON_LPM_FUNC_REQUIRE_STR = 1,
+ TH1520_AON_LPM_FUNC_RESUME_STR = 2,
+ TH1520_AON_LPM_FUNC_REQUIRE_STD = 3,
+ TH1520_AON_LPM_FUNC_CPUHP = 4,
+ TH1520_AON_LPM_FUNC_REGDUMP_CFG = 5,
+};
+
+enum th1520_aon_pm_func {
+ TH1520_AON_PM_FUNC_UNKNOWN = 0,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_REGULATOR = 1,
+ TH1520_AON_PM_FUNC_GET_RESOURCE_REGULATOR = 2,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ TH1520_AON_PM_FUNC_PWR_SET = 4,
+ TH1520_AON_PM_FUNC_PWR_GET = 5,
+ TH1520_AON_PM_FUNC_CHECK_FAULT = 6,
+ TH1520_AON_PM_FUNC_GET_TEMPERATURE = 7,
+};
+
+struct th1520_aon_rpc_msg_hdr {
+ u8 ver; /* version of msg hdr */
+	u8 size; /* msg size, unit in bytes; includes the rpc msg header itself */
+ u8 svc; /* rpc main service id */
+ u8 func; /* rpc sub func id of specific service, sent by caller */
+} __packed __aligned(1);
+
+struct th1520_aon_rpc_ack_common {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u8 err_code;
+} __packed __aligned(1);
+
+#define RPC_SVC_MSG_TYPE_DATA 0
+#define RPC_SVC_MSG_TYPE_ACK 1
+#define RPC_SVC_MSG_NEED_ACK 0
+#define RPC_SVC_MSG_NO_NEED_ACK 1
+
+#define RPC_GET_VER(MESG) ((MESG)->ver)
+#define RPC_SET_VER(MESG, VER) ((MESG)->ver = (VER))
+#define RPC_GET_SVC_ID(MESG) ((MESG)->svc & 0x3F)
+#define RPC_SET_SVC_ID(MESG, ID) ((MESG)->svc |= 0x3F & (ID))
+#define RPC_GET_SVC_FLAG_MSG_TYPE(MESG) (((MESG)->svc & 0x80) >> 7)
+#define RPC_SET_SVC_FLAG_MSG_TYPE(MESG, TYPE) ((MESG)->svc |= (TYPE) << 7)
+#define RPC_GET_SVC_FLAG_ACK_TYPE(MESG) (((MESG)->svc & 0x40) >> 6)
+#define RPC_SET_SVC_FLAG_ACK_TYPE(MESG, ACK) ((MESG)->svc |= (ACK) << 6)
+
+#define RPC_SET_BE64(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 7] = _set_data & 0xFF; \
+ data[_offset + 6] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 5] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 4] = (_set_data & 0xFF000000) >> 24; \
+ data[_offset + 3] = (_set_data & 0xFF00000000) >> 32; \
+ data[_offset + 2] = (_set_data & 0xFF0000000000) >> 40; \
+ data[_offset + 1] = (_set_data & 0xFF000000000000) >> 48; \
+ data[_offset + 0] = (_set_data & 0xFF00000000000000) >> 56; \
+ } while (0)
+
+#define RPC_SET_BE32(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 3] = (_set_data) & 0xFF; \
+ data[_offset + 2] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 1] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 0] = (_set_data & 0xFF000000) >> 24; \
+ } while (0)
+
+#define RPC_SET_BE16(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 1] = (_set_data) & 0xFF; \
+ data[_offset + 0] = (_set_data & 0xFF00) >> 8; \
+ } while (0)
+
+#define RPC_SET_U8(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ data[OFFSET] = (SET_DATA) & 0xFF; \
+ } while (0)
+
+#define RPC_GET_BE64(MESG, OFFSET, PTR)                                   \
+	do {                                                              \
+		u8 *data = (u8 *)(MESG);                                  \
+		u64 _offset = (OFFSET);                                   \
+		*(u64 *)(PTR) =                                           \
+			((u64)data[_offset + 7] | (u64)data[_offset + 6] << 8 |       \
+			 (u64)data[_offset + 5] << 16 | (u64)data[_offset + 4] << 24 | \
+			 (u64)data[_offset + 3] << 32 | (u64)data[_offset + 2] << 40 | \
+			 (u64)data[_offset + 1] << 48 | (u64)data[_offset + 0] << 56); \
+	} while (0)
+
+#define RPC_GET_BE32(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u32 *)(PTR) = \
+ (data[_offset + 3] | data[_offset + 2] << 8 | \
+ data[_offset + 1] << 16 | data[_offset + 0] << 24); \
+ } while (0)
+
+#define RPC_GET_BE16(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u16 *)(PTR) = (data[_offset + 1] | data[_offset + 0] << 8); \
+ } while (0)
+
+#define RPC_GET_U8(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ *(u8 *)(PTR) = (data[OFFSET]); \
+ } while (0)
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define TH1520_AON_PM_PW_MODE_OFF 0 /* Power off */
+#define TH1520_AON_PM_PW_MODE_STBY 1 /* Power in standby */
+#define TH1520_AON_PM_PW_MODE_LP 2 /* Power in low-power */
+#define TH1520_AON_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for AON power islands
+ */
+#define TH1520_AON_AUDIO_PD 0
+#define TH1520_AON_VDEC_PD 1
+#define TH1520_AON_NPU_PD 2
+#define TH1520_AON_VENC_PD 3
+#define TH1520_AON_GPU_PD 4
+#define TH1520_AON_DSP0_PD 5
+#define TH1520_AON_DSP1_PD 6
+
+struct th1520_aon_chan *th1520_aon_init(struct device *dev);
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan);
+
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg);
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on);
+
+#endif /* _THEAD_AON_H */
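
To show how the helpers above compose, here is an illustrative (not authoritative) sketch of a PM power-on request; the payload layout — resource id at byte offset 4 and a flag at offset 6 — is an assumption made for the example:

struct example_aon_msg {
	struct th1520_aon_rpc_msg_hdr hdr;
	u32 data[TH1520_AON_RPC_MSG_NUM - 1];
} __packed __aligned(1);

static int example_gpu_power_on(struct th1520_aon_chan *chan)
{
	struct example_aon_msg msg = {};

	RPC_SET_VER(&msg.hdr, TH1520_AON_RPC_VERSION);
	RPC_SET_SVC_ID(&msg.hdr, TH1520_AON_RPC_SVC_PM);
	RPC_SET_SVC_FLAG_MSG_TYPE(&msg.hdr, RPC_SVC_MSG_TYPE_DATA);
	RPC_SET_SVC_FLAG_ACK_TYPE(&msg.hdr, RPC_SVC_MSG_NEED_ACK);
	msg.hdr.func = TH1520_AON_PM_FUNC_PWR_SET;

	RPC_SET_BE16(&msg, 4, TH1520_AON_GPU_PD);	/* resource id */
	RPC_SET_U8(&msg, 6, 1);				/* power on */

	return th1520_aon_call_rpc(chan, &msg);
}

In practice, th1520_aon_power_update(chan, TH1520_AON_GPU_PD, true) wraps exactly this kind of exchange.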
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
index 82e8254b0f80..645dd34155e6 100644
--- a/include/linux/firmware/xlnx-event-manager.h
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -1,4 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
@@ -7,6 +12,11 @@
#define CB_MAX_PAYLOAD_SIZE	(4U) /* In payload, maximum 32 bytes */
+#define EVENT_SUBSYSTEM_RESTART (4U)
+
+#define PM_DEV_ACPU_0_0 (0x1810c0afU)
+#define PM_DEV_ACPU_0 (0x1810c003U)
+
/************************** Exported Function *****************************/
typedef void (*event_cb_func_t)(const u32 *payload, void *data);
diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h
new file mode 100644
index 000000000000..d3538dd5822a
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-ufs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for UFS APIs.
+ *
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready);
+int zynqmp_pm_is_sram_init_done(bool *is_done);
+int zynqmp_pm_set_sram_bypass(void);
+int zynqmp_pm_get_ufs_calibration_values(u32 *val);
+#else
+static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sram_bypass(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
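
An illustrative ordering of these calls in a UFS host driver probe path (the sequencing is an assumption, not mandated by this header):

static int example_ufs_fw_setup(void)
{
	bool ready, done;
	u32 calib;
	int ret;

	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
	if (ret)
		return ret;
	if (!ready)
		return -EBUSY;

	ret = zynqmp_pm_is_sram_init_done(&done);
	if (ret)
		return ret;
	if (!done) {
		ret = zynqmp_pm_set_sram_bypass();
		if (ret)
			return ret;
	}

	ret = zynqmp_pm_get_ufs_calibration_values(&calib);
	if (ret)
		return ret;

	pr_debug("UFS calibration word: 0x%08x\n", calib);
	return 0;
}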
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 1a069a56c961..15fdbd089bbf 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2021 Xilinx
- * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp-ufs.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -32,25 +33,36 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+/* SMC function ID to get SiP SVC version */
+#define GET_SIP_SVC_VERSION (0x8200ff03U)
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR (0U)
+#define SIP_SVC_VERSION_MINOR (2U)
+
+#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \
+ SIP_SVC_VERSION_MINOR)
+
+/* Fixed ID for FW specific APIs */
+#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0)
+
/* PM API versions */
#define PM_API_VERSION_1 1
#define PM_API_VERSION_2 2
#define PM_PINCTRL_PARAM_SET_VERSION 2
-#define ZYNQMP_FAMILY_CODE 0x23
-#define VERSAL_FAMILY_CODE 0x26
-
-/* When all subfamily of platform need to support */
-#define ALL_SUB_FAMILY_CODE 0x00
-#define VERSAL_SUB_FAMILY_CODE 0x01
-#define VERSALNET_SUB_FAMILY_CODE 0x03
-
-#define FAMILY_CODE_MASK GENMASK(27, 21)
-#define SUB_FAMILY_CODE_MASK GENMASK(20, 19)
+/* Family codes */
+#define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */
+#define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */
+#define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */
#define API_ID_MASK GENMASK(7, 0)
#define MODULE_ID_MASK GENMASK(11, 8)
+#define PLM_MODULE_ID_MASK GENMASK(15, 8)
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK 0xFFFFU
/* ATF only commands */
#define TF_A_PM_REGISTER_SGI 0xa04
@@ -59,7 +71,13 @@
#define GET_CALLBACK_DATA 0xa01
/* Number of 32bits values in payload */
-#define PAYLOAD_ARG_CNT 4U
+#define PAYLOAD_ARG_CNT 7U
+
+/* Number of 64bits arguments for SMC call */
+#define SMC_ARG_CNT_64 8U
+
+/* Number of 32bits arguments for SMC call */
+#define SMC_ARG_CNT_32 13U
/* Number of arguments for a callback */
#define CB_ARG_CNT 4
@@ -127,6 +145,7 @@
enum pm_module_id {
PM_MODULE_ID = 0x0,
+ XPM_MODULE_ID = 0x2,
XSEM_MODULE_ID = 0x3,
TF_A_MODULE_ID = 0xa,
};
@@ -140,6 +159,7 @@ enum pm_api_cb_id {
enum pm_api_id {
PM_API_FEATURES = 0,
PM_GET_API_VERSION = 1,
+ PM_GET_NODE_STATUS = 3,
PM_REGISTER_NOTIFIER = 5,
PM_FORCE_POWERDOWN = 8,
PM_REQUEST_WAKEUP = 10,
@@ -215,9 +235,14 @@ enum pm_ioctl_id {
/* Runtime feature configuration */
IOCTL_SET_FEATURE_CONFIG = 26,
IOCTL_GET_FEATURE_CONFIG = 27,
+ /* IOCTL for Secure Read/Write Interface */
+ IOCTL_READ_REG = 28,
+ IOCTL_MASK_WRITE_REG = 29,
/* Dynamic SD/GEM configuration */
IOCTL_SET_SD_CONFIG = 30,
IOCTL_SET_GEM_CONFIG = 31,
+ /* IOCTL to get default/current QoS */
+ IOCTL_GET_QOS = 34,
};
enum pm_query_id {
@@ -235,6 +260,7 @@ enum pm_query_id {
PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
+ PM_QID_PINCTRL_GET_ATTRIBUTES = 15,
};
enum rpu_oper_mode {
@@ -530,11 +556,12 @@ struct zynqmp_pm_query_data {
};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
-int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily);
+int zynqmp_pm_get_family_info(u32 *family);
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
int zynqmp_pm_clock_enable(u32 clock_id);
int zynqmp_pm_clock_disable(u32 clock_id);
@@ -550,12 +577,11 @@ int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag);
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status);
unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
int zynqmp_pm_bootmode_write(u32 ps_mode);
-int zynqmp_pm_init_finalize(void);
int zynqmp_pm_set_suspend_mode(u32 mode);
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
const u32 qos, const enum zynqmp_pm_request_ack ack);
@@ -590,6 +616,9 @@ int zynqmp_pm_feature(const u32 api_id);
int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value);
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value);
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
int zynqmp_pm_force_pwrdwn(const u32 target,
const enum zynqmp_pm_request_ack ack);
@@ -600,6 +629,8 @@ int zynqmp_pm_request_wake(const u32 node,
int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
u32 value);
@@ -614,7 +645,7 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return -ENODEV;
}
-static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+static inline int zynqmp_pm_get_family_info(u32 *family)
{
return -ENODEV;
}
@@ -695,14 +726,13 @@ static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
return -ENODEV;
}
-static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+static inline int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return -ENODEV;
}
-static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
return -ENODEV;
}
@@ -717,11 +747,6 @@ static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
return -ENODEV;
}
-static inline int zynqmp_pm_init_finalize(void)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_set_suspend_mode(u32 mode)
{
return -ENODEV;
@@ -893,6 +918,17 @@ static inline int zynqmp_pm_request_wake(const u32 node,
return -ENODEV;
}
+static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
{
return -ENODEV;
@@ -908,6 +944,13 @@ static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mo
return -ENODEV;
}
+static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements,
+ u32 *const usage)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_set_sd_config(u32 node,
enum pm_sd_config_type config,
u32 value)
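
The new secure register interface pairs naturally with a read-modify-write pattern; a hedged sketch (the node id and register offset are hypothetical):

static int example_set_ctrl_bit(u32 node_id)
{
	u32 val;
	int ret;

	ret = zynqmp_pm_sec_read_reg(node_id, 0x10, &val);
	if (ret)
		return ret;
	if (val & BIT(0))
		return 0;	/* already set */

	/* The firmware applies the mask, so no race with other masters. */
	return zynqmp_pm_sec_mask_write_reg(node_id, 0x10, BIT(0), BIT(0));
}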
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
new file mode 100644
index 000000000000..adab609c972e
--- /dev/null
+++ b/include/linux/folio_queue.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Queue of folios definitions
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
+ */
+
+#ifndef _LINUX_FOLIO_QUEUE_H
+#define _LINUX_FOLIO_QUEUE_H
+
+#include <linux/pagevec.h>
+#include <linux/mm.h>
+
+/*
+ * Segment in a queue of running buffers. Each segment can hold a number of
+ * folios and a portion of the queue can be referenced with the ITER_FOLIOQ
+ * iterator. The possibility exists of inserting non-folio elements into the
+ * queue (such as gaps).
+ *
+ * Explicit prev and next pointers are used instead of a list_head to make it
+ * easier to add segments to tail and remove them from the head without the
+ * need for a lock.
+ */
+struct folio_queue {
+ struct folio_batch vec; /* Folios in the queue segment */
+ u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ struct folio_queue *next; /* Next queue segment or NULL */
+	struct folio_queue	*prev;		/* Previous queue segment or NULL */
+ unsigned long marks; /* 1-bit mark per folio */
+ unsigned long marks2; /* Second 1-bit mark per folio */
+#if PAGEVEC_SIZE > BITS_PER_LONG
+#error marks is not big enough
+#endif
+ unsigned int rreq_id;
+ unsigned int debug_id;
+};
+
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ * @rreq_id: The request identifier to use in tracelines.
+ *
+ * Initialise a folio queue segment and set an identifier to be used in traces.
+ *
+ * Note that the folio pointers are left uninitialised.
+ */
+static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
+{
+ folio_batch_init(&folioq->vec);
+ folioq->next = NULL;
+ folioq->prev = NULL;
+ folioq->marks = 0;
+ folioq->marks2 = 0;
+ folioq->rreq_id = rreq_id;
+ folioq->debug_id = 0;
+}
+
+/**
+ * folioq_nr_slots - Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
+static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
+{
+ return PAGEVEC_SIZE;
+}
+
+/**
+ * folioq_count - Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
+static inline unsigned int folioq_count(struct folio_queue *folioq)
+{
+ return folio_batch_count(&folioq->vec);
+}
+
+/**
+ * folioq_full - Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
+static inline bool folioq_full(struct folio_queue *folioq)
+{
+	return folioq_count(folioq) >= folioq_nr_slots(folioq);
+}
+
+/**
+ * folioq_is_marked - Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_mark - Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_unmark - Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_is_marked2 - Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_mark2 - Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_unmark2 - Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_append - Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ return slot;
+}
+
+/**
+ * folioq_append_mark - Add a folio to a folio queue segment and set its mark
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second mark is left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ folioq_mark(folioq, slot);
+ return slot;
+}
+
+/**
+ * folioq_folio - Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and if the slot hasn't been added into yet, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
+static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->vec.folios[slot];
+}
+
+/**
+ * folioq_folio_order - Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the order returned will be 0.
+ */
+static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->orders[slot];
+}
+
+/**
+ * folioq_folio_size - Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the size returned will be PAGE_SIZE.
+ */
+static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
+{
+ return PAGE_SIZE << folioq_folio_order(folioq, slot);
+}
+
+/**
+ * folioq_clear - Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
+static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
+{
+ folioq->vec.folios[slot] = NULL;
+ folioq_unmark(folioq, slot);
+ folioq_unmark2(folioq, slot);
+}
+
+#endif /* _LINUX_FOLIO_QUEUE_H */
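
A minimal sketch of a single-segment queue, assuming the caller already holds folio references (illustrative, not from the patch):

static void example_folioq(struct folio **folios, unsigned int nr)
{
	struct folio_queue *fq;
	unsigned int i, slot;

	fq = kmalloc(sizeof(*fq), GFP_KERNEL);
	if (!fq)
		return;
	folioq_init(fq, 0);

	/* Fill the segment; a real user would chain a new one when full. */
	for (i = 0; i < nr && !folioq_full(fq); i++)
		folioq_append(fq, folios[i]);

	for (slot = 0; slot < folioq_count(fq); slot++)
		pr_debug("slot %u: %zu bytes, marked=%d\n", slot,
			 folioq_folio_size(fq, slot),
			 folioq_is_marked(fq, slot));

	kfree(fq);
}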
diff --git a/include/linux/font.h b/include/linux/font.h
index 81caffd51bb4..fd8625cd76b2 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -35,6 +35,7 @@ struct font_desc {
#define FONT6x10_IDX 10
#define TER16x32_IDX 11
#define FONT6x8_IDX 12
+#define TER10x18_IDX 13
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -48,7 +49,8 @@ extern const struct font_desc font_vga_8x8,
font_mini_4x6,
font_6x10,
font_ter_16x32,
- font_6x8;
+ font_6x8,
+ font_ter_10x18;
/* Find a font with a specific name */
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 7e0f340bf363..b3b53f8c1b28 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -596,16 +596,12 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
if (p_size != SIZE_MAX && p_size < size)
fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
else if (q_size != SIZE_MAX && q_size < size)
- fortify_panic(func, FORTIFY_READ, p_size, size, true);
+ fortify_panic(func, FORTIFY_READ, q_size, size, true);
/*
* Warn when writing beyond destination field size.
*
- * We must ignore p_size_field == 0 for existing 0-element
- * fake flexible arrays, until they are all converted to
- * proper flexible arrays.
- *
- * The implementation of __builtin_*object_size() behaves
+ * Note the implementation of __builtin_*object_size() behaves
* like sizeof() when not directly referencing a flexible
* array member, which means there will be many bounds checks
* that will appear at run-time, without a way for them to be
@@ -613,13 +609,19 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
* is specifically the flexible array member).
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
*/
- if (p_size_field != 0 && p_size_field != SIZE_MAX &&
+ if (p_size_field != SIZE_MAX &&
p_size != p_size_field && p_size_field < size)
return true;
return false;
}
+/*
+ * To work around what seems to be an optimizer bug, the macro arguments
+ * need to have const copies or the values end up changed by the time they
+ * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
+ * __bos() results in const temp vars") for more details.
+ */
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
const size_t __fortify_size = (size_t)(size); \
@@ -627,6 +629,8 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t __q_size = (q_size); \
const size_t __p_size_field = (p_size_field); \
const size_t __q_size_field = (q_size_field); \
+ /* Keep a mutable version of the size for the final copy. */ \
+ size_t __copy_size = __fortify_size; \
fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
__q_size, __p_size_field, \
__q_size_field, FORTIFY_FUNC_ ##op), \
@@ -634,7 +638,11 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
__fortify_size, \
"field \"" #p "\" at " FILE_LINE, \
__p_size_field); \
- __underlying_##op(p, q, __fortify_size); \
+ /* Hide only the run-time size from value range tracking to */ \
+ /* silence compile-time false positive bounds warnings. */ \
+ if (!__builtin_constant_p(__copy_size)) \
+ OPTIMIZER_HIDE_VAR(__copy_size); \
+ __underlying_##op(p, q, __copy_size); \
})
/*
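
The effect of the hidden copy can be seen in isolation with this illustrative fragment (not from the patch): a const snapshot feeds the diagnostics, while a hidden mutable copy feeds the real copy, so the optimizer cannot re-apply a bogus compile-time bound to the memcpy() itself.

static void example_hidden_copy(void *dst, const void *src, size_t size)
{
	const size_t check_size = size;	/* for warnings/fortify_panic() */
	size_t copy_size = check_size;	/* for the actual copy */

	if (!__builtin_constant_p(copy_size))
		OPTIMIZER_HIDE_VAR(copy_size);	/* erase value-range info */
	memcpy(dst, src, copy_size);
}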
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
deleted file mode 100644
index 141ac3f251e6..000000000000
--- a/include/linux/fpga/adi-axi-common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices AXI common registers & definitions
- *
- * Copyright 2019 Analog Devices Inc.
- *
- * https://wiki.analog.com/resources/fpga/docs/axi_ip
- * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
- */
-
-#ifndef ADI_AXI_COMMON_H_
-#define ADI_AXI_COMMON_H_
-
-#define ADI_AXI_REG_VERSION 0x0000
-
-#define ADI_AXI_PCORE_VER(major, minor, patch) \
- (((major) << 16) | ((minor) << 8) | (patch))
-
-#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
-#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
-#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
-
-#endif /* ADI_AXI_COMMON_H_ */
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index f39869588117..0a3bcd1718f3 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -5,47 +5,69 @@
#include <linux/compiler.h>
#include <linux/ftrace.h>
-#include <linux/rethook.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
struct fprobe;
-
typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip,
- unsigned long ret_ip, struct pt_regs *regs,
+ unsigned long ret_ip, struct ftrace_regs *regs,
void *entry_data);
typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip,
- unsigned long ret_ip, struct pt_regs *regs,
+ unsigned long ret_ip, struct ftrace_regs *regs,
void *entry_data);
/**
+ * struct fprobe_hlist_node - address based hash list node for fprobe.
+ *
+ * @hlist: The hlist node for address search hash table.
+ * @addr: One of the probing address of @fp.
+ * @fp: The fprobe which owns this.
+ */
+struct fprobe_hlist_node {
+ struct rhlist_head hlist;
+ unsigned long addr;
+ struct fprobe *fp;
+};
+
+/**
+ * struct fprobe_hlist - hash list nodes for fprobe.
+ *
+ * @hlist: The hlist node for existence checking hash table.
+ * @rcu: rcu_head for RCU deferred release.
+ * @fp: The fprobe which owns this fprobe_hlist.
+ * @size: The size of @array.
+ * @array: The fprobe_hlist_node for each address to probe.
+ */
+struct fprobe_hlist {
+ struct hlist_node hlist;
+ struct rcu_head rcu;
+ struct fprobe *fp;
+ int size;
+ struct fprobe_hlist_node array[] __counted_by(size);
+};
+
+/**
* struct fprobe - ftrace based probe.
- * @ops: The ftrace_ops.
+ *
* @nmissed: The counter for missing events.
* @flags: The status flag.
- * @rethook: The rethook data structure. (internal data)
* @entry_data_size: The private data storage size.
- * @nr_maxactive: The max number of active functions.
* @entry_handler: The callback function for function entry.
* @exit_handler: The callback function for function exit.
+ * @hlist_array: The fprobe_hlist for fprobe search from IP hash table.
*/
struct fprobe {
-#ifdef CONFIG_FUNCTION_TRACER
- /*
- * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
- * But user of fprobe may keep embedding the struct fprobe on their own
- * code. To avoid build error, this will keep the fprobe data structure
- * defined here, but remove ftrace_ops data structure.
- */
- struct ftrace_ops ops;
-#endif
unsigned long nmissed;
unsigned int flags;
- struct rethook *rethook;
size_t entry_data_size;
- int nr_maxactive;
fprobe_entry_cb entry_handler;
fprobe_exit_cb exit_handler;
+
+ struct fprobe_hlist *hlist_array;
};
/* This fprobe is soft-disabled. */
@@ -73,6 +95,7 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
int unregister_fprobe(struct fprobe *fp);
bool fprobe_is_registered(struct fprobe *fp);
+int fprobe_count_ips_from_filter(const char *filter, const char *notfilter);
#else
static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
@@ -94,6 +117,10 @@ static inline bool fprobe_is_registered(struct fprobe *fp)
{
return false;
}
+static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
#endif
/**
@@ -121,4 +148,9 @@ static inline void enable_fprobe(struct fprobe *fp)
fp->flags &= ~FPROBE_FL_DISABLED;
}
+/* The entry data size is encoded in 4 bits, i.e. at most 15 * sizeof(long) bytes */
+#define FPROBE_DATA_SIZE_BITS 4
+#define MAX_FPROBE_DATA_SIZE_WORD ((1L << FPROBE_DATA_SIZE_BITS) - 1)
+#define MAX_FPROBE_DATA_SIZE (MAX_FPROBE_DATA_SIZE_WORD * sizeof(long))
+
#endif
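
A hedged example of the updated callback signatures, probing vfs_read() (the target symbol is arbitrary):

static int example_entry(struct fprobe *fp, unsigned long entry_ip,
			 unsigned long ret_ip, struct ftrace_regs *regs,
			 void *entry_data)
{
	pr_debug("entered %pS\n", (void *)entry_ip);
	return 0;	/* non-zero cancels the exit handler for this call */
}

static void example_exit(struct fprobe *fp, unsigned long entry_ip,
			 unsigned long ret_ip, struct ftrace_regs *regs,
			 void *entry_data)
{
	pr_debug("returning to %pS\n", (void *)ret_ip);
}

static struct fprobe example_fprobe = {
	.entry_handler	= example_entry,
	.exit_handler	= example_exit,
};

static int __init example_init(void)
{
	return register_fprobe(&example_fprobe, "vfs_read", NULL);
}

Note that struct fprobe no longer embeds ftrace_ops or a rethook; the hlist_array field is managed by the fprobe core.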
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index b303472255be..0a8c6c4d1a82 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -22,14 +22,18 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
extern unsigned int freeze_timeout_msecs;
/*
- * Check if a process has been frozen
+ * Check if a process has been frozen for PM or cgroup1 freezer. Note that
+ * cgroup2 freezer uses the job control mechanism and does not interact with
+ * the PM freezer.
*/
extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
/*
- * Check if there is a request to freeze a process
+ * Check if there is a request to freeze a task from PM or cgroup1 freezer.
+ * Note that cgroup2 freezer uses the job control mechanism and does not
+ * interact with the PM freezer.
*/
static inline bool freezing(struct task_struct *p)
{
@@ -47,6 +51,7 @@ extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
+extern void thaw_process(struct task_struct *p);
static inline bool try_to_freeze(void)
{
@@ -62,9 +67,9 @@ extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern bool cgroup_freezing(struct task_struct *task);
+extern bool cgroup1_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline bool cgroup_freezing(struct task_struct *task)
+static inline bool cgroup1_freezing(struct task_struct *task)
{
return false;
}
@@ -80,6 +85,7 @@ static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
+static inline void thaw_process(struct task_struct *p) {}
static inline bool try_to_freeze(void) { return false; }
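
For context, the canonical freezable kthread loop that these interfaces serve (illustrative, unchanged by this patch):

static int example_freezable_thread(void *data)
{
	set_freezable();
	while (!kthread_should_stop()) {
		try_to_freeze();	/* parks here during suspend */
		/* ... perform one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}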
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 0283cf366c2a..04ceeca12a0d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
+#include <linux/fs/super.h>
+#include <linux/vfsdebug.h>
#include <linux/linkage.h>
#include <linux/wait_bit.h>
#include <linux/kdev_t.h>
@@ -10,7 +12,6 @@
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
-#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/xarray.h>
@@ -36,7 +37,6 @@
#include <linux/uuid.h>
#include <linux/errseq.h>
#include <linux/ioprio.h>
-#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/mount.h>
@@ -45,15 +45,15 @@
#include <linux/slab.h>
#include <linux/maple_tree.h>
#include <linux/rw_hint.h>
+#include <linux/file_ref.h>
+#include <linux/unicode.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
-struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct io_comp_batch;
-struct export_operations;
struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
@@ -67,18 +67,13 @@ struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
-struct workqueue_struct;
struct iov_iter;
-struct fscrypt_inode_info;
-struct fscrypt_operations;
-struct fsverity_info;
-struct fsverity_operations;
struct fsnotify_mark_connector;
-struct fsnotify_sb_info;
struct fs_context;
struct fs_parameter_spec;
-struct fileattr;
+struct file_kattr;
struct iomap_ops;
+struct delegated_inode;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -125,8 +120,10 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_EXEC ((__force fmode_t)(1 << 5))
/* File writes are restricted (block device specific) */
#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+/* File supports atomic writes */
+#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7))
-/* FMODE_* bits 7 to 8 */
+/* FMODE_* bit 8 */
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
@@ -144,8 +141,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13))
+/* Supports IOCB_HAS_METADATA */
+#define FMODE_HAS_METADATA ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)(1 << 14))
@@ -170,13 +167,20 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
-/* FMODE_* bit 24 */
-
/* File is embedded in backing_file object */
-#define FMODE_BACKING ((__force fmode_t)(1 << 25))
+#define FMODE_BACKING ((__force fmode_t)(1 << 24))
-/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26))
+/*
+ * Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25))
+
+/*
+ * Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
@@ -188,6 +192,31 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
/*
+ * The two FMODE_NONOTIFY* define which fsnotify events should not be generated
+ * for an open file. These are the possible values of
+ * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
+ *
+ * FMODE_NONOTIFY - suppress all (incl. non-permission) events.
+ * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
+ */
+#define FMODE_FSNOTIFY_MASK \
+ (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
+
+#define FMODE_FSNOTIFY_NONE(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+#define FMODE_FSNOTIFY_HSM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0 || \
+ (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0)
+#else
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
+#define FMODE_FSNOTIFY_HSM(mode) 0
+#endif
+
+/*
* Attribute flags. These should be or-ed together to figure out what
* has been changed!
*/
@@ -201,6 +230,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE	(1 << 9) /* Not a change, but force the change */
+#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -208,6 +238,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
#define ATTR_TOUCH (1 << 17)
+#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
/*
* Whiteout is represented by a char device. The following constants define the
@@ -262,11 +293,6 @@ struct iattr {
};
/*
- * Includes for diskquotas.
- */
-#include <linux/quota.h>
-
-/*
* Maximum number of layers of fs stack. Needs to be limited to
* prevent kernel stack overflow
*/
@@ -317,6 +343,9 @@ struct readahead_control;
#define IOCB_SYNC (__force int) RWF_SYNC
#define IOCB_NOWAIT (__force int) RWF_NOWAIT
#define IOCB_APPEND (__force int) RWF_APPEND
+#define IOCB_ATOMIC (__force int) RWF_ATOMIC
+#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE
+#define IOCB_NOSIGNAL (__force int) RWF_NOSIGNAL
/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD (1 << 16)
@@ -327,22 +356,9 @@ struct readahead_control;
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
-/*
- * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
- * iocb completion can be passed back to the owner for execution from a safe
- * context rather than needing to be punted through a workqueue. If this
- * flag is set, the bio completion handling may set iocb->dio_complete to a
- * handler function and iocb->private to context information for that handler.
- * The issuer should call the handler with that context information from task
- * context to complete the processing of the iocb. Note that while this
- * provides a task context for the dio_complete() callback, it should only be
- * used on the completion side for non-IO generating completions. It's fine to
- * call blocking functions from this callback, but they should not wait for
- * unrelated IO (like cache flushing, new IO generation, etc).
- */
-#define IOCB_DIO_CALLER_COMP (1 << 22)
/* kiocb is a read or write operation submitted by fs/aio.c. */
-#define IOCB_AIO_RW (1 << 23)
+#define IOCB_AIO_RW (1 << 22)
+#define IOCB_HAS_METADATA (1 << 23)
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
@@ -351,13 +367,16 @@ struct readahead_control;
{ IOCB_SYNC, "SYNC" }, \
{ IOCB_NOWAIT, "NOWAIT" }, \
{ IOCB_APPEND, "APPEND" }, \
+ { IOCB_ATOMIC, "ATOMIC" }, \
+ { IOCB_DONTCACHE, "DONTCACHE" }, \
{ IOCB_EVENTFD, "EVENTFD"}, \
{ IOCB_DIRECT, "DIRECT" }, \
{ IOCB_WRITE, "WRITE" }, \
{ IOCB_WAITQ, "WAITQ" }, \
{ IOCB_NOIO, "NOIO" }, \
{ IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
- { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }
+ { IOCB_AIO_RW, "AIO_RW" }, \
+ { IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
struct kiocb {
struct file *ki_filp;
@@ -366,23 +385,14 @@ struct kiocb {
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
- union {
- /*
- * Only used for async buffered reads, where it denotes the
- * page waitqueue associated with completing the read. Valid
- * IFF IOCB_WAITQ is set.
- */
- struct wait_page_queue *ki_waitq;
- /*
- * Can be used for O_DIRECT IO, where the completion handling
- * is punted back to the issuer of the IO. May only be set
- * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
- * must then check for presence of this handler when ki_complete
- * is invoked. The data passed in to this handler must be
- * assigned to ->private when dio_complete is assigned.
- */
- ssize_t (*dio_complete)(void *data);
- };
+ u8 ki_write_stream;
+
+ /*
+ * Only used for async buffered reads, where it denotes the page
+ * waitqueue associated with completing the read.
+ * Valid IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -391,7 +401,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
}
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
@@ -402,12 +411,12 @@ struct address_space_operations {
void (*readahead)(struct readahead_control *);
- int (*write_begin)(struct file *, struct address_space *mapping,
+ int (*write_begin)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
- int (*write_end)(struct file *, struct address_space *mapping,
+ struct folio **foliop, void **fsdata);
+ int (*write_end)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
@@ -482,7 +491,7 @@ struct address_space {
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+ * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
@@ -493,7 +502,7 @@ struct address_space {
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
-static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
@@ -541,7 +550,7 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
@@ -555,7 +564,7 @@ static inline int mapping_mapped(struct address_space *mapping)
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
@@ -614,11 +623,139 @@ is_uncached_acl(struct posix_acl *acl)
return (long)acl & 1;
}
-#define IOP_FASTPERM 0x0001
-#define IOP_LOOKUP 0x0002
-#define IOP_NOFOLLOW 0x0004
-#define IOP_XATTR 0x0008
+#define IOP_FASTPERM 0x0001
+#define IOP_LOOKUP 0x0002
+#define IOP_NOFOLLOW 0x0004
+#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+#define IOP_MGTIME 0x0020
+#define IOP_CACHED_LINK 0x0040
+#define IOP_FASTPERM_MAY_EXEC 0x0080
+
+/*
+ * Inode state bits. Protected by inode->i_lock
+ *
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
+ *
+ * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
+ * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
+ * various stages of removing an inode.
+ *
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
+ *
+ * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * these changes separately from I_DIRTY_SYNC so that we
+ * don't have to write inode on fdatasync() when only
+ * e.g. the timestamps have changed.
+ * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
+ * I_WILL_FREE Must be set when calling write_inode_now() if i_count
+ * is zero. I_FREEING must be set when I_WILL_FREE is
+ * cleared.
+ * I_FREEING Set when inode is about to be freed but still has dirty
+ * pages or buffers attached or the inode itself is still
+ * dirty.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
+ *
+ * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+ * prohibited for many purposes. iget() must wait for
+ * the inode to be completely released, then create it
+ * anew. Other functions will just ignore such inodes,
+ * if appropriate. I_NEW is used for waiting.
+ *
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for flusher thread.
+ *
+ * I_REFERENCED		Marks the inode as recently referenced on the LRU list.
+ *
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab the i_pages lock. See
+ * inode_switch_wbs_work_fn() for details.
+ *
+ * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
+ * and work dirs among overlayfs mounts.
+ *
+ * I_CREATING New object's inode in the middle of setting up.
+ *
+ * I_DONTCACHE Evict inode as soon as it is not used anymore.
+ *
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
+ * I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding
+ * i_count.
+ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ *
+ * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
+ * upon. There's one free address left.
+ */
+
+enum inode_state_bits {
+ __I_NEW = 0U,
+ __I_SYNC = 1U,
+ __I_LRU_ISOLATING = 2U
+ /* reserved wait address bit 3 */
+};
+
+enum inode_state_flags_enum {
+ I_NEW = (1U << __I_NEW),
+ I_SYNC = (1U << __I_SYNC),
+ I_LRU_ISOLATING = (1U << __I_LRU_ISOLATING),
+ /* reserved flag bit 3 */
+ I_DIRTY_SYNC = (1U << 4),
+ I_DIRTY_DATASYNC = (1U << 5),
+ I_DIRTY_PAGES = (1U << 6),
+ I_WILL_FREE = (1U << 7),
+ I_FREEING = (1U << 8),
+ I_CLEAR = (1U << 9),
+ I_REFERENCED = (1U << 10),
+ I_LINKABLE = (1U << 11),
+ I_DIRTY_TIME = (1U << 12),
+ I_WB_SWITCH = (1U << 13),
+ I_OVL_INUSE = (1U << 14),
+ I_CREATING = (1U << 15),
+ I_DONTCACHE = (1U << 16),
+ I_SYNC_QUEUED = (1U << 17),
+ I_PINNING_NETFS_WB = (1U << 18)
+};
+
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
+
+/*
+ * Use inode_state_read() & friends to access.
+ */
+struct inode_state_flags {
+ enum inode_state_flags_enum __state;
+};
/*
* Keep mostly read-only and often accessed (especially for
@@ -628,14 +765,13 @@ is_uncached_acl(struct posix_acl *acl)
struct inode {
umode_t i_mode;
unsigned short i_opflags;
- kuid_t i_uid;
- kgid_t i_gid;
unsigned int i_flags;
-
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
#endif
+ kuid_t i_uid;
+ kgid_t i_gid;
const struct inode_operations *i_op;
struct super_block *i_sb;
@@ -660,9 +796,13 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 __i_atime;
- struct timespec64 __i_mtime;
- struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
+ time64_t i_atime_sec;
+ time64_t i_mtime_sec;
+ time64_t i_ctime_sec;
+ u32 i_atime_nsec;
+ u32 i_mtime_nsec;
+ u32 i_ctime_nsec;
+ u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
@@ -674,7 +814,8 @@ struct inode {
#endif
/* Misc */
- unsigned long i_state;
+ struct inode_state_flags i_state;
+ /* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -711,7 +852,10 @@ struct inode {
};
struct file_lock_context *i_flctx;
struct address_space i_data;
- struct list_head i_devices;
+ union {
+ struct list_head i_devices;
+ int i_linklen;
+ };
union {
struct pipe_inode_info *i_pipe;
struct cdev *i_cdev;
@@ -719,24 +863,114 @@ struct inode {
unsigned i_dir_seq;
};
- __u32 i_generation;
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
+ /* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
-#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_inode_info *i_crypt_info;
-#endif
-
-#ifdef CONFIG_FS_VERITY
- struct fsverity_info *i_verity_info;
-#endif
-
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+/*
+ * i_state handling
+ *
+ * We hide all of it behind helpers so that we can validate consumers.
+ */
+static inline enum inode_state_flags_enum inode_state_read_once(struct inode *inode)
+{
+ return READ_ONCE(inode->i_state.__state);
+}
+
+static inline enum inode_state_flags_enum inode_state_read(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ return inode->i_state.__state;
+}
+
+static inline void inode_state_set_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state | flags);
+}
+
+static inline void inode_state_set(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_set_raw(inode, flags);
+}
+
+static inline void inode_state_clear_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state & ~flags);
+}
+
+static inline void inode_state_clear(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_clear_raw(inode, flags);
+}
+
+static inline void inode_state_assign_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, flags);
+}
+
+static inline void inode_state_assign(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace_raw(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ enum inode_state_flags_enum flags;
+ flags = inode->i_state.__state;
+ flags &= ~clearflags;
+ flags |= setflags;
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_replace_raw(inode, clearflags, setflags);
+}
+
+static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
+{
+ VFS_WARN_ON_INODE(strlen(link) != linklen, inode);
+ VFS_WARN_ON_INODE(inode->i_opflags & IOP_CACHED_LINK, inode);
+ inode->i_link = link;
+ inode->i_linklen = linklen;
+ inode->i_opflags |= IOP_CACHED_LINK;
+}
+
+/*
+ * Get bit address from inode->i_state to use with wait_var_event()
+ * infrastructure.
+ */
+#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
+
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit);
+
+static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
+{
+ /* Caller is responsible for correct memory barriers. */
+ wake_up_var(inode_state_wait_address(inode, bit));
+}
+
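Reviewer note: a sketch of the waiter/waker pairing this infrastructure enables, modeled on unlock_new_inode(); it assumes the new inode_state_flags_enum keeps the __I_NEW bit number from the old flag definitions (defined elsewhere in this patch).

static void example_finish_new_inode(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode_state_clear(inode, I_NEW);
	/*
	 * Per the comment above, barriers are on us: order the flag
	 * clear against the waiter's re-check of i_state.
	 */
	smp_mb();
	inode_wake_up_bit(inode, __I_NEW);
	spin_unlock(&inode->i_lock);
}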
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
@@ -760,8 +994,10 @@ static inline void inode_fake_hash(struct inode *inode)
hlist_add_fake(&inode->i_hash);
}
+void wait_on_new_inode(struct inode *inode);
+
/*
- * inode->i_mutex nesting subclasses for the lock validator:
+ * inode->i_rwsem nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
* 1: parent
@@ -791,6 +1027,11 @@ static inline void inode_lock(struct inode *inode)
down_write(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_killable(struct inode *inode)
+{
+ return down_write_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
@@ -801,6 +1042,11 @@ static inline void inode_lock_shared(struct inode *inode)
down_read(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_shared_killable(struct inode *inode)
+{
+ return down_read_killable(&inode->i_rwsem);
+}
+
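Reviewer note: both killable variants return 0 or -EINTR (from down_{read,write}_killable()), and __must_check forces callers to handle the failure; a sketch:

static int example_with_inode_locked(struct inode *inode)
{
	int err;

	err = inode_lock_killable(inode);
	if (err)
		return err;	/* -EINTR: task received a fatal signal */
	/* ... modify the inode ... */
	inode_unlock(inode);
	return 0;
}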
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
@@ -903,7 +1149,7 @@ static inline loff_t i_size_read(const struct inode *inode)
/*
* NOTE: unlike i_size_read(), i_size_write() does need locking around it
- * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+ * (normally i_rwsem), otherwise on 32bit/SMP an update of i_size_seqcount
* can be lost, resulting in subsequent i_size_read() calls spinning forever.
*/
static inline void i_size_write(struct inode *inode, loff_t i_size)
@@ -939,6 +1185,7 @@ static inline unsigned imajor(const struct inode *inode)
}
struct fown_struct {
+ struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
@@ -954,6 +1201,7 @@ struct fown_struct {
* and so were/are genuinely "ahead". Start next readahead when
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
*
@@ -965,7 +1213,8 @@ struct file_ra_state {
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
- unsigned int mmap_miss;
+ unsigned short order;
+ unsigned short mmap_miss;
loff_t prev_pos;
};
@@ -978,52 +1227,74 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
-/*
- * f_{lock,count,pos_lock} members can be highly contended and share
- * the same cacheline. f_{lock,mode} are very frequently used together
- * and so share the same cacheline as well. The read-mostly
- * f_{path,inode,op} are kept on a separate cacheline.
+/**
+ * struct file - Represents a file
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_owner: file owner
+ * @f_path: path of the file
+ * @__f_path: writable alias for @f_path; *ONLY* for core VFS and only before
+ * the file gets opened
+ * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
+ * @f_pos: file position
+ * @f_security: LSM security context of this file
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
+ * @f_ref: reference count
*/
struct file {
+ spinlock_t f_lock;
+ fmode_t f_mode;
+ const struct file_operations *f_op;
+ struct address_space *f_mapping;
+ void *private_data;
+ struct inode *f_inode;
+ unsigned int f_flags;
+ unsigned int f_iocb_flags;
+ const struct cred *f_cred;
+ struct fown_struct *f_owner;
+ /* --- cacheline 1 boundary (64 bytes) --- */
union {
- /* fput() uses task work when closing and freeing file (default). */
- struct callback_head f_task_work;
- /* fput() must use workqueue (most kernel threads). */
- struct llist_node f_llist;
- unsigned int f_iocb_flags;
+ const struct path f_path;
+ struct path __f_path;
};
-
- /*
- * Protects f_ep, f_flags.
- * Must not be taken from IRQ context.
- */
- spinlock_t f_lock;
- fmode_t f_mode;
- atomic_long_t f_count;
- struct mutex f_pos_lock;
- loff_t f_pos;
- unsigned int f_flags;
- struct fown_struct f_owner;
- const struct cred *f_cred;
- struct file_ra_state f_ra;
- struct path f_path;
- struct inode *f_inode; /* cached value */
- const struct file_operations *f_op;
-
- u64 f_version;
+ union {
+ /* regular files (with FMODE_ATOMIC_POS) and directories */
+ struct mutex f_pos_lock;
+ /* pipes */
+ u64 f_pipe;
+ };
+ loff_t f_pos;
#ifdef CONFIG_SECURITY
- void *f_security;
+ void *f_security;
#endif
- /* needed for tty driver, and maybe others */
- void *private_data;
-
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
- /* Used by fs/eventpoll.c to link all the hooks to this file */
- struct hlist_head *f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
- struct address_space *f_mapping;
- errseq_t f_wb_err;
- errseq_t f_sb_err; /* for syncfs */
+ struct hlist_head *f_ep;
+#endif
+ union {
+ struct callback_head f_task_work;
+ struct llist_node f_llist;
+ struct file_ra_state f_ra;
+ freeptr_t f_freeptr;
+ };
+ file_ref_t f_ref;
+ /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -1036,15 +1307,14 @@ struct file_handle {
static inline struct file *get_file(struct file *f)
{
- long prior = atomic_long_fetch_inc_relaxed(&f->f_count);
- WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n");
+ file_ref_inc(&f->f_ref);
return f;
}
struct file *get_file_rcu(struct file __rcu **f);
struct file *get_file_active(struct file **f);
-#define file_count(x) atomic_long_read(&(x)->f_count)
+#define file_count(f) file_ref_read(&(f)->f_ref)
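Reviewer note: f_count as a raw atomic_long_t is gone; file_ref_t (from the file_ref infrastructure this patch builds on) absorbs the zero-count/use-after-free check that get_file() used to open-code. From a caller's point of view usage is unchanged; an illustrative sketch:

static void example_borrow(struct file *f)
{
	get_file(f);			/* file_ref_inc() underneath */
	pr_info("refs: %lu\n", file_count(f));
	fput(f);			/* release our reference */
}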
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1068,6 +1338,12 @@ struct file_lease;
#define OFFT_OFFSET_MAX type_max(off_t)
#endif
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
+{
+ return READ_ONCE(file->f_owner);
+}
+
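Reviewer note: f_owner is now a lazily allocated pointer, so file_f_owner() can legitimately return NULL; a sketch of the expected pattern for paths that need it to exist (the early-return check is illustrative):

static int example_prepare_sigio(struct file *filp)
{
	int err;

	/* allocate on first use */
	err = file_f_owner_allocate(filp);
	if (err)
		return err;
	return file_f_owner(filp) ? 0 : -EINVAL;
}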
extern void send_sigio(struct fown_struct *fown, int fd, int band);
static inline struct inode *file_inode(const struct file *f)
@@ -1116,43 +1392,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force
extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
-
-/*
- * sb->s_flags. Note that these mirror the equivalent MS_* flags where
- * represented in both.
- */
-#define SB_RDONLY BIT(0) /* Mount read-only */
-#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
-#define SB_NODEV BIT(2) /* Disallow access to device special files */
-#define SB_NOEXEC BIT(3) /* Disallow program execution */
-#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
-#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
-#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
-#define SB_NOATIME BIT(10) /* Do not update access times. */
-#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
-#define SB_SILENT BIT(15)
-#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
-#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
-#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
-#define SB_I_VERSION BIT(23) /* Update inode I_version field */
-#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
-
-/* These sb flags are internal to the kernel */
-#define SB_DEAD BIT(21)
-#define SB_DYING BIT(24)
-#define SB_SUBMOUNT BIT(26)
-#define SB_FORCE BIT(27)
-#define SB_NOSEC BIT(28)
-#define SB_BORN BIT(29)
-#define SB_ACTIVE BIT(30)
-#define SB_NOUSER BIT(31)
-
-/* These flags relate to encoding and casefolding */
-#define SB_ENC_STRICT_MODE_FL (1 << 0)
-
-#define sb_has_strict_encoding(sb) \
- (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+extern int send_sigurg(struct file *file);
/*
* Umount options
@@ -1164,185 +1404,6 @@ extern int send_sigurg(struct fown_struct *fown);
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-/* sb->s_iflags */
-#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
-#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
-#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
-
-/* sb->s_iflags to limit user namespace mounts */
-#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
-#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
-#define SB_I_UNTRUSTED_MOUNTER 0x00000040
-#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
-
-#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
-#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
-#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
-#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
-#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
-
-/* Possible states of 'frozen' field */
-enum {
- SB_UNFROZEN = 0, /* FS is unfrozen */
- SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
- SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
- SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
- * internal threads if needed) */
- SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
-};
-
-#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
-
-struct sb_writers {
- unsigned short frozen; /* Is sb frozen? */
- int freeze_kcount; /* How many kernel freeze requests? */
- int freeze_ucount; /* How many userspace freeze requests? */
- struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
-};
-
-struct super_block {
- struct list_head s_list; /* Keep this first */
- dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned char s_blocksize_bits;
- unsigned long s_blocksize;
- loff_t s_maxbytes; /* Max file size */
- struct file_system_type *s_type;
- const struct super_operations *s_op;
- const struct dquot_operations *dq_op;
- const struct quotactl_ops *s_qcop;
- const struct export_operations *s_export_op;
- unsigned long s_flags;
- unsigned long s_iflags; /* internal SB_I_* flags */
- unsigned long s_magic;
- struct dentry *s_root;
- struct rw_semaphore s_umount;
- int s_count;
- atomic_t s_active;
-#ifdef CONFIG_SECURITY
- void *s_security;
-#endif
- const struct xattr_handler * const *s_xattr;
-#ifdef CONFIG_FS_ENCRYPTION
- const struct fscrypt_operations *s_cop;
- struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
-#endif
-#ifdef CONFIG_FS_VERITY
- const struct fsverity_operations *s_vop;
-#endif
-#if IS_ENABLED(CONFIG_UNICODE)
- struct unicode_map *s_encoding;
- __u16 s_encoding_flags;
-#endif
- struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
- struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
- struct file *s_bdev_file;
- struct backing_dev_info *s_bdi;
- struct mtd_info *s_mtd;
- struct hlist_node s_instances;
- unsigned int s_quota_types; /* Bitmask of supported quota types */
- struct quota_info s_dquot; /* Diskquota specific options */
-
- struct sb_writers s_writers;
-
- /*
- * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
- * s_fsnotify_info together for cache efficiency. They are frequently
- * accessed and rarely modified.
- */
- void *s_fs_info; /* Filesystem private info */
-
- /* Granularity of c/m/atime in ns (cannot be worse than a second) */
- u32 s_time_gran;
- /* Time limits for c/m/atime in seconds */
- time64_t s_time_min;
- time64_t s_time_max;
-#ifdef CONFIG_FSNOTIFY
- __u32 s_fsnotify_mask;
- struct fsnotify_sb_info *s_fsnotify_info;
-#endif
-
- /*
- * q: why are s_id and s_sysfs_name not the same? both are human
- * readable strings that identify the filesystem
- * a: s_id is allowed to change at runtime; it's used in log messages,
- * and we want to when a device starts out as single device (s_id is dev
- * name) but then a device is hot added and we have to switch to
- * identifying it by UUID
- * but s_sysfs_name is a handle for programmatic access, and can't
- * change at runtime
- */
- char s_id[32]; /* Informational name */
- uuid_t s_uuid; /* UUID */
- u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
-
- /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
- char s_sysfs_name[UUID_STRING_LEN + 1];
-
- unsigned int s_max_links;
-
- /*
- * The next field is for VFS *only*. No filesystems have any business
- * even looking at it. You had been warned.
- */
- struct mutex s_vfs_rename_mutex; /* Kludge */
-
- /*
- * Filesystem subtype. If non-empty the filesystem type field
- * in /proc/mounts will be "type.subtype"
- */
- const char *s_subtype;
-
- const struct dentry_operations *s_d_op; /* default d_op for dentries */
-
- struct shrinker *s_shrink; /* per-sb shrinker handle */
-
- /* Number of inodes with nlink == 0 but still referenced */
- atomic_long_t s_remove_count;
-
- /* Read-only state of the superblock is being changed */
- int s_readonly_remount;
-
- /* per-sb errseq_t for reporting writeback errors via syncfs */
- errseq_t s_wb_err;
-
- /* AIO completions deferred from interrupt context */
- struct workqueue_struct *s_dio_done_wq;
- struct hlist_head s_pins;
-
- /*
- * Owning user namespace and default context in which to
- * interpret filesystem uids, gids, quotas, device nodes,
- * xattrs and security labels.
- */
- struct user_namespace *s_user_ns;
-
- /*
- * The list_lru structure is essentially just a pointer to a table
- * of per-node lru lists, each of which has its own spinlock.
- * There is no need to put them into separate cachelines.
- */
- struct list_lru s_dentry_lru;
- struct list_lru s_inode_lru;
- struct rcu_head rcu;
- struct work_struct destroy_work;
-
- struct mutex s_sync_lock; /* sync serialisation lock */
-
- /*
- * Indicates how deep in a filesystem stack this SB is
- */
- int s_stack_depth;
-
- /* s_inode_list_lock protects s_inodes */
- spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
- struct list_head s_inodes; /* all inodes */
-
- spinlock_t s_inode_wblist_lock;
- struct list_head s_inodes_wb; /* writeback inodes */
-} __randomize_layout;
-
static inline struct user_namespace *i_user_ns(const struct inode *inode)
{
return inode->i_sb->s_user_ns;
@@ -1535,26 +1596,32 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
+struct timespec64 inode_set_ctime_deleg(struct inode *inode,
+ struct timespec64 update);
static inline time64_t inode_get_atime_sec(const struct inode *inode)
{
- return inode->__i_atime.tv_sec;
+ return inode->i_atime_sec;
}
static inline long inode_get_atime_nsec(const struct inode *inode)
{
- return inode->__i_atime.tv_nsec;
+ return inode->i_atime_nsec;
}
static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
- return inode->__i_atime;
+ struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
+ .tv_nsec = inode_get_atime_nsec(inode) };
+
+ return ts;
}
static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
struct timespec64 ts)
{
- inode->__i_atime = ts;
+ inode->i_atime_sec = ts.tv_sec;
+ inode->i_atime_nsec = ts.tv_nsec;
return ts;
}
@@ -1563,28 +1630,32 @@ static inline struct timespec64 inode_set_atime(struct inode *inode,
{
struct timespec64 ts = { .tv_sec = sec,
.tv_nsec = nsec };
+
return inode_set_atime_to_ts(inode, ts);
}
static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
- return inode->__i_mtime.tv_sec;
+ return inode->i_mtime_sec;
}
static inline long inode_get_mtime_nsec(const struct inode *inode)
{
- return inode->__i_mtime.tv_nsec;
+ return inode->i_mtime_nsec;
}
static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
- return inode->__i_mtime;
+ struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
+ .tv_nsec = inode_get_mtime_nsec(inode) };
+ return ts;
}
static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
struct timespec64 ts)
{
- inode->__i_mtime = ts;
+ inode->i_mtime_sec = ts.tv_sec;
+ inode->i_mtime_nsec = ts.tv_nsec;
return ts;
}
@@ -1596,28 +1667,37 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode,
return inode_set_mtime_to_ts(inode, ts);
}
+/*
+ * Multigrain timestamps
+ *
+ * Conditionally use fine-grained ctime and mtime timestamps when there
+ * are users actively observing them via getattr. The primary use-case
+ * for this is NFS clients that use the ctime to distinguish between
+ * different states of the file, and that are often fooled by multiple
+ * operations that occur in the same coarse-grained timer tick.
+ */
+#define I_CTIME_QUERIED ((u32)BIT(31))
+
static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
- return inode->__i_ctime.tv_sec;
+ return inode->i_ctime_sec;
}
static inline long inode_get_ctime_nsec(const struct inode *inode)
{
- return inode->__i_ctime.tv_nsec;
+ return inode->i_ctime_nsec & ~I_CTIME_QUERIED;
}
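Reviewer note: the masking works because valid tv_nsec values are below NSEC_PER_SEC (10^9 < 2^30), leaving bit 31 free for the QUERIED flag; a worked illustration:

static long example_ctime_nsec(struct inode *inode)
{
	u32 raw = inode->i_ctime_nsec;

	if (raw & I_CTIME_QUERIED) {
		/* getattr observed this ctime; the next ctime update
		 * should take a fine-grained timestamp */
	}
	return raw & ~I_CTIME_QUERIED;	/* real nanoseconds, < 10^9 */
}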
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- return inode->__i_ctime;
-}
+ struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
+ .tv_nsec = inode_get_ctime_nsec(inode) };
-static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
- struct timespec64 ts)
-{
- inode->__i_ctime = ts;
return ts;
}
+struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts);
+
/**
* inode_set_ctime - set the ctime in the inode
* @inode: inode in which to set the ctime
@@ -1641,66 +1721,6 @@ struct timespec64 simple_inode_init_ts(struct inode *inode);
* Snapshotting support.
*/
-/*
- * These are internal functions, please use sb_start_{write,pagefault,intwrite}
- * instead.
- */
-static inline void __sb_end_write(struct super_block *sb, int level)
-{
- percpu_up_read(sb->s_writers.rw_sem + level-1);
-}
-
-static inline void __sb_start_write(struct super_block *sb, int level)
-{
- percpu_down_read(sb->s_writers.rw_sem + level - 1);
-}
-
-static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
-{
- return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
-}
-
-#define __sb_writers_acquired(sb, lev) \
- percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
-#define __sb_writers_release(sb, lev) \
- percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
-
-/**
- * __sb_write_started - check if sb freeze level is held
- * @sb: the super we write to
- * @level: the freeze level
- *
- * * > 0 - sb freeze level is held
- * * 0 - sb freeze level is not held
- * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
- */
-static inline int __sb_write_started(const struct super_block *sb, int level)
-{
- return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
-}
-
-/**
- * sb_write_started - check if SB_FREEZE_WRITE is held
- * @sb: the super we write to
- *
- * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
- */
-static inline bool sb_write_started(const struct super_block *sb)
-{
- return __sb_write_started(sb, SB_FREEZE_WRITE);
-}
-
-/**
- * sb_write_not_started - check if SB_FREEZE_WRITE is not held
- * @sb: the super we write to
- *
- * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
- */
-static inline bool sb_write_not_started(const struct super_block *sb)
-{
- return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
-}
-
/**
* file_write_started - check if SB_FREEZE_WRITE is held
* @file: the file we write to
@@ -1731,157 +1751,44 @@ static inline bool file_write_not_started(const struct file *file)
return sb_write_not_started(file_inode(file)->i_sb);
}
-/**
- * sb_end_write - drop write access to a superblock
- * @sb: the super we wrote to
- *
- * Decrement number of writers to the filesystem. Wake up possible waiters
- * wanting to freeze the filesystem.
- */
-static inline void sb_end_write(struct super_block *sb)
-{
- __sb_end_write(sb, SB_FREEZE_WRITE);
-}
-
-/**
- * sb_end_pagefault - drop write access to a superblock from a page fault
- * @sb: the super we wrote to
- *
- * Decrement number of processes handling write page fault to the filesystem.
- * Wake up possible waiters wanting to freeze the filesystem.
- */
-static inline void sb_end_pagefault(struct super_block *sb)
-{
- __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
-}
-
-/**
- * sb_end_intwrite - drop write access to a superblock for internal fs purposes
- * @sb: the super we wrote to
- *
- * Decrement fs-internal number of writers to the filesystem. Wake up possible
- * waiters wanting to freeze the filesystem.
- */
-static inline void sb_end_intwrite(struct super_block *sb)
-{
- __sb_end_write(sb, SB_FREEZE_FS);
-}
-
-/**
- * sb_start_write - get write access to a superblock
- * @sb: the super we write to
- *
- * When a process wants to write data or metadata to a file system (i.e. dirty
- * a page or an inode), it should embed the operation in a sb_start_write() -
- * sb_end_write() pair to get exclusion against file system freezing. This
- * function increments number of writers preventing freezing. If the file
- * system is already frozen, the function waits until the file system is
- * thawed.
- *
- * Since freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. Generally,
- * freeze protection should be the outermost lock. In particular, we have:
- *
- * sb_start_write
- * -> i_mutex (write path, truncate, directory ops, ...)
- * -> s_umount (freeze_super, thaw_super)
- */
-static inline void sb_start_write(struct super_block *sb)
-{
- __sb_start_write(sb, SB_FREEZE_WRITE);
-}
-
-static inline bool sb_start_write_trylock(struct super_block *sb)
-{
- return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
-}
-
-/**
- * sb_start_pagefault - get write access to a superblock from a page fault
- * @sb: the super we write to
- *
- * When a process starts handling write page fault, it should embed the
- * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
- * exclusion against file system freezing. This is needed since the page fault
- * is going to dirty a page. This function increments number of running page
- * faults preventing freezing. If the file system is already frozen, the
- * function waits until the file system is thawed.
- *
- * Since page fault freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
- * handling code implies lock dependency:
- *
- * mmap_lock
- * -> sb_start_pagefault
- */
-static inline void sb_start_pagefault(struct super_block *sb)
-{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
-}
-
-/**
- * sb_start_intwrite - get write access to a superblock for internal fs purposes
- * @sb: the super we write to
- *
- * This is the third level of protection against filesystem freezing. It is
- * free for use by a filesystem. The only requirement is that it must rank
- * below sb_start_pagefault.
- *
- * For example filesystem can call sb_start_intwrite() when starting a
- * transaction which somewhat eases handling of freezing for internal sources
- * of filesystem changes (internal fs threads, discarding preallocation on file
- * close, etc.).
- */
-static inline void sb_start_intwrite(struct super_block *sb)
-{
- __sb_start_write(sb, SB_FREEZE_FS);
-}
-
-static inline bool sb_start_intwrite_trylock(struct super_block *sb)
-{
- return __sb_start_write_trylock(sb, SB_FREEZE_FS);
-}
-
bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode);
/*
* VFS helper functions..
*/
-int vfs_create(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t, bool);
-int vfs_mkdir(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t);
+int vfs_create(struct mnt_idmap *, struct dentry *, umode_t,
+ struct delegated_inode *);
+struct dentry *vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t, struct delegated_inode *);
int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
- umode_t, dev_t);
+ umode_t, dev_t, struct delegated_inode *);
int vfs_symlink(struct mnt_idmap *, struct inode *,
- struct dentry *, const char *);
+ struct dentry *, const char *, struct delegated_inode *);
int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
- struct dentry *, struct inode **);
-int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *);
+ struct dentry *, struct delegated_inode *);
+int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct delegated_inode *);
int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
- struct inode **);
+ struct delegated_inode *);
/**
* struct renamedata - contains all information required for renaming
- * @old_mnt_idmap: idmap of the old mount the inode was found from
- * @old_dir: parent of source
+ * @mnt_idmap: idmap of the mount in which the rename is happening.
+ * @old_parent: parent of source
* @old_dentry: source
- * @new_mnt_idmap: idmap of the new mount the inode was found from
- * @new_dir: parent of destination
+ * @new_parent: parent of destination
* @new_dentry: destination
* @delegated_inode: returns an inode needing a delegation break
* @flags: rename flags
*/
struct renamedata {
- struct mnt_idmap *old_mnt_idmap;
- struct inode *old_dir;
+ struct mnt_idmap *mnt_idmap;
+ struct dentry *old_parent;
struct dentry *old_dentry;
- struct mnt_idmap *new_mnt_idmap;
- struct inode *new_dir;
+ struct dentry *new_parent;
struct dentry *new_dentry;
- struct inode **delegated_inode;
+ struct delegated_inode *delegated_inode;
unsigned int flags;
} __randomize_layout;
@@ -1891,7 +1798,7 @@ static inline int vfs_whiteout(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry)
{
return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
- WHITEOUT_DEV);
+ WHITEOUT_DEV, NULL);
}
struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
@@ -1909,8 +1816,6 @@ int vfs_fchown(struct file *file, uid_t user, gid_t group);
int vfs_fchmod(struct file *file, umode_t mode);
int vfs_utimes(const struct path *path, struct timespec64 *times);
-extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-
#ifdef CONFIG_COMPAT
extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -1926,6 +1831,8 @@ void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
extern bool may_open_dev(const struct path *path);
umode_t mode_strip_sgid(struct mnt_idmap *idmap,
const struct inode *dir, umode_t mode);
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
@@ -1941,8 +1848,18 @@ typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
struct dir_context {
filldir_t actor;
loff_t pos;
+ /*
+ * Filesystems MUST NOT MODIFY count, but may use as a hint:
+ * 0 unknown
+ * > 0 space in buffer (assume at least one entry)
+ * INT_MAX unlimited
+ */
+ int count;
};
+/* If OR-ed with d_type, pending signals are not checked */
+#define FILLDIR_FLAG_NOINTR 0x1000
+
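Reviewer note: a sketch of a filesystem honoring the new read-only count hint when batching directory entries; the numbers are illustrative.

static int example_iterate_shared(struct file *file, struct dir_context *ctx)
{
	unsigned int batch = 64;	/* default when the hint is 0 */

	/* count is a hint only and must not be written by us */
	if (ctx->count > 0 && ctx->count < INT_MAX)
		batch = ctx->count / 32;	/* rough bytes-per-entry guess */

	/* ... call dir_emit(ctx, ...) up to 'batch' times ... */
	return 0;
}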
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -2039,6 +1956,7 @@ struct file_operations {
int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
unsigned int poll_flags);
+ int (*mmap_prepare)(struct vm_area_desc *);
} __randomize_layout;
/* Supports async buffered reads */
@@ -2051,6 +1969,12 @@ struct file_operations {
#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
/* Contains huge pages */
#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+/* Treat loff_t as unsigned (e.g., /dev/mem) */
+#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
+/* Supports asynchronous lock callbacks */
+#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
+/* File system supports uncached read/write buffered IO */
+#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -2073,8 +1997,8 @@ struct inode_operations {
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,
- umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
@@ -2097,16 +2021,43 @@ struct inode_operations {
int (*set_acl)(struct mnt_idmap *, struct dentry *,
struct posix_acl *, int);
int (*fileattr_set)(struct mnt_idmap *idmap,
- struct dentry *dentry, struct fileattr *fa);
- int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ struct dentry *dentry, struct file_kattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct file_kattr *fa);
struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
-static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
+/* Did the driver provide valid mmap hook configuration? */
+static inline bool can_mmap_file(struct file *file)
+{
+ bool has_mmap = file->f_op->mmap;
+ bool has_mmap_prepare = file->f_op->mmap_prepare;
+
+ /* Hooks are mutually exclusive. */
+ if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
+ return false;
+ if (!has_mmap && !has_mmap_prepare)
+ return false;
+
+ return true;
+}
+
+int __compat_vma_mmap(const struct file_operations *f_op,
+ struct file *file, struct vm_area_struct *vma);
+int compat_vma_mmap(struct file *file, struct vm_area_struct *vma);
+
+static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
+ if (file->f_op->mmap_prepare)
+ return compat_vma_mmap(file, vma);
+
return file->f_op->mmap(file, vma);
}
+static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
+{
+ return file->f_op->mmap_prepare(desc);
+}
+
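Reviewer note: drivers implement exactly one of ->mmap or ->mmap_prepare (can_mmap_file() WARNs if both are set), and core code dispatches via vfs_mmap(); a minimal sketch — struct vm_area_desc is defined outside this hunk, so nothing here touches its fields.

static int example_mmap_prepare(struct vm_area_desc *desc)
{
	/* runs before the VMA is finalized; accept the mapping as-is */
	return 0;
}

static const struct file_operations example_fops = {
	.mmap_prepare	= example_mmap_prepare,
	/* .mmap left NULL: the two hooks are mutually exclusive */
};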
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
@@ -2128,61 +2079,6 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
-/**
- * enum freeze_holder - holder of the freeze
- * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
- * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
- * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
- *
- * Indicate who the owner of the freeze or thaw request is and whether
- * the freeze needs to be exclusive or can nest.
- * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
- * same holder aren't allowed. It is however allowed to hold a single
- * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
- * the same time. This is relied upon by some filesystems during online
- * repair or similar.
- */
-enum freeze_holder {
- FREEZE_HOLDER_KERNEL = (1U << 0),
- FREEZE_HOLDER_USERSPACE = (1U << 1),
- FREEZE_MAY_NEST = (1U << 2),
-};
-
-struct super_operations {
- struct inode *(*alloc_inode)(struct super_block *sb);
- void (*destroy_inode)(struct inode *);
- void (*free_inode)(struct inode *);
-
- void (*dirty_inode) (struct inode *, int flags);
- int (*write_inode) (struct inode *, struct writeback_control *wbc);
- int (*drop_inode) (struct inode *);
- void (*evict_inode) (struct inode *);
- void (*put_super) (struct super_block *);
- int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *, enum freeze_holder who);
- int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *, enum freeze_holder who);
- int (*unfreeze_fs) (struct super_block *);
- int (*statfs) (struct dentry *, struct kstatfs *);
- int (*remount_fs) (struct super_block *, int *, char *);
- void (*umount_begin) (struct super_block *);
-
- int (*show_options)(struct seq_file *, struct dentry *);
- int (*show_devname)(struct seq_file *, struct dentry *);
- int (*show_path)(struct seq_file *, struct dentry *);
- int (*show_stats)(struct seq_file *, struct dentry *);
-#ifdef CONFIG_QUOTA
- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
- struct dquot __rcu **(*get_dquots)(struct inode *);
-#endif
- long (*nr_cached_objects)(struct super_block *,
- struct shrink_control *);
- long (*free_cached_objects)(struct super_block *,
- struct shrink_control *);
- void (*shutdown)(struct super_block *sb);
-};
-
/*
* Inode flags - they have no relation to superblock flags now
*/
@@ -2208,6 +2104,7 @@ struct super_operations {
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
+#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2224,7 +2121,6 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
@@ -2264,6 +2160,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
struct inode *inode)
@@ -2292,112 +2189,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
};
}
-/*
- * Inode state bits. Protected by inode->i_lock
- *
- * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
- *
- * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
- * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
- * various stages of removing an inode.
- *
- * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
- *
- * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync() (unless I_DIRTY_DATASYNC is also set).
- * Timestamp updates are the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
- * these changes separately from I_DIRTY_SYNC so that we
- * don't have to write inode on fdatasync() when only
- * e.g. the timestamps have changed.
- * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_DIRTY_TIME The inode itself has dirty timestamps, and the
- * lazytime mount option is enabled. We keep track of this
- * separately from I_DIRTY_SYNC in order to implement
- * lazytime. This gets cleared if I_DIRTY_INODE
- * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
- * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
- * in place because writeback might already be in progress
- * and we don't want to lose the time update
- * I_NEW Serves as both a mutex and completion notification.
- * New inodes set I_NEW. If two processes both create
- * the same inode, one of them will release its inode and
- * wait for I_NEW to be released before returning.
- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- * also cause waiting on I_NEW, without I_NEW actually
- * being set. find_inode() uses this to prevent returning
- * nearly-dead inodes.
- * I_WILL_FREE Must be set when calling write_inode_now() if i_count
- * is zero. I_FREEING must be set when I_WILL_FREE is
- * cleared.
- * I_FREEING Set when inode is about to be freed but still has dirty
- * pages or buffers attached or the inode itself is still
- * dirty.
- * I_CLEAR Added by clear_inode(). In this state the inode is
- * clean and can be destroyed. Inode keeps I_FREEING.
- *
- * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
- * prohibited for many purposes. iget() must wait for
- * the inode to be completely released, then create it
- * anew. Other functions will just ignore such inodes,
- * if appropriate. I_NEW is used for waiting.
- *
- * I_SYNC Writeback of inode is running. The bit is set during
- * data writeback, and cleared with a wakeup on the bit
- * address once it is done. The bit is also used to pin
- * the inode in memory for flusher thread.
- *
- * I_REFERENCED Marks the inode as recently references on the LRU list.
- *
- * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
- *
- * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
- * synchronize competing switching instances and to tell
- * wb stat updates to grab the i_pages lock. See
- * inode_switch_wbs_work_fn() for details.
- *
- * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
- * and work dirs among overlayfs mounts.
- *
- * I_CREATING New object's inode in the middle of setting up.
- *
- * I_DONTCACHE Evict inode as soon as it is not used anymore.
- *
- * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
- * Used to detect that mark_inode_dirty() should not move
- * inode between dirty lists.
- *
- * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
- *
- * Q: What is the difference between I_WILL_FREE and I_FREEING?
- */
-#define I_DIRTY_SYNC (1 << 0)
-#define I_DIRTY_DATASYNC (1 << 1)
-#define I_DIRTY_PAGES (1 << 2)
-#define __I_NEW 3
-#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE (1 << 4)
-#define I_FREEING (1 << 5)
-#define I_CLEAR (1 << 6)
-#define __I_SYNC 7
-#define I_SYNC (1 << __I_SYNC)
-#define I_REFERENCED (1 << 8)
-#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
-#define I_LINKABLE (1 << 10)
-#define I_DIRTY_TIME (1 << 11)
-#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-#define I_CREATING (1 << 15)
-#define I_DONTCACHE (1 << 16)
-#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_NETFS_WB (1 << 18)
-
-#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
-#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
-#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
-
extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
{
@@ -2409,6 +2200,11 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+static inline int icount_read(const struct inode *inode)
+{
+ return atomic_read(&inode->i_count);
+}
+
/*
* Returns true if the given inode itself only has dirty timestamps (its pages
* may still be dirty) and isn't currently being allocated or freed.
@@ -2420,8 +2216,8 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
*/
static inline bool inode_is_dirtytime_only(struct inode *inode)
{
- return (inode->i_state & (I_DIRTY_TIME | I_NEW |
- I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+ return (inode_state_read_once(inode) &
+ (I_DIRTY_TIME | I_NEW | I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
}
extern void inc_nlink(struct inode *inode);
@@ -2472,6 +2268,9 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
+#define FS_MGTIME 64 /* FS uses multigrain timestamps */
+#define FS_LBS 128 /* FS supports LBS */
+#define FS_POWER_FREEZE 256 /* Always freeze on suspend/hibernate */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2495,21 +2294,22 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-extern struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_single(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_nodev(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+/**
+ * is_mgtime: is this inode using multigrain timestamps
+ * @inode: inode to test for multigrain timestamps
+ *
+ * Return true if the inode uses multigrain timestamps, false otherwise.
+ */
+static inline bool is_mgtime(const struct inode *inode)
+{
+ return inode->i_opflags & IOP_MGTIME;
+}
+
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
-void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
@@ -2526,10 +2326,17 @@ struct super_block *sget(struct file_system_type *type,
struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
- do { if (fops) module_put((fops)->owner); } while(0)
+#define fops_get(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
+})
+
+#define fops_put(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ if (_fops) \
+ module_put((_fops)->owner); \
+})
+
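Reviewer note: the statement-expression form evaluates its argument exactly once, so fops_get(expr-with-side-effects) is now safe; usage is unchanged. A sketch:

static int example_chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *ops = fops_get(inode->i_cdev->ops);

	if (!ops)
		return -ENXIO;	/* owning module is being unloaded */
	filp->f_op = ops;
	return 0;
}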
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
@@ -2547,8 +2354,6 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-int freeze_super(struct super_block *super, enum freeze_holder who);
-int thaw_super(struct super_block *super, enum freeze_holder who);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -2591,10 +2396,9 @@ static inline void super_set_sysfs_name_generic(struct super_block *sb, const ch
va_end(args);
}
-extern int current_umask(void);
-
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
+void iput_not_last(struct inode *);
int inode_update_timestamps(struct inode *inode, int flags);
int generic_update_time(struct inode *, int);
@@ -2632,13 +2436,13 @@ static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
return mnt_idmap(mnt) != &nop_mnt_idmap;
}
-extern long vfs_truncate(const struct path *, loff_t);
+int vfs_truncate(const struct path *, loff_t);
int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
+int do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(const struct path *,
@@ -2651,9 +2455,11 @@ static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
}
struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
+struct file *dentry_open_nonotify(const struct path *path, int flags,
+ const struct cred *cred);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct path *backing_file_user_path(struct file *f);
+const struct path *backing_file_user_path(const struct file *f);
/*
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
@@ -2665,14 +2471,14 @@ struct path *backing_file_user_path(struct file *f);
* by fstat() on that same fd.
*/
/* Get the path to display in /proc/<pid>/maps */
-static inline const struct path *file_user_path(struct file *f)
+static inline const struct path *file_user_path(const struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return backing_file_user_path(f);
return &f->f_path;
}
/* Get the inode whose inode number to display in /proc/<pid>/maps */
-static inline const struct inode *file_user_inode(struct file *f)
+static inline const struct inode *file_user_inode(const struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
return d_inode(backing_file_user_path(f)->dentry);
@@ -2685,11 +2491,31 @@ static inline struct file *file_clone_open(struct file *file)
}
extern int filp_close(struct file *, fl_owner_t id);
-extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname_flags(const char __user *, int);
extern struct filename *getname_uflags(const char __user *, int);
-extern struct filename *getname(const char __user *);
+static inline struct filename *getname(const char __user *name)
+{
+ return getname_flags(name, 0);
+}
extern struct filename *getname_kernel(const char *);
+extern struct filename *__getname_maybe_null(const char __user *);
+static inline struct filename *getname_maybe_null(const char __user *name, int flags)
+{
+ if (!(flags & AT_EMPTY_PATH))
+ return getname(name);
+
+ if (!name)
+ return NULL;
+ return __getname_maybe_null(name);
+}
extern void putname(struct filename *name);
+DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
+
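Reviewer note: the DEFINE_FREE() hookup enables scope-based cleanup via __free() from <linux/cleanup.h>; the guard skips ERR_PTR/NULL, so early error returns need no explicit putname(). A sketch:

static int example_lookup(const char __user *uname)
{
	struct filename *name __free(putname) = getname(uname);

	if (IS_ERR(name))
		return PTR_ERR(name);	/* cleanup guard skips ERR_PTR */
	/* ... use name->name; putname() runs automatically on return */
	return 0;
}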
+static inline struct filename *refname(struct filename *name)
+{
+ atomic_inc(&name->refcnt);
+ return name;
+}
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
@@ -2713,12 +2539,6 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-extern struct super_block *blockdev_superblock;
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
-}
-
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
@@ -2764,6 +2584,8 @@ extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+int filemap_flush_range(struct address_space *mapping, loff_t start,
+ loff_t end);
static inline int file_write_and_wait(struct file *file)
{
@@ -2796,6 +2618,11 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
if (ret)
return ret;
+ } else if (iocb->ki_flags & IOCB_DONTCACHE) {
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ filemap_flush_range(mapping, iocb->ki_pos - count,
+ iocb->ki_pos - 1);
}
return count;
@@ -2814,7 +2641,7 @@ static inline int bmap(struct inode *inode, sector_t *block)
#endif
int notify_change(struct mnt_idmap *, struct dentry *,
- struct iattr *, struct inode **);
+ struct iattr *, struct delegated_inode *);
int inode_permission(struct mnt_idmap *, struct inode *, int);
int generic_permission(struct mnt_idmap *, struct inode *, int);
static inline int file_permission(struct file *file, int mask)
@@ -2844,7 +2671,7 @@ static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
* file_start_write - get write access to a superblock for regular file io
* @file: the file we want to write to
*
- * This is a variant of sb_start_write() which is a noop on non-regualr file.
+ * This is a variant of sb_start_write() which is a noop on non-regular file.
* Should be matched with a call to file_end_write().
*/
static inline void file_start_write(struct file *file)
@@ -2949,6 +2776,34 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
+
+/*
+ * Do not prevent write to executable file when watched by pre-content events.
+ *
+ * Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at
+ * the time of file open and remains constant for entire lifetime of the file,
+ * so if pre-content watches are added post execution or removed before the end
+ * of the execution, it will not cause i_writecount reference leak.
+ */
+static inline int exe_file_deny_write_access(struct file *exe_file)
+{
+ if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return 0;
+ return deny_write_access(exe_file);
+}
+static inline void exe_file_allow_write_access(struct file *exe_file)
+{
+ if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return;
+ allow_write_access(exe_file);
+}
+
+static inline void file_set_fsnotify_mode(struct file *file, fmode_t mode)
+{
+ file->f_mode &= ~FMODE_FSNOTIFY_MASK;
+ file->f_mode |= mode;
+}
+
static inline bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
@@ -2984,6 +2839,7 @@ extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(const struct path *, const struct path *);
+u64 vfsmount_to_propagation_flags(struct vfsmount *mnt);
extern char *file_path(struct file *, char *, int);
@@ -2998,6 +2854,22 @@ static inline bool is_dot_dotdot(const char *name, size_t len)
(len == 1 || (len == 2 && name[1] == '.'));
}
+/**
+ * name_contains_dotdot - check if a file name contains ".." path components
+ * @name: File path string to check
+ *
+ * Search for ".." surrounded by either '/' or start/end of string.
+ */
+static inline bool name_contains_dotdot(const char *name)
+{
+ size_t name_len;
+
+ name_len = strlen(name);
+ return strcmp(name, "..") == 0 ||
+ strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
+}
+
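Reviewer note: the helper treats ".." as a whole path component; a quick truth table derived from the checks above:

/*
 *   ".."       -> true   (exact match)
 *   "../x"     -> true   (leading component)
 *   "a/../b"   -> true   (interior component)
 *   "a/b/.."   -> true   (trailing component)
 *   "a..b"     -> false  (".." is not a component)
 *   "..."      -> false
 */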
#include <linux/err.h>
/* needed for stackable file system support */
@@ -3005,14 +2877,19 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
-extern int inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+static inline int inode_init_always(struct super_block *sb, struct inode *inode)
+{
+ return inode_init_always_gfp(sb, inode, GFP_NOFS);
+}
+
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
-extern int generic_delete_inode(struct inode *inode);
-static inline int generic_drop_inode(struct inode *inode)
+extern int inode_just_drop(struct inode *inode);
+static inline int inode_generic_drop(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
@@ -3020,7 +2897,7 @@ extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
- void *data);
+ void *data, bool *isnew);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
@@ -3029,7 +2906,12 @@ extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
@@ -3067,15 +2949,23 @@ static inline bool is_zero_ino(ino_t ino)
return (u32)ino == 0;
}
-extern void __iget(struct inode * inode);
+static inline void __iget(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ atomic_inc(&inode->i_count);
+}
+
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
-extern struct inode *new_inode_pseudo(struct super_block *sb);
+struct inode *alloc_inode(struct super_block *sb);
+static inline struct inode *new_inode_pseudo(struct super_block *sb)
+{
+ return alloc_inode(sb);
+}
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
-extern int file_remove_privs_flags(struct file *file, unsigned int flags);
extern int file_remove_privs(struct file *);
int setattr_should_drop_sgid(struct mnt_idmap *idmap,
const struct inode *inode);
@@ -3100,13 +2990,12 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
-extern void inode_add_lru(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
-
-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap_prepare(struct vm_area_desc *desc);
+int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,
@@ -3145,11 +3034,12 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-#define no_llseek NULL
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
@@ -3187,7 +3077,9 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
}
#endif
+bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
+void inode_dio_wait_interruptible(struct inode *inode);
/**
 * inode_dio_begin - signal start of a direct I/O request
@@ -3211,7 +3103,7 @@ static inline void inode_dio_begin(struct inode *inode)
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ wake_up_var(&inode->i_dio_count);
}
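With the switch from wake_up_bit() on __I_DIO_WAKEUP to wake_up_var() on i_dio_count itself, the waiting side is presumably the matching wait_var_event() on the same address. A sketch of what a waiter would look like (the real implementation lives in fs/inode.c; my_inode_dio_wait is illustrative only):

    static inline void my_inode_dio_wait(struct inode *inode)
    {
        /* Pairs with the wake_up_var() in inode_dio_end() above. */
        wait_var_event(&inode->i_dio_count, inode_dio_finished(inode));
    }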
extern void inode_set_flags(struct inode *inode, unsigned int flags,
@@ -3221,16 +3113,23 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-extern int readlink_copy(char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
+extern const char *page_get_link_raw(struct dentry *, struct inode *,
+ struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
+void generic_fill_statx_atomic_writes(struct kstat *stat,
+ unsigned int unit_min,
+ unsigned int unit_max,
+ unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3270,9 +3169,13 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
-extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
+void filesystems_freeze(bool freeze_all);
+void filesystems_thaw(void);
+
+void end_dirop(struct dentry *de);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
@@ -3287,6 +3190,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
+extern void __simple_unlink(struct inode *, struct dentry *);
+extern void __simple_rmdir(struct inode *, struct dentry *);
void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
@@ -3296,17 +3201,23 @@ extern int simple_rename(struct mnt_idmap *, struct inode *,
unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
+extern void simple_remove_by_name(struct dentry *, const char *,
+ void (*callback)(struct dentry *));
+extern void locked_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct page **pagep, void **fsdata);
+extern int simple_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode);
extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
-extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
@@ -3320,6 +3231,8 @@ extern int simple_fill_super(struct super_block *, unsigned long,
const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
+struct dentry *simple_start_creating(struct dentry *, const char *);
+void simple_done_creating(struct dentry *);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
@@ -3334,7 +3247,6 @@ struct offset_ctx {
void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
-int simple_offset_empty(struct dentry *dentry);
int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
@@ -3351,15 +3263,60 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
extern void generic_set_sb_d_ops(struct super_block *sb);
+extern int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len);
-static inline bool sb_has_encoding(const struct super_block *sb)
-{
#if IS_ENABLED(CONFIG_UNICODE)
- return !!sb->s_encoding;
+int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
+int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+
+/**
+ * generic_ci_validate_strict_name - Check if a given name is suitable
+ * for a directory
+ *
+ * This function checks if the proposed filename is valid for the
+ * parent directory. That means that only valid UTF-8 filenames will be
+ * accepted for casefold directories from filesystems created with the
+ * strict encoding flag. It also means that any name will be
+ * accepted for directories that don't have casefold enabled, or
+ * that aren't strict about their encoding.
+ *
+ * @dir: inode of the directory where the new file will be created
+ * @name: name of the new file
+ *
+ * Return:
+ * * True: if the filename is suitable for this directory. It can be
+ * true if a given name is not suitable for a strict encoding
+ * directory, but the directory being used isn't strict
+ * * False if the filename isn't suitable for this directory. This only
+ * happens when a directory is casefolded and the filesystem is strict
+ * about its encoding.
+ */
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
+ return true;
+
+ /*
+	 * A casefold dir must have an encoding set, unless the filesystem
+	 * is corrupted.
+ */
+ if (WARN_ON_ONCE(!dir->i_sb->s_encoding))
+ return true;
+
+ return !utf8_validate(dir->i_sb->s_encoding, name);
+}
#else
- return false;
-#endif
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ return true;
}
+#endif
int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
unsigned int ia_valid);
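A filesystem's create path would be expected to call this helper before committing a new name. A hedged sketch of such a caller (myfs_create is invented; the ->create signature follows the current inode_operations prototype):

    static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
                           struct dentry *dentry, umode_t mode, bool excl)
    {
        if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
            return -EINVAL;
        /* ... allocate and hash the new inode ... */
        return 0;
    }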
@@ -3370,9 +3327,14 @@ void setattr_copy(struct mnt_idmap *, struct inode *inode,
extern int file_update_time(struct file *file);
+static inline bool file_is_dax(const struct file *file)
+{
+ return file && IS_DAX(file->f_mapping->host);
+}
+
static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ return file_is_dax(vma->vm_file);
}
static inline bool vma_is_fsdax(struct vm_area_struct *vma)
@@ -3403,7 +3365,8 @@ static inline int iocb_flags(struct file *file)
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
+ int rw_type)
{
int kiocb_flags = 0;
@@ -3420,7 +3383,20 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOIO;
+ }
+ if (flags & RWF_ATOMIC) {
+ if (rw_type != WRITE)
+ return -EOPNOTSUPP;
+ if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
+ return -EOPNOTSUPP;
+ }
+ if (flags & RWF_DONTCACHE) {
+ /* file system must support it */
+ if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE))
+ return -EOPNOTSUPP;
+ /* DAX mappings not supported */
+ if (IS_DAX(ki->ki_filp->f_mapping->host))
+ return -EOPNOTSUPP;
}
kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
@@ -3436,20 +3412,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
return 0;
}
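From userspace, the new checks surface through preadv2()/pwritev2(): RWF_ATOMIC is rejected for reads and for files without FMODE_CAN_ATOMIC_WRITE, and RWF_DONTCACHE requires FOP_DONTCACHE support from the filesystem. A small illustration, assuming uapi headers that expose both flags:

    #define _GNU_SOURCE
    #include <sys/uio.h>

    /* Untorn write: fails with EOPNOTSUPP unless the file supports it. */
    ssize_t n = pwritev2(fd, &iov, 1, off, RWF_ATOMIC);

    /* Drop-behind buffered write: data is not left in the page cache. */
    ssize_t m = pwritev2(fd, &iov, 1, off, RWF_DONTCACHE);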
-static inline ino_t parent_ino(struct dentry *dentry)
-{
- ino_t res;
-
- /*
- * Don't strictly need d_lock here? If the parent ino could change
- * then surely we'd have a deeper race in the caller?
- */
- spin_lock(&dentry->d_lock);
- res = dentry->d_parent->d_inode->i_ino;
- spin_unlock(&dentry->d_lock);
- return res;
-}
-
/* Transaction based IO helpers */
/*
@@ -3529,11 +3491,9 @@ struct ctl_table;
int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
-#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & __FMODE_NONOTIFY)))
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
static inline bool is_sxid(umode_t mode)
{
@@ -3574,7 +3534,7 @@ static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR);
+ d_parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3613,4 +3573,37 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
+static inline bool vfs_empty_path(int dfd, const char __user *path)
+{
+ char c;
+
+ if (dfd < 0)
+ return false;
+
+ /* We now allow NULL to be used for empty path. */
+ if (!path)
+ return true;
+
+ if (unlikely(get_user(c, path)))
+ return false;
+
+ return !c;
+}
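vfs_empty_path() lets a syscall treat a NULL or "" path with a valid dfd as an AT_EMPTY_PATH-style operation on the fd itself, without a full user copy. A sketch of how a syscall prologue might use it (both helpers below are invented):

    if (vfs_empty_path(dfd, pathname))
        return do_operation_on_fd(dfd);          /* hypothetical */
    return do_operation_on_path(dfd, pathname);  /* hypothetical */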
+
+int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
+
+static inline bool extensible_ioctl_valid(unsigned int cmd_a,
+ unsigned int cmd_b, size_t min_size)
+{
+ if (_IOC_DIR(cmd_a) != _IOC_DIR(cmd_b))
+ return false;
+ if (_IOC_TYPE(cmd_a) != _IOC_TYPE(cmd_b))
+ return false;
+ if (_IOC_NR(cmd_a) != _IOC_NR(cmd_b))
+ return false;
+ if (_IOC_SIZE(cmd_a) < min_size)
+ return false;
+ return true;
+}
+
#endif /* _LINUX_FS_H */
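extensible_ioctl_valid() above accepts any command whose direction, type, and number match a reference command and whose size is at least the original struct size, so older binaries keep working as the struct grows. A hedged usage sketch (MYFS_IOC_GET_STATE, struct myfs_state_v1, and myfs_get_state are invented):

    long myfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
        if (extensible_ioctl_valid(cmd, MYFS_IOC_GET_STATE,
                                   sizeof(struct myfs_state_v1)))
            return myfs_get_state(file, (void __user *)arg,
                                  _IOC_SIZE(cmd));
        return -ENOIOCTLCMD;
    }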
diff --git a/include/linux/fs/super.h b/include/linux/fs/super.h
new file mode 100644
index 000000000000..f21ffbb6dea5
--- /dev/null
+++ b/include/linux/fs/super.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_H
+#define _LINUX_FS_SUPER_H
+
+#include <linux/fs/super_types.h>
+#include <linux/unicode.h>
+
+/*
+ * These are internal functions, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
+
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev) - 1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev) - 1], _THIS_IP_)
+
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * sb_end_write - drop write access to a superblock
+ * @sb: the super we wrote to
+ *
+ * Decrement number of writers to the filesystem. Wake up possible waiters
+ * wanting to freeze the filesystem.
+ */
+static inline void sb_end_write(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_end_pagefault - drop write access to a superblock from a page fault
+ * @sb: the super we wrote to
+ *
+ * Decrement number of processes handling write page fault to the filesystem.
+ * Wake up possible waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_pagefault(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_end_intwrite - drop write access to a superblock for internal fs purposes
+ * @sb: the super we wrote to
+ *
+ * Decrement fs-internal number of writers to the filesystem. Wake up possible
+ * waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_intwrite(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_FS);
+}
+
+/**
+ * sb_start_write - get write access to a superblock
+ * @sb: the super we write to
+ *
+ * When a process wants to write data or metadata to a file system (i.e. dirty
+ * a page or an inode), it should embed the operation in a sb_start_write() -
+ * sb_end_write() pair to get exclusion against file system freezing. This
+ * function increments number of writers preventing freezing. If the file
+ * system is already frozen, the function waits until the file system is
+ * thawed.
+ *
+ * Since freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. Generally,
+ * freeze protection should be the outermost lock. In particular, we have:
+ *
+ * sb_start_write
+ * -> i_rwsem (write path, truncate, directory ops, ...)
+ * -> s_umount (freeze_super, thaw_super)
+ */
+static inline void sb_start_write(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_WRITE);
+}
+
+DEFINE_GUARD(super_write,
+ struct super_block *,
+ sb_start_write(_T),
+ sb_end_write(_T))
+
+static inline bool sb_start_write_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
+}
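The DEFINE_GUARD() above makes the classic pair expressible as a scope-based guard from <linux/cleanup.h>; both forms below should be equivalent (do_modification is invented):

    /* Classic form */
    sb_start_write(sb);
    err = do_modification(sb);
    sb_end_write(sb);

    /* Guard form: sb_end_write() runs automatically at scope exit */
    {
        guard(super_write)(sb);
        err = do_modification(sb);
    }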
+
+/**
+ * sb_start_pagefault - get write access to a superblock from a page fault
+ * @sb: the super we write to
+ *
+ * When a process starts handling write page fault, it should embed the
+ * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
+ * exclusion against file system freezing. This is needed since the page fault
+ * is going to dirty a page. This function increments number of running page
+ * faults preventing freezing. If the file system is already frozen, the
+ * function waits until the file system is thawed.
+ *
+ * Since page fault freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. It is advised to
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
+ * handling code implies lock dependency:
+ *
+ * mmap_lock
+ * -> sb_start_pagefault
+ */
+static inline void sb_start_pagefault(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_start_intwrite - get write access to a superblock for internal fs purposes
+ * @sb: the super we write to
+ *
+ * This is the third level of protection against filesystem freezing. It is
+ * free for use by a filesystem. The only requirement is that it must rank
+ * below sb_start_pagefault.
+ *
+ * For example, a filesystem can call sb_start_intwrite() when starting a
+ * transaction which somewhat eases handling of freezing for internal sources
+ * of filesystem changes (internal fs threads, discarding preallocation on file
+ * close, etc.).
+ */
+static inline void sb_start_intwrite(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_rdonly(const struct super_block *sb)
+{
+ return sb->s_flags & SB_RDONLY;
+}
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
+}
+
+#if IS_ENABLED(CONFIG_UNICODE)
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return sb->s_encoding;
+}
+
+/* Compare if two super blocks have the same encoding and flags */
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ if (sb1->s_encoding == sb2->s_encoding)
+ return true;
+
+ return (sb1->s_encoding && sb2->s_encoding &&
+ (sb1->s_encoding->version == sb2->s_encoding->version) &&
+ (sb1->s_encoding_flags == sb2->s_encoding_flags));
+}
+#else
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return NULL;
+}
+
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ return true;
+}
+#endif
+
+static inline bool sb_has_encoding(const struct super_block *sb)
+{
+ return !!sb_encoding(sb);
+}
+
+int sb_set_blocksize(struct super_block *sb, int size);
+int __must_check sb_min_blocksize(struct super_block *sb, int size);
+
+int freeze_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+int thaw_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+
+#endif /* _LINUX_FS_SUPER_H */
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
new file mode 100644
index 000000000000..6bd3009e09b3
--- /dev/null
+++ b/include/linux/fs/super_types.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_TYPES_H
+#define _LINUX_FS_SUPER_TYPES_H
+
+#include <linux/fs_dirent.h>
+#include <linux/errseq.h>
+#include <linux/list_lru.h>
+#include <linux/list.h>
+#include <linux/list_bl.h>
+#include <linux/llist.h>
+#include <linux/uidgid.h>
+#include <linux/uuid.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/workqueue_types.h>
+#include <linux/quota.h>
+
+struct backing_dev_info;
+struct block_device;
+struct dentry;
+struct dentry_operations;
+struct dquot_operations;
+struct export_operations;
+struct file;
+struct file_system_type;
+struct fscrypt_operations;
+struct fsnotify_sb_info;
+struct fsverity_operations;
+struct kstatfs;
+struct mount;
+struct mtd_info;
+struct quotactl_ops;
+struct shrinker;
+struct unicode_map;
+struct user_namespace;
+struct workqueue_struct;
+struct writeback_control;
+struct xattr_handler;
+
+extern struct super_block *blockdev_superblock;
+
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ unsigned short frozen; /* Is sb frozen? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
+ const void *freeze_owner; /* Owner of the freeze */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
+};
+
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ * @FREEZE_EXCL: a freeze that can only be undone by the owner
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
+ FREEZE_EXCL = (1U << 3),
+};
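A kernel-internal user (say, a suspend path or online repair) would combine these flags when calling the freeze_super()/thaw_super() pair declared in super.h above. A hedged sketch, assuming a NULL freeze_owner is acceptable for non-exclusive freezes:

    /* Freeze on behalf of the kernel, tolerating nested requests. */
    err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
    if (!err) {
        /* ... work against the frozen filesystem ... */
        thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
    }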
+
+struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*destroy_inode)(struct inode *inode);
+ void (*free_inode)(struct inode *inode);
+ void (*dirty_inode)(struct inode *inode, int flags);
+ int (*write_inode)(struct inode *inode, struct writeback_control *wbc);
+ int (*drop_inode)(struct inode *inode);
+ void (*evict_inode)(struct inode *inode);
+ void (*put_super)(struct super_block *sb);
+ int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*freeze_fs)(struct super_block *sb);
+ int (*thaw_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*unfreeze_fs)(struct super_block *sb);
+ int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
+ int (*remount_fs) (struct super_block *, int *, char *);
+ void (*umount_begin)(struct super_block *sb);
+
+ int (*show_options)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_devname)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_path)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_stats)(struct seq_file *seq, struct dentry *dentry);
+#ifdef CONFIG_QUOTA
+ ssize_t (*quota_read)(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off);
+ ssize_t (*quota_write)(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off);
+ struct dquot __rcu **(*get_dquots)(struct inode *inode);
+#endif
+ long (*nr_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ long (*free_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ /*
+ * If a filesystem can support graceful removal of a device and
+ * continue read-write operations, implement this callback.
+ *
+ * Return 0 if the filesystem can continue read-write.
+	 * A non-zero return value, or the absence of this callback, means the
+	 * fs will be shut down as usual.
+ */
+ int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
+ void (*shutdown)(struct super_block *sb);
+};
+
+struct super_block {
+ struct list_head s_list; /* Keep this first */
+ dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned char s_blocksize_bits;
+ unsigned long s_blocksize;
+ loff_t s_maxbytes; /* Max file size */
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ int s_count;
+ atomic_t s_active;
+#ifdef CONFIG_SECURITY
+ void *s_security;
+#endif
+ const struct xattr_handler *const *s_xattr;
+#ifdef CONFIG_FS_ENCRYPTION
+ const struct fscrypt_operations *s_cop;
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
+#endif
+#ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
+#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
+ struct mount *s_mounts; /* list of mounts; _not_ for fs use */
+ struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
+ struct file *s_bdev_file;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+ unsigned int s_quota_types; /* Bitmask of supported quota types */
+ struct quota_info s_dquot; /* Diskquota specific options */
+
+ struct sb_writers s_writers;
+
+ /*
+ * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
+ * s_fsnotify_info together for cache efficiency. They are frequently
+ * accessed and rarely modified.
+ */
+ void *s_fs_info; /* Filesystem private info */
+
+ /* Granularity of c/m/atime in ns (cannot be worse than a second) */
+ u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
+#ifdef CONFIG_FSNOTIFY
+ u32 s_fsnotify_mask;
+ struct fsnotify_sb_info *s_fsnotify_info;
+#endif
+
+ /*
+ * q: why are s_id and s_sysfs_name not the same? both are human
+ * readable strings that identify the filesystem
+	 * a: s_id is allowed to change at runtime; it's used in log messages.
+	 * When a filesystem starts out on a single device (s_id is the dev
+	 * name) but another device is then hot added, we want to switch to
+	 * identifying it by UUID.
+	 * s_sysfs_name, by contrast, is a handle for programmatic access, and
+	 * can't change at runtime.
+ char s_id[32]; /* Informational name */
+ uuid_t s_uuid; /* UUID */
+ u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
+
+ /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
+ char s_sysfs_name[UUID_STRING_LEN + 1];
+
+ unsigned int s_max_links;
+ unsigned int s_d_flags; /* default d_flags for dentries */
+
+ /*
+ * The next field is for VFS *only*. No filesystems have any business
+ * even looking at it. You had been warned.
+ */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ const char *s_subtype;
+
+ const struct dentry_operations *__s_d_op; /* default d_op for dentries */
+
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
+
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
+ /* Read-only state of the superblock is being changed */
+ int s_readonly_remount;
+
+ /* per-sb errseq_t for reporting writeback errors via syncfs */
+ errseq_t s_wb_err;
+
+ /* AIO completions deferred from interrupt context */
+ struct workqueue_struct *s_dio_done_wq;
+ struct hlist_head s_pins;
+
+ /*
+ * Owning user namespace and default context in which to
+ * interpret filesystem uids, gids, quotas, device nodes,
+ * xattrs and security labels.
+ */
+ struct user_namespace *s_user_ns;
+
+ /*
+ * The list_lru structure is essentially just a pointer to a table
+ * of per-node lru lists, each of which has its own spinlock.
+ * There is no need to put them into separate cachelines.
+ */
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
+ struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
+
+ /*
+ * Indicates how deep in a filesystem stack this SB is
+ */
+ int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
+
+ spinlock_t s_inode_wblist_lock;
+ struct list_head s_inodes_wb; /* writeback inodes */
+ long s_min_writeback_pages;
+} __randomize_layout;
+
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY BIT(0) /* Mount read-only */
+#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME BIT(10) /* Do not update access times. */
+#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
+#define SB_SILENT BIT(15)
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
+#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
+#define SB_I_VERSION BIT(23) /* Update inode I_version field */
+#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
+#define SB_FORCE BIT(27)
+#define SB_NOSEC BIT(28)
+#define SB_BORN BIT(29)
+#define SB_ACTIVE BIT(30)
+#define SB_NOUSER BIT(31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+
+#if IS_ENABLED(CONFIG_UNICODE)
+#define sb_no_casefold_compat_fallback(sb) \
+ (sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
+#else
+#define sb_no_casefold_compat_fallback(sb) (1)
+#endif
+
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
+
+/* sb->s_iflags to limit user namespace mounts */
+#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
+
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
+#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
+
+#endif /* _LINUX_FS_SUPER_TYPES_H */
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index c13e99cbbf81..0d6c8a6d7be2 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -134,8 +134,13 @@ extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_ty
extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
-extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
- const char *value, size_t v_size);
+extern int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value);
+static inline int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value)
+{
+ return vfs_parse_fs_qstr(fc, key, value ? &QSTR(value) : NULL);
+}
int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
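The qstr-based primitive lets callers pass a length-delimited value, while the old string interface survives as the inline wrapper above. Either call should behave the same for NUL-terminated values (sketch):

    err = vfs_parse_fs_string(fc, "source", "/dev/vda1");

    /* Equivalent, but usable for non-NUL-terminated input: */
    struct qstr v = QSTR_INIT("/dev/vda1", 9);
    err = vfs_parse_fs_qstr(fc, "source", &v);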
@@ -144,8 +149,6 @@ extern void put_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param_source(struct fs_context *fc,
struct fs_parameter *param);
extern void fc_drop_locked(struct fs_context *fc);
-int reconfigure_single(struct super_block *s,
- int flags, void *data);
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
@@ -160,6 +163,12 @@ extern int get_tree_keyed(struct fs_context *fc,
int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc);
+
+#define GET_TREE_BDEV_QUIET_LOOKUP 0x0001
+int get_tree_bdev_flags(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc), unsigned int flags);
+
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
@@ -182,10 +191,12 @@ struct fc_log {
extern __attribute__((format(printf, 4, 5)))
void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
-#define __logfc(fc, l, fmt, ...) logfc((fc)->log.log, NULL, \
- l, fmt, ## __VA_ARGS__)
-#define __plog(p, l, fmt, ...) logfc((p)->log, (p)->prefix, \
- l, fmt, ## __VA_ARGS__)
+#define __logfc(fc, l, fmt, ...) \
+ logfc((fc)->log.log, NULL, (l), (fmt), ## __VA_ARGS__)
+#define __plogp(p, prefix, l, fmt, ...) \
+ logfc((p)->log, (prefix), (l), (fmt), ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) __plogp(p, (p)->prefix, l, fmt, ## __VA_ARGS__)
+
/**
* infof - Store supplementary informational message
* @fc: The context in which to log the informational message
@@ -196,7 +207,9 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
*/
#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
-#define infofc(p, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+#define infofcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'i', fmt, ## __VA_ARGS__)
/**
* warnf - Store supplementary warning message
@@ -209,6 +222,8 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
+#define warnfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'w', fmt, ## __VA_ARGS__)
/**
* errorf - Store supplementary error message
@@ -221,6 +236,8 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
+#define errorfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'e', fmt, ## __VA_ARGS__)
/**
* invalf - Store supplementary invalid argument error message
@@ -233,5 +250,7 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfcp(fc, prefix, fmt, ...) \
+ (errorfcp(fc, prefix, fmt, ## __VA_ARGS__), -EINVAL)
#endif /* _LINUX_FS_CONTEXT_H */
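The new *fcp variants take an explicit prefix instead of the one stored in the p_log, which is handy when one parser logs on behalf of several option namespaces. An invented example:

    errorfcp(fc, "myfs-quota", "unknown option '%s'", param->key);
    return invalfcp(fc, "myfs-quota", "value out of range: %u", result.uint_32);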
diff --git a/include/linux/fs_types.h b/include/linux/fs_dirent.h
index 54816791196f..92f75c5bac19 100644
--- a/include/linux/fs_types.h
+++ b/include/linux/fs_dirent.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FS_TYPES_H
-#define _LINUX_FS_TYPES_H
+#ifndef _LINUX_FS_DIRENT_H
+#define _LINUX_FS_DIRENT_H
+
+#include <linux/stat.h>
+#include <linux/types.h>
/*
* This is a header for the common implementation of dirent
@@ -66,10 +69,10 @@
/*
* declarations for helper functions, accompanying implementation
- * is in fs/fs_types.c
+ * is in fs/fs_dirent.c
*/
extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
extern unsigned char fs_umode_to_ftype(umode_t mode);
extern unsigned char fs_umode_to_dtype(umode_t mode);
-#endif
+#endif /* _LINUX_FS_DIRENT_H */
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index d3350979115f..5e8a3b546033 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -28,7 +28,8 @@ typedef int fs_param_type(struct p_log *,
*/
fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
- fs_param_is_path, fs_param_is_fd;
+ fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
+ fs_param_is_file_or_string;
/*
* Specification of the type of value a parameter wants.
@@ -57,6 +58,8 @@ struct fs_parse_result {
int int_32; /* For spec_s32/spec_enum */
unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
u64 uint_64; /* For spec_u64 */
+ kuid_t uid;
+ kgid_t gid;
};
};
@@ -81,15 +84,12 @@ extern int fs_lookup_param(struct fs_context *fc,
extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+extern const struct constant_table bool_names[];
+
#ifdef CONFIG_VALIDATE_FS_PARSER
-extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special);
extern bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc);
#else
-static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
-{ return true; }
static inline bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
{ return true; }
@@ -121,7 +121,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
@@ -131,6 +131,10 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+#define fsparam_file_or_string(NAME, OPT) \
+ __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL)
+#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL)
+#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL)
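With the new uid/gid parameter types, fs_parse() performs the kuid/kgid mapping itself and hands back result.uid / result.gid. A hedged sketch of a parameter table and its handler (the Opt_* names and ctx layout are invented):

    static const struct fs_parameter_spec myfs_fs_parameters[] = {
        fsparam_uid("uid", Opt_uid),
        fsparam_gid("gid", Opt_gid),
        {}
    };

    /* in ->parse_param(): */
    opt = fs_parse(fc, myfs_fs_parameters, param, &result);
    switch (opt) {
    case Opt_uid:
        ctx->uid = result.uid;  /* already a kuid_t */
        break;
    case Opt_gid:
        ctx->gid = result.gid;
        break;
    }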
/* String parameter that allows empty argument */
#define fsparam_string_empty(NAME, OPT) \
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 2b1f74b24070..0cc2fa283305 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -3,7 +3,7 @@
#define _LINUX_FS_STACK_H
/* This file defines generic functions used primarily by stackable
- * filesystems; none of these functions require i_mutex to be held.
+ * filesystems; none of these functions require i_rwsem to be held.
*/
#include <linux/fs.h>
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 783b48dedb72..0070764b790a 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -2,14 +2,14 @@
#ifndef _LINUX_FS_STRUCT_H
#define _LINUX_FS_STRUCT_H
+#include <linux/sched.h>
#include <linux/path.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
struct fs_struct {
int users;
- spinlock_t lock;
- seqcount_spinlock_t seq;
+ seqlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
@@ -26,20 +26,25 @@ extern int unshare_fs_struct(void);
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*root = fs->root;
path_get(root);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*pwd = fs->pwd;
path_get(pwd);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
extern bool current_chrooted(void);
+static inline int current_umask(void)
+{
+ return current->fs->umask;
+}
+
#endif /* _LINUX_FS_STRUCT_H */
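Collapsing the spinlock+seqcount pair into a single seqlock_t means writers take the seqlock exclusively while readers either do the same (as in get_fs_root()/get_fs_pwd() above) or retry-loop without blocking. Roughly what the writer side would look like after this conversion (a sketch of set_fs_root() from fs/fs_struct.c):

    void set_fs_root(struct fs_struct *fs, const struct path *path)
    {
        struct path old_root;

        path_get(path);
        write_seqlock(&fs->seq);
        old_root = fs->root;
        fs->root = *path;
        write_sequnlock(&fs->seq);

        if (old_root.dentry)
            path_put(&old_root);
    }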
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index bdf7f3eddf0a..4c91a019972b 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -19,6 +19,7 @@
enum fscache_cache_trace;
enum fscache_cookie_trace;
enum fscache_access_trace;
+enum fscache_volume_trace;
enum fscache_cache_state {
FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
extern void fscache_io_error(struct fscache_cache *cache);
+extern struct fscache_volume *
+fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
extern void fscache_end_volume_access(struct fscache_volume *volume,
struct fscache_cookie *cookie,
enum fscache_access_trace why);
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 9de27643607f..58fdb9605425 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -498,9 +498,6 @@ static inline void fscache_end_operation(struct netfs_cache_resources *cres)
*
* NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
- * skipped over, then do as for IGNORE.
- *
* NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
@@ -628,7 +625,7 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
term_func, term_func_priv,
using_pgpriv2, caching);
else if (term_func)
- term_func(term_func_priv, -ENOBUFS, false);
+ term_func(term_func_priv, -ENOBUFS);
}
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 772f822dc6b8..516aba5b858b 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -61,6 +61,12 @@ struct fscrypt_name {
/* Crypto operations for filesystems */
struct fscrypt_operations {
+ /*
+ * The offset of the pointer to struct fscrypt_inode_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
/*
* If set, then fs/crypto/ will allocate a global bounce page pool the
@@ -192,18 +198,47 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
+
+/*
+ * Returns the address of the fscrypt info pointer within the
+ * filesystem-specific part of the inode. (To save memory on filesystems that
+ * don't support fscrypt, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fscrypt_inode_info **
+fscrypt_inode_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_cop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_cop->inode_info_offs;
+}
+
+/*
+ * Load the inode's fscrypt info pointer, using a raw dereference. Since this
+ * uses a raw dereference with no memory barrier, it is appropriate to use only
+ * when the caller knows the inode's key setup already happened, resulting in
+ * non-NULL fscrypt info. E.g., the file contents en/decryption functions use
+ * this, since fscrypt_file_open() set up the key.
+ */
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info_raw(const struct inode *inode)
+{
+ struct fscrypt_inode_info *ci = *fscrypt_inode_info_addr(inode);
+
+ VFS_WARN_ON_ONCE(ci == NULL);
+ return ci;
+}
static inline struct fscrypt_inode_info *
fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
- * I.e., another task may publish ->i_crypt_info concurrently, executing
- * a RELEASE barrier. We need to use smp_load_acquire() here to safely
+ * I.e., another task may publish the fscrypt info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
* ACQUIRE the memory the other task published.
*/
- return smp_load_acquire(&inode->i_crypt_info);
+ return smp_load_acquire(fscrypt_inode_info_addr(inode));
}
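The pairing the comment describes is the standard publish/consume pattern: the setup side stores the fully initialised info with a RELEASE, and readers load it with an ACQUIRE. In miniature (a sketch; fscrypt_setup_encryption_info() is the real publisher, and free_ci()/use_key() are invented):

    /* publisher (key setup), simplified */
    struct fscrypt_inode_info **addr = fscrypt_inode_info_addr(inode);
    if (cmpxchg_release(addr, NULL, ci) != NULL)
        free_ci(ci);  /* lost the race; someone else published first */

    /* reader */
    struct fscrypt_inode_info *ci = smp_load_acquire(addr);
    if (ci)
        use_key(ci);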
/**
@@ -309,13 +344,11 @@ static inline void fscrypt_prepare_dentry(struct dentry *dentry,
/* crypto.c */
void fscrypt_enqueue_decrypt_work(struct work_struct *);
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags);
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags);
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags);
+ u64 lblk_num);
int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs);
@@ -333,12 +366,13 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return (struct page *)page_private(bounce_page);
}
-static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
{
return folio->mapping == NULL;
}
-static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
{
return bounce_folio->private;
}
@@ -479,10 +513,8 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -490,8 +522,7 @@ static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page,
unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags)
+ unsigned int offs, u64 lblk_num)
{
return -EOPNOTSUPP;
}
@@ -521,12 +552,13 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return ERR_PTR(-EINVAL);
}
-static inline bool fscrypt_is_bounce_folio(struct folio *folio)
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
{
return false;
}
-static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio)
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
@@ -711,8 +743,8 @@ static inline u64 fscrypt_fname_siphash(const struct inode *dir,
return 0;
}
-static inline int fscrypt_d_revalidate(struct dentry *dentry,
- unsigned int flags)
+static inline int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
return 1;
}
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 3df8c54868df..adea1b432f2d 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -44,7 +44,7 @@ struct fsi_driver {
};
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
-#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+#define to_fsi_drv(drvp) container_of_const(drvp, struct fsi_driver, drv)
extern int fsi_driver_register(struct fsi_driver *fsi_drv);
extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
@@ -68,7 +68,7 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size);
-extern struct bus_type fsi_bus_type;
+extern const struct bus_type fsi_bus_type;
extern const struct device_type fsi_cdev_type;
enum fsi_dev_type {
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index df25fffdc0ae..623ccfcbf39c 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -59,7 +59,8 @@ static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
int devad, int regnum, u16 value)
{ return -EINVAL; }
-struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
+ void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index a1b3de87a3d1..897d6211c163 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -56,7 +56,7 @@ struct fsl_mc_driver {
};
#define to_fsl_mc_driver(_drv) \
- container_of(_drv, struct fsl_mc_driver, driver)
+ container_of_const(_drv, struct fsl_mc_driver, driver)
/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
@@ -417,8 +417,6 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
-
int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_device **new_mc_adev);
@@ -436,23 +434,23 @@ void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);
-extern struct bus_type fsl_mc_bus_type;
-
-extern struct device_type fsl_mc_bus_dprc_type;
-extern struct device_type fsl_mc_bus_dpni_type;
-extern struct device_type fsl_mc_bus_dpio_type;
-extern struct device_type fsl_mc_bus_dpsw_type;
-extern struct device_type fsl_mc_bus_dpbp_type;
-extern struct device_type fsl_mc_bus_dpcon_type;
-extern struct device_type fsl_mc_bus_dpmcp_type;
-extern struct device_type fsl_mc_bus_dpmac_type;
-extern struct device_type fsl_mc_bus_dprtc_type;
-extern struct device_type fsl_mc_bus_dpseci_type;
-extern struct device_type fsl_mc_bus_dpdmux_type;
-extern struct device_type fsl_mc_bus_dpdcei_type;
-extern struct device_type fsl_mc_bus_dpaiop_type;
-extern struct device_type fsl_mc_bus_dpci_type;
-extern struct device_type fsl_mc_bus_dpdmai_type;
+extern const struct bus_type fsl_mc_bus_type;
+
+extern const struct device_type fsl_mc_bus_dprc_type;
+extern const struct device_type fsl_mc_bus_dpni_type;
+extern const struct device_type fsl_mc_bus_dpio_type;
+extern const struct device_type fsl_mc_bus_dpsw_type;
+extern const struct device_type fsl_mc_bus_dpbp_type;
+extern const struct device_type fsl_mc_bus_dpcon_type;
+extern const struct device_type fsl_mc_bus_dpmcp_type;
+extern const struct device_type fsl_mc_bus_dpmac_type;
+extern const struct device_type fsl_mc_bus_dprtc_type;
+extern const struct device_type fsl_mc_bus_dpseci_type;
+extern const struct device_type fsl_mc_bus_dpdmux_type;
+extern const struct device_type fsl_mc_bus_dpdcei_type;
+extern const struct device_type fsl_mc_bus_dpaiop_type;
+extern const struct device_type fsl_mc_bus_dpci_type;
+extern const struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h
new file mode 100644
index 000000000000..fdecca8c90f0
--- /dev/null
+++ b/include/linux/fsl/netc_global.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP
+ */
+#ifndef __NETC_GLOBAL_H
+#define __NETC_GLOBAL_H
+
+#include <linux/io.h>
+
+static inline u32 netc_read(void __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void netc_write(void __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+
+#endif
diff --git a/include/linux/fsl/ntmp.h b/include/linux/fsl/ntmp.h
new file mode 100644
index 000000000000..916dc4fe7de3
--- /dev/null
+++ b/include/linux/fsl/ntmp.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+#ifndef __NETC_NTMP_H
+#define __NETC_NTMP_H
+
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+
+struct maft_keye_data {
+ u8 mac_addr[ETH_ALEN];
+ __le16 resv;
+};
+
+struct maft_cfge_data {
+ __le16 si_bitmap;
+ __le16 resv;
+};
+
+struct netc_cbdr_regs {
+ void __iomem *pir;
+ void __iomem *cir;
+ void __iomem *mr;
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *lenr;
+};
+
+struct netc_tbl_vers {
+ u8 maft_ver;
+ u8 rsst_ver;
+};
+
+struct netc_cbdr {
+ struct device *dev;
+ struct netc_cbdr_regs regs;
+
+ int bd_num;
+ int next_to_use;
+ int next_to_clean;
+
+ int dma_size;
+ void *addr_base;
+ void *addr_base_align;
+ dma_addr_t dma_base;
+ dma_addr_t dma_base_align;
+
+ /* Serialize the order of command BD ring */
+ spinlock_t ring_lock;
+};
+
+struct ntmp_user {
+ int cbdr_num; /* number of control BD ring */
+ struct device *dev;
+ struct netc_cbdr *ring;
+ struct netc_tbl_vers tbl;
+};
+
+struct maft_entry_data {
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+#if IS_ENABLED(CONFIG_NXP_NETC_LIB)
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs);
+void ntmp_free_cbdr(struct netc_cbdr *cbdr);
+
+/* NTMP APIs */
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id);
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count);
+int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count);
+#else
+static inline int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ return 0;
+}
+
+static inline void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+}
+
+static inline int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_update_entry(struct ntmp_user *user,
+ const u32 *table, int count)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b301bf7199d3..3601e25779ba 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -145,7 +145,6 @@ struct ptp_qoriq {
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
- struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
bool fiper3_support;
@@ -195,14 +194,5 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
-#ifdef CONFIG_DEBUG_FS
-void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
-void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
-#else
-static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-#endif
#endif
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 5d231ce8709b..49f20c2f99bf 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -118,7 +118,6 @@ struct fsl_usb2_platform_data {
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
-#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0)
struct spi_device;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 4da80e92f804..28a9cb13fbfa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -108,32 +108,35 @@ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
}
-static inline int fsnotify_file(struct file *file, __u32 mask)
+static inline int fsnotify_path(const struct path *path, __u32 mask)
{
- const struct path *path;
-
- if (file->f_mode & FMODE_NONOTIFY)
- return 0;
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+}
- path = &file->f_path;
- /* Permission events require group prio >= FSNOTIFY_PRIO_CONTENT */
- if (mask & ALL_FSNOTIFY_PERM_EVENTS &&
- !fsnotify_sb_has_priority_watchers(path->dentry->d_sb,
- FSNOTIFY_PRIO_CONTENT))
+static inline int fsnotify_file(struct file *file, __u32 mask)
+{
+ /*
+	 * FMODE_NONOTIFY fds are generated by fanotify itself and should not
+	 * generate new events. We also don't want to generate events for
+	 * FMODE_PATH fds (this involves open & close events) as they are just
+	 * handle creation / destruction events and not "real" file events.
+ */
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
return 0;
- return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+ return fsnotify_path(&file->f_path, mask);
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+
+int fsnotify_open_perm_and_set_mode(struct file *file);
+
/*
* fsnotify_file_area_perm - permission hook before access to file range
*/
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
const loff_t *ppos, size_t count)
{
- __u32 fsnotify_mask = FS_ACCESS_PERM;
-
/*
* filesystem may be modified in the context of permission events
* (e.g. by HSM filling a file on access), so sb freeze protection
@@ -141,49 +144,92 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
*/
lockdep_assert_once(file_write_not_started(file));
- if (!(perm_mask & MAY_READ))
+ if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
+ return 0;
+
+ /*
+ * read()/write() and other types of access generate pre-content events.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ int ret = fsnotify_pre_content(&file->f_path, ppos, count);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(perm_mask & MAY_READ) ||
+ likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode)))
return 0;
- return fsnotify_file(file, fsnotify_mask);
+ /*
+ * read() also generates the legacy FS_ACCESS_PERM event, so content
+ * scanners can inspect the content filled by pre-content event.
+ */
+ return fsnotify_path(&file->f_path, FS_ACCESS_PERM);
}
/*
- * fsnotify_file_perm - permission hook before file access
+ * fsnotify_mmap_perm - permission hook before mmap of file range
*/
-static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
{
- return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+ /*
+ * mmap() generates only pre-content events.
+ */
+ if (!file || likely(!FMODE_FSNOTIFY_HSM(file->f_mode)))
+ return 0;
+
+ return fsnotify_pre_content(&file->f_path, &off, len);
}
/*
- * fsnotify_open_perm - permission hook before file open
+ * fsnotify_truncate_perm - permission hook before file truncate
*/
-static inline int fsnotify_open_perm(struct file *file)
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
{
- int ret;
+ struct inode *inode = d_inode(path->dentry);
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
- if (ret)
- return ret;
- }
+ if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) ||
+ !fsnotify_sb_has_priority_watchers(inode->i_sb,
+ FSNOTIFY_PRIO_PRE_CONTENT))
+ return 0;
+
+ return fsnotify_pre_content(path, &length, 0);
+}
- return fsnotify_file(file, FS_OPEN_PERM);
+/*
+ * fsnotify_file_perm - permission hook before file access (unknown range)
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
}
#else
+static inline int fsnotify_open_perm_and_set_mode(struct file *file)
+{
+ return 0;
+}
+
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
const loff_t *ppos, size_t count)
{
return 0;
}
-static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
{
return 0;
}
-static inline int fsnotify_open_perm(struct file *file)
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
{
return 0;
}
@@ -249,6 +295,11 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
__fsnotify_vfsmount_delete(mnt);
}
+static inline void fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{
+ __fsnotify_mntns_delete(mntns);
+}
+
/*
* fsnotify_inoderemove - an inode is going away
*/
@@ -457,4 +508,19 @@ static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
NULL, NULL, NULL, 0);
}
+static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_ATTACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_detach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_DETACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_move(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_MOVE, ns, mnt);
+}
+
#endif /* _LINUX_FS_NOTIFY_H */
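
The rewritten hooks split file permission checks into content events
(FS_ACCESS_PERM, checked only when FMODE_FSNOTIFY_ACCESS_PERM is set) and
pre-content events (delivered through fsnotify_pre_content() when
FMODE_FSNOTIFY_HSM is set). A hedged sketch of how a VFS-side truncate
path would consume the new hook (the function is illustrative; the real
call sites live elsewhere in the VFS):

	static int sketch_truncate(const struct path *path, loff_t length)
	{
		int error;

		/* May block while an HSM backend populates content up to length */
		error = fsnotify_truncate_perm(path, length);
		if (error)
			return error;

		/* ... proceed with the actual truncate ... */
		return 0;
	}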
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 4dd6143db271..0d954ea7b179 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -55,6 +55,13 @@
 #define FS_OPEN_PERM		0x00010000	/* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
+/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
+
+#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
+
+#define FS_MNT_ATTACH 0x01000000 /* Mount was attached */
+#define FS_MNT_DETACH 0x02000000 /* Mount was detached */
+#define FS_MNT_MOVE (FS_MNT_ATTACH | FS_MNT_DETACH)
/*
* Set on inode mark that cares about things that happen to its children.
@@ -77,8 +84,17 @@
*/
#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
- FS_OPEN_EXEC_PERM)
+/* Mount namespace events */
+#define FSNOTIFY_MNT_EVENTS (FS_MNT_ATTACH | FS_MNT_DETACH)
+
+/* Content events can be used to inspect file content */
+#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
+ FS_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS)
+
+#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \
+ FSNOTIFY_PRE_CONTENT_EVENTS)
/*
* This is a list of all events that may get sent to a parent that is watching
@@ -99,6 +115,7 @@
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FSNOTIFY_MNT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
@@ -217,7 +234,6 @@ struct fsnotify_group {
#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
-#define FSNOTIFY_GROUP_NOFS 0x04 /* group lock is not direct reclaim safe */
int flags;
unsigned int owner_flags; /* stored flags of mark_mutex owner */
@@ -234,6 +250,7 @@ struct fsnotify_group {
* full */
struct mem_cgroup *memcg; /* memcg to charge allocations */
+ struct user_namespace *user_ns; /* user ns where group was created */
/* groups can define private fields here or use the void *private */
union {
@@ -256,6 +273,8 @@ struct fsnotify_group {
int f_flags; /* event_f_flags from fanotify_init() */
struct ucounts *ucounts;
mempool_t error_events_pool;
+ /* chained on perm_group_list */
+ struct list_head perm_grp_list;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -268,30 +287,29 @@ struct fsnotify_group {
static inline void fsnotify_group_lock(struct fsnotify_group *group)
{
mutex_lock(&group->mark_mutex);
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- group->owner_flags = memalloc_nofs_save();
+ group->owner_flags = memalloc_nofs_save();
}
static inline void fsnotify_group_unlock(struct fsnotify_group *group)
{
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- memalloc_nofs_restore(group->owner_flags);
+ memalloc_nofs_restore(group->owner_flags);
mutex_unlock(&group->mark_mutex);
}
static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
{
WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
- if (group->flags & FSNOTIFY_GROUP_NOFS)
- WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
}
/* When calling fsnotify tell it if the data is a path or inode */
enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_FILE_RANGE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_MNT,
FSNOTIFY_EVENT_ERROR,
};
@@ -301,6 +319,22 @@ struct fs_error_report {
struct super_block *sb;
};
+struct file_range {
+ const struct path *path;
+ loff_t pos;
+ size_t count;
+};
+
+static inline const struct path *file_range_path(const struct file_range *range)
+{
+ return range->path;
+}
+
+struct fsnotify_mnt {
+ const struct mnt_namespace *ns;
+ u64 mnt_id;
+};
+
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
{
switch (data_type) {
@@ -310,6 +344,8 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return d_inode(file_range_path(data)->dentry);
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *)data)->inode;
default:
@@ -325,6 +361,8 @@ static inline struct dentry *fsnotify_data_dentry(const void *data, int data_typ
return (struct dentry *)data;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry;
default:
return NULL;
}
@@ -336,6 +374,8 @@ static inline const struct path *fsnotify_data_path(const void *data,
switch (data_type) {
case FSNOTIFY_EVENT_PATH:
return data;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data);
default:
return NULL;
}
@@ -351,6 +391,8 @@ static inline struct super_block *fsnotify_data_sb(const void *data,
return ((struct dentry *)data)->d_sb;
case FSNOTIFY_EVENT_PATH:
return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry->d_sb;
case FSNOTIFY_EVENT_ERROR:
return ((struct fs_error_report *) data)->sb;
default:
@@ -358,6 +400,24 @@ static inline struct super_block *fsnotify_data_sb(const void *data,
}
}
+static inline const struct fsnotify_mnt *fsnotify_data_mnt(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_MNT:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline u64 fsnotify_data_mnt_id(const void *data, int data_type)
+{
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+
+ return mnt_data ? mnt_data->mnt_id : 0;
+}
+
static inline struct fs_error_report *fsnotify_data_error_report(
const void *data,
int data_type)
@@ -370,6 +430,18 @@ static inline struct fs_error_report *fsnotify_data_error_report(
}
}
+static inline const struct file_range *fsnotify_data_file_range(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return (struct file_range *)data;
+ default:
+ return NULL;
+ }
+}
+
/*
* Index to merged marks iterator array that correlates to a type of watch.
* The type of watched object can be deduced from the iterator type, but not
@@ -383,6 +455,7 @@ enum fsnotify_iter_type {
FSNOTIFY_ITER_TYPE_SB,
FSNOTIFY_ITER_TYPE_PARENT,
FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_MNTNS,
FSNOTIFY_ITER_TYPE_COUNT
};
@@ -392,6 +465,7 @@ enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
+ FSNOTIFY_OBJ_TYPE_MNTNS,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
@@ -576,8 +650,10 @@ extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
+extern void __fsnotify_mntns_delete(struct mnt_namespace *mntns);
extern void fsnotify_sb_free(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+extern void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt);
static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
{
@@ -594,12 +670,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
+ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
+
/* FS_EVENT_ON_CHILD is set if the inode may care */
- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+ return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
@@ -613,7 +691,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
- * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
@@ -832,21 +910,6 @@ extern void fsnotify_wait_marks_destroyed(void);
/* Clear all of the marks of a group attached to a given object type */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
unsigned int obj_type);
-/* run all the marks in a group, and clear all of the vfsmount marks */
-static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
-}
-/* run all the marks in a group, and clear all of the inode marks */
-static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
-}
-/* run all the marks in a group, and clear all of the sn marks */
-static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
-}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
@@ -856,9 +919,17 @@ static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
}
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count);
#else
+static inline int fsnotify_pre_content(const struct path *path,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
static inline int fsnotify(__u32 mask, const void *data, int data_type,
struct inode *dir, const struct qstr *name,
struct inode *inode, u32 cookie)
@@ -881,6 +952,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
+static inline void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{}
+
static inline void fsnotify_sb_free(struct super_block *sb)
{}
@@ -895,6 +969,9 @@ static inline u32 fsnotify_get_cookie(void)
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
+static inline void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{}
+
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL __ */
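
The new FSNOTIFY_EVENT_FILE_RANGE data type lets pre-content events carry
the accessed byte range, and the fsnotify_data_*() accessors above
transparently unwrap it. A hedged sketch of how an event generator would
package such a payload (this mirrors what fsnotify_pre_content() is
expected to do internally; the wrapper itself is illustrative):

	static int sketch_report_pre_access(const struct path *path, loff_t pos,
					    size_t count)
	{
		struct file_range range = {
			.path	= path,
			.pos	= pos,
			.count	= count,
		};

		return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range,
				       FSNOTIFY_EVENT_FILE_RANGE);
	}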
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 1eb7eae580be..5bc7280425a7 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -26,8 +26,16 @@
/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+struct fsverity_info;
+
/* Verity operations for filesystems */
struct fsverity_operations {
+ /**
+ * The offset of the pointer to struct fsverity_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
/**
* Begin enabling verity on the given file.
@@ -124,15 +132,37 @@ struct fsverity_operations {
#ifdef CONFIG_FS_VERITY
+/*
+ * Returns the address of the verity info pointer within the filesystem-specific
+ * part of the inode. (To save memory on filesystems that don't support
+ * fsverity, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fsverity_info **
+fsverity_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_vop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_vop->inode_info_offs;
+}
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
/*
- * Pairs with the cmpxchg_release() in fsverity_set_info().
- * I.e., another task may publish ->i_verity_info concurrently,
- * executing a RELEASE barrier. We need to use smp_load_acquire() here
- * to safely ACQUIRE the memory the other task published.
+ * Since this function can be called on inodes belonging to filesystems
+ * that don't support fsverity at all, and fsverity_info_addr() doesn't
+ * work on such filesystems, we have to start with an IS_VERITY() check.
+ * Checking IS_VERITY() here is also useful to minimize the overhead of
+ * fsverity_active() on non-verity files.
+ */
+ if (!IS_VERITY(inode))
+ return NULL;
+
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info(). I.e.,
+ * another task may publish the inode's verity info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
*/
- return smp_load_acquire(&inode->i_verity_info);
+ return smp_load_acquire(fsverity_info_addr(inode));
}
/* enable.c */
@@ -156,12 +186,19 @@ void __fsverity_cleanup_inode(struct inode *inode);
* fsverity_cleanup_inode() - free the inode's verity info, if present
* @inode: an inode being evicted
*
- * Filesystems must call this on inode eviction to free ->i_verity_info.
+ * Filesystems must call this on inode eviction to free the inode's verity info.
*/
static inline void fsverity_cleanup_inode(struct inode *inode)
{
- if (inode->i_verity_info)
+ /*
+ * Only IS_VERITY() inodes can have verity info, so start by checking
+ * for IS_VERITY() (which is faster than retrieving the pointer to the
+ * verity info). This minimizes overhead for non-verity inodes.
+ */
+ if (IS_VERITY(inode))
__fsverity_cleanup_inode(inode);
+ else
+ VFS_WARN_ON_ONCE(*fsverity_info_addr(inode) != NULL);
}
/* read_metadata.c */
@@ -267,12 +304,12 @@ static inline bool fsverity_verify_page(struct page *page)
* fsverity_active() - do reads from the inode need to go through fs-verity?
* @inode: inode to check
*
- * This checks whether ->i_verity_info has been set.
+ * This checks whether the inode's verity info has been set.
*
* Filesystems call this from ->readahead() to check whether the pages need to
* be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
* a race condition where the file is being read concurrently with
- * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
+ * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before the verity info.)
*
* Return: true if reads need to go through fs-verity, otherwise false
*/
@@ -287,7 +324,7 @@ static inline bool fsverity_active(const struct inode *inode)
* @filp: the struct file being set up
*
* When opening a verity file, deny the open if it is for writing. Otherwise,
- * set up the inode's ->i_verity_info if not already done.
+ * set up the inode's verity info if not already done.
*
* When combined with fscrypt, this must be called after fscrypt_file_open().
* Otherwise, we won't have the key set up to decrypt the verity metadata.
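
With ->i_verity_info gone from struct inode, each filesystem now embeds
the pointer in its own inode structure and publishes its location through
->inode_info_offs, measured from the common struct inode. A hedged sketch
("myfs" is hypothetical; the offset is negative when the pointer precedes
the embedded inode):

	struct myfs_inode_info {
		struct fsverity_info	*i_verity_info;
		struct inode		vfs_inode;
	};

	static const struct fsverity_operations myfs_verityops = {
		.inode_info_offs = (ptrdiff_t)offsetof(struct myfs_inode_info, i_verity_info) -
				   (ptrdiff_t)offsetof(struct myfs_inode_info, vfs_inode),
		/* ->begin_enable_verity(), ->end_enable_verity(), etc. */
	};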
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 800995c425e0..770f0dc993cc 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -43,9 +43,8 @@ struct dyn_ftrace;
char *arch_ftrace_match_adjust(char *str, const char *search);
-#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
-struct fgraph_ret_regs;
-unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
#endif
@@ -86,15 +85,15 @@ struct ftrace_hash;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
#else
-static inline const char *
+static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
#endif
@@ -113,14 +112,61 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
#ifdef CONFIG_FUNCTION_TRACER
-extern int ftrace_enabled;
+#include <linux/ftrace_regs.h>
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+extern int ftrace_enabled;
+/**
+ * ftrace_regs - ftrace partial/optimal register set
+ *
+ * ftrace_regs represents a group of registers which is used at the
+ * function entry and exit. There are three types of registers.
+ *
+ * - Registers for passing the parameters to the callee, including the
+ *   stack pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
+ * - Registers for passing the return values to the caller.
+ *   (e.g. rax and rdx on x86_64)
+ * - Registers for hooking the function call and return including the
+ * frame pointer (the frame pointer is architecture/config dependent)
+ * (e.g. rip, rbp and rsp for x86_64)
+ *
+ * Also, architecture dependent fields can be used for internal processing.
+ * (e.g. orig_ax on x86_64)
+ *
+ * Basically, ftrace_regs stores the registers related to the context.
+ * On function entry, registers for function parameters and hooking the
+ * function call are stored, and on function exit, registers for function
+ * return value and frame pointers are stored.
+ *
+ * Also, which registers are restored from the ftrace_regs depends on
+ * the context.
+ * On function entry, those registers will be restored except for
+ * the stack pointer, so that the user can change the function
+ * parameters and the instruction pointer (e.g. for live patching).
+ * On function exit, only the registers used for return values
+ * are restored.
+ *
+ * NOTE: users *must not* access regs directly, only via the APIs, because
+ * the members can change according to the architecture.
+ * This is why the structure is empty here, so that nothing accesses
+ * the ftrace_regs directly.
+ */
struct ftrace_regs {
- struct pt_regs regs;
+ /* Nothing to see here, use the accessor functions! */
};
-#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs)
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+/*
+ * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
+ * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
+ * populated. It should return NULL otherwise.
+ */
+static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
/*
* ftrace_regs_set_instruction_pointer() is to be defined by the architecture
@@ -130,6 +176,12 @@ struct ftrace_regs {
#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+#ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
+
+static_assert(sizeof(struct pt_regs) == ftrace_regs_size());
+
+#endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
if (!fregs)
@@ -138,6 +190,62 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
return arch_ftrace_get_regs(fregs);
}
+#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
+ defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
+
+#ifndef arch_ftrace_partial_regs
+#define arch_ftrace_partial_regs(regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ /*
+	 * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory
+	 * layout includes pt_regs, so always return that address.
+	 * Since arch_ftrace_get_regs() will check some members and may return
+	 * NULL, we cannot use it here.
+ */
+ regs = &arch_ftrace_regs(fregs)->regs;
+
+ /* Allow arch specific updates to regs. */
+ arch_ftrace_partial_regs(regs);
+ return regs;
+}
+
+#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+/*
+ * Please define an arch dependent pt_regs which is compatible with
+ * perf_arch_fetch_caller_regs() but based on ftrace_regs.
+ * This requires:
+ * - user_mode(_regs) returns false (always kernel mode).
+ * - the _regs can be used for a stack trace.
+ */
+#ifndef arch_ftrace_fill_perf_regs
+/* Same as perf_arch_fetch_caller_regs(): do nothing by default */
+#define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ arch_ftrace_fill_perf_regs(fregs, regs);
+ return regs;
+}
+
+#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
+
+#endif
+
/*
* When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
* Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
@@ -150,22 +258,22 @@ static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
return ftrace_get_regs(fregs) != NULL;
}
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
-#define ftrace_regs_get_instruction_pointer(fregs) \
- instruction_pointer(ftrace_get_regs(fregs))
-#define ftrace_regs_get_argument(fregs, n) \
- regs_get_kernel_argument(ftrace_get_regs(fregs), n)
-#define ftrace_regs_get_stack_pointer(fregs) \
- kernel_stack_pointer(ftrace_get_regs(fregs))
-#define ftrace_regs_return_value(fregs) \
- regs_return_value(ftrace_get_regs(fregs))
-#define ftrace_regs_set_return_value(fregs, ret) \
- regs_set_return_value(ftrace_get_regs(fregs), ret)
-#define ftrace_override_function_with_return(fregs) \
- override_function_with_return(ftrace_get_regs(fregs))
-#define ftrace_regs_query_register_offset(name) \
- regs_query_register_offset(name)
-#endif
+#ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
+static __always_inline unsigned long
+ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
+{
+ unsigned long *stackp;
+
+ stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
+ if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
+ ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
+ return *(stackp + nth);
+
+ return 0;
+}
+#else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
+#define ftrace_regs_get_kernel_stack_nth(fregs, nth) (0L)
+#endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -227,6 +335,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ftrace_enabled.
* DIRECT - Used by the direct ftrace_ops helper for direct functions
* (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
+ * GRAPH - Is a component of the fgraph_ops structure
*/
enum {
FTRACE_OPS_FL_ENABLED = BIT(0),
@@ -247,6 +357,9 @@ enum {
FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
FTRACE_OPS_FL_PERMANENT = BIT(16),
FTRACE_OPS_FL_DIRECT = BIT(17),
+ FTRACE_OPS_FL_SUBOP = BIT(18),
+ FTRACE_OPS_FL_GRAPH = BIT(19),
+ FTRACE_OPS_FL_JMP = BIT(20),
};
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -334,7 +447,9 @@ struct ftrace_ops {
unsigned long trampoline;
unsigned long trampoline_size;
struct list_head list;
+ struct list_head subop_list;
ftrace_ops_func_t ops_func;
+ struct ftrace_ops *managed;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
unsigned long direct_call;
#endif
@@ -463,11 +578,41 @@ static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
unsigned long addr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifdef CONFIG_STACK_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool ftrace_is_jmp(unsigned long addr)
+{
+ return addr & 1;
+}
+
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr | 1UL;
+}
+
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
+{
+ return addr & ~1UL;
+}
+#else
+static inline bool ftrace_is_jmp(unsigned long addr)
+{
+ return false;
+}
+
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr;
+}
+
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
+{
+ return addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_JMP */
-extern int stack_tracer_enabled;
+#ifdef CONFIG_STACK_TRACER
-int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
@@ -509,6 +654,30 @@ static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
+enum {
+ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
+};
+
+/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
+#ifndef ftrace_get_symaddr
+/**
+ * ftrace_get_symaddr - return the symbol address from fentry_ip
+ * @fentry_ip: the address of ftrace location
+ *
+ * Get the symbol address from @fentry_ip (fast path). If there is no fast
+ * search path, this returns 0.
+ * Users may need to use the kallsyms API to find the symbol address.
+ */
+#define ftrace_get_symaddr(fentry_ip) (0)
+#endif
+
+void ftrace_sync_ipi(void *data);
+
#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void);
@@ -603,15 +772,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
-enum {
- FTRACE_UPDATE_CALLS = (1 << 0),
- FTRACE_DISABLE_CALLS = (1 << 1),
- FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
- FTRACE_START_FUNC_RET = (1 << 3),
- FTRACE_STOP_FUNC_RET = (1 << 4),
- FTRACE_MAY_SLEEP = (1 << 5),
-};
-
/*
* The FTRACE_UPDATE_* enum is used to pass information back
* from the ftrace_update_record() and ftrace_test_record()
@@ -989,7 +1149,7 @@ static __always_inline unsigned long get_lock_parent_ip(void)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
@@ -1007,7 +1167,15 @@ static inline void ftrace_init(void) { }
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
- int depth;
+ unsigned long depth;
+} __packed;
+
+/*
+ * Structure that defines an entry function trace with retaddr.
+ */
+struct fgraph_retaddr_ent {
+ struct ftrace_graph_ent ent;
+ unsigned long retaddr; /* Return address */
} __packed;
/*
@@ -1023,23 +1191,38 @@ struct ftrace_graph_ret {
int depth;
/* Number of functions that overran the depth limit for current task */
unsigned int overrun;
- unsigned long long calltime;
- unsigned long long rettime;
} __packed;
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+struct fgraph_ops;
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* entry */
+
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct fgraph_ops {
trace_func_graph_ent_t entryfunc;
trace_func_graph_ret_t retfunc;
+ struct ftrace_ops ops; /* for the hash lists */
+ void *private;
+ trace_func_graph_ent_t saved_func;
+ int idx;
};
+void *fgraph_reserve_data(int idx, int size_bytes);
+void *fgraph_retrieve_data(int idx, int *size_bytes);
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
+
/*
* Stack of return addresses for functions
* of a thread.
@@ -1048,16 +1231,10 @@ struct fgraph_ops {
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
- unsigned long long calltime;
-#ifdef CONFIG_FUNCTION_PROFILER
- unsigned long long subtime;
-#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long *retp;
-#endif
};
/*
@@ -1068,14 +1245,23 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-function_graph_enter(unsigned long ret, unsigned long func,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter_regs(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp,
+ struct ftrace_regs *fregs);
+
+static inline int function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long fp, unsigned long *retp)
+{
+ return function_graph_enter_regs(ret, func, fp, retp, NULL);
+}
struct ftrace_ret_stack *
-ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
+ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
+unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
+unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
/*
* Sometimes we don't want to trace a function with the function
@@ -1114,6 +1300,9 @@ extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
+/* Used by assembly, but to quiet sparse warnings */
+extern struct ftrace_ops *function_trace_op;
+
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
@@ -1149,16 +1338,9 @@ static inline void unpause_graph_tracing(void) { }
#ifdef CONFIG_TRACING
enum ftrace_dump_mode;
-#define MAX_TRACER_SIZE 100
-extern char ftrace_dump_on_oops[];
extern int ftrace_dump_on_oops_enabled(void);
-extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
-extern int __disable_trace_on_warning;
-
-int tracepoint_printk_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
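
The fgraph_ops callbacks now receive the ops pointer and a struct
ftrace_regs, which is what lets multiple graph tracers coexist with
per-ops filtering. A hedged sketch of a client against the new signatures
(the callback bodies are illustrative; registration still goes through
register_ftrace_graph()):

	static int sketch_entry(struct ftrace_graph_ent *trace,
				struct fgraph_ops *gops, struct ftrace_regs *fregs)
	{
		return 1;	/* non-zero: trace this function */
	}

	static void sketch_return(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops, struct ftrace_regs *fregs)
	{
	}

	static struct fgraph_ops sketch_gops = {
		.entryfunc	= sketch_entry,
		.retfunc	= sketch_return,
	};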
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
new file mode 100644
index 000000000000..15627ceea9bc
--- /dev/null
+++ b/include/linux/ftrace_regs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FTRACE_REGS_H
+#define _LINUX_FTRACE_REGS_H
+
+/*
+ * For archs that just embed pt_regs in ftrace_regs, this default can be used.
+ * If an architecture does not use pt_regs, it must define all the below
+ * accessor functions.
+ */
+#ifndef HAVE_ARCH_FTRACE_REGS
+struct __arch_ftrace_regs {
+ struct pt_regs regs;
+};
+
+#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))
+
+struct ftrace_regs;
+
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_return_value(fregs) \
+ regs_return_value(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#define ftrace_regs_get_frame_pointer(fregs) \
+ frame_pointer(&arch_ftrace_regs(fregs)->regs)
+
+#endif /* HAVE_ARCH_FTRACE_REGS */
+
+/* This can be overridden by the architectures */
+#ifndef FTRACE_REGS_MAX_ARGS
+# define FTRACE_REGS_MAX_ARGS 6
+#endif
+
+#endif /* _LINUX_FTRACE_REGS_H */
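
Since struct ftrace_regs is now opaque outside the architecture code,
callbacks must go through the accessor macros above. A hedged sketch of
an ftrace_ops handler using only the accessors (the handler itself is
invented; the ftrace_func_t signature is from ftrace.h):

	static void sketch_callback(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		unsigned long sp   = ftrace_regs_get_stack_pointer(fregs);
		unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);

		pr_debug("%pS: sp=%lx arg0=%lx\n", (void *)ip, sp, arg0);
	}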
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b70df27d7e85..9e9750f04980 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,11 @@
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm_types.h>
#include <uapi/linux/futex.h>
struct inode;
-struct mm_struct;
struct task_struct;
/*
@@ -34,6 +34,7 @@ union futex_key {
u64 i_seq;
unsigned long pgoff;
unsigned int offset;
+ /* unsigned int node; */
} shared;
struct {
union {
@@ -42,11 +43,13 @@ union futex_key {
};
unsigned long address;
unsigned int offset;
+ /* unsigned int node; */
} private;
struct {
u64 ptr;
unsigned long word;
unsigned int offset;
+ unsigned int node; /* NOT hashed! */
} both;
};
@@ -77,7 +80,20 @@ void futex_exec_release(struct task_struct *tsk);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#else
+int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4);
+
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+int futex_hash_allocate_default(void);
+void futex_hash_free(struct mm_struct *mm);
+int futex_mm_init(struct mm_struct *mm);
+
+#else /* !CONFIG_FUTEX_PRIVATE_HASH */
+static inline int futex_hash_allocate_default(void) { return 0; }
+static inline int futex_hash_free(struct mm_struct *mm) { return 0; }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+#endif /* CONFIG_FUTEX_PRIVATE_HASH */
+
+#else /* !CONFIG_FUTEX */
static inline void futex_init_task(struct task_struct *tsk) { }
static inline void futex_exit_recursive(struct task_struct *tsk) { }
static inline void futex_exit_release(struct task_struct *tsk) { }
@@ -88,6 +104,17 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
{
return -EINVAL;
}
+static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ return -EINVAL;
+}
+static inline int futex_hash_allocate_default(void)
+{
+ return 0;
+}
+static inline int futex_hash_free(struct mm_struct *mm) { return 0; }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+
#endif
#endif
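
The futex_hash_prctl() declaration above is the kernel side of a prctl()
interface for sizing the per-process private futex hash. A heavily hedged
userspace sketch, assuming the PR_FUTEX_HASH / PR_FUTEX_HASH_SET_SLOTS
constants from the accompanying uapi change (the exact argument layout is
an assumption here):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Ask for 16 private hash slots; trailing args assumed reserved */
		if (prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0, 0) != 0)
			perror("PR_FUTEX_HASH");
		return 0;
	}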
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
index 3ff4c277296f..9bd605b87c4c 100644
--- a/include/linux/fw_table.h
+++ b/include/linux/fw_table.h
@@ -54,7 +54,7 @@ int cdat_table_parse(enum acpi_cdat_type type,
#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
#define __init_or_fwtbl_lib __init_or_acpilib
#else
-#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "CXL")
#define __init_or_fwtbl_lib
#endif
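
The EXPORT_SYMBOL_NS_GPL() change reflects the treewide switch of symbol
namespaces from bare identifiers to string literals. Consumers import the
namespace the same way; a minimal sketch:

	#include <linux/module.h>

	/* New string-literal form; the old form was MODULE_IMPORT_NS(CXL) */
	MODULE_IMPORT_NS("CXL");
	MODULE_LICENSE("GPL");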
diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h
new file mode 100644
index 000000000000..5d61fc8a6871
--- /dev/null
+++ b/include/linux/fwctl.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_FWCTL_H
+#define __LINUX_FWCTL_H
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/cleanup.h>
+#include <uapi/fwctl/fwctl.h>
+
+struct fwctl_device;
+struct fwctl_uctx;
+
+/**
+ * struct fwctl_ops - Driver provided operations
+ *
+ * fwctl_unregister() will wait until all executing ops are completed before it
+ * returns. Drivers should be mindful to not let their ops run for too long as
+ * it will block device hot unplug and module unloading.
+ */
+struct fwctl_ops {
+ /**
+ * @device_type: The drivers assigned device_type number. This is uABI.
+ */
+ enum fwctl_device_type device_type;
+ /**
+ * @uctx_size: The size of the fwctl_uctx struct to allocate. The first
+ * bytes of this memory will be a fwctl_uctx. The driver can use the
+ * remaining bytes as its private memory.
+ */
+ size_t uctx_size;
+ /**
+ * @open_uctx: Called when a file descriptor is opened before the uctx
+ * is ever used.
+ */
+ int (*open_uctx)(struct fwctl_uctx *uctx);
+ /**
+ * @close_uctx: Called when the uctx is destroyed, usually when the FD
+ * is closed.
+ */
+ void (*close_uctx)(struct fwctl_uctx *uctx);
+ /**
+	 * @info: Implement FWCTL_INFO. Return kmalloc()'d memory that is copied
+	 * to out_device_data. On input, length indicates the size of the user
+	 * buffer; on output, it indicates the size of the memory. The driver can
+	 * ignore length on input; the core code will handle everything.
+ */
+ void *(*info)(struct fwctl_uctx *uctx, size_t *length);
+ /**
+ * @fw_rpc: Implement FWCTL_RPC. Deliver rpc_in/in_len to the FW and
+ * return the response and set out_len. rpc_in can be returned as the
+ * response pointer. Otherwise the returned pointer is freed with
+ * kvfree().
+ */
+ void *(*fw_rpc)(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *rpc_in, size_t in_len, size_t *out_len);
+};
+
+/**
+ * struct fwctl_device - Per-driver registration struct
+ * @dev: The sysfs (class/fwctl/fwctlXX) device
+ *
+ * Each driver instance will have one of these structs with the driver private
+ * data following immediately after. This struct is refcounted, it is freed by
+ * calling fwctl_put().
+ */
+struct fwctl_device {
+ struct device dev;
+ /* private: */
+ struct cdev cdev;
+
+ /* Protect uctx_list */
+ struct mutex uctx_list_lock;
+ struct list_head uctx_list;
+ /*
+ * Protect ops, held for write when ops becomes NULL during unregister,
+ * held for read whenever ops is loaded or an ops function is running.
+ */
+ struct rw_semaphore registration_lock;
+ const struct fwctl_ops *ops;
+};
+
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+ const struct fwctl_ops *ops,
+ size_t size);
+/**
+ * fwctl_alloc_device - Allocate a fwctl
+ * @parent: Physical device that provides the FW interface
+ * @ops: Driver ops to register
+ * @drv_struct: 'struct driver_fwctl' that holds the struct fwctl_device
+ * @member: Name of the struct fwctl_device in @drv_struct
+ *
+ * This allocates and initializes the fwctl_device embedded in the drv_struct.
+ * Upon success the pointer must be freed via fwctl_put(). Returns a 'drv_struct
+ * \*' on success, NULL on error.
+ */
+#define fwctl_alloc_device(parent, ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct fwctl_device, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_fwctl_alloc_device(parent, ops, \
+ sizeof(drv_struct)); \
+ })
+
+static inline struct fwctl_device *fwctl_get(struct fwctl_device *fwctl)
+{
+ get_device(&fwctl->dev);
+ return fwctl;
+}
+static inline void fwctl_put(struct fwctl_device *fwctl)
+{
+ put_device(&fwctl->dev);
+}
+DEFINE_FREE(fwctl, struct fwctl_device *, if (_T) fwctl_put(_T));
+
+int fwctl_register(struct fwctl_device *fwctl);
+void fwctl_unregister(struct fwctl_device *fwctl);
+
+/**
+ * struct fwctl_uctx - Per user FD context
+ * @fwctl: fwctl instance that owns the context
+ *
+ * Every FD opened by userspace will get a unique context allocation. Any driver
+ * private data will follow immediately after.
+ */
+struct fwctl_uctx {
+ struct fwctl_device *fwctl;
+ /* private: */
+ /* Head at fwctl_device::uctx_list */
+ struct list_head uctx_list_entry;
+};
+
+#endif
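
Putting the pieces of the new fwctl core together, a driver embeds a
struct fwctl_device at the start of its own structure and registers it. A
hedged sketch ("mydrv" and its layout are hypothetical; fwctl_alloc_device(),
fwctl_register() and fwctl_put() are the entry points declared above):

	struct mydrv_fwctl {
		struct fwctl_device fwctl;	/* must be the first member */
	};

	struct mydrv_uctx {
		struct fwctl_uctx uctx;		/* first bytes, per uctx_size */
		/* driver private per-FD state follows */
	};

	static const struct fwctl_ops mydrv_fwctl_ops = {
		/* .device_type = the driver's uABI-assigned enum value, */
		.uctx_size = sizeof(struct mydrv_uctx),
		/* .open_uctx, .close_uctx, .info and .fw_rpc go here */
	};

	static int mydrv_fwctl_setup(struct device *parent)
	{
		struct mydrv_fwctl *md;
		int rc;

		md = fwctl_alloc_device(parent, &mydrv_fwctl_ops,
					struct mydrv_fwctl, fwctl);
		if (!md)
			return -ENOMEM;

		rc = fwctl_register(&md->fwctl);
		if (rc)
			fwctl_put(&md->fwctl);
		return rc;
	}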
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 0d79070c5a70..097be89487bf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -2,6 +2,11 @@
/*
* fwnode.h - Firmware device node object handle type definition.
*
+ * This header file provides low-level data types and definitions for firmware
+ * and device property providers. The respective API header files supplied by
+ * them should contain all of the requisite data types and definitions for end
+ * users, so including this header directly should not be necessary.
+ *
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
@@ -91,7 +96,7 @@ struct fwnode_endpoint {
#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
-#define NR_FWNODE_REFERENCE_ARGS 8
+#define NR_FWNODE_REFERENCE_ARGS 16
/**
* struct fwnode_reference_args - Fwnode reference with additional arguments
@@ -112,6 +117,7 @@ struct fwnode_reference_args {
* @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
+ * @property_read_bool: Return a boolean property value.
* @property_read_int_array: Read an array of integer properties. Return zero on
* success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
@@ -141,6 +147,8 @@ struct fwnode_operations {
(*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
+ bool (*property_read_bool)(const struct fwnode_handle *fwnode,
+ const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 07e370113b2b..86d62fdafd7a 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -58,7 +58,7 @@ struct gameport_driver {
bool ignore;
};
-#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
+#define to_gameport_driver(d) container_of_const(d, struct gameport_driver, driver)
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
index cb572677fd7f..616e81a7f7e3 100644
--- a/include/linux/gcd.h
+++ b/include/linux/gcd.h
@@ -3,6 +3,9 @@
#define _GCD_H
#include <linux/compiler.h>
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_TRUE(efficient_ffs_key);
unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index f3512fddf3d7..5b51c3d582d6 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -41,6 +41,7 @@
#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
+#include <linux/slab.h>
#include <linux/types.h>
struct genradix_root;
@@ -48,10 +49,63 @@ struct genradix_root;
#define GENRADIX_NODE_SHIFT 9
#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
struct __genradix {
struct genradix_root *root;
};
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
/*
* NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
@@ -128,6 +182,30 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
void *__genradix_ptr(struct __genradix *, size_t);
/**
@@ -142,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t);
__genradix_ptr(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx)))
-void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
/**
* genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
@@ -157,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
(__genradix_cast(_radix) \
__genradix_ptr_alloc(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx), \
- _gfp))
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
struct genradix_iter {
size_t offset;
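
The new preallocation variants let a caller split node allocation from
insertion, so the insert itself can run where blocking allocation is not
allowed. A hedged sketch using the GENRADIX()/genradix_init() declarations
from this header (it assumes __genradix_ptr_alloc() consumes the
preallocated node only when a new leaf is actually needed, NULLing the
caller's pointer in that case):

	struct foo { int x; };

	static int sketch_insert(void)
	{
		GENRADIX(struct foo) radix;
		struct genradix_node *node;
		struct foo *p;

		genradix_init(&radix);

		/* Allocate up front, e.g. before taking a spinlock */
		node = genradix_alloc_node(GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		p = genradix_ptr_alloc_preallocated(&radix, 42, &node, GFP_NOWAIT);
		if (node)
			genradix_free_node(node);	/* was not consumed */

		return p ? 0 : -ENOMEM;
	}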
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
new file mode 100644
index 000000000000..6a9a1acb5aad
--- /dev/null
+++ b/include/linux/generic_pt/common.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_COMMON_H
+#define __GENERIC_PT_COMMON_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+#include <linux/bits.h>
+
+/**
+ * DOC: Generic Radix Page Table
+ *
+ * Generic Radix Page Table is a set of functions and helpers to efficiently
+ * parse radix style page tables typically seen in HW implementations. The
+ * interface is built to deliver similar code generation as the mm's pte/pmd/etc
+ * system by fully inlining the exact code required to handle each table level.
+ *
+ * Like the mm subsystem each format contributes its parsing implementation
+ * under common names and the common code implements the required algorithms.
+ *
+ * The system is divided into three logical levels:
+ *
+ * - The page table format and its manipulation functions
+ * - Generic helpers to give a consistent API regardless of underlying format
+ * - An algorithm implementation (e.g. IOMMU/DRM/KVM/MM)
+ *
+ * Multiple implementations are supported. The intention is to have the generic
+ * format code be re-usable for whatever specialized implementation is required.
+ * The generic code is solely about the format of the radix tree; it does not
+ * include memory allocation or higher level decisions that are left for the
+ * implementation.
+ *
+ * The generic framework supports a superset of functions across many HW
+ * implementations:
+ *
+ * - Entries comprised of contiguous blocks of IO PTEs for larger page sizes
+ * - Multi-level tables, up to 6 levels. Runtime selected top level
+ * - Runtime variable table level size (ARM's concatenated tables)
+ * - Expandable top level allowing dynamic sizing of table levels
+ * - Optional leaf entries at any level
+ * - 32-bit/64-bit virtual and output addresses, using every address bit
+ * - Dirty tracking
+ * - Sign extended addressing
+ */
+
+/**
+ * struct pt_common - struct for all page table implementations
+ */
+struct pt_common {
+ /**
+ * @top_of_table: Encodes the table top pointer and the top level in a
+ * single value. Must use READ_ONCE/WRITE_ONCE to access it. The lower
+ * bits of the aligned table pointer are used for the level.
+ */
+ uintptr_t top_of_table;
+ /**
+ * @max_oasz_lg2: Maximum number of bits the OA can contain. Upper bits
+ * must be zero. This may be less than what the page table format
+ * supports, but must not be more.
+ */
+ u8 max_oasz_lg2;
+ /**
+ * @max_vasz_lg2: Maximum number of bits the VA can contain. Upper bits
+ * are 0 or 1 depending on pt_full_va_prefix(). This may be less than
+ * what the page table format supports, but must not be more. When
+ * PT_FEAT_DYNAMIC_TOP is set this reflects the maximum VA capability.
+ */
+ u8 max_vasz_lg2;
+ /**
+ * @features: Bitmap of `enum pt_features`
+ */
+ unsigned int features;
+};
+
+/* Encoding parameters for top_of_table */
+enum {
+ PT_TOP_LEVEL_BITS = 3,
+ PT_TOP_LEVEL_MASK = GENMASK(PT_TOP_LEVEL_BITS - 1, 0),
+};
+
+/**
+ * enum pt_features - Features turned on in the table. Each symbol is a bit
+ * position.
+ */
+enum pt_features {
+ /**
+ * @PT_FEAT_DMA_INCOHERENT: Cache flush page table memory before
+	 * assuming the HW can read it. Otherwise an SMP release is sufficient
+ * for HW to read it.
+ */
+ PT_FEAT_DMA_INCOHERENT,
+ /**
+ * @PT_FEAT_FULL_VA: The table can span the full VA range from 0 to
+ * PT_VADDR_MAX.
+ */
+ PT_FEAT_FULL_VA,
+ /**
+ * @PT_FEAT_DYNAMIC_TOP: The table's top level can be increased
+ * dynamically during map. This requires HW support for atomically
+ * setting both the table top pointer and the starting table level.
+ */
+ PT_FEAT_DYNAMIC_TOP,
+ /**
+	 * @PT_FEAT_SIGN_EXTEND: The topmost bit of the valid VA range sign
+ * extends up to the full pt_vaddr_t. This divides the page table into
+ * three VA ranges::
+ *
+ * 0 -> 2^N - 1 Lower
+ * 2^N -> (MAX - 2^N - 1) Non-Canonical
+ * MAX - 2^N -> MAX Upper
+ *
+	 * In this mode pt_common::max_vasz_lg2 includes the sign bit, and the
+ * upper bits that don't fall within the translation are just validated.
+ *
+ * If not set there is no sign extension and valid VA goes from 0 to 2^N
+ * - 1.
+ */
+ PT_FEAT_SIGN_EXTEND,
+ /**
+ * @PT_FEAT_FLUSH_RANGE: IOTLB maintenance is done by flushing IOVA
+ * ranges which will clean out any walk cache or any IOPTE fully
+ * contained by the range. The optimization objective is to minimize the
+ * number of flushes even if ranges include IOVA gaps that do not need
+ * to be flushed.
+ */
+ PT_FEAT_FLUSH_RANGE,
+ /**
+ * @PT_FEAT_FLUSH_RANGE_NO_GAPS: Like PT_FEAT_FLUSH_RANGE except that
+ * the optimization objective is to only flush IOVA that has been
+ * changed. This mode is suitable for cases like hypervisor shadowing
+ * where flushing unchanged ranges may cause the hypervisor to reparse
+	 * a significant amount of the page table.
+ */
+ PT_FEAT_FLUSH_RANGE_NO_GAPS,
+ /* private: */
+ PT_FEAT_FMT_START,
+};
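+
+/*
+ * Each pt_features symbol is a bit position, so a hypothetical feature test
+ * looks like:
+ *
+ *	if (common->features & BIT(PT_FEAT_DYNAMIC_TOP))
+ *		...
+ */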
+
+struct pt_amdv1 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_AMDV1_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_AMDV1_FORCE_COHERENCE,
+};
+
+struct pt_vtdss {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_VTDSS_FORCE_COHERENCE = PT_FEAT_FMT_START,
+ /*
+ * Prevent creating read-only PTEs. Used to work around HW errata
+ * ERRATA_772415_SPR17.
+ */
+ PT_FEAT_VTDSS_FORCE_WRITEABLE,
+};
+
+struct pt_x86_64 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_X86_64_AMD_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+};
+
+#endif
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
new file mode 100644
index 000000000000..9eefbb74efd0
--- /dev/null
+++ b/include/linux/generic_pt/iommu.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_IOMMU_H
+#define __GENERIC_PT_IOMMU_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/iommu.h>
+#include <linux/mm_types.h>
+
+struct iommu_iotlb_gather;
+struct pt_iommu_ops;
+struct pt_iommu_driver_ops;
+struct iommu_dirty_bitmap;
+
+/**
+ * DOC: IOMMU Radix Page Table
+ *
+ * The IOMMU implementation of the Generic Page Table provides an ops struct
+ * that is useful to go with an iommu_domain to serve the DMA API, IOMMUFD and
+ * the generic map/unmap interface.
+ *
+ * This interface uses a caller-provided locking approach. The caller must have
+ * a VA range lock concept that prevents concurrent threads from calling ops on
+ * the same VA. Generally the range lock must be at least as large as a single
+ * map call.
+ */
+
+/**
+ * struct pt_iommu - Base structure for IOMMU page tables
+ *
+ * The format-specific struct will include this as the first member.
+ */
+struct pt_iommu {
+ /**
+ * @domain: The core IOMMU domain. The driver should use a union to
+ * overlay this memory with its previously existing domain struct to
+ * create an alias.
+ */
+ struct iommu_domain domain;
+
+ /**
+ * @ops: Function pointers to access the API
+ */
+ const struct pt_iommu_ops *ops;
+
+ /**
+ * @driver_ops: Function pointers provided by the HW driver to help
+ * manage HW details like caches.
+ */
+ const struct pt_iommu_driver_ops *driver_ops;
+
+ /**
+ * @nid: Node ID to use for table memory allocations. The IOMMU driver
+ * may want to set the NID to the device's NID, if there are multiple
+ * table walkers.
+ */
+ int nid;
+
+ /**
+ * @iommu_device: Device pointer used for any DMA cache flushing when
+	 * PT_FEAT_DMA_INCOHERENT is set. This is the iommu device that created
+	 * the page table, which must have DMA ops that perform cache flushing.
+ */
+ struct device *iommu_device;
+};
+
+/**
+ * struct pt_iommu_info - Details about the IOMMU page table
+ *
+ * Returned from pt_iommu_ops->get_info()
+ */
+struct pt_iommu_info {
+ /**
+ * @pgsize_bitmap: A bitmask where each set bit indicates
+ * a page size that can be natively stored in the page table.
+ */
+ u64 pgsize_bitmap;
+};
+
+struct pt_iommu_ops {
+ /**
+ * @set_dirty: Make the iova write dirty
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ *
+ * This is only used by iommufd testing. It makes the iova dirty so that
+ * read_and_clear_dirty() will see it as dirty. Unlike all the other ops
+	 * this one is safe to call without holding any locks. It may return
+ * -EAGAIN if there is a race.
+ */
+ int (*set_dirty)(struct pt_iommu *iommu_table, dma_addr_t iova);
+
+ /**
+ * @get_info: Return the pt_iommu_info structure
+ * @iommu_table: Table to query
+ *
+ * Return some basic static information about the page table.
+ */
+ void (*get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info);
+
+ /**
+ * @deinit: Undo a format specific init operation
+ * @iommu_table: Table to destroy
+ *
+ * Release all of the memory. The caller must have already removed the
+ * table from all HW access and all caches.
+ */
+ void (*deinit)(struct pt_iommu *iommu_table);
+};
+
+/**
+ * struct pt_iommu_driver_ops - HW IOTLB cache flushing operations
+ *
+ * The IOMMU driver should implement these using container_of(iommu_table) to
+ * get to its iommu_domain derived structure. All ops can be called in atomic
+ * contexts as they are buried under DMA API calls.
+ */
+struct pt_iommu_driver_ops {
+ /**
+ * @change_top: Update the top of table pointer
+ * @iommu_table: Table to operate on
+ * @top_paddr: New CPU physical address of the top pointer
+ * @top_level: IOMMU PT level of the new top
+ *
+ * Called under the get_top_lock() spinlock. The driver must update all
+ * HW references to this domain with a new top address and
+ * configuration. On return mappings placed in the new top must be
+ * reachable by the HW.
+ *
+	 * top_level encodes the level in the IOMMU PT format; level 0 is the
+	 * smallest page size, increasing from there. This has to be translated
+ * to any HW specific format. During this call the new top will not be
+ * visible to any other API.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ void (*change_top)(struct pt_iommu *iommu_table, phys_addr_t top_paddr,
+ unsigned int top_level);
+
+ /**
+	 * @get_top_lock: Lock to hold when changing the table top
+ * @iommu_table: Table to operate on
+ *
+	 * Return the lock to hold while changing the top of the page table
+	 * that is stored in HW. The lock will be held prior to calling
+	 * change_top() and released once the new top is fully visible.
+ *
+ * Typically this would be a lock that protects the iommu_domain's
+ * attachment list.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ spinlock_t *(*get_top_lock)(struct pt_iommu *iommu_table);
+};
+
+static inline void pt_iommu_deinit(struct pt_iommu *iommu_table)
+{
+ /*
+ * It is safe to call pt_iommu_deinit() before an init, or if init
+ * fails. The ops pointer will only become non-NULL if deinit needs to be
+ * run.
+ */
+ if (iommu_table->ops)
+ iommu_table->ops->deinit(iommu_table);
+}
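+
+/*
+ * Illustrative error-path usage in a driver (the "priv" naming is
+ * hypothetical):
+ *
+ *	ret = pt_iommu_amdv1_init(&priv->pt, &cfg, GFP_KERNEL);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	pt_iommu_deinit(&priv->pt.iommu);
+ */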
+
+/**
+ * struct pt_iommu_cfg - Common configuration values for all formats
+ */
+struct pt_iommu_cfg {
+ /**
+ * @features: Features required. Only these features will be turned on.
+ * The feature list should reflect what the IOMMU HW is capable of.
+ */
+ unsigned int features;
+ /**
+ * @hw_max_vasz_lg2: Maximum VA the IOMMU HW can support. This will
+ * imply the top level of the table.
+ */
+ u8 hw_max_vasz_lg2;
+ /**
+ * @hw_max_oasz_lg2: Maximum OA the IOMMU HW can support. The format
+ * might select a lower maximum OA.
+ */
+ u8 hw_max_oasz_lg2;
+};
+
+/* Generate the exported function signatures from iommu_pt.h */
+#define IOMMU_PROTOTYPES(fmt) \
+ phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
+ dma_addr_t iova); \
+ int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
+ unsigned long iova, phys_addr_t paddr, \
+ size_t pgsize, size_t pgcount, \
+ int prot, gfp_t gfp, size_t *mapped); \
+ size_t pt_iommu_##fmt##_unmap_pages( \
+ struct iommu_domain *domain, unsigned long iova, \
+ size_t pgsize, size_t pgcount, \
+ struct iommu_iotlb_gather *iotlb_gather); \
+ int pt_iommu_##fmt##_read_and_clear_dirty( \
+ struct iommu_domain *domain, unsigned long iova, size_t size, \
+ unsigned long flags, struct iommu_dirty_bitmap *dirty); \
+ int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
+ const struct pt_iommu_##fmt##_cfg *cfg, \
+ gfp_t gfp); \
+ void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
+ struct pt_iommu_##fmt##_hw_info *info)
+#define IOMMU_FORMAT(fmt, member) \
+ struct pt_iommu_##fmt { \
+ struct pt_iommu iommu; \
+ struct pt_##fmt member; \
+ }; \
+ IOMMU_PROTOTYPES(fmt)
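+
+/*
+ * For example, IOMMU_FORMAT(amdv1, amdpt) below expands to:
+ *
+ *	struct pt_iommu_amdv1 {
+ *		struct pt_iommu iommu;
+ *		struct pt_amdv1 amdpt;
+ *	};
+ *
+ * plus the pt_iommu_amdv1_*() prototypes from IOMMU_PROTOTYPES().
+ */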
+
+/*
+ * A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the
+ * iommu_pt
+ */
+#define IOMMU_PT_DOMAIN_OPS(fmt) \
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
+ .map_pages = &pt_iommu_##fmt##_map_pages, \
+ .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+#define IOMMU_PT_DIRTY_OPS(fmt) \
+ .read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
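+
+/*
+ * e.g. in a driver (the surrounding names are hypothetical):
+ *
+ *	static const struct iommu_domain_ops my_domain_ops = {
+ *		IOMMU_PT_DOMAIN_OPS(amdv1),
+ *		.attach_dev = my_attach_dev,
+ *	};
+ */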
+
+/*
+ * The driver should setup its domain struct like
+ * union {
+ * struct iommu_domain domain;
+ * struct pt_iommu_xxx xx;
+ * };
+ * PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, xx.iommu, domain);
+ *
+ * This creates an alias between driver_domain.domain and
+ * driver_domain.xx.iommu.domain. This is to avoid a mass rename of existing
+ * driver_domain.domain users.
+ */
+#define PT_IOMMU_CHECK_DOMAIN(s, pt_iommu_memb, domain_memb) \
+ static_assert(offsetof(s, pt_iommu_memb.domain) == \
+ offsetof(s, domain_memb))
+
+struct pt_iommu_amdv1_cfg {
+ struct pt_iommu_cfg common;
+ unsigned int starting_level;
+};
+
+struct pt_iommu_amdv1_hw_info {
+ u64 host_pt_root;
+ u8 mode;
+};
+
+IOMMU_FORMAT(amdv1, amdpt);
+
+/* amdv1_mock is used by the iommufd selftest */
+#define pt_iommu_amdv1_mock pt_iommu_amdv1
+#define pt_iommu_amdv1_mock_cfg pt_iommu_amdv1_cfg
+struct pt_iommu_amdv1_mock_hw_info;
+IOMMU_PROTOTYPES(amdv1_mock);
+
+struct pt_iommu_vtdss_cfg {
+ struct pt_iommu_cfg common;
+ /* 4 is a 57 bit 5 level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_vtdss_hw_info {
+ u64 ssptptr;
+ u8 aw;
+};
+
+IOMMU_FORMAT(vtdss, vtdss_pt);
+
+struct pt_iommu_x86_64_cfg {
+ struct pt_iommu_cfg common;
+ /* 4 is a 57 bit 5 level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_x86_64_hw_info {
+ u64 gcr3_pt;
+ u8 levels;
+};
+
+IOMMU_FORMAT(x86_64, x86_64_pt);
+
+#undef IOMMU_PROTOTYPES
+#undef IOMMU_FORMAT
+#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7f9691d375f0..b155929af5b1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -7,6 +7,7 @@
#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
+#include <linux/cleanup.h>
#include <linux/sched.h>
struct vm_area_struct;
@@ -39,6 +40,25 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
+static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
+{
+ /*
+	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
+	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
+	 * All GFP_* flags including GFP_NOWAIT use one or both of these flags.
+ * alloc_pages_nolock() is the only API that doesn't specify either flag.
+ *
+	 * This is stronger than GFP_NOWAIT or GFP_ATOMIC: those only
+	 * guarantee to never block on a sleeping lock.
+ * Here we are enforcing that the allocation doesn't ever spin
+ * on any locks (i.e. only trylocks). There is no high level
+ * GFP_$FOO flag for this use in alloc_pages_nolock() as the
+ * regular page allocator doesn't fully support this
+ * allocation mode.
+ */
+ return !!(gfp_flags & __GFP_RECLAIM);
+}
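+
+/*
+ * Illustrative use only: code that is restricted to trylocks can check this
+ * before attempting allocation work that might spin, and must tolerate
+ * failure when it returns false:
+ *
+ *	if (!gfpflags_allow_spinning(gfp_flags))
+ *		return NULL;
+ */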
+
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
@@ -212,35 +232,31 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_
unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
- struct list_head *page_list,
struct page **page_array);
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
-unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
unsigned long nr_pages,
struct page **page_array);
-#define alloc_pages_bulk_array_mempolicy(...) \
- alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_mempolicy(...) \
+ alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
/* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
- __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
-
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \
- __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
+ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
static inline unsigned long
-alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
struct page **page_array)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array);
+ return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}
-#define alloc_pages_bulk_array_node(...) \
- alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__))
+#define alloc_pages_bulk_node(...) \
+ alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
@@ -300,32 +316,32 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
-struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
- struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, bool hugepage);
+ unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
-static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
- struct mempolicy *mpol, pgoff_t ilx, int nid)
+static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
- return alloc_pages_noprof(gfp, order);
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
-static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
{
- return __folio_alloc_node(gfp, order, numa_node_id());
+ return folio_alloc_noprof(gfp, order);
}
-#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \
+#define vma_alloc_folio_noprof(gfp, order, vma, addr) \
folio_alloc_noprof(gfp, order)
#endif
#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
-#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__))
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
@@ -333,12 +349,15 @@ static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
struct vm_area_struct *vma, unsigned long addr)
{
- struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false);
+ struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
return &folio->page;
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
+struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
+#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
+
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
@@ -361,35 +380,14 @@ __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mas
__get_free_pages((gfp_mask) | GFP_DMA, (order))
extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
-struct page_frag_cache;
-void page_frag_cache_drain(struct page_frag_cache *nc);
-extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
- gfp_t gfp_mask, unsigned int align_mask);
-
-static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align)
-{
- WARN_ON_ONCE(!is_power_of_2(align));
- return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
-}
-
-static inline void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
-{
- return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
-}
-
-extern void page_frag_free(void *addr);
-
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
void page_alloc_init_cpuhp(void);
-int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
+bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
@@ -426,9 +424,14 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
@@ -438,4 +441,29 @@ extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
+#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
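+
+/*
+ * Illustrative pairing (order must be non-zero and the gfp mask must include
+ * __GFP_COMP, see the WARN_ON above):
+ *
+ *	folio = folio_alloc_gigantic(order, GFP_KERNEL | __GFP_COMP, nid, NULL);
+ *	...
+ *	folio_put(folio);
+ */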
+
+DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
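+
+/*
+ * The DEFINE_FREE() above enables scope-based freeing via <linux/cleanup.h>,
+ * e.g. (illustrative):
+ *
+ *	void *p __free(free_page) = (void *)__get_free_page(GFP_KERNEL);
+ */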
+
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 313be4ad79fd..3de43b12209e 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -55,9 +55,7 @@ enum {
#ifdef CONFIG_LOCKDEP
___GFP_NOLOCKDEP_BIT,
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
___GFP_NO_OBJ_EXT_BIT,
-#endif
___GFP_LAST_BIT
};
@@ -98,11 +96,7 @@ enum {
#else
#define ___GFP_NOLOCKDEP 0
#endif
-#ifdef CONFIG_SLAB_OBJ_EXT
#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
-#else
-#define ___GFP_NO_OBJ_EXT 0
-#endif
/*
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
@@ -215,7 +209,8 @@ enum {
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules.
+ * implicit rules. Please note that all of them must be used along with
+ * the %__GFP_DIRECT_RECLAIM flag.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
@@ -246,11 +241,14 @@ enum {
* cannot handle allocation failures. The allocation could block
* indefinitely but will never return with failure. Testing for
* failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
* New users should be evaluated carefully (and the flag should be
* used only when there is no reasonable failure policy) but it is
* definitely preferable to use the flag rather than opencode endless
* loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 56ac7e7a2889..8f85ddb26429 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * <linux/gpio.h>
+ * NOTE: This header *must not* be included in new code.
*
* This is the LEGACY GPIO bulk include file, including legacy APIs. It is
* used for GPIO drivers still referencing the global GPIO numberspace,
@@ -13,41 +13,20 @@
#define __LINUX_GPIO_H
#include <linux/types.h>
+#ifdef CONFIG_GPIOLIB
+#include <linux/gpio/consumer.h>
+#endif
-struct device;
+#ifdef CONFIG_GPIOLIB_LEGACY
-/* see Documentation/driver-api/gpio/legacy.rst */
+struct device;
/* make these flag values available regardless of GPIO kconfig options */
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
-/* Gpio pin is active-low */
-#define GPIOF_ACTIVE_LOW (1 << 2)
-
-/**
- * struct gpio - a structure describing a GPIO with configuration
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
- */
-struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
-};
+#define GPIOF_IN ((1 << 0))
+#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
+#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
#ifdef CONFIG_GPIOLIB
-
-#include <linux/gpio/consumer.h>
-
/*
* "valid" GPIO numbers are nonnegative and may be passed to
* setup routines like gpio_request(). Only some valid numbers
@@ -68,19 +47,6 @@ static inline bool gpio_is_valid(int number)
* extra memory (for code and for per-GPIO table entries).
*/
-/*
- * At the end we want all GPIOs to be dynamically allocated from 0.
- * However, some legacy drivers still perform fixed allocation.
- * Until they are all fixed, leave 0-512 space for them.
- */
-#define GPIO_DYNAMIC_BASE 512
-/*
- * Define the maximum of the possible GPIO in the global numberspace.
- * While the GPIO base and numbers are positive, we limit it with signed
- * maximum as a lot of code is using negative values for special cases.
- */
-#define GPIO_DYNAMIC_MAX INT_MAX
-
/* Always use the library code for GPIO management calls,
* or when sleeping may be involved.
*/
@@ -102,7 +68,7 @@ static inline int gpio_get_value_cansleep(unsigned gpio)
}
static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
- return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
+ gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
static inline int gpio_get_value(unsigned gpio)
@@ -111,7 +77,7 @@ static inline int gpio_get_value(unsigned gpio)
}
static inline void gpio_set_value(unsigned gpio, int value)
{
- return gpiod_set_raw_value(gpio_to_desc(gpio), value);
+ gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
static inline int gpio_to_irq(unsigned gpio)
@@ -121,9 +87,6 @@ static inline int gpio_to_irq(unsigned gpio)
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
-
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
@@ -201,13 +164,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int devm_gpio_request(struct device *dev, unsigned gpio,
- const char *label)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
@@ -216,5 +172,5 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
}
#endif /* ! CONFIG_GPIOLIB */
-
+#endif /* CONFIG_GPIOLIB_LEGACY */
#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index db2dfbae8edb..cafeb7a40ad1 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -3,6 +3,7 @@
#define __LINUX_GPIO_CONSUMER_H
#include <linux/bits.h>
+#include <linux/err.h>
#include <linux/types.h>
struct acpi_device;
@@ -30,6 +31,7 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+/* GPIOD_FLAGS_BIT_NONEXCLUSIVE is DEPRECATED, don't use in new code. */
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
@@ -110,8 +112,6 @@ int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
-int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
-int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
@@ -119,7 +119,7 @@ int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_value(struct gpio_desc *desc, int value);
+int gpiod_set_value(struct gpio_desc *desc, int value);
int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -129,7 +129,7 @@ int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+int gpiod_set_raw_value(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -141,7 +141,7 @@ int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -151,7 +151,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -167,10 +167,14 @@ int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+bool gpiod_is_shared(const struct gpio_desc *desc);
+
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
+int gpiod_hwgpio(const struct gpio_desc *desc);
+
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -181,13 +185,14 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label);
+bool gpiod_is_equal(const struct gpio_desc *desc,
+ const struct gpio_desc *other);
+
#else /* CONFIG_GPIOLIB */
-#include <linux/err.h>
+#include <linux/bug.h>
#include <linux/kernel.h>
-#include <asm/bug.h>
-
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
@@ -348,18 +353,6 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
WARN_ON(desc);
return -ENOSYS;
}
-static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
- unsigned long flags)
-{
- WARN_ON(desc);
- return -ENOSYS;
-}
-static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
- unsigned long flags)
-{
- WARN_ON(desc);
- return -ENOSYS;
-}
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -375,10 +368,11 @@ static inline int gpiod_get_array_value(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+static inline int gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -404,10 +398,11 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+static inline int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -434,10 +429,11 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+static inline int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -463,11 +459,12 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
- int value)
+static inline int gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -527,6 +524,13 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
return -EINVAL;
}
+static inline bool gpiod_is_shared(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return false;
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return NULL;
@@ -558,8 +562,40 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline bool
+gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
+{
+ WARN_ON(desc || other);
+ return false;
+}
+
#endif /* CONFIG_GPIOLIB */
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_HTE)
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+#else
+
+#include <linux/bug.h>
+
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
+
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_GPIOLIB && CONFIG_HTE */
+
static inline
struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
struct fwnode_handle *fwnode,
@@ -573,7 +609,7 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
struct acpi_gpio_params {
unsigned int crs_entry_index;
- unsigned int line_index;
+ unsigned short line_index;
bool active_low;
};
@@ -608,8 +644,6 @@ int devm_acpi_dev_add_driver_gpios(struct device *dev,
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
-#include <linux/err.h>
-
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{
@@ -635,8 +669,6 @@ void gpiod_unexport(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
-#include <asm/errno.h>
-
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
@@ -655,4 +687,14 @@ static inline void gpiod_unexport(struct gpio_desc *desc)
#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+static inline int gpiod_multi_set_value_cansleep(struct gpio_descs *descs,
+ unsigned long *value_bitmap)
+{
+ if (IS_ERR_OR_NULL(descs))
+ return PTR_ERR_OR_ZERO(descs);
+
+ return gpiod_set_array_value_cansleep(descs->ndescs, descs->desc,
+ descs->info, value_bitmap);
+}
+
#endif
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0032bb6e7d8f..fabe2baf7b50 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -14,6 +14,7 @@
#include <linux/property.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
+#include <linux/util_macros.h>
#ifdef CONFIG_GENERIC_MSI_IRQ
#include <asm/msi.h>
@@ -286,8 +287,9 @@ struct gpio_irq_chip {
/**
* @first:
*
- * Required for static IRQ allocation. If set, irq_domain_add_simple()
- * will allocate and map all IRQs during initialization.
+ * Required for static IRQ allocation. If set,
+ * irq_domain_create_simple() will allocate and map all IRQs
+ * during initialization.
*/
unsigned int first;
@@ -328,7 +330,8 @@ struct gpio_irq_chip {
* @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
- * enabling module power and clock; may sleep
+ * enabling module power and clock; may sleep; must return 0 on success
+ * or negative error number on failure
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
@@ -344,10 +347,13 @@ struct gpio_irq_chip {
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
- * @set: assigns output value for signal "offset"
- * @set_multiple: assigns output values for multiple signals defined by "mask"
+ * @set: assigns output value for signal "offset", returns 0 on success or
+ * negative error value
+ * @set_multiple: assigns output values for multiple signals defined by
+ * "mask", returns 0 on success or negative error value
* @set_config: optional hook for all kinds of settings. Uses the same
- * packed config format as generic pinconf.
+ * packed config format as generic pinconf. Must return 0 on success and
+ * a negative error number on failure.
* @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
@@ -382,27 +388,6 @@ struct gpio_irq_chip {
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @read_reg: reader function for generic GPIO
- * @write_reg: writer function for generic GPIO
- * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing
- * line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the
- * generic GPIO core. It is for internal housekeeping only.
- * @reg_dat: data (in) register for generic GPIO
- * @reg_set: output set register (out=high) for generic GPIO
- * @reg_clr: output clear register (out=low) for generic GPIO
- * @reg_dir_out: direction out setting register for generic GPIO
- * @reg_dir_in: direction in setting register for generic GPIO
- * @bgpio_dir_unreadable: indicates that the direction register(s) cannot
- * be read and we need to rely on out internal state tracking.
- * @bgpio_bits: number of register bits used for a generic GPIO i.e.
- * <register width> * 8
- * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
- * shadowed and real data registers writes together.
- * @bgpio_data: shadowed data register for generic GPIO to clear/set bits
- * safely.
- * @bgpio_dir: shadowed direction register for generic GPIO to clear/set
- * direction safely. A "1" in this word means the line is set as
- * output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programming interface.
@@ -436,9 +421,9 @@ struct gpio_chip {
int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- void (*set)(struct gpio_chip *gc,
- unsigned int offset, int value);
- void (*set_multiple)(struct gpio_chip *gc,
+ int (*set)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
int (*set_config)(struct gpio_chip *gc,
@@ -468,22 +453,6 @@ struct gpio_chip {
const char *const *names;
bool can_sleep;
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
- unsigned long (*read_reg)(void __iomem *reg);
- void (*write_reg)(void __iomem *reg, unsigned long data);
- bool be_bits;
- void __iomem *reg_dat;
- void __iomem *reg_set;
- void __iomem *reg_clr;
- void __iomem *reg_dir_out;
- void __iomem *reg_dir_in;
- bool bgpio_dir_unreadable;
- int bgpio_bits;
- raw_spinlock_t bgpio_lock;
- unsigned long bgpio_data;
- unsigned long bgpio_dir;
-#endif /* CONFIG_GPIO_GENERIC */
-
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
@@ -499,14 +468,6 @@ struct gpio_chip {
struct gpio_irq_chip irq;
#endif /* CONFIG_GPIOLIB_IRQCHIP */
- /**
- * @valid_mask:
- *
- * If not %NULL, holds bitmask of GPIOs which are valid to be used
- * from the chip.
- */
- unsigned long *valid_mask;
-
#if defined(CONFIG_OF_GPIO)
/*
* If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
@@ -516,11 +477,33 @@ struct gpio_chip {
/**
* @of_gpio_n_cells:
*
- * Number of cells used to form the GPIO specifier.
+ * Number of cells used to form the GPIO specifier. The standard is 2
+ * cells:
+ *
+ * gpios = <&gpio offset flags>;
+ *
+	 * Some complex GPIO controllers instantiate more than one chip per
+ * device tree node and have 3 cells:
+ *
+ * gpios = <&gpio instance offset flags>;
+ *
+ * Legacy GPIO controllers may even have 1 cell:
+ *
+ * gpios = <&gpio offset>;
*/
unsigned int of_gpio_n_cells;
/**
+ * @of_node_instance_match:
+ *
+ * Determine if a chip is the right instance. Must be implemented by
+ * any driver using more than one gpio_chip per device tree node.
+ * Returns true if gc is the instance indicated by i (which is the
+ * first cell in the phandles for GPIO lines and gpio-ranges).
+ */
+ bool (*of_node_instance_match)(struct gpio_chip *gc, unsigned int i);
+
+ /**
* @of_xlate:
*
* Callback to translate a device tree GPIO specifier into a chip-
@@ -550,19 +533,29 @@ DEFINE_CLASS(_gpiochip_for_each_data,
const char **label, int *i)
/**
+ * for_each_hwgpio_in_range - Iterates over all GPIOs in a given range
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_base: First GPIO in the range.
+ * @_size: Number of GPIOs to check, starting from @_base.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ _i < _size; \
+ _i++, kfree(_label), _label = NULL) \
+ for_each_if(!IS_ERR(_label = gpiochip_dup_line_label(_chip, _base + _i)))
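+
+/*
+ * Example (illustrative only):
+ *
+ *	unsigned int i;
+ *	char *label;
+ *
+ *	for_each_hwgpio_in_range(gc, i, 0, 8, label)
+ *		pr_debug("%u: %s\n", i, label ?: "unused");
+ */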
+
+/**
* for_each_hwgpio - Iterates over all GPIOs for given chip.
* @_chip: Chip to iterate over.
* @_i: Loop counter.
* @_label: Place to store the address of the label if the GPIO is requested.
* Set to NULL for unused GPIOs.
*/
-#define for_each_hwgpio(_chip, _i, _label) \
- for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
- *_data.i < _chip->ngpio; \
- (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
- if (IS_ERR(*_data.label = \
- gpiochip_dup_line_label(_chip, *_data.i))) {} \
- else
+#define for_each_hwgpio(_chip, _i, _label) \
+ for_each_hwgpio_in_range(_chip, _i, 0, _chip->ngpio, _label)
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
@@ -573,13 +566,8 @@ DEFINE_CLASS(_gpiochip_for_each_data,
* @_label: label of current GPIO
*/
#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
- for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
- *_data.i < _size; \
- (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
- if ((*_data.label = \
- gpiochip_dup_line_label(_chip, _base + *_data.i)) == NULL) {} \
- else if (IS_ERR(*_data.label)) {} \
- else
+ for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for_each_if(_label)
/* Iterates over all requested GPIO of the given @chip */
#define for_each_requested_gpio(chip, i, label) \
@@ -632,10 +620,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
-static inline int gpiochip_add(struct gpio_chip *gc)
-{
- return gpiochip_add_data(gc, NULL);
-}
void gpiochip_remove(struct gpio_chip *gc);
int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
void *data, struct lock_class_key *lock_key,
@@ -682,16 +666,11 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
/* Sleep persistence inquiry for drivers */
bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc);
/* get driver data */
void *gpiochip_get_data(struct gpio_chip *gc);
-struct bgpio_pdata {
- const char *label;
- int base;
- int ngpio;
-};
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
@@ -705,19 +684,6 @@ int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags);
-
-#define BGPIOF_BIG_ENDIAN BIT(0)
-#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
-#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
-#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
-#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
-#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
-#define BGPIOF_NO_SET_ON_INPUT BIT(6)
-
#ifdef CONFIG_GPIOLIB_IRQCHIP
int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain);
@@ -752,23 +718,68 @@ struct gpio_pin_range {
#ifdef CONFIG_PINCTRL
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins);
int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset,
+ pin_offset, NULL, npins);
+}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset, 0,
+ pins, npins);
+}
#else /* ! CONFIG_PINCTRL */
static inline int
+gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+				 const char *pinctl_name,
+				 unsigned int gpio_offset,
+				 unsigned int pin_offset,
+				 unsigned int const *pins,
+				 unsigned int npins)
+{
+ return 0;
+}
+
+static inline int
gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return 0;
+}
+
static inline int
gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
@@ -791,7 +802,6 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
struct gpio_desc *
gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
@@ -870,7 +880,7 @@ static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
#define for_each_gpiochip_node(dev, child) \
device_for_each_child_node(dev, child) \
- if (!fwnode_property_present(child, "gpio-controller")) {} else
+ for_each_if(fwnode_property_present(child, "gpio-controller"))
static inline unsigned int gpiochip_node_count(struct device *dev)
{
diff --git a/include/linux/gpio/forwarder.h b/include/linux/gpio/forwarder.h
new file mode 100644
index 000000000000..ee5d8355f735
--- /dev/null
+++ b/include/linux/gpio/forwarder.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_FORWARDER_H
+#define __LINUX_GPIO_FORWARDER_H
+
+struct gpio_desc;
+struct gpio_chip;
+struct gpiochip_fwd;
+
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios);
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd,
+ struct gpio_desc *desc, unsigned int offset);
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data);
+
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd);
+
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd);
+
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd,
+ unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config);
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset);
+
+#endif
diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h
new file mode 100644
index 000000000000..ff566dc9c3cb
--- /dev/null
+++ b/include/linux/gpio/generic.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_GENERIC_H
+#define __LINUX_GPIO_GENERIC_H
+
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+
+struct device;
+
+#define GPIO_GENERIC_BIG_ENDIAN BIT(0)
+#define GPIO_GENERIC_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
+#define GPIO_GENERIC_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
+#define GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define GPIO_GENERIC_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define GPIO_GENERIC_NO_OUTPUT BIT(5) /* only input */
+#define GPIO_GENERIC_NO_SET_ON_INPUT BIT(6)
+#define GPIO_GENERIC_PINCTRL_BACKEND BIT(7) /* Call pinctrl direction setters */
+#define GPIO_GENERIC_NO_INPUT BIT(8) /* only output */
+
+/**
+ * struct gpio_generic_chip_config - Generic GPIO chip configuration data
+ * @dev: Parent device of the new GPIO chip (compulsory).
+ * @sz: Size (width) of the MMIO registers in bytes, typically 1, 2 or 4.
+ * @dat: MMIO address for the register to READ the value of the GPIO lines;
+ * it is expected that a 1 in the corresponding bit in this register means
+ * the line is asserted.
+ * @set: MMIO address for the register to SET the value of the GPIO lines;
+ * it is expected that we write the line with 1 in this register to drive
+ * the GPIO line high.
+ * @clr: MMIO address for the register to CLEAR the value of the GPIO lines;
+ * it is expected that we write the line with 1 in this register to
+ * drive the GPIO line low. It is allowed to leave this address as NULL;
+ * in that case the SET register will be assumed to also clear the GPIO
+ * lines by actively writing the line with 0.
+ * @dirout: MMIO address for the register to set the line as OUTPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an output line. Conversely, setting the line to 0 will
+ * turn that line into an input.
+ * @dirin: MMIO address for the register to set this line as INPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an input line. Conversely, setting the line to 0 will
+ * turn that line into an output.
+ * @flags: Different flags that will affect the behaviour of the device, such
+ * as endianness.
+ */
+struct gpio_generic_chip_config {
+ struct device *dev;
+ unsigned long sz;
+ void __iomem *dat;
+ void __iomem *set;
+ void __iomem *clr;
+ void __iomem *dirout;
+ void __iomem *dirin;
+ unsigned long flags;
+};
+
+/**
+ * struct gpio_generic_chip - Generic GPIO chip implementation.
+ * @gc: The underlying struct gpio_chip object, implementing low-level GPIO
+ * chip routines.
+ * @read_reg: reader function for generic GPIO
+ * @write_reg: writer function for generic GPIO
+ * @be_bits: if the generic GPIO has big endian bit order (bit 31 is
+ * representing line 0, bit 30 is line 1 ... bit 0 is line 31) this
+ * is set to true by the generic GPIO core. It is for internal
+ * housekeeping only.
+ * @reg_dat: data (in) register for generic GPIO
+ * @reg_set: output set register (out=high) for generic GPIO
+ * @reg_clr: output clear register (out=low) for generic GPIO
+ * @reg_dir_out: direction out setting register for generic GPIO
+ * @reg_dir_in: direction in setting register for generic GPIO
+ * @dir_unreadable: indicates that the direction register(s) cannot be read and
+ * we need to rely on our internal state tracking.
+ * @pinctrl: the generic GPIO uses a pin control backend.
+ * @bits: number of register bits used for a generic GPIO,
+ * i.e. <register width> * 8
+ * @lock: used to lock chip->sdata. Also, this is needed to keep
+ * shadowed and real data register writes together.
+ * @sdata: shadowed data register for generic GPIO to clear/set bits safely.
+ * @sdir: shadowed direction register for generic GPIO to clear/set direction
+ * safely. A "1" in this word means the line is set as output.
+ */
+struct gpio_generic_chip {
+ struct gpio_chip gc;
+ unsigned long (*read_reg)(void __iomem *reg);
+ void (*write_reg)(void __iomem *reg, unsigned long data);
+ bool be_bits;
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+ void __iomem *reg_dir_out;
+ void __iomem *reg_dir_in;
+ bool dir_unreadable;
+ bool pinctrl;
+ int bits;
+ raw_spinlock_t lock;
+ unsigned long sdata;
+ unsigned long sdir;
+};
+
+static inline struct gpio_generic_chip *
+to_gpio_generic_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct gpio_generic_chip, gc);
+}
+
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg);
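+
+/*
+ * A minimal init sketch (illustrative; register offsets and names are
+ * hypothetical, error handling omitted):
+ *
+ *	struct gpio_generic_chip_config cfg = {
+ *		.dev = dev,
+ *		.sz = 4,
+ *		.dat = base + MY_DATA_REG,
+ *		.set = base + MY_SET_REG,
+ *		.dirout = base + MY_DIR_REG,
+ *	};
+ *
+ *	ret = gpio_generic_chip_init(chip, &cfg);
+ */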
+
+/**
+ * gpio_generic_chip_set() - Set the GPIO line value of the generic GPIO chip.
+ * @chip: Generic GPIO chip to use.
+ * @offset: Hardware offset of the line to set.
+ * @value: New GPIO line value.
+ *
+ * Some modules using the generic GPIO chip need to set line values in their
+ * direction setters, but they don't have access to the gpio-mmio symbols, so
+ * they use the function pointer in struct gpio_chip directly. This is not
+ * optimal and can lead to crashes at run-time in some instances. This wrapper
+ * provides a safe interface for users.
+ *
+ * Returns: 0 on success, negative error number on failure.
+ */
+static inline int
+gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset,
+ int value)
+{
+ if (WARN_ON(!chip->gc.set))
+ return -EOPNOTSUPP;
+
+ return chip->gc.set(&chip->gc, offset, value);
+}
+
+/**
+ * gpio_generic_read_reg() - Read a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to read.
+ *
+ * Returns: value read from register.
+ */
+static inline unsigned long
+gpio_generic_read_reg(struct gpio_generic_chip *chip, void __iomem *reg)
+{
+ if (WARN_ON(!chip->read_reg))
+ return 0;
+
+ return chip->read_reg(reg);
+}
+
+/**
+ * gpio_generic_write_reg() - Write a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to write to.
+ * @val: New value to write.
+ */
+static inline void gpio_generic_write_reg(struct gpio_generic_chip *chip,
+ void __iomem *reg, unsigned long val)
+{
+ if (WARN_ON(!chip->write_reg))
+ return;
+
+ chip->write_reg(reg, val);
+}
+
+#define gpio_generic_chip_lock(gen_gc) \
+ raw_spin_lock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_unlock(gen_gc) \
+ raw_spin_unlock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_lock_irqsave(gen_gc, flags) \
+ raw_spin_lock_irqsave(&(gen_gc)->lock, flags)
+
+#define gpio_generic_chip_unlock_irqrestore(gen_gc, flags) \
+ raw_spin_unlock_irqrestore(&(gen_gc)->lock, flags)
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock(_T->lock),
+ gpio_generic_chip_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock_irqsave,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock_irqsave(_T->lock, _T->flags),
+ gpio_generic_chip_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
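/*
 * Illustrative sketch, not from the patch: the guards defined above enable
 * scope-based locking (linux/cleanup.h). update_sdata_bit() is a
 * hypothetical helper updating the shadowed data register.
 */
static void update_sdata_bit(struct gpio_generic_chip *chip,
			     unsigned int offset, int value)
{
	guard(gpio_generic_lock_irqsave)(chip);	/* dropped at end of scope */

	if (value)
		chip->sdata |= BIT(offset);
	else
		chip->sdata &= ~BIT(offset);

	gpio_generic_write_reg(chip, chip->reg_dat, chip->sdata);
}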
+
+#endif /* __LINUX_GPIO_GENERIC_H */
diff --git a/include/linux/gpio/gpio-nomadik.h b/include/linux/gpio/gpio-nomadik.h
index b5a84864650d..592a774a53cd 100644
--- a/include/linux/gpio/gpio-nomadik.h
+++ b/include/linux/gpio/gpio-nomadik.h
@@ -261,16 +261,14 @@ struct platform_device;
* true.
*/
void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
- struct gpio_chip *chip, unsigned int offset,
- unsigned int gpio);
+ struct gpio_chip *chip, unsigned int offset);
#else
static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
struct pinctrl_dev *pctldev,
struct gpio_chip *chip,
- unsigned int offset,
- unsigned int gpio)
+ unsigned int offset)
{
}
diff --git a/include/linux/gpio/legacy-of-mm-gpiochip.h b/include/linux/gpio/legacy-of-mm-gpiochip.h
deleted file mode 100644
index 2e2bd3b19cc3..000000000000
--- a/include/linux/gpio/legacy-of-mm-gpiochip.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * OF helpers for the old of_mm_gpio_chip, used on ppc32 and nios2,
- * do not use in new code.
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- */
-
-#ifndef __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
-#define __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H
-
-#include <linux/gpio/driver.h>
-#include <linux/of.h>
-
-/*
- * OF GPIO chip for memory mapped banks
- */
-struct of_mm_gpio_chip {
- struct gpio_chip gc;
- void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
- void __iomem *regs;
-};
-
-static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
-{
- return container_of(gc, struct of_mm_gpio_chip, gc);
-}
-
-extern int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data);
-extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
-
-#endif /* __LINUX_GPIO_LEGACY_OF_MM_GPIO_CHIP_H */
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index a9f7b7faf57b..12d154732ca9 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -6,6 +6,7 @@
struct device;
struct fwnode_handle;
struct gpio_regmap;
+struct gpio_chip;
struct irq_domain;
struct regmap;
@@ -21,7 +22,7 @@ struct regmap;
* If not given, the fwnode of the parent is used.
* @label: (Optional) Descriptive name for GPIO controller.
* If not given, the name of the device is used.
- * @ngpio: Number of GPIOs
+ * @ngpio: (Optional) Number of GPIOs
* @names: (Optional) Array of names for gpios
* @reg_dat_base: (Optional) (in) register base address
* @reg_set_base: (Optional) set register base address
@@ -30,16 +31,27 @@ struct regmap;
* @reg_dir_out_base: (Optional) out setting register base address
* @reg_stride: (Optional) May be set if the registers (of the
* same type, dat, set, etc) are not consecutive.
- * @ngpio_per_reg: Number of GPIOs per register
+ * @ngpio_per_reg: (Optional) Number of GPIOs per register
* @irq_domain: (Optional) IRQ domain if the controller is
* interrupt-capable
* @reg_mask_xlate: (Optional) Translates base address and GPIO
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @fixed_direction_output:
+ * (Optional) Bitmap representing the fixed direction of
+ * the GPIO lines. Useful when there are GPIO lines with a
+ * fixed direction mixed together in the same register.
* @drvdata: (Optional) Pointer to driver specific data which is
 * not used by gpio-regmap but is provided "as is" to the
* driver callback(s).
+ * @init_valid_mask: (Optional) Routine to initialize @valid_mask, to be used
+ * if not all GPIOs are valid.
+ * @regmap_irq_chip: (Optional) Pointer to a regmap_irq_chip structure. If
+ * set, a regmap-irq device will be created and the IRQ
+ * domain will be set accordingly.
+ * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts.
+ * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt.
*
* The ->reg_mask_xlate translates a given base address and GPIO offset to
* register and mask pair. The base address is one of the given register
@@ -77,11 +89,22 @@ struct gpio_regmap_config {
int reg_stride;
int ngpio_per_reg;
struct irq_domain *irq_domain;
+ unsigned long *fixed_direction_output;
+
+#ifdef CONFIG_REGMAP_IRQ
+ struct regmap_irq_chip *regmap_irq_chip;
+ int regmap_irq_line;
+ unsigned long regmap_irq_flags;
+#endif
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
+ int (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
void *drvdata;
};
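/*
 * Illustrative sketch, not from the patch: a probe() fragment wiring up the
 * new regmap-irq fields (under CONFIG_REGMAP_IRQ). The register offsets and
 * the names my_irq_chip, dev, regmap and client_irq are made up.
 */
static const struct regmap_irq my_irqs[] = {
	REGMAP_IRQ_REG(0, 0, BIT(0)),
};

static struct regmap_irq_chip my_irq_chip = {
	.name		= "my-gpio",
	.status_base	= 0x10,
	.mask_base	= 0x14,
	.num_regs	= 1,
	.irqs		= my_irqs,
	.num_irqs	= ARRAY_SIZE(my_irqs),
};

struct gpio_regmap_config cfg = {
	.parent		  = dev,
	.regmap		  = regmap,
	.ngpio		  = 8,
	.reg_dat_base	  = GPIO_REGMAP_ADDR(0x00),
	.regmap_irq_chip  = &my_irq_chip,
	.regmap_irq_line  = client_irq,
	.regmap_irq_flags = IRQF_ONESHOT,
};
devm_gpio_regmap_register(dev, &cfg);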
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
index 634c9511cf78..4d58e27ceaf6 100644
--- a/include/linux/greybus.h
+++ b/include/linux/greybus.h
@@ -64,7 +64,7 @@ struct greybus_driver {
struct device_driver driver;
};
-#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+#define to_greybus_driver(d) container_of_const(d, struct greybus_driver, driver)
static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
{
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
index e42807ec61f6..9d4e5ab6c314 100644
--- a/include/linux/group_cpus.h
+++ b/include/linux/group_cpus.h
@@ -9,6 +9,6 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
-struct cpumask *group_cpus_evenly(unsigned int numgrps);
+struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
#endif
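/*
 * Illustrative sketch, not from the patch: the new out-parameter reports how
 * many masks were actually produced, which (presumably) may be fewer than
 * the number of groups requested. show_groups() is hypothetical.
 */
static void show_groups(unsigned int numgrps)
{
	unsigned int nummasks, i;
	struct cpumask *masks = group_cpus_evenly(numgrps, &nummasks);

	if (!masks)
		return;
	for (i = 0; i < nummasks; i++)
		pr_debug("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));
	kfree(masks);
}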
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
index f316c8d0f3fc..45f181bcf890 100644
--- a/include/linux/habanalabs/cpucp_if.h
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -42,6 +42,12 @@ enum eq_event_id {
EQ_EVENT_PWR_BRK_ENTRY,
EQ_EVENT_PWR_BRK_EXIT,
EQ_EVENT_HEARTBEAT,
+ EQ_EVENT_CPLD_RESET_REASON,
+ EQ_EVENT_CPLD_SHUTDOWN,
+ EQ_EVENT_POWER_EVT_START,
+ EQ_EVENT_POWER_EVT_END,
+ EQ_EVENT_THERMAL_EVT_START,
+ EQ_EVENT_THERMAL_EVT_END,
};
/*
@@ -391,6 +397,9 @@ struct hl_eq_entry {
#define EQ_CTL_READY_SHIFT 31
#define EQ_CTL_READY_MASK 0x80000000
+#define EQ_CTL_EVENT_MODE_SHIFT 28
+#define EQ_CTL_EVENT_MODE_MASK 0x70000000
+
#define EQ_CTL_EVENT_TYPE_SHIFT 16
#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
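/*
 * Illustrative sketch, not from the patch: decoding the new mode field
 * alongside the existing type field from an EQ control word. The little-
 * endian source of ctl_le is assumed.
 */
static void hl_eq_decode_ctl(__le32 ctl_le)
{
	u32 ctl = le32_to_cpu(ctl_le);
	u32 ev_type = (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
	u32 ev_mode = (ctl & EQ_CTL_EVENT_MODE_MASK) >> EQ_CTL_EVENT_MODE_SHIFT;

	pr_debug("eq event: type %u mode %u\n", ev_type, ev_mode);
}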
@@ -853,9 +862,6 @@ struct cpucp_packet {
* result cannot be used to hold general purpose data.
*/
__le32 status_mask;
-
- /* random, used once number, for security packets */
- __le32 nonce;
};
union {
@@ -864,6 +870,9 @@ struct cpucp_packet {
/* For Generic packet sub index */
__le32 pkt_subidx;
+
+ /* random, used once number, for security packets */
+ __le32 nonce;
};
};
@@ -1140,6 +1149,7 @@ struct cpucp_security_info {
* (0 = fully functional, 1 = lower-half is not functional,
* 2 = upper-half is not functional)
* @sec_info: security information
+ * @cpld_timestamp: CPLD programmed F/W timestamp.
* @pll_map: Bit map of supported PLLs for current ASIC version.
* @mme_binning_mask: MME binning mask,
* bits [0:6] <==> dcore0 mme fma
@@ -1165,7 +1175,7 @@ struct cpucp_security_info {
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
- __le32 reserved;
+ __le32 reserved1;
__le32 card_type;
__le32 card_location;
__le32 cpld_version;
@@ -1187,7 +1197,7 @@ struct cpucp_info {
__u8 substrate_version;
__u8 eq_health_check_supported;
struct cpucp_security_info sec_info;
- __le32 fw_hbm_region_size;
+ __le32 cpld_timestamp;
__u8 pll_map[PLL_MAP_LEN];
__le64 mme_binning_mask;
__u8 fw_os_version[VERSION_MAX_LEN];
@@ -1415,9 +1425,13 @@ struct cpucp_monitor_dump {
* from "pkt_subidx" field in struct cpucp_packet.
*
 * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ * HL_GET_ERR_COUNTERS_CMD - Command to get error counters.
+ * HL_GET_P_STATE - Command to get performance state.
*/
enum hl_passthrough_type {
HL_PASSTHROUGH_VERSIONS,
+ HL_GET_ERR_COUNTERS_CMD,
+ HL_GET_P_STATE,
};
#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
index 93366d5621fd..af5fb4ad77eb 100644
--- a/include/linux/habanalabs/hl_boot_if.h
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018-2020 HabanaLabs, Ltd.
+ * Copyright 2018-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -49,7 +49,6 @@ enum cpu_boot_err {
#define CPU_BOOT_ERR_FATAL_MASK \
((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
(1 << CPU_BOOT_ERR_PLL_FAIL) | \
- (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
(1 << CPU_BOOT_ERR_BINNING_FAIL) | \
(1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
(1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
@@ -194,6 +193,8 @@ enum cpu_boot_dev_sts {
CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN = 27,
+ CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN = 28,
CPU_BOOT_DEV_STS_ENABLED = 31,
CPU_BOOT_DEV_STS_SCND_EN = 63,
CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
@@ -294,7 +295,7 @@ enum cpu_boot_dev_sts {
* Initialized in: linux
*
* CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
- * previleged entity. FW sets this status
+ * privileged entity. FW sets this status
* bit for host. If this bit is set then
* GIC can not be accessed from host.
* Initialized in: linux
@@ -331,6 +332,17 @@ enum cpu_boot_dev_sts {
* HWMON enum mapping to cpucp enums.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN
+ * If set, the f/w supports NIC HBM memory clear and
+ * TMR/TXS HBM memory init.
+ * Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN
+ * MMU page tables are located in DRAM.
+ * F/W initializes security settings for MMU
+ * page tables to reside in DRAM.
+ * Initialized in: zephyr-mgmt
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -367,6 +379,8 @@ enum cpu_boot_dev_sts {
#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN (1 << CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN)
+#define CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN (1 << CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
@@ -450,11 +464,11 @@ struct cpu_dyn_regs {
__le32 gic_dma_core_irq_ctrl;
__le32 gic_host_halt_irq;
__le32 gic_host_ints_irq;
- __le32 gic_host_soft_rst_irq;
+ __le32 reserved0;
__le32 gic_rot_qm_irq_ctrl;
- __le32 cpu_rst_status;
+ __le32 reserved1;
__le32 eng_arc_irq_ctrl;
- __le32 reserved1[20]; /* reserve for future use */
+ __le32 reserved2[20]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -551,8 +565,9 @@ enum lkd_fw_ascii_msg_lvls {
LKD_FW_ASCII_MSG_DBG = 3,
};
-#define LKD_FW_ASCII_MSG_MAX_LEN 128
-#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+#define LKD_FW_ASCII_MSG_MIN_DESC_VERSION 3
struct lkd_fw_ascii_msg {
__u8 valid;
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3bb87bf6bc65..96bda41d9148 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -59,6 +59,15 @@ enum hdmi_infoframe_type {
#define HDMI_DRM_INFOFRAME_SIZE 26
#define HDMI_VENDOR_INFOFRAME_SIZE 4
+/*
+ * HDMI 1.3a table 5-14 states that the largest InfoFrame_length is 27,
+ * not including the packet header or checksum byte. We include the
+ * checksum byte in HDMI_INFOFRAME_HEADER_SIZE, so this should allow
+ * HDMI_INFOFRAME_SIZE(MAX) to be the largest buffer we could ever need
+ * for any HDMI infoframe.
+ */
+#define HDMI_MAX_INFOFRAME_SIZE 27
+
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
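/*
 * Illustrative sketch, not from the patch: per the comment above, the new
 * MAX constant plugs into HDMI_INFOFRAME_SIZE() to size a worst-case buffer.
 */
u8 buffer[HDMI_INFOFRAME_SIZE(MAX)];	/* header (incl. checksum) + 27 payload bytes */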
@@ -436,7 +445,6 @@ ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
void *buffer, size_t size);
-int hdmi_infoframe_check(union hdmi_infoframe *frame);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
diff --git a/include/linux/hfs_common.h b/include/linux/hfs_common.h
new file mode 100644
index 000000000000..dadb5e0aa8a3
--- /dev/null
+++ b/include/linux/hfs_common.h
@@ -0,0 +1,653 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HFS/HFS+ common definitions, inline functions,
+ * and shared functionality.
+ */
+
+#ifndef _HFS_COMMON_H_
+#define _HFS_COMMON_H_
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define hfs_dbg(fmt, ...) \
+ pr_debug("pid %d:%s:%d %s(): " fmt, \
+ current->pid, __FILE__, __LINE__, __func__, ##__VA_ARGS__)
+
+/*
+ * Format of structures on disk
+ * Information taken from Apple Technote #1150 (HFS Plus Volume Format)
+ */
+
+/* offsets to various blocks */
+#define HFS_DD_BLK 0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK 1 /* First block of partition map */
+#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
+
+#define HFSPLUS_VOLHEAD_SIG 0x482b
+#define HFSPLUS_VOLHEAD_SIGX 0x4858
+#define HFSPLUS_SUPER_MAGIC 0x482b
+
+#define HFSP_WRAP_MAGIC 0x4244
+#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
+#define HFSP_WRAP_ATTRIB_SPARED 0x0200
+
+#define HFSP_WRAPOFF_SIG 0x00
+#define HFSP_WRAPOFF_ATTRIB 0x0A
+#define HFSP_WRAPOFF_ABLKSIZE 0x14
+#define HFSP_WRAPOFF_ABLKSTART 0x1C
+#define HFSP_WRAPOFF_EMBEDSIG 0x7C
+#define HFSP_WRAPOFF_EMBEDEXT 0x7E
+
+#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
+#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
+
+#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
+#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
+
+#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
+
+#define HFSP_HIDDENDIR_NAME \
+ "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
+
+/* various FIXED size parameters */
+#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
+#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
+#define HFS_MAX_VALENCE 32767U
+
+#define HFSPLUS_SECTOR_SIZE HFS_SECTOR_SIZE
+#define HFSPLUS_SECTOR_SHIFT HFS_SECTOR_SIZE_BITS
+#define HFSPLUS_VOLHEAD_SECTOR 2
+#define HFSPLUS_MIN_VERSION 4
+#define HFSPLUS_CURRENT_VERSION 5
+
+#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
+#define HFS_MAX_NAMELEN 128
+
+#define HFSPLUS_MAX_STRLEN 255
+#define HFSPLUS_ATTR_MAX_STRLEN 127
+
+/* Meanings of the drAtrb field of the MDB,
+ * Reference: _Inside Macintosh: Files_ p. 2-61
+ */
+#define HFS_SB_ATTRIB_HLOCK (1 << 7)
+#define HFS_SB_ATTRIB_UNMNT (1 << 8)
+#define HFS_SB_ATTRIB_SPARED (1 << 9)
+#define HFS_SB_ATTRIB_INCNSTNT (1 << 11)
+#define HFS_SB_ATTRIB_SLOCK (1 << 15)
+
+/* values for hfs_cat_rec.cdrType */
+#define HFS_CDR_DIR 0x01 /* folder (directory) */
+#define HFS_CDR_FIL 0x02 /* file */
+#define HFS_CDR_THD 0x03 /* folder (directory) thread */
+#define HFS_CDR_FTH 0x04 /* file thread */
+
+/* legal values for hfs_ext_key.FkType and hfs_file.fork */
+#define HFS_FK_DATA 0x00
+#define HFS_FK_RSRC 0xFF
+
+/* bits in hfs_fil_entry.Flags */
+#define HFS_FIL_LOCK 0x01 /* locked */
+#define HFS_FIL_THD 0x02 /* file thread */
+#define HFS_FIL_DOPEN 0x04 /* data fork open */
+#define HFS_FIL_ROPEN 0x08 /* resource fork open */
+#define HFS_FIL_DIR 0x10 /* directory (always clear) */
+#define HFS_FIL_NOCOPY 0x40 /* copy-protected file */
+#define HFS_FIL_USED 0x80 /* open */
+
+/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
+#define HFS_DIR_LOCK 0x01 /* locked */
+#define HFS_DIR_THD 0x02 /* directory thread */
+#define HFS_DIR_INEXPFOLDER 0x04 /* in a shared area */
+#define HFS_DIR_MOUNTED 0x08 /* mounted */
+#define HFS_DIR_DIR 0x10 /* directory (always set) */
+#define HFS_DIR_EXPFOLDER 0x20 /* share point */
+
+/* bits hfs_finfo.fdFlags */
+#define HFS_FLG_INITED 0x0100
+#define HFS_FLG_LOCKED 0x1000
+#define HFS_FLG_INVISIBLE 0x4000
+
+/* Some special File ID numbers */
+#define HFS_POR_CNID 1 /* Parent Of the Root */
+#define HFSPLUS_POR_CNID HFS_POR_CNID
+#define HFS_ROOT_CNID 2 /* ROOT directory */
+#define HFSPLUS_ROOT_CNID HFS_ROOT_CNID
+#define HFS_EXT_CNID 3 /* EXTents B-tree */
+#define HFSPLUS_EXT_CNID HFS_EXT_CNID
+#define HFS_CAT_CNID 4 /* CATalog B-tree */
+#define HFSPLUS_CAT_CNID HFS_CAT_CNID
+#define HFS_BAD_CNID 5 /* BAD blocks file */
+#define HFSPLUS_BAD_CNID HFS_BAD_CNID
+#define HFS_ALLOC_CNID 6 /* ALLOCation file (HFS+) */
+#define HFSPLUS_ALLOC_CNID HFS_ALLOC_CNID
+#define HFS_START_CNID 7 /* STARTup file (HFS+) */
+#define HFSPLUS_START_CNID HFS_START_CNID
+#define HFS_ATTR_CNID 8 /* ATTRibutes file (HFS+) */
+#define HFSPLUS_ATTR_CNID HFS_ATTR_CNID
+#define HFS_EXCH_CNID 15 /* ExchangeFiles temp id */
+#define HFSPLUS_EXCH_CNID HFS_EXCH_CNID
+#define HFS_FIRSTUSER_CNID 16 /* first available user id */
+#define HFSPLUS_FIRSTUSER_CNID HFS_FIRSTUSER_CNID
+
+/*======== HFS/HFS+ structures as they appear on the disk ========*/
+
+typedef __be32 hfsplus_cnid;
+typedef __be16 hfsplus_unichr;
+
+/* Pascal-style string of up to 31 characters */
+struct hfs_name {
+ u8 len;
+ u8 name[HFS_NAMELEN];
+} __packed;
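/*
 * Illustrative sketch, not from the patch: hfs_name is a classic Pascal
 * string, so the length byte bounds the name bytes and there is no NUL
 * terminator. hfs_name_to_cstr() is a hypothetical helper; dst must hold
 * HFS_NAMELEN + 1 bytes.
 */
static size_t hfs_name_to_cstr(const struct hfs_name *src, char *dst)
{
	size_t len = min_t(size_t, src->len, HFS_NAMELEN);

	memcpy(dst, src->name, len);
	dst[len] = '\0';
	return len;
}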
+
+/* A "string" as used in filenames, etc. */
+struct hfsplus_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
+} __packed;
+
+/*
+ * A "string" is used in attributes file
+ * for name of extended attribute
+ */
+struct hfsplus_attr_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
+} __packed;
+
+struct hfs_extent {
+ __be16 block;
+ __be16 count;
+};
+typedef struct hfs_extent hfs_extent_rec[3];
+
+/* A single contiguous area of a file */
+struct hfsplus_extent {
+ __be32 start_block;
+ __be32 block_count;
+} __packed;
+typedef struct hfsplus_extent hfsplus_extent_rec[8];
+
+/* Information for a "Fork" in a file */
+struct hfsplus_fork_raw {
+ __be64 total_size;
+ __be32 clump_size;
+ __be32 total_blocks;
+ hfsplus_extent_rec extents;
+} __packed;
+
+struct hfs_mdb {
+ __be16 drSigWord; /* Signature word indicating fs type */
+ __be32 drCrDate; /* fs creation date/time */
+ __be32 drLsMod; /* fs modification date/time */
+ __be16 drAtrb; /* fs attributes */
+ __be16 drNmFls; /* number of files in root directory */
+ __be16 drVBMSt; /* location (in 512-byte blocks)
+ of the volume bitmap */
+ __be16 drAllocPtr; /* location (in allocation blocks)
+ to begin next allocation search */
+ __be16 drNmAlBlks; /* number of allocation blocks */
+ __be32 drAlBlkSiz; /* bytes in an allocation block */
+ __be32 drClpSiz; /* clumpsize, the number of bytes to
+ allocate when extending a file */
+ __be16 drAlBlSt; /* location (in 512-byte blocks)
+ of the first allocation block */
+ __be32 drNxtCNID; /* CNID to assign to the next
+ file or directory created */
+ __be16 drFreeBks; /* number of free allocation blocks */
+ u8 drVN[28]; /* the volume label */
+ __be32 drVolBkUp; /* fs backup date/time */
+ __be16 drVSeqNum; /* backup sequence number */
+ __be32 drWrCnt; /* fs write count */
+ __be32 drXTClpSiz; /* clumpsize for the extents B-tree */
+ __be32 drCTClpSiz; /* clumpsize for the catalog B-tree */
+ __be16 drNmRtDirs; /* number of directories in
+ the root directory */
+ __be32 drFilCnt; /* number of files in the fs */
+ __be32 drDirCnt; /* number of directories in the fs */
+ u8 drFndrInfo[32]; /* data used by the Finder */
+ __be16 drEmbedSigWord; /* embedded volume signature */
+ __be32 drEmbedExtent; /* starting block number (xdrStABN)
+ and number of allocation blocks
+ (xdrNumABlks) occupied by embedded
+ volume */
+ __be32 drXTFlSize; /* bytes in the extents B-tree */
+ hfs_extent_rec drXTExtRec; /* extents B-tree's first 3 extents */
+ __be32 drCTFlSize; /* bytes in the catalog B-tree */
+ hfs_extent_rec drCTExtRec; /* catalog B-tree's first 3 extents */
+} __packed;
+
+/* HFS+ Volume Header */
+struct hfsplus_vh {
+ __be16 signature;
+ __be16 version;
+ __be32 attributes;
+ __be32 last_mount_vers;
+ u32 reserved;
+
+ __be32 create_date;
+ __be32 modify_date;
+ __be32 backup_date;
+ __be32 checked_date;
+
+ __be32 file_count;
+ __be32 folder_count;
+
+ __be32 blocksize;
+ __be32 total_blocks;
+ __be32 free_blocks;
+
+ __be32 next_alloc;
+ __be32 rsrc_clump_sz;
+ __be32 data_clump_sz;
+ hfsplus_cnid next_cnid;
+
+ __be32 write_count;
+ __be64 encodings_bmp;
+
+ u32 finder_info[8];
+
+ struct hfsplus_fork_raw alloc_file;
+ struct hfsplus_fork_raw ext_file;
+ struct hfsplus_fork_raw cat_file;
+ struct hfsplus_fork_raw attr_file;
+ struct hfsplus_fork_raw start_file;
+} __packed;
+
+/* HFS+ volume attributes */
+#define HFSPLUS_VOL_UNMNT (1 << 8)
+#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
+#define HFSPLUS_VOL_NOCACHE (1 << 10)
+#define HFSPLUS_VOL_INCNSTNT (1 << 11)
+#define HFSPLUS_VOL_NODEID_REUSED (1 << 12)
+#define HFSPLUS_VOL_JOURNALED (1 << 13)
+#define HFSPLUS_VOL_SOFTLOCK (1 << 15)
+#define HFSPLUS_VOL_UNUSED_NODE_FIX (1 << 31)
+
+struct hfs_point {
+ __be16 v;
+ __be16 h;
+} __packed;
+
+typedef struct hfs_point hfsp_point;
+
+struct hfs_rect {
+ __be16 top;
+ __be16 left;
+ __be16 bottom;
+ __be16 right;
+} __packed;
+
+typedef struct hfs_rect hfsp_rect;
+
+struct hfs_finfo {
+ __be32 fdType;
+ __be32 fdCreator;
+ __be16 fdFlags;
+ struct hfs_point fdLocation;
+ __be16 fdFldr;
+} __packed;
+
+typedef struct hfs_finfo FInfo;
+
+struct hfs_fxinfo {
+ __be16 fdIconID;
+ u8 fdUnused[8];
+ __be16 fdComment;
+ __be32 fdPutAway;
+} __packed;
+
+typedef struct hfs_fxinfo FXInfo;
+
+struct hfs_dinfo {
+ struct hfs_rect frRect;
+ __be16 frFlags;
+ struct hfs_point frLocation;
+ __be16 frView;
+} __packed;
+
+typedef struct hfs_dinfo DInfo;
+
+struct hfs_dxinfo {
+ struct hfs_point frScroll;
+ __be32 frOpenChain;
+ __be16 frUnused;
+ __be16 frComment;
+ __be32 frPutAway;
+} __packed;
+
+typedef struct hfs_dxinfo DXInfo;
+
+union hfs_finder_info {
+ struct {
+ struct hfs_finfo finfo;
+ struct hfs_fxinfo fxinfo;
+ } file;
+ struct {
+ struct hfs_dinfo dinfo;
+ struct hfs_dxinfo dxinfo;
+ } dir;
+} __packed;
+
+/* The key used in the catalog b-tree: */
+struct hfs_cat_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 reserved; /* padding */
+ __be32 ParID; /* CNID of the parent dir */
+ struct hfs_name CName; /* The filename of the entry */
+} __packed;
+
+/* HFS+ catalog entry key */
+struct hfsplus_cat_key {
+ __be16 key_len;
+ hfsplus_cnid parent;
+ struct hfsplus_unistr name;
+} __packed;
+
+#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
+
+/* The key used in the extents b-tree: */
+struct hfs_ext_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 FkType; /* HFS_FK_{DATA,RSRC} */
+ __be32 FNum; /* The File ID of the file */
+ __be16 FABN; /* allocation block number */
+} __packed;
+
+/* HFS+ extents tree key */
+struct hfsplus_ext_key {
+ __be16 key_len;
+ u8 fork_type;
+ u8 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+} __packed;
+
+#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
+
+typedef union hfs_btree_key {
+ u8 key_len; /* number of bytes in the key */
+ struct hfs_cat_key cat;
+ struct hfs_ext_key ext;
+} hfs_btree_key;
+
+#define HFS_MAX_CAT_KEYLEN (sizeof(struct hfs_cat_key) - sizeof(u8))
+#define HFS_MAX_EXT_KEYLEN (sizeof(struct hfs_ext_key) - sizeof(u8))
+
+typedef union hfs_btree_key btree_key;
+
+/* The catalog record for a file */
+struct hfs_cat_file {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ u8 Flags; /* Flags such as read-only */
+ s8 Typ; /* file version number = 0 */
+ struct hfs_finfo UsrWds; /* data used by the Finder */
+ __be32 FlNum; /* The CNID */
+ __be16 StBlk; /* obsolete */
+ __be32 LgLen; /* The logical EOF of the data fork*/
+ __be32 PyLen; /* The physical EOF of the data fork */
+ __be16 RStBlk; /* obsolete */
+ __be32 RLgLen; /* The logical EOF of the rsrc fork */
+ __be32 RPyLen; /* The physical EOF of the rsrc fork */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modified date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_fxinfo FndrInfo; /* more data for the Finder */
+ __be16 ClpSize; /* number of bytes to allocate
+ when extending files */
+ hfs_extent_rec ExtRec; /* first extent record
+ for the data fork */
+ hfs_extent_rec RExtRec; /* first extent record
+ for the resource fork */
+ u32 Resrv; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a directory */
+struct hfs_cat_dir {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ __be16 Flags; /* flags */
+ __be16 Val; /* Valence: number of files and
+ dirs in the directory */
+ __be32 DirID; /* The CNID */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modification date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_dinfo UsrInfo; /* data used by the Finder */
+ struct hfs_dxinfo FndrInfo; /* more data used by Finder */
+ u8 Resrv[16]; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a thread */
+struct hfs_cat_thread {
+ s8 type; /* The type of entry */
+ u8 reserved[9]; /* reserved by Apple */
+ __be32 ParID; /* CNID of parent directory */
+ struct hfs_name CName; /* The name of this entry */
+} __packed;
+
+/* A catalog tree record */
+typedef union hfs_cat_rec {
+ s8 type; /* The type of entry */
+ struct hfs_cat_file file;
+ struct hfs_cat_dir dir;
+ struct hfs_cat_thread thread;
+} hfs_cat_rec;
+
+/* POSIX permissions */
+struct hfsplus_perm {
+ __be32 owner;
+ __be32 group;
+ u8 rootflags;
+ u8 userflags;
+ __be16 mode;
+ __be32 dev;
+} __packed;
+
+#define HFSPLUS_FLG_NODUMP 0x01
+#define HFSPLUS_FLG_IMMUTABLE 0x02
+#define HFSPLUS_FLG_APPEND 0x04
+
+/* HFS/HFS+ BTree node descriptor */
+struct hfs_bnode_desc {
+ __be32 next; /* (V) Number of the next node at this level */
+ __be32 prev; /* (V) Number of the prev node at this level */
+ u8 type; /* (F) The type of node */
+ u8 height; /* (F) The level of this node (leaves=1) */
+ __be16 num_recs; /* (V) The number of records in this node */
+ u16 reserved;
+} __packed;
+
+/* HFS/HFS+ BTree node types */
+#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
+#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
+#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
+
+/* HFS/HFS+ BTree header */
+struct hfs_btree_header_rec {
+ __be16 depth; /* (V) The number of levels in this B-tree */
+ __be32 root; /* (V) The node number of the root node */
+ __be32 leaf_count; /* (V) The number of leaf records */
+ __be32 leaf_head; /* (V) The number of the first leaf node */
+ __be32 leaf_tail; /* (V) The number of the last leaf node */
+ __be16 node_size; /* (F) The number of bytes in a node (=512) */
+ __be16 max_key_len; /* (F) The length of a key in an index node */
+ __be32 node_count; /* (V) The total number of nodes */
+ __be32 free_nodes; /* (V) The number of unused nodes */
+ u16 reserved1;
+ __be32 clump_size; /* (F) clump size. not usually used. */
+ u8 btree_type; /* (F) BTree type */
+ u8 key_type;
+ __be32 attributes; /* (F) attributes */
+ u32 reserved3[16];
+} __packed;
+
+/* BTree attributes */
+#define BTREE_ATTR_BADCLOSE 0x00000001 /* b-tree not closed properly. not
+ used by hfsplus. */
+#define HFS_TREE_BIGKEYS 0x00000002 /* key length is u16 instead of u8.
+ used by hfsplus. */
+#define HFS_TREE_VARIDXKEYS 0x00000004 /* variable key length instead of
+ max key length. used in catalog
+ b-tree but not in extents
+ b-tree (hfsplus). */
+
+/* HFS+ BTree misc info */
+#define HFSPLUS_TREE_HEAD 0
+#define HFSPLUS_NODE_MXSZ 32768
+#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
+#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
+#define HFSPLUS_BTREE_HDR_USER_BYTES 128
+
+/* btree key type */
+#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
+#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
+
+/* HFS+ folder data (part of an hfsplus_cat_entry) */
+struct hfsplus_cat_folder {
+ __be16 type;
+ __be16 flags;
+ __be32 valence;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ DInfo user_info;
+ DXInfo finder_info;
+ );
+ __be32 text_encoding;
+ __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
+} __packed;
+
+/* HFS+ file data (part of a cat_entry) */
+struct hfsplus_cat_file {
+ __be16 type;
+ __be16 flags;
+ u32 reserved1;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ FInfo user_info;
+ FXInfo finder_info;
+ );
+ __be32 text_encoding;
+ u32 reserved2;
+
+ struct hfsplus_fork_raw data_fork;
+ struct hfsplus_fork_raw rsrc_fork;
+} __packed;
+
+/* File and folder flag bits */
+#define HFSPLUS_FILE_LOCKED 0x0001
+#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
+#define HFSPLUS_XATTR_EXISTS 0x0004
+#define HFSPLUS_ACL_EXISTS 0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
+ * (HFSX only) */
+
+/* HFS+ catalog thread (part of a cat_entry) */
+struct hfsplus_cat_thread {
+ __be16 type;
+ s16 reserved;
+ hfsplus_cnid parentID;
+ struct hfsplus_unistr nodeName;
+} __packed;
+
+#define HFSPLUS_MIN_THREAD_SZ 10
+
+/* A data record in the catalog tree */
+typedef union {
+ __be16 type;
+ struct hfsplus_cat_folder folder;
+ struct hfsplus_cat_file file;
+ struct hfsplus_cat_thread thread;
+} __packed hfsplus_cat_entry;
+
+/* HFS+ catalog entry type */
+#define HFSPLUS_FOLDER 0x0001
+#define HFSPLUS_FILE 0x0002
+#define HFSPLUS_FOLDER_THREAD 0x0003
+#define HFSPLUS_FILE_THREAD 0x0004
+
+#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
+#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
+
+#define HFSPLUS_ATTR_INLINE_DATA 0x10
+#define HFSPLUS_ATTR_FORK_DATA 0x20
+#define HFSPLUS_ATTR_EXTENTS 0x30
+
+/* HFS+ attributes tree key */
+struct hfsplus_attr_key {
+ __be16 key_len;
+ __be16 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+ struct hfsplus_attr_unistr key_name;
+} __packed;
+
+#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
+
+/* HFS+ fork data attribute */
+struct hfsplus_attr_fork_data {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_fork_raw the_fork;
+} __packed;
+
+/* HFS+ extension attribute */
+struct hfsplus_attr_extents {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_extent extents;
+} __packed;
+
+#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
+
+/* HFS+ attribute inline data */
+struct hfsplus_attr_inline_data {
+ __be32 record_type;
+ __be32 reserved1;
+ u8 reserved2[6];
+ __be16 length;
+ u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
+} __packed;
+
+/* A data record in the attributes tree */
+typedef union {
+ __be32 record_type;
+ struct hfsplus_attr_fork_data fork_data;
+ struct hfsplus_attr_extents extents;
+ struct hfsplus_attr_inline_data inline_data;
+} __packed hfsplus_attr_entry;
+
+/* HFS+ generic BTree key */
+typedef union {
+ __be16 key_len;
+ struct hfsplus_cat_key cat;
+ struct hfsplus_ext_key ext;
+ struct hfsplus_attr_key attr;
+} __packed hfsplus_btree_key;
+
+#endif /* _HFS_COMMON_H_ */
diff --git a/include/linux/hid-over-i2c.h b/include/linux/hid-over-i2c.h
new file mode 100644
index 000000000000..3b1a0208a6b8
--- /dev/null
+++ b/include/linux/hid-over-i2c.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#include <linux/bits.h>
+
+#ifndef _HID_OVER_I2C_H_
+#define _HID_OVER_I2C_H_
+
+#define HIDI2C_REG_LEN sizeof(__le16)
+
+/* Input report type definition in HIDI2C protocol */
+enum hidi2c_report_type {
+ HIDI2C_RESERVED = 0,
+ HIDI2C_INPUT,
+ HIDI2C_OUTPUT,
+ HIDI2C_FEATURE,
+};
+
+/* Power state type definition in HIDI2C protocol */
+enum hidi2c_power_state {
+ HIDI2C_ON,
+ HIDI2C_SLEEP,
+};
+
+/* Opcode type definition in HIDI2C protocol */
+enum hidi2c_opcode {
+ HIDI2C_RESET = 1,
+ HIDI2C_GET_REPORT,
+ HIDI2C_SET_REPORT,
+ HIDI2C_GET_IDLE,
+ HIDI2C_SET_IDLE,
+ HIDI2C_GET_PROTOCOL,
+ HIDI2C_SET_PROTOCOL,
+ HIDI2C_SET_POWER,
+};
+
+/**
+ * struct hidi2c_report_packet - Report packet definition in HIDI2C protocol
+ * @len: data field length
+ * @data: HIDI2C report packet data
+ */
+struct hidi2c_report_packet {
+ __le16 len;
+ u8 data[];
+} __packed;
+
+#define HIDI2C_LENGTH_LEN sizeof(__le16)
+
+#define HIDI2C_PACKET_LEN(data_len) ((data_len) + HIDI2C_LENGTH_LEN)
+#define HIDI2C_DATA_LEN(pkt_len) ((pkt_len) - HIDI2C_LENGTH_LEN)
+
+#define HIDI2C_CMD_MAX_RI 0x0F
+
+/**
+ * HIDI2C command data packet - Command packet definition in HIDI2C protocol
+ * @report_id: [0:3] report id (<15) for features or output reports
+ * @report_type: [4:5] indicates report type, reference to hidi2c_report_type
+ * @reserved0: [6:7] reserved bits
+ * @opcode: [8:11] command operation code, reference to hidi2c_opcode
+ * @reserved1: [12:15] reserved bits
+ * @report_id_optional: [23:16] appended 3rd byte.
+ * If the report_id in the low byte is set to the
+ * sentinel value (HIDI2C_CMD_MAX_RI), then this
+ * optional third byte carries the report id (>=15).
+ * Otherwise, this third byte is not present.
+ */
+
+#define HIDI2C_CMD_LEN sizeof(__le16)
+#define HIDI2C_CMD_LEN_OPT (sizeof(__le16) + 1)
+#define HIDI2C_CMD_REPORT_ID GENMASK(3, 0)
+#define HIDI2C_CMD_REPORT_TYPE GENMASK(5, 4)
+#define HIDI2C_CMD_OPCODE GENMASK(11, 8)
+#define HIDI2C_CMD_3RD_BYTE GENMASK(23, 16)
+
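/*
 * Illustrative sketch, not from the patch: composing a command word with the
 * masks above via FIELD_PREP() (linux/bitfield.h). hidi2c_build_cmd() is a
 * hypothetical helper; the sentinel handling follows the comment block above.
 */
static u32 hidi2c_build_cmd(enum hidi2c_opcode op, enum hidi2c_report_type type,
			    unsigned int report_id)
{
	u32 cmd = FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, type) |
		  FIELD_PREP(HIDI2C_CMD_OPCODE, op);

	if (report_id < HIDI2C_CMD_MAX_RI)
		return cmd | FIELD_PREP(HIDI2C_CMD_REPORT_ID, report_id);

	/* report id >= 15: low nibble holds the sentinel, real id in byte 3 */
	return cmd | FIELD_PREP(HIDI2C_CMD_REPORT_ID, HIDI2C_CMD_MAX_RI) |
	       FIELD_PREP(HIDI2C_CMD_3RD_BYTE, report_id);
}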
+#define HIDI2C_HID_DESC_BCDVERSION 0x100
+
+/**
+ * struct hidi2c_dev_descriptor - HIDI2C device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x1E (30).
+ * @bcd_ver: The version number of the HIDI2C protocol supported.
+ * In binary coded decimal (BCD) format.
+ * @report_desc_len: The length of the report descriptor
+ * @report_desc_reg: The register address to retrieve report descriptor
+ * @input_reg: The register address to retrieve input report
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @output_reg: The register address to send output report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @cmd_reg: The register address to send command
+ * @data_reg: The register address to send command data
+ * @vendor_id: Device manufacturer's vendor ID
+ * @product_id: Device's unique model/product ID
+ * @version_id: Device's unique version
+ * @reserved0: Reserved and should be 0
+ * @reserved1: Reserved and should be 0
+ */
+struct hidi2c_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 report_desc_len;
+ __le16 report_desc_reg;
+ __le16 input_reg;
+ __le16 max_input_len;
+ __le16 output_reg;
+ __le16 max_output_len;
+ __le16 cmd_reg;
+ __le16 data_reg;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 reserved0;
+ __le16 reserved1;
+} __packed;
+
+#define HIDI2C_DEV_DESC_LEN sizeof(struct hidi2c_dev_descriptor)
+
+#endif /* _HID_OVER_I2C_H_ */
diff --git a/include/linux/hid-over-spi.h b/include/linux/hid-over-spi.h
new file mode 100644
index 000000000000..da5a14b5e89b
--- /dev/null
+++ b/include/linux/hid-over-spi.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#ifndef _HID_OVER_SPI_H_
+#define _HID_OVER_SPI_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Input report type definition in HIDSPI protocol */
+enum input_report_type {
+ INVALID_INPUT_REPORT_TYPE_0 = 0,
+ DATA = 1,
+ INVALID_TYPE_2 = 2,
+ RESET_RESPONSE = 3,
+ COMMAND_RESPONSE = 4,
+ GET_FEATURE_RESPONSE = 5,
+ INVALID_TYPE_6 = 6,
+ DEVICE_DESCRIPTOR_RESPONSE = 7,
+ REPORT_DESCRIPTOR_RESPONSE = 8,
+ SET_FEATURE_RESPONSE = 9,
+ OUTPUT_REPORT_RESPONSE = 10,
+ GET_INPUT_REPORT_RESPONSE = 11,
+ INVALID_INPUT_REPORT_TYPE = 0xF,
+};
+
+/* Output report type definition in HIDSPI protocol */
+enum output_report_type {
+ INVALID_OUTPUT_REPORT_TYPE_0 = 0,
+ DEVICE_DESCRIPTOR = 1,
+ REPORT_DESCRIPTOR = 2,
+ SET_FEATURE = 3,
+ GET_FEATURE = 4,
+ OUTPUT_REPORT = 5,
+ GET_INPUT_REPORT = 6,
+ COMMAND_CONTENT = 7,
+};
+
+/* Set power command ID for output report */
+#define HIDSPI_SET_POWER_CMD_ID 1
+
+/* Power state definition in HIDSPI protocol */
+enum hidspi_power_state {
+ HIDSPI_ON = 1,
+ HIDSPI_SLEEP = 2,
+ HIDSPI_OFF = 3,
+};
+
+/**
+ * Input report header definition in HIDSPI protocol
+ * Report header size is 32 bits; it includes:
+ * protocol_ver: [0:3] Currently supported HIDSPI protocol version, must be 0x3
+ * reserved0: [4:7] Reserved bits
+ * input_report_len: [8:21] Input report length in number of bytes divided by 4
+ * last_frag_flag: [22] Indicates whether this packet is the last fragment.
+ * 1 - last fragment
+ * 0 - additional fragments follow
+ * reserved1: [23] Reserved bits
+ * sync_const: [24:31] Used to validate input report header, must be 0x5A
+ */
+#define HIDSPI_INPUT_HEADER_SIZE sizeof(u32)
+#define HIDSPI_INPUT_HEADER_VER GENMASK(3, 0)
+#define HIDSPI_INPUT_HEADER_REPORT_LEN GENMASK(21, 8)
+#define HIDSPI_INPUT_HEADER_LAST_FLAG BIT(22)
+#define HIDSPI_INPUT_HEADER_SYNC GENMASK(31, 24)
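/*
 * Illustrative sketch, not from the patch: validating and decoding a received
 * input report header with the masks above via FIELD_GET() (linux/bitfield.h).
 * The 0x5A and 0x3 constants come from the comment block; the helper name
 * hidspi_parse_input_header() is hypothetical.
 */
static int hidspi_parse_input_header(__le32 raw_header, u32 *report_len,
				     bool *last_frag)
{
	u32 hdr = le32_to_cpu(raw_header);

	if (FIELD_GET(HIDSPI_INPUT_HEADER_SYNC, hdr) != 0x5A ||
	    FIELD_GET(HIDSPI_INPUT_HEADER_VER, hdr) != 0x3)
		return -EINVAL;

	/* the length field is stored as number of bytes divided by 4 */
	*report_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, hdr) * 4;
	*last_frag = FIELD_GET(HIDSPI_INPUT_HEADER_LAST_FLAG, hdr);
	return 0;
}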
+
+/**
+ * struct input_report_body_header - Input report body header definition in HIDSPI protocol
+ * @input_report_type: indicates input report type, reference to enum input_report_type
+ * @content_len: length of this input report body packet
+ * @content_id: this input report's report id
+ */
+struct input_report_body_header {
+ u8 input_report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_INPUT_BODY_HEADER_SIZE sizeof(struct input_report_body_header)
+
+/**
+ * struct input_report_body - Input report body definition in HIDSPI protocol
+ * @body_hdr: input report body header
+ * @content: input report body content
+ */
+struct input_report_body {
+ struct input_report_body_header body_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_INPUT_BODY_SIZE(content_len) ((content_len) + HIDSPI_INPUT_BODY_HEADER_SIZE)
+
+/**
+ * struct output_report_header - Output report header definition in HIDSPI protocol
+ * @report_type: output report type, reference to enum output_report_type
+ * @content_len: length of content
+ * @content_id: 0x00 - descriptors
+ * report id - Set/Get Feature or Input/Output Reports
+ * command opcode - for commands
+ */
+struct output_report_header {
+ u8 report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_HEADER_SIZE sizeof(struct output_report_header)
+
+/**
+ * struct output_report - Output report definition in HIDSPI protocol
+ * @output_hdr: output report header
+ * @content: output report content
+ */
+struct output_report {
+ struct output_report_header output_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_SIZE(content_len) ((content_len) + HIDSPI_OUTPUT_REPORT_HEADER_SIZE)
+
+/**
+ * struct hidspi_dev_descriptor - HIDSPI device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x18 (24).
+ * @bcd_ver: The version number of the HIDSPI protocol supported.
+ * In binary coded decimal (BCD) format. Must be fixed to 0x0300.
+ * @rep_desc_len: The length of the report descriptor
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @max_frag_len: The length of the largest fragment, where a fragment represents
+ * the body of an input report.
+ * @vendor_id: Device manufacturer's vendor ID
+ * @product_id: Device's unique model/product ID
+ * @version_id: Device's unique version
+ * @flags: Specify flags for the device's operation
+ * @reserved: Reserved and should be 0
+ */
+struct hidspi_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 rep_desc_len;
+ __le16 max_input_len;
+ __le16 max_output_len;
+ __le16 max_frag_len;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 flags;
+ __le32 reserved;
+};
+
+#define HIDSPI_DEVICE_DESCRIPTOR_SIZE sizeof(struct hidspi_dev_descriptor)
+#define HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE \
+ (HIDSPI_INPUT_BODY_HEADER_SIZE + HIDSPI_DEVICE_DESCRIPTOR_SIZE)
+
+#endif /* _HID_OVER_SPI_H_ */
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index c27329e2a5ad..e71056553108 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -17,7 +17,7 @@
* @attrib_id: Attribute id for this attribute.
* @report_id: Report id in which this information resides.
* @index: Field index in the report.
- * @units: Measurment unit for this attribute.
+ * @units: Measurement unit for this attribute.
* @unit_expo: Exponent used in the data.
* @size: Size in bytes for data size.
* @logical_minimum: Logical minimum value for this attribute.
@@ -39,8 +39,8 @@ struct hid_sensor_hub_attribute_info {
* struct sensor_hub_pending - Synchronous read pending information
* @status: Pending status true/false.
* @ready: Completion synchronization data.
- * @usage_id: Usage id for physical device, E.g. Gyro usage id.
- * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro.
+ * @usage_id: Usage id for physical device, e.g. gyro usage id.
+ * @attr_usage_id: Usage Id of a field, e.g. X-axis for a gyro.
* @raw_size: Response size for a read request.
* @raw_data: Place holder for received response.
*/
@@ -104,10 +104,10 @@ struct hid_sensor_hub_callbacks {
int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
/**
-* sensor_hub_device_clode() - Close hub device
+* sensor_hub_device_close() - Close hub device
* @hsdev: Hub device instance.
*
-* Used to clode hid device for sensor hub.
+* Used to close hid device for sensor hub.
*/
void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
@@ -128,12 +128,13 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
struct hid_sensor_hub_callbacks *usage_callback);
/**
-* sensor_hub_remove_callback() - Remove client callbacks
+* sensor_hub_remove_callback() - Remove client callback
* @hsdev: Hub device instance.
-* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+* @usage_id: Usage id of the client (e.g. 0x200076 for gyro).
*
-* If there is a callback registred, this call will remove that
-* callbacks, so that it will stop data and event notifications.
+* Removes a previously registered callback for the given usage_id
+* and hsdev. Once removed, the client will no longer receive data or
+* event notifications.
*/
int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
u32 usage_id);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 6730ee900ee1..8a03d9696b1c 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -30,6 +30,8 @@
#define HID_USAGE_SENSOR_PROX 0x200011
#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0
#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1
+#define HID_USAGE_SENSOR_HUMAN_PROXIMITY 0x2004b2
+#define HID_USAGE_SENSOR_HUMAN_ATTENTION 0x2004bd
/* Pressure (200031) */
#define HID_USAGE_SENSOR_PRESSURE 0x200031
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8e06d89698e6..dce862cafbbd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -46,7 +46,7 @@ struct hid_item {
__s16 s16;
__u32 u32;
__s32 s32;
- __u8 *longdata;
+ const __u8 *longdata;
} data;
};
@@ -81,6 +81,8 @@ struct hid_item {
#define HID_MAIN_ITEM_TAG_FEATURE 11
#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10
#define HID_MAIN_ITEM_TAG_END_COLLECTION 12
+#define HID_MAIN_ITEM_TAG_RESERVED_MIN 13
+#define HID_MAIN_ITEM_TAG_RESERVED_MAX 15
/*
* HID report descriptor main item contents
@@ -154,6 +156,7 @@ struct hid_item {
#define HID_UP_TELEPHONY 0x000b0000
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
+#define HID_UP_HAPTIC 0x000e0000
#define HID_UP_PID 0x000f0000
#define HID_UP_BATTERY 0x00850000
#define HID_UP_CAMERA 0x00900000
@@ -218,6 +221,7 @@ struct hid_item {
#define HID_GD_DOWN 0x00010091
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_GD_DO_NOT_DISTURB 0x0001009b
/* Microsoft Win8 Wireless Radio Controls CA usage codes */
#define HID_GD_RFKILL_BTN 0x000100c6
#define HID_GD_RFKILL_LED 0x000100c7
@@ -313,6 +317,28 @@ struct hid_item {
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
#define HID_DG_LATENCYMODE 0x000d0060
+#define HID_HP_SIMPLECONTROLLER 0x000e0001
+#define HID_HP_WAVEFORMLIST 0x000e0010
+#define HID_HP_DURATIONLIST 0x000e0011
+#define HID_HP_AUTOTRIGGER 0x000e0020
+#define HID_HP_MANUALTRIGGER 0x000e0021
+#define HID_HP_AUTOTRIGGERASSOCIATEDCONTROL 0x000e0022
+#define HID_HP_INTENSITY 0x000e0023
+#define HID_HP_REPEATCOUNT 0x000e0024
+#define HID_HP_RETRIGGERPERIOD 0x000e0025
+#define HID_HP_WAVEFORMVENDORPAGE 0x000e0026
+#define HID_HP_WAVEFORMVENDORID 0x000e0027
+#define HID_HP_WAVEFORMCUTOFFTIME 0x000e0028
+#define HID_HP_WAVEFORMNONE 0x000e1001
+#define HID_HP_WAVEFORMSTOP 0x000e1002
+#define HID_HP_WAVEFORMCLICK 0x000e1003
+#define HID_HP_WAVEFORMBUZZCONTINUOUS 0x000e1004
+#define HID_HP_WAVEFORMRUMBLECONTINUOUS 0x000e1005
+#define HID_HP_WAVEFORMPRESS 0x000e1006
+#define HID_HP_WAVEFORMRELEASE 0x000e1007
+#define HID_HP_VENDORWAVEFORMMIN 0x000e2001
+#define HID_HP_VENDORWAVEFORMMAX 0x000e2fff
+
#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
#define HID_BAT_CHARGING 0x00850044
@@ -354,11 +380,14 @@ struct hid_item {
* | @HID_QUIRK_INPUT_PER_APP:
* | @HID_QUIRK_X_INVERT:
* | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_IGNORE_MOUSE:
* | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
* | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
* | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
* | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
* | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_POWER_ON_AFTER_BACKLIGHT:
* | @HID_QUIRK_FULLSPEED_INTERVAL:
* | @HID_QUIRK_NO_INIT_REPORTS:
* | @HID_QUIRK_NO_IGNORE:
@@ -378,12 +407,15 @@ struct hid_item {
#define HID_QUIRK_INPUT_PER_APP BIT(11)
#define HID_QUIRK_X_INVERT BIT(12)
#define HID_QUIRK_Y_INVERT BIT(13)
+#define HID_QUIRK_IGNORE_MOUSE BIT(14)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_NOINVERT BIT(21)
+#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
+#define HID_QUIRK_POWER_ON_AFTER_BACKLIGHT BIT(23)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -417,6 +449,12 @@ struct hid_item {
#define HID_BOOT_PROTOCOL 0
/*
+ * HID units
+ */
+#define HID_UNIT_GRAM 0x0101
+#define HID_UNIT_NEWTON 0xe111
+
+/*
* This is the global environment of the parser. This information is
* persistent for main-items. The global environment can be saved and
* restored with PUSH/POP statements.
@@ -599,15 +637,17 @@ enum hid_battery_status {
struct hid_driver;
struct hid_ll_driver;
-struct hid_device { /* device report descriptor */
- __u8 *dev_rdesc;
- unsigned dev_rsize;
- __u8 *rdesc;
- unsigned rsize;
+struct hid_device {
+ const __u8 *dev_rdesc; /* device report descriptor */
+ const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */
+ const __u8 *rdesc; /* currently used report descriptor */
+ unsigned int dev_rsize;
+ unsigned int bpf_rsize;
+ unsigned int rsize;
+ unsigned int collection_size; /* Number of allocated hid_collections */
struct hid_collection *collection; /* List of HID collections */
- unsigned collection_size; /* Number of allocated hid_collections */
- unsigned maxcollection; /* Number of parsed collections */
- unsigned maxapplication; /* Number of applications */
+ unsigned int maxcollection; /* Number of parsed collections */
+ unsigned int maxapplication; /* Number of applications */
__u16 bus; /* BUS ID */
__u16 group; /* Report group */
__u32 vendor; /* Vendor ID */
@@ -733,8 +773,9 @@ struct hid_descriptor {
__le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
+ struct hid_class_descriptor rpt_desc;
- struct hid_class_descriptor desc[1];
+ struct hid_class_descriptor opt_descs[];
} __attribute__ ((packed));
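/*
 * Illustrative sketch, not from the patch: with the mandatory report
 * descriptor split out of the old one-element desc[] array, callers can
 * index it by name. hdesc is a hypothetical parsed descriptor pointer.
 */
unsigned int rsize = le16_to_cpu(hdesc->rpt_desc.wDescriptorLength);	/* was desc[0] */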
#define HID_DEVICE(b, g, ven, prod) \
@@ -785,6 +826,8 @@ struct hid_usage_id {
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
+ * @on_hid_hw_open: invoked when hid core opens first instance (NULL means nop)
+ * @on_hid_hw_close: invoked when hid core closes last instance (NULL means nop)
*
* probe should return -errno on error, or 0 on success. During probe,
* input will not be passed to raw_event unless hid_device_io_start is
@@ -804,7 +847,7 @@ struct hid_usage_id {
* zero from them.
*/
struct hid_driver {
- char *name;
+ const char *name;
const struct hid_device_id *id_table;
struct list_head dyn_list;
@@ -822,7 +865,7 @@ struct hid_driver {
struct hid_usage *usage, __s32 value);
void (*report)(struct hid_device *hdev, struct hid_report *report);
- __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
unsigned int *size);
int (*input_mapping)(struct hid_device *hdev,
@@ -840,6 +883,8 @@ struct hid_driver {
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
+ void (*on_hid_hw_open)(struct hid_device *hdev);
+ void (*on_hid_hw_close)(struct hid_device *hdev);
/* private: */
struct device_driver driver;
@@ -939,7 +984,10 @@ extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct h
extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
+void hidinput_reset_resume(struct hid_device *hid);
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage);
int hid_set_field(struct hid_field *, unsigned, __s32);
int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
int interrupt);
@@ -953,7 +1001,7 @@ struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
enum hid_report_type type, unsigned int id,
unsigned int application);
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
enum hid_report_type type, unsigned int id,
unsigned int field_index,
@@ -972,7 +1020,6 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev,
struct hid_driver *hdrv);
bool hid_compare_device_paths(struct hid_device *hdev_a,
struct hid_device *hdev_b, char separator);
-s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
@@ -1125,6 +1172,13 @@ int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
void hid_hw_request(struct hid_device *hdev,
struct hid_report *report, enum hid_class_request reqtype);
+int __hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ __u64 source, bool from_bpf);
+int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source,
+ bool from_bpf);
int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype,
@@ -1194,7 +1248,11 @@ static inline void hid_hw_wait(struct hid_device *hdev)
/**
* hid_report_len - calculate the report length
*
- * @report: the report we want to know the length
+ * @report: the report whose length we want to know
+ *
+ * The length counts the report ID byte, but only if the ID is nonzero
+ * and therefore is included in the report. Reports whose ID is zero
+ * never include an ID byte.
*/
static inline u32 hid_report_len(struct hid_report *report)
{
@@ -1209,12 +1267,6 @@ unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
void hid_quirks_exit(__u16 bus);
-#ifdef CONFIG_HID_PID
-int hid_pidff_init(struct hid_device *hid);
-#else
-#define hid_pidff_init NULL
-#endif
-
#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
#define hid_err(hid, fmt, ...) \
@@ -1223,6 +1275,8 @@ int hid_pidff_init(struct hid_device *hid);
dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...) \
dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...) \
dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg(hid, fmt, ...) \
@@ -1239,4 +1293,15 @@ int hid_pidff_init(struct hid_device *hid);
#define hid_dbg_once(hid, fmt, ...) \
dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_err_ratelimited(hid, fmt, ...) \
+ dev_err_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_ratelimited(hid, fmt, ...) \
+ dev_notice_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_ratelimited(hid, fmt, ...) \
+ dev_info_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_ratelimited(hid, fmt, ...) \
+ dev_dbg_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+
#endif
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index eec2592dec12..a2e47dbcf82c 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -4,7 +4,8 @@
#define __HID_BPF_H
#include <linux/bpf.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/srcu.h>
#include <uapi/linux/hid.h>
struct hid_device;
@@ -20,15 +21,9 @@ struct hid_device;
* struct hid_bpf_ctx - User accessible data for all HID programs
*
* ``data`` is not directly accessible from the context. We need to issue
- * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
+ * a call to hid_bpf_get_data() in order to get a pointer to that field.
*
- * All of these fields are currently read-only.
- *
- * @index: program index in the jump table. No special meaning (a smaller index
- * doesn't mean the program will be executed before another program with
- * a bigger index).
- * @hid: the ``struct hid_device`` representing the device itself
- * @report_type: used for ``hid_bpf_device_event()``
+ * @hid: the &struct hid_device representing the device itself
* @allocated_size: Allocated size of data.
*
* This is how much memory is available and can be requested
@@ -45,76 +40,147 @@ struct hid_device;
* ``size`` must always be less or equal than ``allocated_size`` (it is enforced
* once all BPF programs have been run).
* @retval: Return value of the previous program.
+ *
+ * ``hid`` and ``allocated_size`` are read-only, ``size`` and ``retval`` are read-write.
*/
struct hid_bpf_ctx {
- __u32 index;
- const struct hid_device *hid;
+ struct hid_device *hid;
__u32 allocated_size;
- enum hid_report_type report_type;
union {
__s32 retval;
__s32 size;
};
};
-/**
- * enum hid_bpf_attach_flags - flags used when attaching a HIF-BPF program
- *
- * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel choses where to
- * insert the program
- * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
- * currently attached to the device. This doesn't
- * guarantee that this program will always be first
- * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
- */
-enum hid_bpf_attach_flags {
- HID_BPF_FLAG_NONE = 0,
- HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
- HID_BPF_FLAG_MAX,
-};
-
-/* Following functions are tracepoints that BPF programs can attach to */
-int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
-int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-
/*
* Below is HID internal
*/
-/* internal function to call eBPF programs, not to be used by anybody */
-int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
-
#define HID_BPF_MAX_PROGS_PER_DEV 64
#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
-/* types of HID programs to attach to */
-enum hid_bpf_prog_type {
- HID_BPF_PROG_TYPE_UNDEF = -1,
- HID_BPF_PROG_TYPE_DEVICE_EVENT, /* an event is emitted from the device */
- HID_BPF_PROG_TYPE_RDESC_FIXUP,
- HID_BPF_PROG_TYPE_MAX,
-};
struct hid_report_enum;
-struct hid_bpf_ops {
+struct hid_ops {
struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
int (*hid_hw_raw_request)(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
size_t len, enum hid_report_type rtype,
- enum hid_class_request reqtype);
- int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len);
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+ int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len,
+ u64 source, bool from_bpf);
int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
- u8 *data, u32 size, int interrupt);
+ u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+ bool lock_already_taken);
struct module *owner;
const struct bus_type *bus_type;
};
-extern struct hid_bpf_ops *hid_bpf_ops;
+extern const struct hid_ops *hid_ops;
-struct hid_bpf_prog_list {
- u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
- u8 prog_cnt;
+/**
+ * struct hid_bpf_ops - A BPF struct_ops of callbacks for attaching HID-BPF
+ *			 programs to a HID device
+ * @hid_id: the unique HID ID to attach to. This is writable before ``load()``
+ *	    and cannot be changed afterwards
+ * @flags: flags used while attaching the struct_ops to the device. The only
+ *	   allowed values are %0 and ``BPF_F_BEFORE``.
+ *	   Writable only before ``load()``
+ */
+struct hid_bpf_ops {
+ /* hid_id needs to stay first so we can easily change it
+ * from userspace.
+ */
+ int hid_id;
+ u32 flags;
+
+ /* private: do not show up in the docs */
+ struct list_head list;
+
+ /* public: rest should show up in the docs */
+
+ /**
+	 * @hid_device_event: called whenever an event comes in from the device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+	 * Return: %0 on success to keep processing; a positive
+	 * value to change the size of the incoming buffer; a negative
+	 * error code to stop processing this event
+ *
+ * Context: Interrupt context.
+ */
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+
+ /**
+ * @hid_rdesc_fixup: called when the probe function parses the report descriptor
+ * of the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+	 * Return: %0 on success to keep processing; a positive
+	 * value to change the size of the incoming buffer; a negative
+	 * error code to stop processing this device
+ */
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+
+ /**
+ * @hid_hw_request: called whenever a hid_hw_raw_request() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``reportnum``: the report number, as in hid_hw_raw_request()
+ *
+ * ``rtype``: the report type (``HID_INPUT_REPORT``, ``HID_FEATURE_REPORT``,
+ * ``HID_OUTPUT_REPORT``)
+ *
+ * ``reqtype``: the request
+ *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0,
+	 *	       the call was emitted by the kernel itself. For hidraw,
+	 *	       ``source`` is set to the associated ``struct file *``.
+ *
+	 * Return: %0 to let hid-core keep processing the request; any other
+	 * value stops hid-core from processing that event. A positive return
+	 * value should be the number of bytes returned in the incoming buffer;
+	 * a negative error code interrupts the processing of this call.
+ */
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+
+ /**
+ * @hid_hw_output_report: called whenever a hid_hw_output_report() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0,
+	 *	       the call was emitted by the kernel itself. For hidraw,
+	 *	       ``source`` is set to the associated ``struct file *``.
+ *
+	 * Return: %0 to let hid-core keep processing the request; any other
+	 * value stops hid-core from processing that event. A positive return
+	 * value should be the number of bytes written to the device; a
+	 * negative error code interrupts the processing of this call.
+ */
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+
+ /* private: do not show up in the docs */
+ struct hid_device *hdev;
};
/* stored in each device */
@@ -124,36 +190,46 @@ struct hid_bpf {
* to this HID device
*/
u32 allocated_data;
-
- struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX]; /* attached BPF progs */
bool destroyed; /* prevents the assignment of any progs */
- spinlock_t progs_lock; /* protects RCU update of progs */
-};
-
-/* specific HID-BPF link when a program is attached to a device */
-struct hid_bpf_link {
- struct bpf_link link;
- int hid_table_index;
+ struct hid_bpf_ops *rdesc_ops;
+ struct list_head prog_list;
+ struct mutex prog_list_lock; /* protects prog_list update */
+ struct srcu_struct srcu; /* protects prog_list read-only access */
};
#ifdef CONFIG_HID_BPF
u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
- u32 *size, int interrupt);
+ u32 *size, int interrupt, u64 source, bool from_bpf);
+int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf);
int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
-void hid_bpf_device_init(struct hid_device *hid);
-u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
+int hid_bpf_device_init(struct hid_device *hid);
+const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
- u8 *data, u32 *size, int interrupt) { return data; }
+ u8 *data, u32 *size, int interrupt,
+ u64 source, bool from_bpf) { return data; }
+static inline int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf) { return 0; }
+static inline int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf) { return 0; }
static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
-static inline void hid_bpf_device_init(struct hid_device *hid) {}
-#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \
- ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
+static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; }
+static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc,
+ unsigned int *size) { return rdesc; }
#endif /* CONFIG_HID_BPF */
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index cd67f4ca5599..18fd30a288de 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -32,6 +32,7 @@ struct hidraw_list {
struct hidraw *hidraw;
struct list_head node;
struct mutex read_mutex;
+ bool revoked;
};
#ifdef CONFIG_HIDRAW
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index a3028e400a9c..0574c21ca45d 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -7,7 +7,7 @@
*/
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
@@ -33,7 +33,7 @@ static inline void kmap_flush_tlb(unsigned long addr) { }
#endif
void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
@@ -50,7 +50,7 @@ static inline void *kmap(struct page *page)
return addr;
}
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
might_sleep();
if (!PageHighMem(page))
@@ -68,18 +68,26 @@ static inline void kmap_flush_unused(void)
__kmap_flush_unused();
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return __kmap_local_page_prot(page, kmap_prot);
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
- struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ if (!PageHighMem(page))
+ return page_address(page);
+	/* If the page is in HighMem, it's not safe to kmap it. */
+ return NULL;
+}
+
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
+{
+ const struct page *page = folio_page(folio, offset / PAGE_SIZE);
return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return __kmap_local_page_prot(page, prot);
}
@@ -94,7 +102,7 @@ static inline void __kunmap_local(const void *vaddr)
kunmap_local_indexed(vaddr);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -105,7 +113,7 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return __kmap_local_page_prot(page, prot);
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
@@ -131,22 +139,17 @@ static inline void __kunmap_atomic(const void *addr)
preempt_enable();
}
-unsigned int __nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
+unsigned long __nr_free_highpages(void);
+unsigned long __totalhigh_pages(void);
-static inline unsigned int nr_free_highpages(void)
+static inline unsigned long nr_free_highpages(void)
{
return __nr_free_highpages();
}
static inline unsigned long totalhigh_pages(void)
{
- return (unsigned long)atomic_long_read(&_totalhigh_pages);
-}
-
-static inline void totalhigh_pages_add(long count)
-{
- atomic_long_add(count, &_totalhigh_pages);
+ return __totalhigh_pages();
}
static inline bool is_kmap_addr(const void *x)
@@ -170,27 +173,32 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(page_address(page));
#endif
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
return page_address(page);
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
- return page_address(&folio->page) + offset;
+ return folio_address(folio) + offset;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
}
@@ -207,7 +215,7 @@ static inline void __kunmap_local(const void *addr)
#endif
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -217,7 +225,7 @@ static inline void *kmap_atomic(struct page *page)
return page_address(page);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
return kmap_atomic(page);
}
@@ -239,8 +247,8 @@ static inline void __kunmap_atomic(const void *addr)
preempt_enable();
}
-static inline unsigned int nr_free_highpages(void) { return 0; }
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
+static inline unsigned long nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0; }
static inline bool is_kmap_addr(const void *x)
{
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 00341b56d291..abc20f9810fd 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -43,7 +43,7 @@ static inline void *kmap(struct page *page);
* Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
* pages in the low memory area.
*/
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
/**
* kmap_to_page - Get the page for a kmap'ed address
@@ -93,7 +93,7 @@ static inline void kmap_flush_unused(void);
* disabling migration in order to keep the virtual address stable across
* preemption. No caller of kmap_local_page() can rely on this side effect.
*/
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
/**
* kmap_local_folio - Map a page in this folio for temporary usage
@@ -129,7 +129,7 @@ static inline void *kmap_local_page(struct page *page);
* Context: Can be invoked from any context.
* Return: The virtual address of @offset.
*/
-static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
/**
* kmap_atomic - Atomically map a page for temporary usage - Deprecated!
@@ -176,10 +176,10 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
* kunmap_atomic(vaddr2);
* kunmap_atomic(vaddr1);
*/
-static inline void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic(const struct page *page);
/* Highmem related interfaces for management code */
-static inline unsigned int nr_free_highpages(void);
+static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
@@ -226,8 +226,8 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
{
struct folio *folio;
- folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
- if (folio)
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+ if (folio && user_alloc_needs_zeroing())
clear_user_highpage(&folio->page, vaddr);
return folio;
@@ -249,10 +249,12 @@ static inline void clear_highpage_kasan_tagged(struct page *page)
kunmap_local(kaddr);
}
-#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
-static inline void tag_clear_highpage(struct page *page)
+/* Return false to tell callers that the pages were not initialized */
+static inline bool tag_clear_highpages(struct page *page, int numpages)
{
+ return false;
}
#endif
@@ -292,12 +294,6 @@ static inline void zero_user_segment(struct page *page,
zero_user_segments(page, start, end, 0, 0);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
-{
- zero_user_segments(page, start, start + size, 0, 0);
-}
-
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
@@ -352,6 +348,9 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from,
kunmap_local(vto);
kunmap_local(vfrom);
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
+
return ret;
}
@@ -368,6 +367,9 @@ static inline int copy_mc_highpage(struct page *to, struct page *from)
kunmap_local(vto);
kunmap_local(vfrom);
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
+
return ret;
}
#else
@@ -398,6 +400,33 @@ static inline void memcpy_page(struct page *dst_page, size_t dst_off,
kunmap_local(dst);
}
+static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
+ struct folio *src_folio, size_t src_off, size_t len)
+{
+ VM_BUG_ON(dst_off + len > folio_size(dst_folio));
+ VM_BUG_ON(src_off + len > folio_size(src_folio));
+
+ do {
+ char *dst = kmap_local_folio(dst_folio, dst_off);
+ const char *src = kmap_local_folio(src_folio, src_off);
+ size_t chunk = len;
+
+ if (folio_test_highmem(dst_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(dst_off))
+ chunk = PAGE_SIZE - offset_in_page(dst_off);
+ if (folio_test_highmem(src_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(src_off))
+ chunk = PAGE_SIZE - offset_in_page(src_off);
+ memcpy(dst, src, chunk);
+ kunmap_local(src);
+ kunmap_local(dst);
+
+ dst_off += chunk;
+ src_off += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
+
static inline void memset_page(struct page *page, size_t offset, int val,
size_t len)
{
@@ -455,7 +484,7 @@ static inline void memcpy_from_folio(char *to, struct folio *folio,
const char *from = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -483,7 +512,7 @@ static inline void memcpy_to_folio(struct folio *folio, size_t offset,
char *to = kmap_local_folio(folio, offset);
size_t chunk = len;
- if (folio_test_highmem(folio) &&
+ if (folio_test_partial_kmap(folio) &&
chunk > PAGE_SIZE - offset_in_page(offset))
chunk = PAGE_SIZE - offset_in_page(offset);
memcpy(to, from, chunk);
@@ -516,7 +545,7 @@ static inline __must_check void *folio_zero_tail(struct folio *folio,
{
size_t len = folio_size(folio) - offset;
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -554,7 +583,7 @@ static inline void folio_fill_tail(struct folio *folio, size_t offset,
VM_BUG_ON(offset + len > folio_size(folio));
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
size_t max = PAGE_SIZE - offset_in_page(offset);
while (len > max) {
@@ -591,7 +620,7 @@ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
size_t offset = offset_in_folio(folio, pos);
char *from = kmap_local_folio(folio, offset);
- if (folio_test_highmem(folio)) {
+ if (folio_test_partial_kmap(folio)) {
offset = offset_in_page(offset);
len = min_t(size_t, len, PAGE_SIZE - offset);
} else
@@ -655,10 +684,4 @@ static inline void folio_release_kmap(struct folio *folio, void *addr)
kunmap_local(addr);
folio_put(folio);
}
-
-static inline void unmap_and_put_page(struct page *page, void *addr)
-{
- folio_release_kmap(page_folio(page), addr);
-}
-
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 9d7754ad5e9b..ca1ec437a3ca 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -97,11 +97,18 @@
/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR 1
+#define QM_DEV_ALG_MAX_LEN 256
+
+#define QM_MIG_REGION_SEL 0x100198
+#define QM_MIG_REGION_EN BIT(0)
+
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce"
+#define QM_ECC_MBIT BIT(2)
+
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
@@ -122,6 +129,8 @@ enum qm_hw_ver {
QM_HW_V1 = 0x20,
QM_HW_V2 = 0x21,
QM_HW_V3 = 0x30,
+ QM_HW_V4 = 0x50,
+ QM_HW_V5 = 0x51,
};
enum qm_fun_type {
@@ -156,6 +165,7 @@ enum qm_cap_bits {
QM_SUPPORT_MB_COMMAND,
QM_SUPPORT_SVA_PREFETCH,
QM_SUPPORT_RPM,
+ QM_SUPPORT_DAE,
};
struct qm_dev_alg {
@@ -229,19 +239,28 @@ struct hisi_qm_status {
struct hisi_qm;
-struct hisi_qm_err_info {
- char *acpi_rst;
- u32 msi_wr_port;
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
+struct hisi_qm_err_mask {
u32 ecc_2bits_mask;
- u32 qm_shutdown_mask;
- u32 dev_shutdown_mask;
- u32 qm_reset_mask;
- u32 dev_reset_mask;
+ u32 shutdown_mask;
+ u32 reset_mask;
u32 ce;
u32 nfe;
u32 fe;
};
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ struct hisi_qm_err_mask qm_err;
+ struct hisi_qm_err_mask dev_err;
+};
+
struct hisi_qm_err_status {
u32 is_qm_ecc_mbit;
u32 is_dev_ecc_mbit;
@@ -257,9 +276,13 @@ struct hisi_qm_err_ini {
void (*close_axi_master_ooo)(struct hisi_qm *qm);
void (*open_sva_prefetch)(struct hisi_qm *qm);
void (*close_sva_prefetch)(struct hisi_qm *qm);
- void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
void (*show_last_dfx_regs)(struct hisi_qm *qm);
void (*err_info_init)(struct hisi_qm *qm);
+ enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ bool (*dev_is_abnormal)(struct hisi_qm *qm);
+ int (*set_priv_status)(struct hisi_qm *qm);
+ void (*disable_axi_error)(struct hisi_qm *qm);
+ void (*enable_axi_error)(struct hisi_qm *qm);
};
struct hisi_qm_cap_info {
@@ -274,13 +297,25 @@ struct hisi_qm_cap_info {
u32 v3_val;
};
+struct hisi_qm_cap_query_info {
+ u32 type;
+ const char *name;
+ u32 offset;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
struct hisi_qm_cap_record {
u32 type;
+ const char *name;
u32 cap_val;
};
struct hisi_qm_cap_tables {
+ u32 qm_cap_size;
struct hisi_qm_cap_record *qm_cap_table;
+ u32 dev_cap_size;
struct hisi_qm_cap_record *dev_cap_table;
};
@@ -374,6 +409,8 @@ struct hisi_qm {
struct mutex mailbox_lock;
+ struct mutex ifc_lock;
+
const struct hisi_qm_hw_ops *ops;
struct qm_debug debug;
@@ -436,37 +473,6 @@ struct hisi_qp {
struct uacce_queue *uacce_q;
};
-static inline int q_num_set(const char *val, const struct kernel_param *kp,
- unsigned int device)
-{
- struct pci_dev *pdev;
- u32 n, q_num;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL);
- if (!pdev) {
- q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
- pr_info("No device found currently, suppose queue number is %u\n",
- q_num);
- } else {
- if (pdev->revision == QM_HW_V1)
- q_num = QM_QNUM_V1;
- else
- q_num = QM_QNUM_V2;
-
- pci_dev_put(pdev);
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret || n < QM_MIN_QNUM || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
-}
-
static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
u32 n;
@@ -526,6 +532,8 @@ static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_
mutex_unlock(&qm_list->lock);
}
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device);
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -559,9 +567,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
+ u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
@@ -583,6 +591,9 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
const struct hisi_qm_cap_info *info_table,
u32 index, bool is_read);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read);
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
u32 dev_algs_size);
diff --git a/include/linux/hmm-dma.h b/include/linux/hmm-dma.h
new file mode 100644
index 000000000000..f58b9fc71999
--- /dev/null
+++ b/include/linux/hmm-dma.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+#ifndef LINUX_HMM_DMA_H
+#define LINUX_HMM_DMA_H
+
+#include <linux/dma-mapping.h>
+
+struct dma_iova_state;
+struct pci_p2pdma_map_state;
+
+/*
+ * struct hmm_dma_map - array of PFNs and DMA addresses
+ *
+ * @state: DMA IOVA state
+ * @pfn_list: array of PFNs
+ * @dma_list: array of DMA addresses
+ * @dma_entry_size: size of each DMA entry in the array
+ */
+struct hmm_dma_map {
+ struct dma_iova_state state;
+ unsigned long *pfn_list;
+ dma_addr_t *dma_list;
+ size_t dma_entry_size;
+};
+
+int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
+ size_t nr_entries, size_t dma_entry_size);
+void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map);
+dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
+ size_t idx,
+ struct pci_p2pdma_map_state *p2pdma_state);
+bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx);
+#endif /* LINUX_HMM_DMA_H */
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 126a36571667..db75ffc949a7 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -23,6 +23,10 @@ struct mmu_interval_notifier;
* HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
* HMM_PFN_ERROR - accessing the pfn is impossible and the device should
* fail. ie poisoned memory, special pages, no vma, etc
+ * HMM_PFN_P2PDMA - P2P page
+ * HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
+ * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
+ *                      to mark that the page is already DMA mapped
*
* On input:
* 0 - Return the current state of the page, do not fault it.
@@ -36,13 +40,21 @@ enum hmm_pfn_flags {
HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
- HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
+ /*
+	 * Sticky flags, carried from input to output;
+	 * don't forget to update HMM_PFN_INOUT_FLAGS
+ */
+ HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 4),
+ HMM_PFN_P2PDMA = 1UL << (BITS_PER_LONG - 5),
+ HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),
+
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 11),
/* Input flags */
HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
- HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
+ HMM_PFN_FLAGS = ~((1UL << HMM_PFN_ORDER_SHIFT) - 1),
};
/*
@@ -58,6 +70,14 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
}
/*
+ * hmm_pfn_to_phys() - return the physical address pointed to by a device entry
+ */
+static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
+{
+ return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
* hmm_pfn_to_map_order() - return the CPU mapping size order
*
* This is optionally useful to optimize processing of the pfn result
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 9c8119ed13a4..9fa9c30a34e6 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -14,12 +14,17 @@
enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_NVJPG1 = 0x7,
+ HOST1X_CLASS_NVENC = 0x21,
+ HOST1X_CLASS_NVENC1 = 0x22,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVJPG = 0xC0,
HOST1X_CLASS_NVDEC = 0xF0,
HOST1X_CLASS_NVDEC1 = 0xF5,
+ HOST1X_CLASS_OFA = 0xF8,
};
struct host1x;
@@ -466,6 +471,7 @@ struct host1x_memory_context {
refcount_t ref;
struct pid *owner;
+ struct device_dma_parameters dma_parms;
struct device dev;
u64 dma_mask;
u32 stream_id;
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
index 72462737a6db..c928cb432680 100644
--- a/include/linux/host1x_context_bus.h
+++ b/include/linux/host1x_context_bus.h
@@ -9,7 +9,7 @@
#include <linux/device.h>
#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
-extern struct bus_type host1x_context_device_bus_type;
+extern const struct bus_type host1x_context_device_bus_type;
#endif
#endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index aa1e65ccb615..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -154,14 +154,11 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
return ktime_to_ns(timer->node.expires);
}
-static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
-{
- return ktime_sub(timer->node.expires, timer->base->get_time());
-}
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
- return timer->base->get_time();
+ return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
@@ -200,8 +197,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
- return __hrtimer_expires_remaining_adjusted(timer,
- timer->base->get_time());
+ return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}
#ifdef CONFIG_TIMERFD
@@ -223,37 +219,25 @@ static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
}
#endif
+static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
+{
+ return HRTIMER_NORESTART;
+}
+
/* Exported timer functions: */
/* Initialize timers: */
-extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
- enum hrtimer_mode mode);
+extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_on_stack(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode);
-
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-static inline void hrtimer_init_on_stack(struct hrtimer *timer,
- clockid_t which_clock,
- enum hrtimer_mode mode)
-{
- hrtimer_init(timer, which_clock, mode);
-}
-
-static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode)
-{
- hrtimer_init_sleeper(sl, clock_id, mode);
-}
-
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
@@ -337,6 +321,29 @@ static inline int hrtimer_callback_running(struct hrtimer *timer)
return timer->base->running == timer;
}
+/**
+ * hrtimer_update_function - Update the timer's callback function
+ * @timer: Timer to update
+ * @function: New callback function
+ *
+ * This is only safe to call while the timer is not enqueued. It may be called
+ * from within the callback function, provided the timer is not enqueued at the
+ * same time (see the comments above HRTIMER_STATE_ENQUEUED).
+ */
+static inline void hrtimer_update_function(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *))
+{
+#ifdef CONFIG_PROVE_LOCKING
+ guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+
+ if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+ return;
+
+ if (WARN_ON_ONCE(!function))
+ return;
+#endif
+ ACCESS_PRIVATE(timer, function) = function;
+}
+
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
@@ -352,7 +359,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
- return hrtimer_forward(timer, timer->base->get_time(), interval);
+ return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
/* Precise sleep: */
@@ -379,6 +386,7 @@ extern void __init hrtimers_init(void);
extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_cpu_dying(unsigned int cpu);
#else
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index c3b4b7ed7c16..aa49ffa130e5 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -41,7 +41,6 @@
* @seq: seqcount around __run_hrtimer
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
@@ -51,7 +50,6 @@ struct hrtimer_clock_base {
seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
- ktime_t (*get_time)(void);
ktime_t offset;
} __hrtimer_clock_base_align;
@@ -125,6 +123,7 @@ struct hrtimer_cpu_base {
ktime_t softirq_expires_next;
struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ call_single_data_t csd;
} ____cacheline_aligned;
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
index ad66a3081735..8fbbb6bdf7a1 100644
--- a/include/linux/hrtimer_types.h
+++ b/include/linux/hrtimer_types.h
@@ -34,12 +34,12 @@ enum hrtimer_restart {
* @is_hard: Set if hrtimer will be expired in hard interrupt context
* even on RT.
*
- * The hrtimer structure must be initialized by hrtimer_init()
+ * The hrtimer structure must be initialized by hrtimer_setup()
*/
struct hrtimer {
struct timerqueue_node node;
ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
+ enum hrtimer_restart (*__private function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8d3ec116e29..ae7f21aad0ac 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -2,16 +2,16 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-#include <linux/sched/coredump.h>
#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
+#include <linux/kobject.h>
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-void huge_pmd_set_accessed(struct vm_fault *vmf);
+bool huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma);
@@ -37,8 +37,14 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_UNSUPPORTED,
@@ -63,6 +69,7 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
+extern struct kobj_attribute thpsize_shmem_enabled_attr;
/*
* Mask of all large folio orders supported for anonymous THP; all orders up to
@@ -72,21 +79,32 @@ extern struct kobj_attribute shmem_enabled_attr;
#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
/*
- * Mask of all large folio orders supported for file THP.
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
+ * them. The same applies to PFNMAPs, where there is neither a page* nor a
+ * pagecache.
*/
-#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_SPECIAL \
+ (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT \
+ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
/*
* Mask of all large folio orders supported for THP.
*/
-#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+#define THP_ORDERS_ALL \
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
+
+enum tva_type {
+ TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
+ TVA_PAGEFAULT, /* Serving a page fault. */
+ TVA_KHUGEPAGED, /* Khugepaged collapse. */
+ TVA_FORCED_COLLAPSE, /* Forced collapse (e.g. MADV_COLLAPSE). */
+};
-#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
-#define TVA_IN_PF (1 << 1) /* Page fault handler */
-#define TVA_ENFORCE_SYSFS (1 << 2) /* Obey sysfs configuration */
+#define thp_vma_allowable_order(vma, vm_flags, type, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))
-#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
+#define split_folio(f) split_folio_to_list(f, NULL)
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
@@ -106,6 +124,57 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_ZSWPOUT,
+ MTHP_STAT_SWPIN,
+ MTHP_STAT_SWPIN_FALLBACK,
+ MTHP_STAT_SWPIN_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
+
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
+
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long transparent_hugepage_flags;
@@ -126,18 +195,6 @@ static inline bool hugepage_global_always(void)
(1<<TRANSPARENT_HUGEPAGE_FLAG);
}
-static inline bool hugepage_flags_enabled(void)
-{
- /*
- * We cover both the anon and the file-backed case here; we must return
- * true if globally enabled, even when all anon sizes are set to never.
- * So we don't need to look at huge_anon_orders_inherit.
- */
- return hugepage_global_enabled() ||
- huge_anon_orders_always ||
- huge_anon_orders_madvise;
-}
-
static inline int highest_order(unsigned long orders)
{
return fls_long(orders) - 1;
@@ -208,29 +265,16 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
return orders;
}
-static inline bool file_thp_enabled(struct vm_area_struct *vma)
-{
- struct inode *inode;
-
- if (!vma->vm_file)
- return false;
-
- inode = vma->vm_file->f_inode;
-
- return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
-}
-
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
- unsigned long tva_flags,
+ vm_flags_t vm_flags,
+ enum tva_type type,
unsigned long orders);
/**
* thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
* @vma: the vm area to check
* @vm_flags: use these vm_flags instead of vma->vm_flags
- * @tva_flags: Which TVA flags to honour
+ * @type: TVA type
* @orders: bitfield of all orders to consider
*
* Calculates the intersection of the requested hugepage orders and the allowed
@@ -243,12 +287,15 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
*/
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
- unsigned long tva_flags,
+ vm_flags_t vm_flags,
+ enum tva_type type,
unsigned long orders)
{
- /* Optimization to check if required orders are enabled early. */
- if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
+ /*
+ * Optimization to check if required orders are enabled early. Only
+ * forced collapse ignores sysfs configs.
+ */
+ if (type != TVA_FORCED_COLLAPSE && vma_is_anonymous(vma)) {
unsigned long mask = READ_ONCE(huge_anon_orders_always);
if (vm_flags & VM_HUGEPAGE)
@@ -262,106 +309,200 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
return 0;
}
- return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
+ return __thp_vma_allowable_orders(vma, vm_flags, type, orders);
}
-enum mthp_stat_item {
- MTHP_STAT_ANON_FAULT_ALLOC,
- MTHP_STAT_ANON_FAULT_FALLBACK,
- MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
- MTHP_STAT_ANON_SWPOUT,
- MTHP_STAT_ANON_SWPOUT_FALLBACK,
- __MTHP_STAT_COUNT
-};
-
-struct mthp_stat {
- unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+struct thpsize {
+ struct kobject kobj;
+ struct list_head node;
+ int order;
};
-DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
-
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
-{
- if (order <= 0 || order > PMD_ORDER)
- return;
-
- this_cpu_inc(mthp_stats.stats[order][item]);
-}
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+/*
+ * Check whether THPs are explicitly disabled for this VMA, for example,
+ * through madvise or prctl.
+ */
+static inline bool vma_thp_disabled(struct vm_area_struct *vma,
+ vm_flags_t vm_flags, bool forced_collapse)
+{
+ /* Are THPs disabled for this VMA? */
+ if (vm_flags & VM_NOHUGEPAGE)
+ return true;
+ /* Are THPs disabled for all VMAs in the whole process? */
+ if (mm_flags_test(MMF_DISABLE_THP_COMPLETELY, vma->vm_mm))
+ return true;
+ /*
+ * Are THPs disabled only for VMAs where we didn't get an explicit
+	 * advice to use them?
+ */
+ if (vm_flags & VM_HUGEPAGE)
+ return false;
+ /*
+	 * Forcing a collapse (e.g., madv_collapse) is clear advice to
+	 * use THPs.
+ */
+ if (forced_collapse)
+ return false;
+ return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
+}
+
+static inline bool thp_disabled_by_hw(void)
+{
+	/* The hardware/firmware has marked hugepage support as disabled. */
+ return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
+}
+
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
-bool can_split_folio(struct folio *folio, int *pextra_pins);
-int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+enum split_type {
+ SPLIT_TYPE_UNIFORM,
+ SPLIT_TYPE_NON_UNIFORM,
+};
+
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int folio_split_unmapped(struct folio *folio, unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+ enum split_type split_type, bool warns);
+int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
+ struct list_head *list);
+
+static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
+{
+ return __split_huge_page_to_list_to_order(page, list, new_order);
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ return split_huge_page_to_list_to_order(page, NULL, new_order);
+}
+
+/**
+ * try_folio_split_to_order() - try to split a @folio at @page to @new_order
+ * using a non-uniform split.
+ * @folio: folio to be split
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
+ *
+ * Try to split @folio at @page to @new_order using a non-uniform split; if
+ * a non-uniform split is not supported, fall back to a uniform split.
+ * After-split folios are put back on the LRU list. Use min_order_for_split()
+ * to get the lower bound of @new_order.
+ *
+ * Return: 0 - split is successful, otherwise split failed.
+ */
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ return split_huge_page_to_order(&folio->page, new_order);
+ return folio_split(folio, new_order, page, NULL);
+}
static inline int split_huge_page(struct page *page)
{
return split_huge_page_to_list_to_order(page, NULL, 0);
}
-void deferred_split_folio(struct folio *folio);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg);
+#endif
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct folio *folio);
+ unsigned long address, bool freeze);
+
+/**
+ * pmd_is_huge() - Is this PMD either a huge PMD entry or a software leaf entry?
+ * @pmd: The PMD to check.
+ *
+ * A huge PMD entry is a non-empty entry which is either present and marked
+ * huge, or a software leaf entry. This check can be performed without the
+ * appropriate locks held, in which case the condition should be rechecked
+ * after they are acquired.
+ *
+ * Returns: true if this PMD is huge, false otherwise.
+ */
+static inline bool pmd_is_huge(pmd_t pmd)
+{
+ if (pmd_present(pmd)) {
+ return pmd_trans_huge(pmd);
+ } else if (!pmd_none(pmd)) {
+ /*
+ * Non-present PMDs must be valid huge non-present entries. We
+ * cannot assert that here due to header dependency issues.
+ */
+ return true;
+ }
+
+ return false;
+}
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
- || pmd_devmap(*____pmd)) \
+ if (pmd_is_huge(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false, NULL); \
+ false); \
} while (0)
-
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct folio *folio);
+ bool freeze);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
- if (pud_trans_huge(*____pud) \
- || pud_devmap(*____pud)) \
+ if (pud_trans_huge(*____pud)) \
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
int advice);
-int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end);
+int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, long adjust_next);
+ unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
-static inline int is_swap_pmd(pmd_t pmd)
-{
- return !pmd_none(pmd) && !pmd_present(pmd);
-}
-
/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (pmd_is_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
- else
- return NULL;
+
+ return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
@@ -370,45 +511,64 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
/**
* folio_test_pmd_mappable - Can we map this folio with a PMD?
* @folio: The folio to test
+ *
+ * Return: true if @folio can be mapped with a PMD, false otherwise.
*/
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
return folio_order(folio) >= HPAGE_PMD_ORDER;
}
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);
+
extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_folio(const struct folio *folio)
{
+ VM_WARN_ON_ONCE(!folio);
+
return READ_ONCE(huge_zero_folio) == folio;
}
-static inline bool is_huge_zero_pmd(pmd_t pmd)
+static inline bool is_huge_zero_pfn(unsigned long pfn)
{
- return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}
-static inline bool is_huge_zero_pud(pud_t pud)
+static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return false;
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+static inline struct folio *get_persistent_huge_zero_folio(void)
+{
+ if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return NULL;
+
+ if (unlikely(!huge_zero_folio))
+ return NULL;
+
+ return huge_zero_folio;
+}
static inline bool thp_migration_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
+void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, bool freeze);
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+ struct vm_area_struct *vma, unsigned long haddr);
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline bool folio_test_pmd_mappable(struct folio *folio)
@@ -429,8 +589,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
- unsigned long tva_flags,
+ vm_flags_t vm_flags,
+ enum tva_type type,
unsigned long orders)
{
return 0;
@@ -449,7 +609,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
}
static inline bool
-can_split_folio(struct folio *folio, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
return false;
}
@@ -457,33 +617,71 @@ static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
}
static inline int split_huge_page(struct page *page)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+
+static inline int min_order_for_split(struct folio *folio)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
}
-static inline void deferred_split_folio(struct folio *folio) {}
+
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
+static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct folio *folio) {}
+ unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct folio *folio) {}
+ unsigned long address, bool freeze) {}
+static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ bool freeze) {}
+
+static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp,
+ struct folio *folio)
+{
+ return false;
+}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+ vm_flags_t *vm_flags, int advice)
{
return -EINVAL;
}
static inline int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end)
+ unsigned long start,
+ unsigned long end, bool *lock_dropped)
{
return -EINVAL;
}
@@ -491,12 +689,8 @@ static inline int madvise_collapse(struct vm_area_struct *vma,
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
-{
-}
-static inline int is_swap_pmd(pmd_t pmd)
+ struct vm_area_struct *next)
{
- return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
@@ -514,17 +708,22 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
return 0;
}
+static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
+{
+ return 0;
+}
+
static inline bool is_huge_zero_folio(const struct folio *folio)
{
return false;
}
-static inline bool is_huge_zero_pmd(pmd_t pmd)
+static inline bool is_huge_zero_pfn(unsigned long pfn)
{
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
+static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return false;
}
@@ -534,13 +733,40 @@ static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
return;
}
-static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
+static inline bool thp_migration_supported(void)
+{
+ return false;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return 0;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+ return 0;
+}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
+{
+}
+
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
{
return NULL;
}
-static inline bool thp_migration_supported(void)
+static inline bool pmd_is_huge(pmd_t pmd)
{
return false;
}
@@ -557,7 +783,26 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
-#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
-#define split_folio(f) split_folio_to_order(f, 0)
+/**
+ * largest_zero_folio - Get the largest zero size folio available
+ *
+ * This function shall be used when mm_get_huge_zero_folio() cannot be
+ * used as there is no appropriate mm lifetime to tie the huge zero folio
+ * from the caller.
+ *
+ * Deduce the size of the folio with folio_size instead of assuming the
+ * folio size.
+ *
+ * Return: pointer to PMD sized zero folio if CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
+ * is enabled or a single page sized zero folio
+ */
+static inline struct folio *largest_zero_folio(void)
+{
+ struct folio *folio = get_persistent_huge_zero_folio();
+
+ if (folio)
+ return folio;
+ return page_folio(ZERO_PAGE(0));
+}
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2b3c3a404769..019a1c5281e4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,18 +14,13 @@
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>
+#include <linux/nodemask.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;
-#ifndef CONFIG_ARCH_HAS_HUGEPD
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-#endif
-
void free_huge_folio(struct folio *folio);
#ifdef CONFIG_HUGETLB_PAGE
@@ -133,16 +128,13 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *,
- zap_flags_t);
+ unsigned long start, unsigned long end,
+ struct folio *, zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page, zap_flags_t zap_flags);
+ struct folio *, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
@@ -157,16 +149,15 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
uffd_flags_t flags,
struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
-bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
- struct vm_area_struct *vma,
- vm_flags_t vm_flags);
+long hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ struct vm_area_desc *desc, vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_hugetlb(struct folio *folio, struct list_head *list);
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
-void folio_putback_active_hugetlb(struct folio *folio);
+void folio_putback_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
@@ -180,9 +171,14 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
-extern int sysctl_hugetlb_shm_group;
+extern int sysctl_hugetlb_shm_group __read_mostly;
extern struct list_head huge_boot_pages[MAX_NUMNODES];
+void hugetlb_bootmem_alloc(void);
+bool hugetlb_bootmem_allocated(void);
+extern nodemask_t hugetlb_bootmem_nodes;
+void hugetlb_bootmem_set_nodes(void);
+
/* arch callbacks */
#ifndef CONFIG_HIGHPTE
@@ -278,9 +274,10 @@ void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
-bool is_hugetlb_entry_migration(pte_t pte);
-bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
+int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -360,12 +357,6 @@ static inline void hugetlb_show_meminfo_node(int nid)
{
}
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- return -EINVAL;
-}
-
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}
@@ -397,13 +388,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
return 0;
}
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- BUG();
-}
-
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
@@ -423,7 +407,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
+static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
@@ -439,7 +423,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
return 0;
}
-static inline void folio_putback_active_hugetlb(struct folio *folio)
+static inline void folio_putback_hugetlb(struct folio *folio)
{
}
@@ -458,7 +442,7 @@ static inline long hugetlb_change_protection(
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page,
+ unsigned long end, struct folio *folio,
zap_flags_t zap_flags)
{
BUG();
@@ -474,6 +458,17 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
+
+static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
#ifndef pgd_write
@@ -555,16 +550,10 @@ static inline struct hstate *hstate_inode(struct inode *i)
}
#endif /* !CONFIG_HUGETLBFS */
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
-
unsigned long
-generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
/*
 * hugetlb page specific state flags. These flags are located in page.private
@@ -603,6 +592,7 @@ enum hugetlb_page_flags {
HPG_freed,
HPG_vmemmap_optimized,
HPG_raw_hwp_unreliable,
+ HPG_cma,
__NR_HPAGEFLAGS,
};
@@ -616,47 +606,35 @@ static __always_inline \
bool folio_test_hugetlb_##flname(struct folio *folio) \
{ void *private = &folio->private; \
return test_bit(HPG_##flname, private); \
- } \
-static inline int HPage##uname(struct page *page) \
- { return test_bit(HPG_##flname, &(page->private)); }
+ }
#define SETHPAGEFLAG(uname, flname) \
static __always_inline \
void folio_set_hugetlb_##flname(struct folio *folio) \
{ void *private = &folio->private; \
set_bit(HPG_##flname, private); \
- } \
-static inline void SetHPage##uname(struct page *page) \
- { set_bit(HPG_##flname, &(page->private)); }
+ }
#define CLEARHPAGEFLAG(uname, flname) \
static __always_inline \
void folio_clear_hugetlb_##flname(struct folio *folio) \
{ void *private = &folio->private; \
clear_bit(HPG_##flname, private); \
- } \
-static inline void ClearHPage##uname(struct page *page) \
- { clear_bit(HPG_##flname, &(page->private)); }
+ }
#else
#define TESTHPAGEFLAG(uname, flname) \
static inline bool \
folio_test_hugetlb_##flname(struct folio *folio) \
- { return 0; } \
-static inline int HPage##uname(struct page *page) \
{ return 0; }
#define SETHPAGEFLAG(uname, flname) \
static inline void \
folio_set_hugetlb_##flname(struct folio *folio) \
- { } \
-static inline void SetHPage##uname(struct page *page) \
{ }
#define CLEARHPAGEFLAG(uname, flname) \
static inline void \
folio_clear_hugetlb_##flname(struct folio *folio) \
- { } \
-static inline void ClearHPage##uname(struct page *page) \
{ }
#endif
@@ -674,6 +652,7 @@ HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Cma, cma)
#ifdef CONFIG_HUGETLB_PAGE
@@ -681,6 +660,7 @@ HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
/* Defines one hugetlb page size */
struct hstate {
struct mutex resize_lock;
+ struct lock_class_key resize_key;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
@@ -698,25 +678,35 @@ struct hstate {
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_CGROUP_HUGETLB
- /* cgroup control files */
- struct cftype cgroup_files_dfl[8];
- struct cftype cgroup_files_legacy[10];
-#endif
char name[HSTATE_NAME_LEN];
};
+struct cma;
+
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
+ unsigned long flags;
+ struct cma *cma;
};
-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
+#define HUGE_BOOTMEM_HVO 0x0001
+#define HUGE_BOOTMEM_ZONES_VALID 0x0002
+#define HUGE_BOOTMEM_CMA 0x0004
+
+bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
+
+int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
+int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
+void wait_for_freed_hugetlb_folios(void);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
+ unsigned long addr, bool cow_from_owner);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask,
bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -740,6 +730,11 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+ return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return folio->_hugetlb_subpool;
@@ -796,9 +791,14 @@ static inline unsigned huge_page_shift(struct hstate *h)
return h->order + PAGE_SHIFT;
}
+static inline bool order_is_gigantic(unsigned int order)
+{
+ return order > MAX_PAGE_ORDER;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_PAGE_ORDER;
+ return order_is_gigantic(huge_page_order(h));
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
@@ -841,6 +841,17 @@ static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
}
#endif
+#ifndef arch_has_huge_bootmem_alloc
+/*
+ * Some architectures do their own bootmem allocation, so they can't use
+ * early CMA allocation.
+ */
+static inline bool arch_has_huge_bootmem_alloc(void)
+{
+ return false;
+}
+#endif
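
A hedged sketch of the override pattern (hypothetical architecture code, not part of this patch): an architecture that allocates hugetlb bootmem itself would define the macro in its own header, so the generic fallback above is never used.

/* Hypothetical arch override, for illustration only. */
#define arch_has_huge_bootmem_alloc arch_has_huge_bootmem_alloc
static inline bool arch_has_huge_bootmem_alloc(void)
{
	return true;	/* bootmem hugepages come from the arch allocator */
}
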
+
static inline struct hstate *folio_hstate(struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
@@ -921,10 +932,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_movable_supported(h))
- return GFP_HIGHUSER_MOVABLE;
- else
- return GFP_HIGHUSER;
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
@@ -966,10 +978,37 @@ static inline bool htlb_allow_alloc_fallback(int reason)
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
- if (huge_page_size(h) == PMD_SIZE)
+ const unsigned long size = huge_page_size(h);
+
+ VM_WARN_ON(size == PAGE_SIZE);
+
+ /*
+ * hugetlb must use the exact same PT locks as core-mm page table
+ * walkers would. When modifying a PTE table, hugetlb must take the
+ * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
+ * PT lock etc.
+ *
+ * The expectation is that any hugetlb folio smaller than a PMD is
+ * always mapped into a single PTE table and that any hugetlb folio
+ * smaller than a PUD (but at least as big as a PMD) is always mapped
+ * into a single PMD table.
+ *
+ * If that does not hold for an architecture, then that architecture
+ * must disable split PT locks such that all *_lockptr() functions
+ * will give us the same result: the per-MM PT lock.
+ *
+ * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
+ * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
+ * and core-mm would use pmd_lockptr(). However, in such configurations
+ * split PMD locks are disabled -- they don't make sense on a single
+ * PGDIR page table -- and the end result is the same.
+ */
+ if (size >= PUD_SIZE)
+ return pud_lockptr(mm, (pud_t *) pte);
+ else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
return pmd_lockptr(mm, (pmd_t *) pte);
- VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
- return &mm->page_table_lock;
+ /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
+ return ptep_lockptr(mm, pte);
}
#ifndef hugepages_supported
@@ -1003,7 +1042,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
#endif
@@ -1029,9 +1070,19 @@ void hugetlb_unregister_node(struct node *node);
*/
bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return PAGE_MASK & ~huge_page_mask(hstate_file(file));
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return 0;
+}
+
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return NULL;
@@ -1043,15 +1094,32 @@ static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
return NULL;
}
-static inline int isolate_or_dissolve_huge_page(struct page *page,
+static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
struct list_head *list)
{
return -ENOMEM;
}
+static inline int replace_free_hugepage_folios(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
+static inline void wait_for_freed_hugetlb_folios(void)
+{
+}
+
static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
- int avoid_reserve)
+ bool cow_from_owner)
+{
+ return NULL;
+}
+
+static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
@@ -1226,6 +1294,15 @@ static inline bool hugetlbfs_pagecache_present(
{
return false;
}
+
+static inline void hugetlb_bootmem_alloc(void)
+{
+}
+
+static inline bool hugetlb_bootmem_allocated(void)
+{
+ return false;
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -1246,7 +1323,7 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif
-#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return page_count(virt_to_page(pte)) > 1;
@@ -1282,8 +1359,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
-#if defined(CONFIG_HUGETLB_PAGE) && \
- defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
/*
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 0660a03d37d9..a27aa0162918 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -2,22 +2,27 @@
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H
-#ifdef CONFIG_HUGETLB_PAGE
-
#include <linux/mm.h>
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+#ifdef CONFIG_HUGETLB_PAGE
+
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
- return !!(vma->vm_flags & VM_HUGETLB);
+ return !!(vm_flags & VM_HUGETLB);
}
#else
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
return false;
}
#endif
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+ return is_vm_hugetlb_flags(vma->vm_flags);
+}
+
#endif
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
new file mode 100644
index 000000000000..c4403eeb7144
--- /dev/null
+++ b/include/linux/hung_task.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Detect Hung Task: detecting tasks stuck in D state
+ *
+ * Copyright (C) 2025 Tongcheng Travel (www.ly.com)
+ * Author: Lance Yang <mingzhe.yang@ly.com>
+ */
+#ifndef __LINUX_HUNG_TASK_H
+#define __LINUX_HUNG_TASK_H
+
+#include <linux/bug.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+/*
+ * @blocker: Combines lock address and blocking type.
+ *
+ * Since lock pointers are at least 4-byte aligned (32-bit) or 8-byte
+ * aligned (64-bit), the two least significant bits (LSBs) of the pointer
+ * are always zero, so we can use these bits to encode the specific
+ * blocking type.
+ *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
+ * Type encoding:
+ * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
+ * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
+ * 10 - Blocked on rw-semaphore as READER (BLOCKER_TYPE_RWSEM_READER)
+ * 11 - Blocked on rw-semaphore as WRITER (BLOCKER_TYPE_RWSEM_WRITER)
+ */
+#define BLOCKER_TYPE_MUTEX 0x00UL
+#define BLOCKER_TYPE_SEM 0x01UL
+#define BLOCKER_TYPE_RWSEM_READER 0x02UL
+#define BLOCKER_TYPE_RWSEM_WRITER 0x03UL
+
+#define BLOCKER_TYPE_MASK 0x03UL
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+ unsigned long lock_ptr = (unsigned long)lock;
+
+ WARN_ON_ONCE(!lock_ptr);
+ WARN_ON_ONCE(READ_ONCE(current->blocker));
+
+ /*
+	 * If the lock pointer has any BLOCKER_TYPE_MASK bits set (i.e. it is
+	 * not aligned enough to carry a type tag), return without writing
+	 * anything.
+ */
+ if (lock_ptr & BLOCKER_TYPE_MASK)
+ return;
+
+ WRITE_ONCE(current->blocker, lock_ptr | type);
+}
+
+static inline void hung_task_clear_blocker(void)
+{
+ WRITE_ONCE(current->blocker, 0UL);
+}
+
+/*
+ * hung_task_get_blocker_type - Extracts blocker type from encoded blocker
+ * address.
+ *
+ * @blocker: Blocker pointer with encoded type (via LSB bits)
+ *
+ * Returns: BLOCKER_TYPE_MUTEX, BLOCKER_TYPE_SEM, etc.
+ */
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return blocker & BLOCKER_TYPE_MASK;
+}
+
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return (void *)(blocker & ~BLOCKER_TYPE_MASK);
+}
+#else
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+}
+static inline void hung_task_clear_blocker(void)
+{
+}
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ return 0UL;
+}
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_HUNG_TASK_H */
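
A minimal usage sketch (hypothetical callers, not from this patch): the locking side publishes the tagged pointer around the blocking call, and the detector side strips the two tag bits to recover both the blocker type and the lock address.

/* Hypothetical example, for illustration only. */
static void my_down(struct semaphore *sem)
{
	hung_task_set_blocker(sem, BLOCKER_TYPE_SEM);
	down(sem);			/* may block in D state */
	hung_task_clear_blocker();
}

static void my_report(struct task_struct *task)
{
	unsigned long blocker = READ_ONCE(task->blocker);

	if (blocker)
		pr_info("blocked on %p (type %lu)\n",
			hung_task_blocker_to_lock(blocker),
			hung_task_get_blocker_type(blocker));
}
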
diff --git a/include/linux/hw_bitfield.h b/include/linux/hw_bitfield.h
new file mode 100644
index 000000000000..df202e167ce4
--- /dev/null
+++ b/include/linux/hw_bitfield.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025, Collabora Ltd.
+ */
+
+#ifndef _LINUX_HW_BITFIELD_H
+#define _LINUX_HW_BITFIELD_H
+
+#include <linux/bitfield.h>
+#include <linux/build_bug.h>
+#include <linux/limits.h>
+
+/**
+ * FIELD_PREP_WM16() - prepare a bitfield element with a mask in the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16() masks and shifts up the value, then bitwise ORs the
+ * result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ */
+#define FIELD_PREP_WM16(_mask, _val) \
+ ({ \
+ typeof(_val) __val = _val; \
+ typeof(_mask) __mask = _mask; \
+ __BF_FIELD_CHECK(__mask, ((u16)0U), __val, \
+ "HWORD_UPDATE: "); \
+ (((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) | \
+ ((__mask) << 16); \
+ })
+
+/**
+ * FIELD_PREP_WM16_CONST() - prepare a constant bitfield element with a mask in
+ * the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16_CONST() masks and shifts up the value, then bitwise ORs
+ * the result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ *
+ * Unlike FIELD_PREP_WM16(), this is a constant expression and can therefore
+ * be used in initializers. Error checking is less comfortable for this
+ * version.
+ */
+#define FIELD_PREP_WM16_CONST(_mask, _val) \
+ ( \
+ FIELD_PREP_CONST(_mask, _val) | \
+ (BUILD_BUG_ON_ZERO(const_true((u64)(_mask) > U16_MAX)) + \
+ ((_mask) << 16)) \
+ )
+
+
+#endif /* _LINUX_HW_BITFIELD_H */
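
A worked sketch of the write-mask pattern (register layout made up for illustration): with a field mask of GENMASK(3, 0), FIELD_PREP_WM16(GENMASK(3, 0), 0x5) evaluates to 0x000f0005 — the value in the low half plus the same mask replicated in the write-enable half — so the hardware updates only those four bits.

/* Hypothetical register layout, for illustration only. */
#define MYREG_MODE	GENMASK(3, 0)	/* write-enable bits land at 19:16 */

/* Constant form, usable in an initializer: clears the field. */
static const u32 myreg_mode_off = FIELD_PREP_WM16_CONST(MYREG_MODE, 0);

static void myreg_set_mode(void __iomem *base, u8 mode)
{
	writel(FIELD_PREP_WM16(MYREG_MODE, mode), base);	/* 0x5 -> 0x000f0005 */
}
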
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 136e9842120e..b424555753b1 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -13,9 +13,8 @@
#define LINUX_HWRANDOM_H_
#include <linux/completion.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index edf96f249eb5..301a83afbd66 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -24,6 +24,7 @@ enum hwmon_sensor_types {
hwmon_curr,
hwmon_power,
hwmon_energy,
+ hwmon_energy64,
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
@@ -45,6 +46,7 @@ enum hwmon_chip_attributes {
hwmon_chip_power_samples,
hwmon_chip_temp_samples,
hwmon_chip_beep_enable,
+ hwmon_chip_pec,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -60,6 +62,7 @@ enum hwmon_chip_attributes {
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
+#define HWMON_C_PEC BIT(hwmon_chip_pec)
enum hwmon_temp_attributes {
hwmon_temp_enable,
@@ -366,7 +369,9 @@ enum hwmon_intrusion_attributes {
/**
* struct hwmon_ops - hwmon device operations
- * @is_visible: Callback to return attribute visibility. Mandatory.
+ * @visible: Static visibility. If non-zero, 'is_visible' is ignored.
+ * @is_visible: Callback to return attribute visibility. Mandatory unless
+ * 'visible' is non-zero.
* Parameters are:
* @const void *drvdata:
* Pointer to driver-private data structure passed
@@ -410,6 +415,7 @@ enum hwmon_intrusion_attributes {
* The function returns 0 on success or a negative error number.
*/
struct hwmon_ops {
+ umode_t visible;
umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
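
A sketch of the new static-visibility shortcut (driver names hypothetical): a driver whose attributes all share one fixed mode can set @visible and drop the is_visible() boilerplate.

/* Hypothetical driver, for illustration only. */
static const struct hwmon_ops my_hwmon_ops = {
	.visible = 0444,	/* all attributes read-only; is_visible() is ignored */
	.read = my_hwmon_read,	/* assumed to be defined elsewhere */
};
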
@@ -479,7 +485,6 @@ devm_hwmon_device_register_with_info(struct device *dev,
const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel);
@@ -487,6 +492,9 @@ int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
char *hwmon_sanitize_name(const char *name);
char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+void hwmon_lock(struct device *dev);
+void hwmon_unlock(struct device *dev);
+
/**
* hwmon_is_bad_char - Is the char invalid in a hwmon name
* @ch: the char to be considered
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index bfe7c1f1ac6d..f35b42e8c5de 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -58,18 +58,16 @@ struct hwspinlock_pdata {
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
-struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
-int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
-struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
@@ -94,11 +92,6 @@ int devm_hwspin_lock_register(struct device *dev,
* Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
* users. Others, which care, can still check this with IS_ERR.
*/
-static inline struct hwspinlock *hwspin_lock_request(void)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
return ERR_PTR(-ENODEV);
@@ -127,12 +120,12 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
-static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
return 0;
}
-static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
return 0;
}
@@ -149,11 +142,6 @@ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
return 0;
}
-static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
unsigned int id)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5e39baa7f6cb..dfc516c1c719 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -24,7 +24,7 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -265,16 +265,18 @@ static inline u32 hv_get_avail_to_write_percent(
* Linux kernel.
*/
-#define VERSION_WS2008 ((0 << 16) | (13))
-#define VERSION_WIN7 ((1 << 16) | (1))
-#define VERSION_WIN8 ((2 << 16) | (4))
-#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
-#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
-#define VERSION_WIN10_V5 ((5 << 16) | (0))
-#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
-#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
-#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
+#define VMBUS_MAKE_VERSION(MAJ, MIN) ((((u32)MAJ) << 16) | (MIN))
+#define VERSION_WS2008 VMBUS_MAKE_VERSION(0, 13)
+#define VERSION_WIN7 VMBUS_MAKE_VERSION(1, 1)
+#define VERSION_WIN8 VMBUS_MAKE_VERSION(2, 4)
+#define VERSION_WIN8_1 VMBUS_MAKE_VERSION(3, 0)
+#define VERSION_WIN10 VMBUS_MAKE_VERSION(4, 0)
+#define VERSION_WIN10_V4_1 VMBUS_MAKE_VERSION(4, 1)
+#define VERSION_WIN10_V5 VMBUS_MAKE_VERSION(5, 0)
+#define VERSION_WIN10_V5_1 VMBUS_MAKE_VERSION(5, 1)
+#define VERSION_WIN10_V5_2 VMBUS_MAKE_VERSION(5, 2)
+#define VERSION_WIN10_V5_3 VMBUS_MAKE_VERSION(5, 3)
+#define VERSION_WIN10_V6_0 VMBUS_MAKE_VERSION(6, 0)
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
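
A quick arithmetic check of the new helper (illustrative only): the major version lands in the upper 16 bits, so the macro reproduces the old literals exactly.

/* e.g. VERSION_WIN10_V5_2 == (5 << 16) | 2 == 0x00050002 */
static_assert(VMBUS_MAKE_VERSION(5, 2) == 0x00050002);
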
@@ -335,14 +337,22 @@ struct vmbus_channel_offer {
} __packed;
/* Server Flags */
-#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
-#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
-#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
-#define VMBUS_CHANNEL_PARENT_OFFER 0x200
-#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
-#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x0001
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for the channel ring buffer.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER 0x0002
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for GPA direct packets and additional GPADLs.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY 0x0004
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x0010
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x0100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x0200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x0400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
struct vmpacket_descriptor {
u16 type;
@@ -371,19 +381,6 @@ struct vmtransfer_page_packet_header {
struct vmtransfer_page_range ranges[];
} __packed;
-struct vmgpadl_packet_header {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved;
-} __packed;
-
-struct vmadd_remove_transfer_page_set {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u16 xfer_pageset_id;
- u16 reserved;
-} __packed;
-
/*
* This structure defines a range in guest physical space that can be made to
* look virtually contiguous.
@@ -395,30 +392,6 @@ struct gpa_range {
};
/*
- * This is the format for an Establish Gpadl packet, which contains a handle by
- * which this GPADL will be known and a set of GPA ranges associated with it.
- * This can be converted to a MDL by the guest OS. If there are multiple GPA
- * ranges, then the resulting MDL will be "chained," representing multiple VA
- * ranges.
- */
-struct vmestablish_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 range_cnt;
- struct gpa_range range[1];
-} __packed;
-
-/*
- * This is the format for a Teardown Gpadl packet, which indicates that the
- * GPADL handle in the Establish Gpadl packet will never be referenced again.
- */
-struct vmteardown_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved; /* for alignment to a 8-byte boundary */
-} __packed;
-
-/*
* This is the format for a GPA-Direct packet, which contains a set of GPA
* ranges, in addition to commands and/or data.
*/
@@ -429,25 +402,6 @@ struct vmdata_gpa_direct {
struct gpa_range range[1];
} __packed;
-/* This is the format for a Additional Data Packet. */
-struct vmadditional_data {
- struct vmpacket_descriptor d;
- u64 total_bytes;
- u32 offset;
- u32 byte_cnt;
- unsigned char data[1];
-} __packed;
-
-union vmpacket_largest_possible_header {
- struct vmpacket_descriptor simple_hdr;
- struct vmtransfer_page_packet_header xfer_page_hdr;
- struct vmgpadl_packet_header gpadl_hdr;
- struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
- struct vmestablish_gpadl establish_gpadl_hdr;
- struct vmteardown_gpadl teardown_gpadl_hdr;
- struct vmdata_gpa_direct data_gpa_direct_hdr;
-};
-
#define VMPACKET_DATA_START_ADDRESS(__packet) \
(void *)(((unsigned char *)__packet) + \
((struct vmpacket_descriptor)__packet)->offset8 * 8)
@@ -677,6 +631,12 @@ struct vmbus_channel_relid_released {
u32 child_relid;
} __packed;
+/*
+ * Used by the paravisor only; indicates that encrypted ring buffers and
+ * encrypted external memory are supported.
+ */
+#define VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS 0x10
+
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
@@ -686,7 +646,8 @@ struct vmbus_channel_initiate_contact {
struct {
u8 msg_sint;
u8 msg_vtl;
- u8 reserved[6];
+ u8 reserved[2];
+ u32 feature_flags; /* VMBus version 6.0 */
};
};
u64 monitor_page1;
@@ -763,20 +724,6 @@ struct vmbus_channel_msginfo {
unsigned char msg[];
};
-struct vmbus_close_msg {
- struct vmbus_channel_msginfo info;
- struct vmbus_channel_close_channel msg;
-};
-
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -865,7 +812,7 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
- struct vmbus_close_msg close_msg;
+ struct vmbus_channel_close_channel close_msg;
/* Statistics */
u64 interrupts; /* Host to Guest interrupts */
@@ -1067,6 +1014,16 @@ struct vmbus_channel {
/* The max size of a packet on this channel */
u32 max_pkt_size;
+
+ /* function to mmap ring buffer memory to the channel's sysfs ring attribute */
+ int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
+
+ /* boolean to control visibility of sysfs for ring buffer */
+ bool ring_sysfs_visible;
+ /* The ring buffer is encrypted */
+ bool co_ring_buffer;
+ /* The external memory is encrypted */
+ bool co_external_memory;
};
#define lock_requestor(channel, flags) \
@@ -1091,6 +1048,16 @@ u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+static inline bool is_co_ring_buffer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER);
+}
+
+static inline bool is_co_external_memory(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY);
+}
+
static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
{
return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
@@ -1226,13 +1193,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1330,11 +1290,7 @@ struct hv_device {
#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
-
-static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
-{
- return container_of(d, struct hv_driver, driver);
-}
+#define drv_to_hv_drv(d) container_of_const(d, struct hv_driver, driver)
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
@@ -1346,6 +1302,8 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct device *hv_get_vmbus_root_device(void);
+
struct hv_ring_buffer_debug_info {
u32 current_interrupt_mask;
u32 current_read_index;
@@ -1563,6 +1521,7 @@ struct hv_util_service {
void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
+ int (*util_init_transport)(void);
void (*util_deinit)(void);
int (*util_pre_suspend)(void);
int (*util_pre_resume)(void);
@@ -1673,6 +1632,7 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu);
/* Get the start of the ring buffer. */
static inline void *
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index 9efbc54e35e5..be5417303ecf 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -37,6 +37,9 @@ static inline bool hypervisor_isolated_pci_functions(void)
if (IS_ENABLED(CONFIG_S390))
return true;
+ if (IS_ENABLED(CONFIG_LOONGARCH))
+ return true;
+
return jailhouse_paravirt();
}
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index 7c522fdd9ea7..e305bf32e40a 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -71,7 +71,7 @@ struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
int (*read_byte) (void *data, int reg);
- int (*wait_for_completion) (void *data);
+ int (*wait_for_completion_cb) (void *data);
void (*reset_chip) (void *data);
/* For PCA9564, use one of the predefined frequencies:
* 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
index 4d5da161c225..2bb54dc87c8e 100644
--- a/include/linux/i2c-atr.h
+++ b/include/linux/i2c-atr.h
@@ -19,21 +19,59 @@ struct fwnode_handle;
struct i2c_atr;
/**
+ * enum i2c_atr_flags - Flags for an I2C ATR driver
+ *
+ * @I2C_ATR_F_STATIC: ATR does not support dynamic mapping, use static mapping.
+ * Mappings will only be added or removed as a result of
+ * devices being added or removed from a child bus.
+ *			The ATR pool will have to be big enough to accommodate all
+ * devices expected to be added to the child buses.
+ * @I2C_ATR_F_PASSTHROUGH: Allow unmapped incoming addresses to pass through
+ */
+enum i2c_atr_flags {
+ I2C_ATR_F_STATIC = BIT(0),
+ I2C_ATR_F_PASSTHROUGH = BIT(1),
+};
+
+/**
* struct i2c_atr_ops - Callbacks from ATR to the device driver.
- * @attach_client: Notify the driver of a new device connected on a child
- * bus, with the alias assigned to it. The driver must
- * configure the hardware to use the alias.
- * @detach_client: Notify the driver of a device getting disconnected. The
- * driver must configure the hardware to stop using the
- * alias.
+ * @attach_addr: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_addr: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
*
* All these functions return 0 on success, a negative error code otherwise.
*/
struct i2c_atr_ops {
- int (*attach_client)(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client, u16 alias);
- void (*detach_client)(struct i2c_atr *atr, u32 chan_id,
- const struct i2c_client *client);
+ int (*attach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias);
+ void (*detach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr);
+};
+
+/**
+ * struct i2c_atr_adap_desc - An ATR downstream bus descriptor
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ * @num_aliases: The number of aliases in this adapter's private alias pool. Set
+ * to zero if this adapter uses the ATR's global alias pool.
+ * @aliases: An optional array of private aliases used by the adapter
+ * instead of the ATR's global pool of aliases. Must contain
+ *		exactly num_aliases entries if num_aliases > 0; it is ignored
+ * otherwise.
+ */
+struct i2c_atr_adap_desc {
+ u32 chan_id;
+ struct device *parent;
+ struct fwnode_handle *bus_handle;
+ size_t num_aliases;
+ u16 *aliases;
};
/**
@@ -42,6 +80,7 @@ struct i2c_atr_ops {
* @dev: The device acting as an ATR
* @ops: Driver-specific callbacks
* @max_adapters: Maximum number of child adapters
+ * @flags: Flags for ATR
*
* The new ATR helper is connected to the parent adapter but has no child
* adapters. Call i2c_atr_add_adapter() to add some.
@@ -51,7 +90,8 @@ struct i2c_atr_ops {
* Return: pointer to the new ATR helper object, or ERR_PTR
*/
struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
- const struct i2c_atr_ops *ops, int max_adapters);
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags);
/**
* i2c_atr_delete - Delete an I2C ATR helper.
@@ -65,12 +105,7 @@ void i2c_atr_delete(struct i2c_atr *atr);
/**
* i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
* @atr: The I2C ATR
- * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
- * passed to the callbacks in `struct i2c_atr_ops`.
- * @adapter_parent: The device used as the parent of the new i2c adapter, or NULL
- * to use the i2c-atr device as the parent.
- * @bus_handle: The fwnode handle that points to the adapter's i2c
- * peripherals, or NULL.
+ * @desc: An ATR adapter descriptor
*
* After calling this function a new i2c bus will appear. Adding and removing
* devices on the downstream bus will result in calls to the
@@ -85,9 +120,7 @@ void i2c_atr_delete(struct i2c_atr *atr);
*
* Return: 0 on success, a negative error code otherwise.
*/
-int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
- struct device *adapter_parent,
- struct fwnode_handle *bus_handle);
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc);
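
A sketch of the descriptor-based call (field values hypothetical), replacing the old three-argument form:

/* Hypothetical caller, for illustration only. */
static int my_add_downstream_bus(struct i2c_atr *atr, struct device *parent)
{
	struct i2c_atr_adap_desc desc = {
		.chan_id = 0,
		.parent = parent,	/* or NULL to use the i2c-atr device */
		.bus_handle = NULL,
		.num_aliases = 0,	/* use the ATR's global alias pool */
	};

	return i2c_atr_add_adapter(atr, &desc);
}
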
/**
* i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
diff --git a/include/linux/i2c-of-prober.h b/include/linux/i2c-of-prober.h
new file mode 100644
index 000000000000..bb6d47f50ee5
--- /dev/null
+++ b/include/linux/i2c-of-prober.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for the Linux I2C OF component prober
+ *
+ * Copyright (C) 2024 Google LLC
+ */
+
+#ifndef _LINUX_I2C_OF_PROBER_H
+#define _LINUX_I2C_OF_PROBER_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+struct device;
+struct device_node;
+
+/**
+ * struct i2c_of_probe_ops - I2C OF component prober callbacks
+ *
+ * A set of callbacks to be used by i2c_of_probe_component().
+ *
+ * All callbacks are optional. Callbacks are called only once per run, and are
+ * used in the order they are defined in this structure.
+ *
+ * All callbacks that have return values shall return %0 on success,
+ * or a negative error number on failure.
+ *
+ * The @dev parameter passed to the callbacks is the same as @dev passed to
+ * i2c_of_probe_component(). It should only be used for dev_printk() calls
+ * and nothing else, especially not managed device resource (devres) APIs.
+ */
+struct i2c_of_probe_ops {
+ /**
+ * @enable: Retrieve and enable resources so that the components respond to probes.
+ *
+	 * It is OK for this callback to return -EPROBE_DEFER since the intended use
+	 * includes retrieving resources and enabling them. If this fails, resources
+	 * should be reverted to their initial state and released before returning.
+ */
+ int (*enable)(struct device *dev, struct device_node *bus_node, void *data);
+
+ /**
+ * @cleanup_early: Release exclusive resources prior to calling probe() on a
+ * detected component.
+ *
+ * Only called if a matching component is actually found. If none are found,
+	 * resources that would have been released in this callback should be released
+	 * in @cleanup instead.
+ */
+ void (*cleanup_early)(struct device *dev, void *data);
+
+ /**
+ * @cleanup: Opposite of @enable to balance refcounts and free resources after probing.
+ *
+ * Should check if resources were already freed by @cleanup_early.
+ */
+ void (*cleanup)(struct device *dev, void *data);
+};
+
+/**
+ * struct i2c_of_probe_cfg - I2C OF component prober configuration
+ * @ops: Callbacks for the prober to use.
+ * @type: A string to match the device node name prefix to probe for.
+ */
+struct i2c_of_probe_cfg {
+ const struct i2c_of_probe_ops *ops;
+ const char *type;
+};
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+
+int i2c_of_probe_component(struct device *dev, const struct i2c_of_probe_cfg *cfg, void *ctx);
+
+/**
+ * DOC: I2C OF component prober simple helpers
+ *
+ * Components such as trackpads are commonly connected to a device's baseboard
+ * with a 6-pin ribbon cable. That gives at most one voltage supply and one
+ * GPIO (commonly an "enable" or "reset" line) besides the I2C bus, interrupt
+ * pin, and common ground. Touchscreens, while integrated into the display
+ * panel's connection, typically have the same set of connections.
+ *
+ * A simple set of helpers is provided here for use with the I2C OF component
+ * prober. This implementation targets such components, allowing for at most
+ * one regulator supply.
+ *
+ * The following helpers are provided:
+ * * i2c_of_probe_simple_enable()
+ * * i2c_of_probe_simple_cleanup_early()
+ * * i2c_of_probe_simple_cleanup()
+ */
+
+/**
+ * struct i2c_of_probe_simple_opts - Options for simple I2C component prober callbacks
+ * @res_node_compatible: Compatible string of device node to retrieve resources from.
+ * @supply_name: Name of regulator supply.
+ * @gpio_name: Name of GPIO. NULL if no GPIO line is used. Empty string ("") if GPIO
+ * line is unnamed.
+ * @post_power_on_delay_ms: Delay after regulators are powered on. Passed to msleep().
+ * @post_gpio_config_delay_ms: Delay after GPIO is configured. Passed to msleep().
+ * @gpio_assert_to_enable: %true if GPIO should be asserted, i.e. set to logical high,
+ * to enable the component.
+ *
+ * This describes power sequences common for the class of components supported by the
+ * simple component prober:
+ * * @gpio_name is configured to the non-active setting according to @gpio_assert_to_enable.
+ * * @supply_name regulator supply is enabled.
+ * * Wait for @post_power_on_delay_ms to pass.
+ * * @gpio_name is configured to the active setting according to @gpio_assert_to_enable.
+ * * Wait for @post_gpio_config_delay_ms to pass.
+ */
+struct i2c_of_probe_simple_opts {
+ const char *res_node_compatible;
+ const char *supply_name;
+ const char *gpio_name;
+ unsigned int post_power_on_delay_ms;
+ unsigned int post_gpio_config_delay_ms;
+ bool gpio_assert_to_enable;
+};
+
+struct gpio_desc;
+struct regulator;
+
+struct i2c_of_probe_simple_ctx {
+ /* public: provided by user before helpers are used. */
+ const struct i2c_of_probe_simple_opts *opts;
+ /* private: internal fields for helpers. */
+ struct regulator *supply;
+ struct gpio_desc *gpiod;
+};
+
+int i2c_of_probe_simple_enable(struct device *dev, struct device_node *bus_node, void *data);
+void i2c_of_probe_simple_cleanup_early(struct device *dev, void *data);
+void i2c_of_probe_simple_cleanup(struct device *dev, void *data);
+
+extern struct i2c_of_probe_ops i2c_of_probe_simple_ops;
+
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+#endif /* _LINUX_I2C_OF_PROBER_H */
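
A hedged sketch tying the simple helpers together (compatible string and names made up): the options describe the component's power-up sequence, and the prober is driven through i2c_of_probe_component() with the simple ops.

/* Hypothetical usage, for illustration only. */
static const struct i2c_of_probe_simple_opts my_opts = {
	.res_node_compatible = "vendor,touchscreen",	/* made-up compatible */
	.supply_name = "vdd",
	.gpio_name = "enable",
	.post_power_on_delay_ms = 10,
	.gpio_assert_to_enable = true,
};

static int my_probe_components(struct device *dev)
{
	struct i2c_of_probe_simple_ctx ctx = { .opts = &my_opts };
	const struct i2c_of_probe_cfg cfg = {
		.ops = &i2c_of_probe_simple_ops,
		.type = "touchscreen",
	};

	return i2c_of_probe_component(dev, &cfg, &ctx);
}
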
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index ced1c6ead52a..dc1bd2ab4c13 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -44,9 +44,11 @@ static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
#endif
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap);
+void i2c_register_spd_write_disable(struct i2c_adapter *adap);
+void i2c_register_spd_write_enable(struct i2c_adapter *adap);
#else
-static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+static inline void i2c_register_spd_write_disable(struct i2c_adapter *adap) { }
+static inline void i2c_register_spd_write_enable(struct i2c_adapter *adap) { }
#endif
#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 5e6cd43a6dbd..20fd41b51d5c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -30,7 +30,6 @@ extern const struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
struct i2c_msg;
-struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
@@ -304,7 +303,7 @@ struct i2c_driver {
u32 flags;
};
-#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
+#define to_i2c_driver(d) container_of_const(d, struct i2c_driver, driver)
/**
* struct i2c_client - represent an I2C slave device
@@ -322,6 +321,8 @@ struct i2c_driver {
* calls it to pass on slave events to the slave driver.
* @devres_group_id: id of the devres group that will be created for resources
* acquired when probing this device.
+ * @debugfs: pointer to the debugfs subdirectory which the I2C core created
+ * for this client.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -351,6 +352,7 @@ struct i2c_client {
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
void *devres_group_id; /* ID of probe devres group */
+ struct dentry *debugfs; /* per-client debugfs dir */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -403,7 +405,6 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @addr: stored in i2c_client.addr
* @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
- * @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
* @swnode: software node for the device
* @resources: resources associated with the device
@@ -427,7 +428,6 @@ struct i2c_board_info {
unsigned short addr;
const char *dev_name;
void *platform_data;
- struct device_node *of_node;
struct fwnode_handle *fwnode;
const struct software_node *swnode;
const struct resource *resources;
@@ -512,46 +512,54 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
#endif /* I2C_BOARDINFO */
/**
- * struct i2c_algorithm - represent I2C transfer method
- * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
- * defined by the msgs array, with num messages available to transfer via
- * the adapter specified by adap.
- * @master_xfer_atomic: same as @master_xfer. Yet, only using atomic context
- * so e.g. PMICs can be accessed very late before shutdown. Optional.
- * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ * struct i2c_algorithm - represent I2C transfer methods
+ * @xfer: Transfer a given number of messages defined by the msgs array via
+ * the specified adapter.
+ * @xfer_atomic: Same as @xfer. Yet, only using atomic context so e.g. PMICs
+ * can be accessed very late before shutdown. Optional.
+ * @smbus_xfer: Issue SMBus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
- * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
+ * @smbus_xfer_atomic: Same as @smbus_xfer. Yet, only using atomic context
* so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the ``I2C_FUNC_*`` flags.
- * @reg_slave: Register given client to I2C slave mode of this adapter
- * @unreg_slave: Unregister given client from I2C slave mode of this adapter
+ * @reg_target: Register given client to local target mode of this adapter
+ * @unreg_target: Unregister given client from local target mode of this adapter
+ *
+ * @master_xfer: deprecated, use @xfer
+ * @master_xfer_atomic: deprecated, use @xfer_atomic
+ * @reg_slave: deprecated, use @reg_target
+ * @unreg_slave: deprecated, use @unreg_target
*
- * The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
+ * The return codes from the ``xfer{_atomic}`` fields should indicate the
* type of error code that occurred during the transfer, as documented in the
* Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
* number of messages executed should be returned.
*/
struct i2c_algorithm {
/*
- * If an adapter algorithm can't do I2C-level access, set master_xfer
+ * If an adapter algorithm can't do I2C-level access, set xfer
* to NULL. If an adapter algorithm can do SMBus access, set
* smbus_xfer. If set to NULL, the SMBus protocol is simulated
* using common I2C messages.
- *
- * master_xfer should return the number of messages successfully
- * processed, or a negative value on error
*/
- int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
- int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ union {
+ int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ };
+ union {
+ int (*xfer_atomic)(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num);
+ int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ };
int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data);
@@ -563,8 +571,14 @@ struct i2c_algorithm {
u32 (*functionality)(struct i2c_adapter *adap);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- int (*reg_slave)(struct i2c_client *client);
- int (*unreg_slave)(struct i2c_client *client);
+ union {
+ int (*reg_target)(struct i2c_client *client);
+ int (*reg_slave)(struct i2c_client *client);
+ };
+ union {
+ int (*unreg_target)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
+ };
#endif
};
@@ -748,6 +762,9 @@ struct i2c_adapter {
struct regulator *bus_regulator;
struct dentry *debugfs;
+
+ /* 7bit address space */
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -852,7 +869,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -934,6 +950,21 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
return (msg->addr << 1) | (msg->flags & I2C_M_RD);
}
+/*
+ * 10-bit address
+ * addr_1: 5'b11110 | addr[9:8] | (R/nW)
+ * addr_2: addr[7:0]
+ */
+static inline u8 i2c_10bit_addr_hi_from_msg(const struct i2c_msg *msg)
+{
+ return 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7) | (msg->flags & I2C_M_RD);
+}
+
+static inline u8 i2c_10bit_addr_lo_from_msg(const struct i2c_msg *msg)
+{
+ return msg->addr & GENMASK(7, 0);
+}
+
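
A quick worked example of the encoding (illustrative, not part of the header): for a 10-bit read of address 0x2a5, addr[9:8] is 0b10, so the first byte is 0b11110_10_1 = 0xf5 and the second byte is 0xa5:

#include <linux/i2c.h>

/* Illustrative sketch: encode both address bytes of a 10-bit read. */
static void foo_show_10bit_addr(void)
{
	struct i2c_msg msg = {
		.addr  = 0x2a5,			/* 10-bit target address */
		.flags = I2C_M_TEN | I2C_M_RD,
	};
	u8 hi = i2c_10bit_addr_hi_from_msg(&msg);	/* 0xf5 */
	u8 lo = i2c_10bit_addr_lo_from_msg(&msg);	/* 0xa5 */

	pr_info("addr_1=%#04x addr_2=%#04x\n", hi, lo);
}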
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
@@ -961,8 +992,6 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#define builtin_i2c_driver(__i2c_driver) \
builtin_driver(__i2c_driver, i2c_add_driver)
-#endif /* I2C */
-
/* must call put_device() when done with returned i2c_client device */
struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
@@ -972,6 +1001,28 @@ struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+#else /* I2C */
+
+static inline struct i2c_client *
+i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+#endif /* !I2C */
+
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -991,10 +1042,6 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
}
-const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client);
-
int of_i2c_get_board_info(struct device *dev, struct device_node *node,
struct i2c_board_info *info);
@@ -1015,13 +1062,6 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return NULL;
}
-static inline const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client)
-{
- return NULL;
-}
-
static inline int of_i2c_get_board_info(struct device *dev,
struct device_node *node,
struct i2c_board_info *info)
@@ -1034,7 +1074,7 @@ static inline int of_i2c_get_board_info(struct device *dev,
struct acpi_resource;
struct acpi_resource_i2c_serialbus;
-#if IS_ENABLED(CONFIG_ACPI)
+#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
int i2c_acpi_client_count(struct acpi_device *adev);
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index e119f11948ef..9fcb6410a584 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -27,7 +27,7 @@
* These are the standard error codes as defined by the I3C specification.
* When -EIO is returned by the i3c_device_do_priv_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
- * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * &struct i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
*
*/
@@ -39,20 +39,25 @@ enum i3c_error_code {
};
/**
- * enum i3c_hdr_mode - HDR mode ids
+ * enum i3c_xfer_mode - I3C xfer mode ids
* @I3C_HDR_DDR: DDR mode
* @I3C_HDR_TSP: TSP mode
* @I3C_HDR_TSL: TSL mode
+ * @I3C_SDR: SDR mode (NOT HDR mode)
*/
-enum i3c_hdr_mode {
- I3C_HDR_DDR,
- I3C_HDR_TSP,
- I3C_HDR_TSL,
+enum i3c_xfer_mode {
+	/* The 3 values below (I3C_HDR*) must match the GETCAP1 byte bit positions */
+ I3C_HDR_DDR = 0,
+ I3C_HDR_TSP = 1,
+ I3C_HDR_TSL = 2,
+	/* Used for the default SDR transfer mode */
+ I3C_SDR = 31,
};
/**
- * struct i3c_priv_xfer - I3C SDR private transfer
+ * struct i3c_xfer - I3C data transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
+ * @cmd: Read/Write command in HDR mode, read: 0x80 - 0xff, write: 0x00 - 0x7f
 * @len: length of the transfer in bytes
 * @actual_len: number of bytes actually transferred by the controller
* @data: input/output buffer
@@ -60,8 +65,11 @@ enum i3c_hdr_mode {
* @data.out: output buffer. Must point to a DMA-able buffer
* @err: I3C error code
*/
-struct i3c_priv_xfer {
- u8 rnw;
+struct i3c_xfer {
+ union {
+ u8 rnw;
+ u8 cmd;
+ };
u16 len;
u16 actual_len;
union {
@@ -71,6 +79,9 @@ struct i3c_priv_xfer {
enum i3c_error_code err;
};
+/* kept for backward compatibility */
+#define i3c_priv_xfer i3c_xfer
+
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -183,10 +194,7 @@ struct i3c_driver {
const struct i3c_device_id *id_table;
};
-static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
-{
- return container_of(drv, struct i3c_driver, driver);
-}
+#define drv_to_i3cdrv(__drv) container_of_const(__drv, struct i3c_driver, driver)
struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
@@ -248,7 +256,7 @@ void i3c_driver_unregister(struct i3c_driver *drv);
*
 * Return: 0 if both registrations succeed, a negative error code otherwise.
*/
-static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+static __always_inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
struct i2c_driver *i2cdrv)
{
int ret;
@@ -273,7 +281,7 @@ static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
* Note that when CONFIG_I3C is not enabled, this function only unregisters the
* @i2cdrv.
*/
-static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
struct i2c_driver *i2cdrv)
{
if (IS_ENABLED(CONFIG_I3C))
@@ -286,7 +294,7 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
* module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
* driver
* @__i3cdrv: the I3C driver to register
- * @__i2cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
*
* Provide generic init/exit functions that simply register/unregister an I3C
* and an I2C driver.
@@ -300,9 +308,15 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
i3c_i2c_driver_unregister, \
__i2cdrv)
-int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers);
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
+
+static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_xfer *xfers,
+ int nxfers)
+{
+ return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+}
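
A hedged sketch of how a client driver might use the widened API; the foo_* helpers and the 0x80 command code are invented for illustration. SDR callers keep the old wrapper, while HDR-DDR callers pass a command in the new @cmd union member:

#include <linux/i3c/device.h>

/* Illustrative: HDR-DDR read (read commands are 0x80 - 0xff). */
static int foo_read_ddr(struct i3c_device *dev, u8 *buf, u16 len)
{
	struct i3c_xfer xfer = {
		.cmd	 = 0x80,	/* hypothetical device command */
		.len	 = len,
		.data.in = buf,
	};

	return i3c_device_do_xfers(dev, &xfer, 1, I3C_HDR_DDR);
}

/* Illustrative: plain SDR write through the compatibility wrapper. */
static int foo_write_sdr(struct i3c_device *dev, const u8 *buf, u16 len)
{
	struct i3c_xfer xfer = {
		.rnw	  = false,
		.len	  = len,
		.data.out = buf,
	};

	return i3c_device_do_priv_xfers(dev, &xfer, 1);
}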
int i3c_device_do_setdasa(struct i3c_device *dev);
@@ -344,5 +358,6 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 0ca27dd86956..2fd850f4678b 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -33,6 +33,7 @@ enum {
struct i3c_master_controller;
struct i3c_bus;
struct i3c_device;
+extern const struct bus_type i3c_bus_type;
/**
* struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
@@ -248,10 +249,15 @@ struct i3c_device {
*/
#define I3C_BUS_MAX_DEVS 11
-#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
-#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
-#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
-#define I3C_BUS_I2C_FM_SCL_RATE 400000
+/* Taken from the I3C Spec V1.1.1, chapter 6.2. "Timing specification" */
+#define I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_MAX_RATE 400000
+#define I3C_BUS_I3C_SCL_MAX_RATE 12900000
+#define I3C_BUS_I3C_SCL_TYP_RATE 12500000
+#define I3C_BUS_TAVAL_MIN_NS 1000
+#define I3C_BUS_TBUF_MIXED_FM_MIN_NS 1300
+#define I3C_BUS_THIGH_MIXED_MAX_NS 41
+#define I3C_BUS_TIDLE_MIN_NS 200000
#define I3C_BUS_TLOW_OD_MIN_NS 200
/**
@@ -277,13 +283,29 @@ enum i3c_bus_mode {
};
/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
* enum i3c_addr_slot_status - I3C address slot status
* @I3C_ADDR_SLOT_FREE: address is free
* @I3C_ADDR_SLOT_RSVD: address is reserved
* @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
* @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
* @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
- *
+ * @I3C_ADDR_SLOT_EXT_STATUS_MASK: address slot mask with extended information
+ * @I3C_ADDR_SLOT_EXT_DESIRED: the bit marks addresses that are preferred by some
+ *	devices, e.g. via the "assigned-address" property in a device tree source.
+ *
* On an I3C bus, addresses are assigned dynamically, and we need to know which
* addresses are free to use and which ones are already assigned.
*
@@ -296,8 +318,12 @@ enum i3c_addr_slot_status {
I3C_ADDR_SLOT_I2C_DEV,
I3C_ADDR_SLOT_I3C_DEV,
I3C_ADDR_SLOT_STATUS_MASK = 3,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK = 7,
+ I3C_ADDR_SLOT_EXT_DESIRED = BIT(2),
};
+#define I3C_ADDR_SLOT_STATUS_BITS 4
+
/**
* struct i3c_bus - I3C bus object
* @cur_master: I3C master currently driving the bus. Since I3C is multi-master
@@ -339,7 +365,7 @@ enum i3c_addr_slot_status {
struct i3c_bus {
struct i3c_dev_desc *cur_master;
int id;
- unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG];
enum i3c_bus_mode mode;
struct {
unsigned long i3c;
@@ -392,7 +418,11 @@ struct i3c_bus {
* @send_ccc_cmd: send a CCC command
* This method is mandatory.
* @priv_xfers: do one or several private I3C SDR transfers
- * This method is mandatory.
+ *	This method is deprecated; it is mandatory only when @i3c_xfers is
+ *	not implemented.
+ * @i3c_xfers: do one or several I3C SDR or HDR transfers
+ *	This method is mandatory when @priv_xfers is not implemented, and
+ *	new drivers should implement it instead of @priv_xfers.
* @attach_i2c_dev: called every time an I2C device is attached to the bus.
* This is a good place to attach master controller specific
* data to I2C devices.
@@ -435,6 +465,7 @@ struct i3c_bus {
* NULL.
* @enable_hotjoin: enable hot join event detect.
* @disable_hotjoin: disable hot join event detect.
+ * @set_speed: adjust I3C open drain mode timing.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -447,13 +478,17 @@ struct i3c_master_controller_ops {
const struct i3c_ccc_cmd *cmd);
int (*send_ccc_cmd)(struct i3c_master_controller *master,
struct i3c_ccc_cmd *cmd);
+ /* Deprecated, please use i3c_xfers() */
int (*priv_xfers)(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers);
+ int (*i3c_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
int (*i2c_xfers)(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers, int nxfers);
+ struct i2c_msg *xfers, int nxfers);
int (*request_ibi)(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_dev_desc *dev);
@@ -463,6 +498,7 @@ struct i3c_master_controller_ops {
struct i3c_ibi_slot *slot);
int (*enable_hotjoin)(struct i3c_master_controller *master);
int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
};
/**
@@ -530,6 +566,26 @@ struct i3c_master_controller {
#define i3c_bus_for_each_i3cdev(bus, dev) \
list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+/**
+ * struct i3c_dma - DMA transfer and mapping descriptor
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @map_len: length of DMA mapping
+ * @addr: mapped DMA address for a Host Controller Driver
+ * @dir: DMA direction
+ * @bounce_buf: an allocated bounce buffer if the transfer needs one, NULL otherwise
+ */
+struct i3c_dma {
+ struct device *dev;
+ void *buf;
+ size_t len;
+ size_t map_len;
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ void *bounce_buf;
+};
+
int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
const struct i2c_msg *xfers,
int nxfers);
@@ -547,6 +603,12 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
+ size_t len, bool force_bounce,
+ enum dma_data_direction dir);
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer);
+DEFINE_FREE(i3c_master_dma_unmap_single, void *,
+ if (_T) i3c_master_dma_unmap_single(_T))
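
A sketch of how a host controller driver might pair the mapping helper with the scope-based cleanup just defined; the foo_* function, the use of master->dev.parent as the DMA device, and the synchronous-transfer flow are all assumptions for illustration:

#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/i3c/master.h>

static int foo_hcd_do_tx(struct i3c_master_controller *master,
			 void *buf, size_t len)
{
	struct i3c_dma *dma __free(i3c_master_dma_unmap_single) =
		i3c_master_dma_map_single(master->dev.parent, buf, len,
					  false, DMA_TO_DEVICE);

	if (!dma)
		return -ENOMEM;

	/* program dma->addr / dma->map_len, run the transfer, wait ... */

	return 0;	/* the mapping is released on every return path */
}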
int i3c_master_set_info(struct i3c_master_controller *master,
const struct i3c_device_info *info);
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 95b07f8b77fe..00037c13abc8 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -54,15 +54,29 @@
struct serio;
+/**
+ * typedef i8042_filter_t - i8042 filter callback
+ * @data: Data received by the i8042 controller
+ * @str: Status register of the i8042 controller
+ * @serio: Serio of the i8042 controller
+ * @context: Context pointer associated with this callback
+ *
+ * This represents an i8042 filter callback which can be used with i8042_install_filter()
+ * and i8042_remove_filter() to filter the i8042 input for platform-specific key codes.
+ *
+ * Context: Interrupt context.
+ * Returns: true if the data should be filtered out, false otherwise.
+ */
+typedef bool (*i8042_filter_t)(unsigned char data, unsigned char str, struct serio *serio,
+ void *context);
+
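
A sketch of a platform driver using the new context argument (the driver data and the scancode are illustrative, not from this patch):

#include <linux/i8042.h>
#include <linux/serio.h>

struct foo_drvdata {
	unsigned int magic_presses;	/* hypothetical per-device state */
};

/* Runs in interrupt context; returning true swallows the byte. */
static bool foo_i8042_filter(unsigned char data, unsigned char str,
			     struct serio *serio, void *context)
{
	struct foo_drvdata *priv = context;

	if (data == 0x5e) {		/* illustrative vendor scancode */
		priv->magic_presses++;
		return true;
	}

	return false;
}

/* probe:  i8042_install_filter(foo_i8042_filter, priv);  */
/* remove: i8042_remove_filter(foo_i8042_filter);         */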
#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
+int i8042_install_filter(i8042_filter_t filter, void *context);
+int i8042_remove_filter(i8042_filter_t filter);
#else
@@ -79,14 +93,12 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_install_filter(i8042_filter_t filter, void *context)
{
return -ENODEV;
}
-static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_remove_filter(i8042_filter_t filter)
{
return -ENODEV;
}
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index 8336b2f6f834..56c280eb2d4f 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,9 +21,9 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
-extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
+extern void clockevent_i8253_disable(void);
extern void setup_pit_timer(void);
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 0af4d210ee31..043ec5d9c882 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -40,4 +40,36 @@ void ip_icmp_error_rfc4884(const struct sk_buff *skb,
struct sock_ee_data_rfc4884 *out,
int thlen, int off);
+/* RFC 4884 */
+#define ICMP_EXT_ORIG_DGRAM_MIN_LEN 128
+#define ICMP_EXT_VERSION_2 2
+
+/* ICMP Extension Object Classes */
+#define ICMP_EXT_OBJ_CLASS_IIO 2 /* RFC 5837 */
+
+/* Interface Information Object - RFC 5837 */
+enum {
+ ICMP_EXT_CTYPE_IIO_ROLE_IIF,
+};
+
+#define ICMP_EXT_CTYPE_IIO_ROLE(ROLE) ((ROLE) << 6)
+#define ICMP_EXT_CTYPE_IIO_MTU BIT(0)
+#define ICMP_EXT_CTYPE_IIO_NAME BIT(1)
+#define ICMP_EXT_CTYPE_IIO_IPADDR BIT(2)
+#define ICMP_EXT_CTYPE_IIO_IFINDEX BIT(3)
+
+struct icmp_ext_iio_name_subobj {
+ u8 len;
+ char name[IFNAMSIZ];
+};
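
To illustrate how these combine (an example, not from the patch): per RFC 5837 the role sits in the top two bits of the C-Type while the low bits flag which sub-objects follow, so an incoming-interface object carrying ifindex and name would use:

#include <linux/icmp.h>

/* C-Type = role(IIF = 0) << 6 | IFINDEX(bit 3) | NAME(bit 1) = 0x0a */
static inline u8 foo_iio_ctype(void)
{
	return ICMP_EXT_CTYPE_IIO_ROLE(ICMP_EXT_CTYPE_IIO_ROLE_IIF) |
	       ICMP_EXT_CTYPE_IIO_IFINDEX |
	       ICMP_EXT_CTYPE_IIO_NAME;
}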
+
+enum {
+ /* RFC 5837 - Incoming IP Interface Role */
+ ICMP_ERR_EXT_IIO_IIF,
+ /* Add new constants above. Used by "icmp_errors_extension_mask"
+ * sysctl.
+ */
+ ICMP_ERR_EXT_COUNT,
+};
+
#endif /* _LINUX_ICMP_H */
diff --git a/include/linux/idr.h b/include/linux/idr.h
index da5f5fa4a3a6..789e23e67444 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
+#include <linux/cleanup.h>
struct idr {
struct radix_tree_root idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
+struct __class_idr {
+ struct idr *idr;
+ int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+ if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+ ((struct __class_idr){
+ .idr = idr,
+ .id = idr_alloc(idr, ptr, start, end, gfp),
+ }),
+ struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
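
A sketch of the intended usage pattern, assuming the guard semantics above (the foo_* names are invented): the allocated ID is removed automatically on error paths, and take_idr_id() disarms the guard on success:

#include <linux/idr.h>

struct foo_dev {
	int id;
};

static DEFINE_IDR(foo_idr);

static int foo_hw_init(struct foo_dev *fdev)
{
	return 0;	/* hypothetical setup step */
}

static int foo_register(struct foo_dev *fdev)
{
	CLASS(idr_alloc, alloc)(&foo_idr, fdev, 0, 0, GFP_KERNEL);

	if (alloc.id < 0)
		return alloc.id;

	if (foo_hw_init(fdev))
		return -EIO;	/* scope exit runs idr_remove() */

	/* success: keep the ID and disarm the cleanup */
	fdev->id = take_idr_id(alloc).id;
	return 0;
}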
/**
* idr_init_base() - Initialise an IDR.
* @idr: IDR handle.
@@ -257,6 +274,7 @@ struct ida {
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
/**
* ida_alloc() - Allocate an unused ID.
@@ -316,16 +334,18 @@ static inline void ida_init(struct ida *ida)
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
-/*
- * ida_simple_get() and ida_simple_remove() are deprecated. Use
- * ida_alloc() and ida_free() instead respectively.
- */
-#define ida_simple_get(ida, start, end, gfp) \
- ida_alloc_range(ida, start, (end) - 1, gfp)
-#define ida_simple_remove(ida, id) ida_free(ida, id)
-
static inline bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
}
+
+static inline bool ida_exists(struct ida *ida, unsigned int id)
+{
+ return ida_find_first_range(ida, id, id) == id;
+}
+
+static inline int ida_find_first(struct ida *ida)
+{
+ return ida_find_first_range(ida, 0, ~0);
+}
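
A brief usage sketch of the new lookup helpers (illustrative; the return convention of ida_find_first_range() is assumed to be a negative errno when nothing is allocated):

#include <linux/idr.h>

static DEFINE_IDA(foo_ida);

static void foo_ida_demo(void)
{
	int id = ida_alloc(&foo_ida, GFP_KERNEL);

	if (id < 0)
		return;

	/* point lookup built on the new ranged search */
	WARN_ON(!ida_exists(&foo_ida, id));

	/* lowest allocated ID; negative errno when the IDA is empty */
	WARN_ON(ida_find_first(&foo_ida) > id);

	ida_free(&foo_ida, id);
}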
#endif /* __IDR_H__ */
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
new file mode 100644
index 000000000000..f9782e46c5e5
--- /dev/null
+++ b/include/linux/ieee80211-eht.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 EHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_EHT_H
+#define LINUX_IEEE80211_EHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+/* need HE definitions for the inlines here */
+#include <linux/ieee80211-he.h>
+
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ *	supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ union {
+ struct {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[4];
+ };
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ union {
+ struct {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[3];
+ };
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+#define IEEE80211_EHT_OPER_MCS15_DISABLE 0x40
+
+/**
+ * struct ieee80211_eht_operation - eht operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - eht operation information
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+#define IEEE80211_EHT_MAC_CAP1_EHT_TRS 0x02
+#define IEEE80211_EHT_MAC_CAP1_TXOP_RET 0x04
+#define IEEE80211_EHT_MAC_CAP1_TWO_BQRS 0x08
+#define IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPT_MASK 0x30
+#define IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS 0x40
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE PPE Thresholds field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
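
A worked check of the arithmetic above (illustrative values): an NSS field of 1 (two streams) with three RU-index bits set gives 3 x 2 = 6 (NSS, RU) pairs; each pair carries 3-bit PPET16 and PPET8 values, so the field is 6 x 6 + 9 = 45 bits, i.e. 6 octets:

#include <linux/bitfield.h>
#include <linux/ieee80211-eht.h>

static void foo_ppe_size_demo(void)
{
	u16 hdr = u16_encode_bits(1, IEEE80211_EHT_PPE_THRES_NSS_MASK) |
		  u16_encode_bits(0x7, IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
	u8 caps[9] = { [5] = IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT };

	/* 3 RUs x 2 NSS = 6 pairs; DIV_ROUND_UP(6 * 6 + 9, 8) = 6 */
	WARN_ON(ieee80211_eht_ppe_size(hdr, caps) != 6);
}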
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
+/* callers must first validate the element with ieee80211_eht_oper_size_ok() */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5,
+ WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12,
+};
+
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP 0x0400
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
+ * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT 0x2000
+#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT 0x4000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+#define IEEE80211_MLC_RECONF_PRES_EML_CAPA 0x0020
+#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP 0x0040
+#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP 0x0080
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ * Return: the size of the multi-link element's "common" subfield
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ /*
+		 * The length is the first octet pointed to by mle->variable,
+		 * so there is no need to add anything
+ */
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return sizeof(*mle) + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ * Return: the link ID, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @data: pointer to the basic multi link element
+ * Return: the BSS Parameter Change Count field value, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+ return -1;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi-link element
+ * Return: the medium synchronization delay field value from the multi-link
+ * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT)
+ * if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+ return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi-link element
+ * Return: the EML capability field value from the multi-link element,
+ * or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
+ * @data: pointer to the multi-link element
+ * Return: the MLD capabilities and operations field value from the multi-link
+ * element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/* Defined in Figure 9-1074t in P802.11be_D7.0 */
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080
+
+/**
+ * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
+ * and operations.
+ * @data: pointer to the multi-link element
+ * Return: the extended MLD capabilities and operations field value from
+ * the multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi-link element
+ * Return: The MLD ID in the given multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough;
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element size is OK
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (!data || len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ common = ETH_ALEN + 1;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
+
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element type matches and size is OK
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control;
+
+ if (!ieee80211_mle_size_ok(data, len))
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+ return true;
+
+ return false;
+}
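
A sketch of the intended call pattern: validate the element once, then the getters can be used without further bounds checks (foo_parse_mle and its error convention are illustrative):

#include <linux/ieee80211-eht.h>

static int foo_parse_mle(const u8 *data, size_t len)
{
	if (!ieee80211_mle_type_ok(data, IEEE80211_ML_CONTROL_TYPE_BASIC, len))
		return -EINVAL;

	/* safe now: type and size were checked above */
	return ieee80211_mle_get_link_id(data);	/* -1 if not present */
}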
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
+} __packed;
+
+/**
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ * profile size
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ info_len += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ info_len += 2;
+ else
+ info_len += 1;
+ }
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ * parameter change count
+ * @prof: the per-STA profile, having been checked with
+ * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+ u16 control = le16_to_cpu(prof->control);
+ const u8 *pos = prof->variable;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+ return 0;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ pos += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ pos += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ pos += 2;
+ else
+ pos += 1;
+ }
+
+ return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM 0
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK 2
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK 3
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS 4
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ * element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += ETH_ALEN;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+ info_len += 2;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+#define IEEE80211_MLE_STA_EPCS_CONTROL_LINK_ID 0x000f
+#define IEEE80211_EPCS_ENA_RESP_BODY_LEN 3
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
+
+/**
+ * ieee80211_emlsr_pad_delay_in_us - Fetch the EMLSR Padding delay
+ * in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Padding delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_pad_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417i—Encoding of the EMLSR
+ * Padding Delay subfield.
+ */
+ u32 pad_delay = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY);
+
+ if (!pad_delay ||
+ pad_delay > IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US)
+ return 0;
+
+ return 32 * (1 << (pad_delay - 1));
+}
+
+/**
+ * ieee80211_emlsr_trans_delay_in_us - Fetch the EMLSR Transition
+ * delay in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_trans_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417j—Encoding of the EMLSR
+ * Transition Delay subfield.
+ */
+ u32 trans_delay =
+ u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);
+
+ /* invalid values also just use 0 */
+ if (!trans_delay ||
+ trans_delay > IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US)
+ return 0;
+
+ return 16 * (1 << (trans_delay - 1));
+}
+
+/**
+ * ieee80211_eml_trans_timeout_in_us - Fetch the EMLSR Transition
+ * timeout value in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition timeout (in microseconds) encoded in
+ * the EML Capabilities field
+ */
+static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417m—Encoding of the
+ * Transition Timeout subfield.
+ */
+ u8 timeout = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ /* invalid values also just use 0 */
+ if (!timeout || timeout > IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU)
+ return 0;
+
+ return 128 * (1 << (timeout - 1));
+}
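Together, the three helpers above decode all EML timing subfields from one capabilities value; a small sketch (the function name is illustrative):

static void emlsr_timings(u16 eml_cap)
{
	/* e.g. a padding delay subfield of 4 yields 32 * 2^3 = 256 us */
	pr_debug("EMLSR pad %u us, transition %u us, timeout %u us\n",
		 ieee80211_emlsr_pad_delay_in_us(eml_cap),
		 ieee80211_emlsr_trans_delay_in_us(eml_cap),
		 ieee80211_eml_trans_timeout_in_us(eml_cap));
}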
+
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
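A minimal iteration sketch (assuming struct element and pr_debug from the core kernel headers; the wrapper name is illustrative):

static void walk_mle(const u8 *data, size_t len)
{
	const struct element *elem;

	/* the macro only iterates if the multi-link element is size-valid */
	for_each_mle_subelement(elem, data, len)
		pr_debug("MLE sub-element %u, %u bytes\n",
			 elem->id, elem->datalen);
}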
+
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee80211-he.h b/include/linux/ieee80211-he.h
new file mode 100644
index 000000000000..a08c446fbb04
--- /dev/null
+++ b/include/linux/ieee80211-he.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HE definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HE_H
+#define LINUX_IEEE80211_HE_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
+
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
+ *
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
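Each 16-bit map packs eight 2-bit subfields, one per spatial stream; a small accessor sketch (the helper name is illustrative):

static u8 he_mcs_for_nss(__le16 mcs_map, int nss)	/* nss: 1..8 */
{
	/* yields an enum ieee80211_he_mcs_support value for that stream */
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
}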
+
+/**
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ * BSSID Indicator, and 6 GHz Operation Information
+ *
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ * Bitmap, and SRG Partial BSSID Bitmap
+ *
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
+ */
+struct ieee80211_he_spr {
+ u8 he_sr_control;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
+ *
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
+ *
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
+
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
+
+/* Link adaptation is split between bytes HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
+ * is set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = Reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = Both solicited and unsolicited feedback.
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between byte #4 and #5
+ * The value is a combination of B39,B40,B41
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
+#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
+#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
+#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
+
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * STA, where in the AP case it indicates support for Rx and in the non-AP STA
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimum allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimum allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
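For example, with the NSS subfield equal to 1 (two spatial streams) and two RU index bitmask bits set, n = 2 * (1 + 1) = 4 (RU, NSS) combinations, each carrying a 6-bit PPET pair, so the field occupies DIV_ROUND_UP(4 * 6 + 7, 8) = 4 octets.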
+
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
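A sketch of how a parser might compute the total validated length (the helper name and the -EINVAL convention are illustrative):

static int he_capa_len(const u8 *data, u8 len)
{
	const struct ieee80211_he_cap_elem *he = (const void *)data;
	u8 n = sizeof(*he);

	if (!ieee80211_he_capa_size_ok(data, len))
		return -EINVAL;

	/* fixed fields, then MCS/NSS, then the optional PPE thresholds */
	n += ieee80211_he_mcs_nss_size(he);
	if (he->phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
		n += ieee80211_he_ppe_size(data[n], he->phy_cap_info);

	return n;
}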
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4
+#define IEEE80211_6GHZ_CTRL_REG_AP_ROLE_NOT_RELEVANT 7
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8
+
+/**
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * @primary: primary channel
+ * @control: control flags
+ * @ccfs0: channel center frequency segment 0
+ * @ccfs1: channel center frequency segment 1
+ * @minrate: minimum rate (in 1 Mbps units)
+ */
+struct ieee80211_he_6ghz_oper {
+ u8 primary;
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 minrate;
+} __packed;
+
+/**
+ * enum ieee80211_reg_conn_bits - Regulatory connectivity field bits
+ *
+ * This enumeration defines the bit flags of the Regulatory connectivity
+ * field.
+ *
+ * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid.
+ * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit.
+ * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid.
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit.
+ */
+enum ieee80211_reg_conn_bits {
+ IEEE80211_REG_CONN_LPI_VALID = BIT(0),
+ IEEE80211_REG_CONN_LPI_VALUE = BIT(1),
+ IEEE80211_REG_CONN_SP_VALID = BIT(2),
+ IEEE80211_REG_CONN_SP_VALUE = BIT(3),
+};
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/* category type of transmit power envelope element */
+enum ieee80211_tx_power_category_6ghz {
+ IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0,
+ IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1,
+};
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP, the
+ * value 127 encodes 63.5 dBm (the field is in 0.5 dB steps) and means
+ * no constraint.
+ */
+#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+ * setting to 127 indicates no PSD limit for the 20 MHz channel.
+ */
+#define IEEE80211_TPE_PSD_NO_LIMIT 127
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @info: Transmit Power Information field
+ * @variable: Maximum Transmit Power field
+ *
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161
+ */
+struct ieee80211_tx_pwr_env {
+ u8 info;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
+#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF
+
+static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 needed = sizeof(*env);
+ u8 N; /* also called N in the spec */
+
+ if (len < needed)
+ return false;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (category) {
+ case IEEE80211_TPE_CAT_6GHZ_DEFAULT:
+ case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ if (count > 3)
+ return false;
+
+ /* count == 0 encodes 1 value for 20 MHz, etc. */
+ needed += count + 1;
+
+ if (len < needed)
+ return false;
+
+ /* there can be extension fields not accounted for in 'count' */
+
+ return true;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (count > 4)
+ return false;
+
+ N = count ? 1 << (count - 1) : 1;
+ needed += N;
+
+ if (len < needed)
+ return false;
+
+ if (len > needed) {
+ u8 K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ needed += 1 + K;
+ if (len < needed)
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
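A hedged caller sketch (the predicate name is illustrative): only classify the element after it passes validation.

static bool tpe_is_psd(const u8 *data, u8 len)
{
	const struct ieee80211_tx_pwr_env *env = (const void *)data;
	u8 interpret;

	if (!ieee80211_valid_tpe_element(data, len))
		return false;

	interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
	return interpret == IEEE80211_TPE_LOCAL_EIRP_PSD ||
	       interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD;
}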
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, the caller must have
+ * validated this.
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ oper_len++;
+ if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
+ oper_len += sizeof(struct ieee80211_he_6ghz_oper);
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
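A minimal bounds-check sketch for an extension element carrying this IE (assuming struct element from the core header; the helper name is illustrative):

static bool he_oper_fits(const struct element *elem)
{
	/* elem->data[0] is the ext ID; the returned size accounts for it */
	return elem->datalen >= 1 + sizeof(struct ieee80211_he_operation) &&
	       elem->datalen >= ieee80211_he_oper_size(elem->data + 1);
}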
+
+/**
+ * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
+ * @he_oper: HE operation element (must be pre-validated for size)
+ * but may be %NULL
+ *
+ * Return: a pointer to the 6 GHz operation field, or %NULL
+ */
+static inline const struct ieee80211_he_6ghz_oper *
+ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+{
+ const u8 *ret;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
+ ret = (const void *)&he_oper->optional;
+
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+ return NULL;
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ ret += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ ret++;
+
+ return (const void *)ret;
+}
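A small usage sketch (the helper name is illustrative), assuming the element was already size-validated:

static u8 he_6ghz_primary_chan(const struct ieee80211_he_operation *he_oper)
{
	const struct ieee80211_he_6ghz_oper *oper =
		ieee80211_he_6ghz_oper(he_oper);

	/* NULL when the 6 GHz Operation Information field is absent */
	return oper ? oper->primary : 0;
}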
+
+/* HE Spatial Reuse defines */
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
+
+/*
+ * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_spr_ie has at least
+ * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
+ * this
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_spr_size(const u8 *he_spr_ie)
+{
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
+ u8 spr_len = sizeof(struct ieee80211_he_spr);
+ u8 he_spr_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_spr_ie)
+ return 0;
+
+ /* Calc required length */
+ he_spr_params = he_spr->he_sr_control;
+ if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ spr_len++;
+ if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ spr_len += 18;
+
+ /* Add the first byte (extension ID) to the total length */
+ spr_len++;
+
+ return spr_len;
+}
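The same pattern applies here; a hedged sketch (the helper name is illustrative, struct element assumed from the core header):

static bool spr_fits(const struct element *elem)
{
	/* elem->data[0] is the ext ID; the returned size accounts for it */
	return elem->datalen >= 1 + sizeof(struct ieee80211_he_spr) &&
	       elem->datalen >= ieee80211_he_spr_size(elem->data + 1);
}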
+
+struct ieee80211_he_6ghz_capa {
+ /* uses IEEE80211_HE_6GHZ_CAP_* below */
+ __le16 capa;
+} __packed;
+
+/* HE 6 GHz band capabilities */
+/* uses enum ieee80211_min_mpdu_spacing values */
+#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
+/* uses enum ieee80211_vht_max_ampdu_length_exp values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
+/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
+/* WLAN_HT_CAP_SM_PS_* values */
+#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
+#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
+#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
+#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
+
+#endif /* LINUX_IEEE80211_HE_H */
diff --git a/include/linux/ieee80211-ht.h b/include/linux/ieee80211-ht.h
new file mode 100644
index 000000000000..21bbf470540f
--- /dev/null
+++ b/include/linux/ieee80211-ht.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HT_H
+#define LINUX_IEEE80211_HT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
+
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_HT_CTL_LEN 4
+
+enum ieee80211_ht_chanwidth_values {
+ IEEE80211_HT_CHANWIDTH_20MHZ = 0,
+ IEEE80211_HT_CHANWIDTH_ANY = 1,
+};
+
+/**
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
+ *
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
+ */
+struct ieee80211_bar {
+ __le16 frame_control;
+ __le16 duration;
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
+ __le16 control;
+ __le16 start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
+
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+/**
+ * struct ieee80211_mcs_info - Supported MCS Set field
+ * @rx_mask: RX mask
+ * @rx_highest: highest supported RX rate. If non-zero, it represents
+ * the highest supported RX data rate in units of 1 Mbps.
+ * If this field is 0, it should not be used to determine
+ * the highest supported RX data rate.
+ * @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
+ */
+struct ieee80211_mcs_info {
+ u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ __le16 rx_highest;
+ u8 tx_params;
+ u8 reserved[3];
+} __packed;
+
+/* 802.11n HT capability MCS set */
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
+/* value 0 == 1 stream etc */
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
+#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
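For example, IEEE80211_HT_MCS_CHAINS(15) = 1 + (15 >> 3) = 2, since MCS 8-15 are the two-stream rates, while MCS 32 (the 40 MHz duplicate format) maps to a single chain.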
+
+/*
+ * 802.11n D5.0 20.3.5 / 20.6 says:
+ * - indices 0 to 7 and 32 are single spatial stream
+ * - 8 to 31 are multiple spatial streams using equal modulation
+ * [8..15 for two streams, 16..23 for three and 24..31 for four]
+ * - remainder are multiple spatial streams using unequal modulation
+ */
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
+ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
+
+/**
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
+ *
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
+ */
+struct ieee80211_ht_cap {
+ __le16 cap_info;
+ u8 ampdu_params_info;
+
+ /* 16 bytes MCS information */
+ struct ieee80211_mcs_info mcs;
+
+ __le16 extended_ht_cap_info;
+ __le32 tx_BF_cap_info;
+ u8 antenna_selection_info;
+} __packed;
+
+/* 802.11n HT capabilities masks (for cap_info) */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_RESERVED 0x2000
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
+
+/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
+#define IEEE80211_HT_EXT_CAP_PCO 0x0001
+#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
+#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
+#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
+#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
+#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
+#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
+
+/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
+#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
+
+/*
+ * Maximum length of an A-MPDU that the STA can receive in high-throughput (HT).
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_max_ampdu_length_exp {
+ IEEE80211_HT_MAX_AMPDU_8K = 0,
+ IEEE80211_HT_MAX_AMPDU_16K = 1,
+ IEEE80211_HT_MAX_AMPDU_32K = 2,
+ IEEE80211_HT_MAX_AMPDU_64K = 3
+};
+
+#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
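A small sketch of the formula above (the helper name is illustrative):

static u32 ht_max_ampdu_len(enum ieee80211_max_ampdu_length_exp exp)
{
	/* e.g. IEEE80211_HT_MAX_AMPDU_64K: (1 << 16) - 1 = 65535 octets */
	return (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + exp)) - 1;
}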
+
+/* Minimum MPDU start spacing */
+enum ieee80211_min_mpdu_spacing {
+ IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
+ IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
+ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
+ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
+ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
+ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
+ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
+ IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
+};
+
+/**
+ * struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
+ *
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
+ */
+struct ieee80211_ht_operation {
+ u8 primary_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+} __packed;
+
+/* for ht_param */
+#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
+#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
+
+/* for operation_mode */
+#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
+#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
+
+/* for stbc_param */
+#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
+#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
+#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
+#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
+#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
+
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to HT, the buffer size varies from 8 to 64 frames.
+ * HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1K frames.
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
+
+
+/* Spatial Multiplexing Power Save Modes (for capability) */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+/* for SM power control field lower two bits */
+#define WLAN_HT_SMPS_CONTROL_DISABLED 0
+#define WLAN_HT_SMPS_CONTROL_STATIC 1
+#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+
+/* HT action codes */
+enum ieee80211_ht_actioncode {
+ WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
+ WLAN_HT_ACTION_SMPS = 1,
+ WLAN_HT_ACTION_PSMP = 2,
+ WLAN_HT_ACTION_PCO_PHASE = 3,
+ WLAN_HT_ACTION_CSI = 4,
+ WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
+ WLAN_HT_ACTION_COMPRESSED_BF = 6,
+ WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
+};
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+};
+
+#endif /* LINUX_IEEE80211_HT_H */
diff --git a/include/linux/ieee80211-mesh.h b/include/linux/ieee80211-mesh.h
new file mode 100644
index 000000000000..4b829bcb38b6
--- /dev/null
+++ b/include/linux/ieee80211-mesh.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 mesh definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_MESH_H
+#define LINUX_IEEE80211_MESH_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MESH_ID_LEN 32
+
+struct ieee80211s_hdr {
+ u8 flags;
+ u8 ttl;
+ __le32 seqnum;
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
+} __packed __aligned(2);
+
+/* Mesh flags */
+#define MESH_FLAGS_AE_A4 0x1
+#define MESH_FLAGS_AE_A5_A6 0x2
+#define MESH_FLAGS_AE 0x3
+#define MESH_FLAGS_PS_DEEP 0x4
+
+/**
+ * enum ieee80211_preq_flags - mesh PREQ element flags
+ *
+ * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
+ */
+enum ieee80211_preq_flags {
+ IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
+};
+
+/**
+ * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
+ *
+ * @IEEE80211_PREQ_TO_FLAG: target only subfield
+ * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
+ */
+enum ieee80211_preq_target_flags {
+ IEEE80211_PREQ_TO_FLAG = 1<<0,
+ IEEE80211_PREQ_USN_FLAG = 1<<2,
+};
+
+/**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
+ *
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
+ */
+struct ieee80211_mesh_chansw_params_ie {
+ u8 mesh_ttl;
+ u8 mesh_flags;
+ __le16 mesh_reason;
+ __le16 mesh_pre_value;
+} __packed;
+
+/**
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
+ *
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
+ */
+struct ieee80211_meshconf_ie {
+ u8 meshconf_psel;
+ u8 meshconf_pmetric;
+ u8 meshconf_congest;
+ u8 meshconf_synch;
+ u8 meshconf_auth;
+ u8 meshconf_form;
+ u8 meshconf_cap;
+} __packed;
+
+/**
+ * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
+ *
+ * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
+ * is ongoing
+ * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
+ * neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
+ */
+enum mesh_config_capab_flags {
+ IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
+ IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
+ IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
+ IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
+};
+
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
+/*
+ * Mesh Channel Switch Parameters element's flag indicator
+ */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
+ *
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
+ */
+struct ieee80211_rann_ie {
+ u8 rann_flags;
+ u8 rann_hopcount;
+ u8 rann_ttl;
+ u8 rann_addr[ETH_ALEN];
+ __le32 rann_seq;
+ __le32 rann_interval;
+ __le32 rann_metric;
+} __packed;
+
+enum ieee80211_rann_flags {
+ RANN_FLAG_IS_GATE = 1 << 0,
+};
+
+/* Mesh action codes */
+enum ieee80211_mesh_actioncode {
+ WLAN_MESH_ACTION_LINK_METRIC_REPORT,
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
+ WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
+ WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
+ WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
+ WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
+ WLAN_MESH_ACTION_MCCA_TEARDOWN,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+};
+
+/**
+ * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
+ *
+ * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
+ * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
+ * that will be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_sync_method {
+ IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
+ IEEE80211_SYNC_METHOD_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
+ *
+ * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
+ * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
+ * be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_protocol {
+ IEEE80211_PATH_PROTOCOL_HWMP = 1,
+ IEEE80211_PATH_PROTOCOL_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
+ *
+ * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
+ * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
+ * specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_metric {
+ IEEE80211_PATH_METRIC_AIRTIME = 1,
+ IEEE80211_PATH_METRIC_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
+ *
+ * These attributes are used by dot11MeshHWMPRootMode to set the root mesh
+ * STA mode
+ *
+ * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
+ * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA, as it also is
+ * for any greater value
+ * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 0
+ * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 1
+ * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports
+ * the proactive RANN
+ */
+enum ieee80211_root_mode_identifier {
+ IEEE80211_ROOTMODE_NO_ROOT = 0,
+ IEEE80211_ROOTMODE_ROOT = 1,
+ IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
+ IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
+ IEEE80211_PROACTIVE_RANN = 4,
+};
+
+#endif /* LINUX_IEEE80211_MESH_H */
diff --git a/include/linux/ieee80211-nan.h b/include/linux/ieee80211-nan.h
new file mode 100644
index 000000000000..d07959bf8a90
--- /dev/null
+++ b/include/linux/ieee80211-nan.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA NAN definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_NAN_H
+#define LINUX_IEEE80211_NAN_H
+
+/* NAN operation mode, as defined in Wi-Fi Aware (TM) specification Table 81 */
+#define NAN_OP_MODE_PHY_MODE_VHT 0x01
+#define NAN_OP_MODE_PHY_MODE_HE 0x10
+#define NAN_OP_MODE_PHY_MODE_MASK 0x11
+#define NAN_OP_MODE_80P80MHZ 0x02
+#define NAN_OP_MODE_160MHZ 0x04
+#define NAN_OP_MODE_PNDL_SUPPRTED 0x08
+
+/* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification
+ * Table 79
+ */
+#define NAN_DEV_CAPA_DFS_OWNER 0x01
+#define NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED 0x02
+#define NAN_DEV_CAPA_SIM_NDP_RX_SUPPORTED 0x04
+#define NAN_DEV_CAPA_NDPE_SUPPORTED 0x08
+#define NAN_DEV_CAPA_S3_SUPPORTED 0x10
+
+#endif /* LINUX_IEEE80211_NAN_H */
diff --git a/include/linux/ieee80211-p2p.h b/include/linux/ieee80211-p2p.h
new file mode 100644
index 000000000000..180891c11f08
--- /dev/null
+++ b/include/linux/ieee80211-p2p.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA P2P definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_P2P_H
+#define LINUX_IEEE80211_P2P_H
+
+#include <linux/types.h>
+/*
+ * Peer-to-Peer IE attribute related definitions.
+ */
+/*
+ * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
+ */
+enum ieee80211_p2p_attr_id {
+ IEEE80211_P2P_ATTR_STATUS = 0,
+ IEEE80211_P2P_ATTR_MINOR_REASON,
+ IEEE80211_P2P_ATTR_CAPABILITY,
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ IEEE80211_P2P_ATTR_GO_INTENT,
+ IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ IEEE80211_P2P_ATTR_GROUP_BSSID,
+ IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
+ IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
+ IEEE80211_P2P_ATTR_MANAGABILITY,
+ IEEE80211_P2P_ATTR_CHANNEL_LIST,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ IEEE80211_P2P_ATTR_GROUP_INFO,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ IEEE80211_P2P_ATTR_INTERFACE,
+ IEEE80211_P2P_ATTR_OPER_CHANNEL,
+ IEEE80211_P2P_ATTR_INVITE_FLAGS,
+ /* 19 - 220: Reserved */
+ IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
+
+ IEEE80211_P2P_ATTR_MAX
+};
+
+/* Notice of Absence attribute - described in P2P spec 4.1.14 */
+/* Typical max value used here */
+#define IEEE80211_P2P_NOA_DESC_MAX 4
+
+struct ieee80211_p2p_noa_desc {
+ u8 count;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct ieee80211_p2p_noa_attr {
+ u8 index;
+ u8 oppps_ctwindow;
+ struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
+} __packed;
+
+#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
+#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
+
+#endif /* LINUX_IEEE80211_P2P_H */
diff --git a/include/linux/ieee80211-s1g.h b/include/linux/ieee80211-s1g.h
new file mode 100644
index 000000000000..5b9ed2dcc00e
--- /dev/null
+++ b/include/linux/ieee80211-s1g.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 S1G definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_S1G_H
+#define LINUX_IEEE80211_S1G_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* bits unique to S1G beacon frame control */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+#define IEEE80211_S1G_BCN_CSSID 0x200
+#define IEEE80211_S1G_BCN_ANO 0x400
+
+/* see 802.11ah-2016 9.9 NDP CMAC frames */
+#define IEEE80211_S1G_1MHZ_NDP_BITS 25
+#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
+#define IEEE80211_S1G_2MHZ_NDP_BITS 37
+#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
+
+/**
+ * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an S1G beacon
+ */
+static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
+ IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+}
+
+/**
+ * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * next TBTT field
+ */
+static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
+}
+
+/**
+ * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * ANO field
+ */
+static inline bool ieee80211_s1g_has_ano(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO));
+}
+
+/**
+ * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * compressed SSID field
+ */
+static inline bool ieee80211_s1g_has_cssid(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
+}
+
+/**
+ * enum ieee80211_s1g_chanwidth - S1G channel widths
+ * These are defined in IEEE Std 802.11ah-2016 Table 10-20
+ * as BSS Channel Width
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
+/**
+ * enum ieee80211_s1g_pri_chanwidth - S1G primary channel widths
+ * described in IEEE Std 802.11-2024 Table 10-39.
+ *
+ * @IEEE80211_S1G_PRI_CHANWIDTH_2MHZ: 2MHz primary channel
+ * @IEEE80211_S1G_PRI_CHANWIDTH_1MHZ: 1MHz primary channel
+ */
+enum ieee80211_s1g_pri_chanwidth {
+ IEEE80211_S1G_PRI_CHANWIDTH_2MHZ = 0,
+ IEEE80211_S1G_PRI_CHANWIDTH_1MHZ = 1,
+};
+
+/**
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
+ *
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
+ */
+struct ieee80211_s1g_bcn_compat_ie {
+ __le16 compat_info;
+ __le16 beacon_int;
+ __le32 tsf_completion;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
+ *
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
+ */
+struct ieee80211_s1g_oper_ie {
+ u8 ch_width;
+ u8 oper_class;
+ u8 primary_ch;
+ u8 oper_ch;
+ __le16 basic_mcs_nss;
+} __packed;
+
+/**
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
+ *
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
+ */
+struct ieee80211_aid_response_ie {
+ __le16 aid;
+ u8 switch_count;
+ __le16 response_int;
+} __packed;
+
+struct ieee80211_s1g_cap {
+ u8 capab_info[10];
+ u8 supp_mcs_nss[5];
+} __packed;
+
+/**
+ * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: total length in bytes of the optional fixed-length fields
+ *
+ * S1G beacons may contain up to three optional fixed-length fields that
+ * precede the variable-length elements. Whether these fields are present
+ * is indicated by flags in the frame control field.
+ *
+ * From IEEE 802.11-2024 section 9.3.4.3:
+ * - Next TBTT field may be 0 or 3 bytes
+ * - Short SSID field may be 0 or 4 bytes
+ * - Access Network Options (ANO) field may be 0 or 1 byte
+ */
+static inline size_t
+ieee80211_s1g_optional_len(__le16 fc)
+{
+ size_t len = 0;
+
+ if (ieee80211_s1g_has_next_tbtt(fc))
+ len += 3;
+
+ if (ieee80211_s1g_has_cssid(fc))
+ len += 4;
+
+ if (ieee80211_s1g_has_ano(fc))
+ len += 1;
+
+ return len;
+}
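
Editor's note: a sketch of how a beacon parser might use this helper to find the start of the variable-length elements (assumes linux/ieee80211.h for struct ieee80211_ext; the function name is illustrative):

static inline const u8 *s1g_beacon_elements(const struct ieee80211_ext *ext)
{
	/*
	 * The optional Next TBTT / Compressed SSID / ANO fields sit at
	 * the start of u.s1g_beacon.variable; skip past them to reach
	 * the first element.
	 */
	return ext->u.s1g_beacon.variable +
	       ieee80211_s1g_optional_len(ext->frame_control);
}
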
+
+/* S1G Capabilities Information field */
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
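
Editor's note: the macro above decodes the 2-bit Supported Channel Width subfield into a width in MHz: a field value v yields (1 << v) << 1, i.e. 2, 4, 8 or 16 MHz for v = 0..3. A hedged sketch (assumes linux/bitfield.h for FIELD_GET):

static inline u8 s1g_max_width_mhz(const u8 cap[IEEE80211_S1G_CAPABILITY_LEN])
{
	/* e.g. S1G_SUPP_CH_WIDTH_8 (2) decodes to (1 << 2) << 1 == 8 MHz */
	return S1G_SUPP_CH_WIDTH_MAX(cap);
}
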
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+#define S1G_OPER_CH_PRIMARY_LOCATION BIT(5)
+
+#define S1G_2M_PRIMARY_LOCATION_LOWER 0
+#define S1G_2M_PRIMARY_LOCATION_UPPER 1
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
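
Editor's note: these masks split the 16-bit S1G listen interval into a Unified Scaling Factor and an Unscaled Interval. A minimal packing sketch, assuming linux/bitfield.h for FIELD_PREP (the helper name is illustrative):

static inline u16 s1g_listen_interval(u16 usf, u16 ui)
{
	/*
	 * Callers must keep usf <= IEEE80211_MAX_USF and
	 * ui <= IEEE80211_MAX_UI.
	 */
	return FIELD_PREP(LISTEN_INT_USF, usf) | FIELD_PREP(LISTEN_INT_UI, ui);
}
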
+
+/* S1G TIM PVB encoding modes */
+#define IEEE80211_S1G_TIM_ENC_MODE_BLOCK 0
+#define IEEE80211_S1G_TIM_ENC_MODE_SINGLE 1
+#define IEEE80211_S1G_TIM_ENC_MODE_OLB 2
+
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+ * @fc: frame control bytes in little-endian byteorder
+ * @variable: pointer to the beacon frame elements
+ * @variable_len: length of the frame elements
+ * Return: whether or not the frame is an S1G short beacon. As per
+ * IEEE Std 802.11-2024 11.1.3.10.1, the S1G Beacon Compatibility element shall
+ * always be present as the first element in beacon frames generated at a
+ * TBTT (Target Beacon Transmission Time), so any frame not containing
+ * this element must have been generated at a TSBTT (Target Short Beacon
+ * Transmission Time) that is not a TBTT. Additionally, short beacons are
+ * prohibited from containing the S1G Beacon Compatibility element as per
+ * IEEE Std 802.11-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with
+ * either no elements or the first element is not the beacon compatibility
+ * element, we have a short beacon.
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable,
+ size_t variable_len)
+{
+ if (!ieee80211_is_s1g_beacon(fc))
+ return false;
+
+ /*
+	 * If the frame does not contain at least one element (i.e. fewer
+	 * than the two octets of an element header, which is perfectly
+	 * valid in a short beacon), we have a short beacon.
+ */
+ if (variable_len < 2)
+ return true;
+
+ return variable[0] != WLAN_EID_S1G_BCN_COMPAT;
+}
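
Editor's note: a hedged usage sketch from a receive path (frame layout as in struct ieee80211_ext from linux/ieee80211.h; names and bounds handling are illustrative):

static bool rx_is_s1g_short_beacon(const struct ieee80211_ext *ext,
				   size_t frame_len)
{
	size_t fixed = offsetof(struct ieee80211_ext, u.s1g_beacon.variable) +
		       ieee80211_s1g_optional_len(ext->frame_control);

	if (frame_len < fixed)
		return false;

	/* pass only the element buffer that follows the fixed fields */
	return ieee80211_is_s1g_short_beacon(ext->frame_control,
					     (const u8 *)ext + fixed,
					     frame_len - fixed);
}
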
+
+struct s1g_tim_aid {
+ u16 aid;
+ u8 target_blk; /* Target block index */
+ u8 target_subblk; /* Target subblock index */
+ u8 target_subblk_bit; /* Target subblock bit */
+};
+
+struct s1g_tim_enc_block {
+ u8 enc_mode;
+ bool inverse;
+ const u8 *ptr;
+ u8 len;
+
+ /*
+	 * For an OLB encoded block that spans multiple blocks, this
+	 * is the block offset at which the span described by that
+	 * encoded block starts.
+ */
+ u8 olb_blk_offset;
+};
+
+/*
+ * Helper routines to quickly extract the length of an encoded block. Validation
+ * is also performed to ensure the length extracted lies within the TIM.
+ */
+
+static inline int ieee80211_s1g_len_bitmap(const u8 *ptr, const u8 *end)
+{
+ u8 blkmap;
+ u8 n_subblks;
+
+ if (ptr >= end)
+ return -EINVAL;
+
+ blkmap = *ptr;
+ n_subblks = hweight8(blkmap);
+
+ if (ptr + 1 + n_subblks > end)
+ return -EINVAL;
+
+ return 1 + n_subblks;
+}
+
+static inline int ieee80211_s1g_len_single(const u8 *ptr, const u8 *end)
+{
+ return (ptr + 1 > end) ? -EINVAL : 1;
+}
+
+static inline int ieee80211_s1g_len_olb(const u8 *ptr, const u8 *end)
+{
+ if (ptr >= end)
+ return -EINVAL;
+
+ return (ptr + 1 + *ptr > end) ? -EINVAL : 1 + *ptr;
+}
+
+/*
+ * Enumerate all encoded blocks until we find the encoded block that describes
+ * our target AID. OLB is a special case, as a single encoded block can
+ * describe a span of multiple blocks.
+ */
+static inline int ieee80211_s1g_find_target_block(struct s1g_tim_enc_block *enc,
+ const struct s1g_tim_aid *aid,
+ const u8 *ptr, const u8 *end)
+{
+ /* need at least block-control octet */
+ while (ptr + 1 <= end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool contains, inverse = ctrl & BIT(2);
+ u8 span, blk_off = ctrl >> 3;
+ int len;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ len = ieee80211_s1g_len_bitmap(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ len = ieee80211_s1g_len_single(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ len = ieee80211_s1g_len_olb(ptr, end);
+ /*
+			 * An OLB encoded block can describe more than one
+			 * block, i.e. a single encoded OLB block can span
+			 * multiple blocks.
+ */
+ if (len > 0) {
+ /* Minus one for the length octet */
+ span = DIV_ROUND_UP(len - 1, 8);
+ /*
+ * Check if our target block lies within the
+ * block span described by this encoded block.
+ */
+ contains = (aid->target_blk >= blk_off) &&
+ (aid->target_blk < blk_off + span);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (len < 0)
+ return len;
+
+ if (contains) {
+ enc->enc_mode = mode;
+ enc->inverse = inverse;
+ enc->ptr = ptr;
+ enc->len = (u8)len;
+ enc->olb_blk_offset = blk_off;
+ return 0;
+ }
+
+ ptr += len;
+ }
+
+ return -ENOENT;
+}
+
+static inline bool ieee80211_s1g_parse_bitmap(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blkmap = *ptr++;
+
+ /*
+	 * If our block bitmap does not contain a set bit that corresponds
+	 * to our AID, it can mean one of two things depending on whether
+	 * the encoding is inverted or not.
+ *
+ * 1. If inverted, it means the entire subblock is present and hence
+ * our AID has been set.
+ * 2. If not inverted, it means our subblock is not present and hence
+ * it is all zero meaning our AID is not set.
+ */
+ if (!(blkmap & BIT(aid->target_subblk)))
+ return enc->inverse;
+
+ /*
+ * Increment ptr by the number of set subblocks that appear before our
+ * target subblock. If our target subblock is 0, do nothing as ptr
+ * already points to our target subblock.
+ */
+ if (aid->target_subblk)
+ ptr += hweight8(blkmap & GENMASK(aid->target_subblk - 1, 0));
+
+ return !!(*ptr & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_single(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ /*
+ * Single AID mode describes, as the name suggests, a single AID
+ * within the block described by the encoded block. The octet
+ * contains the 6 LSBs of the AID described in the block. The other
+	 * 2 bits are reserved. When inverted, every AID described by
+	 * the current block has buffered traffic, except for the AID
+	 * described in the single AID octet.
+ */
+ return ((*enc->ptr & 0x3f) == (aid->aid & 0x3f)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_olb(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blk_len = *ptr++;
+ /*
+ * Given an OLB encoded block that describes multiple blocks,
+ * calculate the offset into the span. Then calculate the
+ * subblock location normally.
+ */
+ u16 span_offset = aid->target_blk - enc->olb_blk_offset;
+ u16 subblk_idx = span_offset * 8 + aid->target_subblk;
+
+ if (subblk_idx >= blk_len)
+ return enc->inverse;
+
+ return !!(ptr[subblk_idx] & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+/*
+ * An S1G PVB has 3 encoding types, each of which can be inverted.
+ * An S1G PVB is constructed from zero or more encoded block subfields. Each
+ * encoded block represents a single "block" of 64 AIDs, and each encoded
+ * block uses one of the 3 encoding types alongside a single bit indicating
+ * whether the bits should be inverted.
+ *
+ * As the standard makes no guarantee about the ordering of encoded blocks,
+ * in the worst case (an AID described by the last encoded block) we must
+ * parse every encoded block.
+ */
+static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ int err;
+ struct s1g_tim_aid target_aid;
+ struct s1g_tim_enc_block enc_blk;
+
+ if (tim_len < 3)
+ return false;
+
+ target_aid.aid = aid;
+ target_aid.target_blk = (aid >> 6) & 0x1f;
+ target_aid.target_subblk = (aid >> 3) & 0x7;
+ target_aid.target_subblk_bit = aid & 0x7;
+
+ /*
+	 * Find our AID's target encoded block and fill &enc_blk with the
+	 * encoded block's information. If no entry is found or an error
+	 * occurs, return false.
+ */
+ err = ieee80211_s1g_find_target_block(&enc_blk, &target_aid,
+ tim->virtual_map,
+ (const u8 *)tim + tim_len + 2);
+ if (err)
+ return false;
+
+ switch (enc_blk.enc_mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ return ieee80211_s1g_parse_bitmap(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ return ieee80211_s1g_parse_single(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ return ieee80211_s1g_parse_olb(&enc_blk, &target_aid);
+ default:
+ return false;
+ }
+}
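
Editor's note: a usage sketch, assuming the caller has already located the TIM element (tim points at the element payload and tim_len is its length octet). For example, AID 693 decomposes into block 10 (693 >> 6), subblock 6 ((693 >> 3) & 0x7) and bit 5 (693 & 0x7):

static inline bool sta_has_buffered_traffic(const struct ieee80211_tim_ie *tim,
					    u8 tim_len, u16 aid)
{
	/* aid is the station's association ID, at most IEEE80211_MAX_AID_S1G */
	return ieee80211_s1g_check_tim(tim, tim_len, aid);
}
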
+
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee80211-vht.h b/include/linux/ieee80211-vht.h
new file mode 100644
index 000000000000..898dfb561fef
--- /dev/null
+++ b/include/linux/ieee80211-vht.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 VHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_VHT_H
+#define LINUX_IEEE80211_VHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
+/**
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
+ * (the NSS value is the value of this field + 1)
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
+ * using a beamforming steering matrix
+ */
+enum ieee80211_vht_opmode_bits {
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
+};
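
Editor's note: the receive NSS is carried as NSS - 1 in bits 4-6 of the operating mode octet; a minimal decode sketch (helper name illustrative):

static inline u8 vht_opmode_rx_nss(u8 opmode)
{
	/* the field stores NSS - 1, so add 1 back */
	return ((opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK) >>
		IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT) + 1;
}
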
+
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
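
Editor's note: worked through the formula, IEEE80211_VHT_MAX_AMPDU_1024K (7) gives 2^(13 + 7) - 1 = 1048575 octets. A sketch, assuming linux/bits.h for BIT():

static inline u32 vht_max_ampdu_len(enum ieee80211_vht_max_ampdu_length_exp exp)
{
	/* length = 2^(13 + exp) - 1 octets */
	return BIT(13 + exp) - 1;
}
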
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ *	The top 3 bits of this field indicate the Maximum NSTS, Total
+ *	(a beamformee capability).
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest TX data rate supported.
+ *	The top 2 bits of this field are reserved; the
+ *	3rd bit from the top indicates VHT Extended NSS BW
+ *	Capability.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
+/**
+ * enum ieee80211_vht_mcs_support - VHT MCS support definitions
+ * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
+ * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the @rx_mcs_map
+ * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_vht_mcs_support {
+ IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
+ IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
+};
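
Editor's note: each stream's 2-bit subfield sits at bit position 2 * (nss - 1) within the map; a hedged decode sketch (helper name illustrative):

static inline enum ieee80211_vht_mcs_support
vht_mcs_support_for_nss(__le16 mcs_map, int nss)
{
	/* nss is 1..8; 0x3 masks the 2-bit subfield */
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
}
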
+
+/**
+ * struct ieee80211_vht_cap - VHT capabilities
+ *
+ * This structure is the "VHT capabilities element" as
+ * described in 802.11ac D3.0 8.4.2.160
+ * @vht_cap_info: VHT capability info
+ * @supp_mcs: VHT MCS supported rates
+ */
+struct ieee80211_vht_cap {
+ __le32 vht_cap_info;
+ struct ieee80211_vht_mcs_info supp_mcs;
+} __packed;
+
+/**
+ * enum ieee80211_vht_chanwidth - VHT channel width
+ * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
+ * determine the channel width (20 or 40 MHz)
+ * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
+ */
+enum ieee80211_vht_chanwidth {
+ IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
+ IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
+ IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
+ IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
+};
+
+/**
+ * struct ieee80211_vht_operation - VHT operation IE
+ *
+ * This structure is the "VHT operation element" as
+ * described in 802.11ac D3.0 8.4.2.161
+ * @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
+ * @center_freq_seg1_idx: center freq segment 1 index
+ * @basic_mcs_set: VHT Basic MCS rate set
+ */
+struct ieee80211_vht_operation {
+ u8 chan_width;
+ u8 center_freq_seg0_idx;
+ u8 center_freq_seg1_idx;
+ __le16 basic_mcs_set;
+} __packed;
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
+ (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
+ (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
+ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ * @max_vht_nss: current maximum NSS as advertised by the STA in
+ * operating mode notification, can be 0 in which case the
+ * capability data will be used to derive this (from MCS support)
+ * Return: The maximum NSS that can be used for the given bandwidth/MCS
+ * combination
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function parses the data.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable,
+ unsigned int max_vht_nss);
+
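Editor's note: a usage sketch (peer capabilities assumed already parsed; the MCS and bandwidth values are illustrative):

static inline int tx_max_nss_160mhz_mcs9(struct ieee80211_vht_cap *peer_cap,
					 bool ext_nss_capable)
{
	/* max_vht_nss == 0: derive the current maximum from the MCS maps */
	return ieee80211_get_vht_max_nss(peer_cap,
					 IEEE80211_VHT_CHANWIDTH_160MHZ, 9,
					 ext_nss_capable, 0);
}
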
+/* VHT action codes */
+enum ieee80211_vht_actioncode {
+ WLAN_VHT_ACTION_COMPRESSED_BF = 0,
+ WLAN_VHT_ACTION_GROUPID_MGMT = 1,
+ WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
+};
+
+#endif /* LINUX_IEEE80211_VHT_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index de2dce743ee2..96439de55f07 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2024 Intel Corporation
+ * Copyright (c) 2018 - 2025 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -20,7 +20,7 @@
#include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
* DS bit usage
@@ -43,6 +43,7 @@
#define IEEE80211_FCTL_VERS 0x0003
#define IEEE80211_FCTL_FTYPE 0x000c
#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TYPE (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)
#define IEEE80211_FCTL_TODS 0x0100
#define IEEE80211_FCTL_FROMDS 0x0200
#define IEEE80211_FCTL_MOREFRAGS 0x0400
@@ -109,15 +110,6 @@
#define IEEE80211_STYPE_DMG_BEACON 0x0000
#define IEEE80211_STYPE_S1G_BEACON 0x0010
-/* bits unique to S1G beacon */
-#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
-
-/* see 802.11ah-2016 9.9 NDP CMAC frames */
-#define IEEE80211_S1G_1MHZ_NDP_BITS 25
-#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
-#define IEEE80211_S1G_2MHZ_NDP_BITS 37
-#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
-
#define IEEE80211_NDP_FTYPE_CTS 0
#define IEEE80211_NDP_FTYPE_CF_END 0
#define IEEE80211_NDP_FTYPE_PS_POLL 1
@@ -153,9 +145,6 @@
#define IEEE80211_ANO_NETTYPE_WILD 15
-/* bits unique to S1G beacon */
-#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
-
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
#define IEEE80211_CTL_EXT_SPR 0x3000
@@ -221,6 +210,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
+
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -234,21 +224,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
-/* Maximal size of an A-MSDU that can be transported in a HT BA session */
-#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
-
-/* Maximal size of an A-MSDU */
-#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
-#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
-
-#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
-#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
-#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
-
#define IEEE80211_MAX_SSID_LEN 32
-#define IEEE80211_MAX_MESH_ID_LEN 32
-
#define IEEE80211_FIRST_TSPEC_TSID 8
#define IEEE80211_NUM_TIDS 16
@@ -299,8 +276,6 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
-#define IEEE80211_HT_CTL_LEN 4
-
/* trigger type within common_info of trigger frame */
#define IEEE80211_TRIGGER_TYPE_MASK 0xf
#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
@@ -373,6 +348,7 @@ struct ieee80211_trigger {
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has to-DS set
*/
static inline bool ieee80211_has_tods(__le16 fc)
{
@@ -382,6 +358,7 @@ static inline bool ieee80211_has_tods(__le16 fc)
/**
* ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has from-DS set
*/
static inline bool ieee80211_has_fromds(__le16 fc)
{
@@ -391,6 +368,7 @@ static inline bool ieee80211_has_fromds(__le16 fc)
/**
* ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not it's a 4-address frame (from-DS and to-DS set)
*/
static inline bool ieee80211_has_a4(__le16 fc)
{
@@ -401,6 +379,7 @@ static inline bool ieee80211_has_a4(__le16 fc)
/**
* ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has more fragments (more frags bit set)
*/
static inline bool ieee80211_has_morefrags(__le16 fc)
{
@@ -410,6 +389,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc)
/**
* ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the retry flag is set
*/
static inline bool ieee80211_has_retry(__le16 fc)
{
@@ -419,6 +399,7 @@ static inline bool ieee80211_has_retry(__le16 fc)
/**
* ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the power management flag is set
*/
static inline bool ieee80211_has_pm(__le16 fc)
{
@@ -428,6 +409,7 @@ static inline bool ieee80211_has_pm(__le16 fc)
/**
* ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the more data flag is set
*/
static inline bool ieee80211_has_moredata(__le16 fc)
{
@@ -437,6 +419,7 @@ static inline bool ieee80211_has_moredata(__le16 fc)
/**
* ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the protected flag is set
*/
static inline bool ieee80211_has_protected(__le16 fc)
{
@@ -446,6 +429,7 @@ static inline bool ieee80211_has_protected(__le16 fc)
/**
* ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the order flag is set
*/
static inline bool ieee80211_has_order(__le16 fc)
{
@@ -455,6 +439,7 @@ static inline bool ieee80211_has_order(__le16 fc)
/**
* ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is management
*/
static inline bool ieee80211_is_mgmt(__le16 fc)
{
@@ -465,6 +450,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc)
/**
* ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is control
*/
static inline bool ieee80211_is_ctl(__le16 fc)
{
@@ -475,6 +461,7 @@ static inline bool ieee80211_is_ctl(__le16 fc)
/**
* ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a data frame
*/
static inline bool ieee80211_is_data(__le16 fc)
{
@@ -485,6 +472,7 @@ static inline bool ieee80211_is_data(__le16 fc)
/**
* ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is extended
*/
static inline bool ieee80211_is_ext(__le16 fc)
{
@@ -496,6 +484,7 @@ static inline bool ieee80211_is_ext(__le16 fc)
/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame
*/
static inline bool ieee80211_is_data_qos(__le16 fc)
{
@@ -510,6 +499,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc)
/**
* ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame that has data
+ * (i.e. is not null data)
*/
static inline bool ieee80211_is_data_present(__le16 fc)
{
@@ -524,6 +515,7 @@ static inline bool ieee80211_is_data_present(__le16 fc)
/**
* ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association request
*/
static inline bool ieee80211_is_assoc_req(__le16 fc)
{
@@ -534,6 +526,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc)
/**
* ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association response
*/
static inline bool ieee80211_is_assoc_resp(__le16 fc)
{
@@ -544,6 +537,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc)
/**
* ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation request
*/
static inline bool ieee80211_is_reassoc_req(__le16 fc)
{
@@ -554,6 +548,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc)
/**
* ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation response
*/
static inline bool ieee80211_is_reassoc_resp(__le16 fc)
{
@@ -564,6 +559,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc)
/**
* ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe request
*/
static inline bool ieee80211_is_probe_req(__le16 fc)
{
@@ -574,6 +570,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc)
/**
* ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe response
*/
static inline bool ieee80211_is_probe_resp(__le16 fc)
{
@@ -584,6 +581,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc)
/**
* ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a (regular, not S1G) beacon
*/
static inline bool ieee80211_is_beacon(__le16 fc)
{
@@ -592,42 +590,9 @@ static inline bool ieee80211_is_beacon(__le16 fc)
}
/**
- * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
- * IEEE80211_STYPE_S1G_BEACON
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_is_s1g_beacon(__le16 fc)
-{
- return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
- IEEE80211_FCTL_STYPE)) ==
- cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
-}
-
-/**
- * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
- * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_next_tbtt_present(__le16 fc)
-{
- return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
- fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
-}
-
-/**
- * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
- * true for S1G beacons when they're short.
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
-{
- return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
-}
-
-/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ATIM frame
*/
static inline bool ieee80211_is_atim(__le16 fc)
{
@@ -638,6 +603,7 @@ static inline bool ieee80211_is_atim(__le16 fc)
/**
* ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a disassociation frame
*/
static inline bool ieee80211_is_disassoc(__le16 fc)
{
@@ -648,6 +614,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc)
/**
* ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an authentication frame
*/
static inline bool ieee80211_is_auth(__le16 fc)
{
@@ -658,6 +625,7 @@ static inline bool ieee80211_is_auth(__le16 fc)
/**
* ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a deauthentication frame
*/
static inline bool ieee80211_is_deauth(__le16 fc)
{
@@ -668,6 +636,7 @@ static inline bool ieee80211_is_deauth(__le16 fc)
/**
* ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an action frame
*/
static inline bool ieee80211_is_action(__le16 fc)
{
@@ -678,6 +647,7 @@ static inline bool ieee80211_is_action(__le16 fc)
/**
* ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK request frame
*/
static inline bool ieee80211_is_back_req(__le16 fc)
{
@@ -688,6 +658,7 @@ static inline bool ieee80211_is_back_req(__le16 fc)
/**
* ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK frame
*/
static inline bool ieee80211_is_back(__le16 fc)
{
@@ -698,6 +669,7 @@ static inline bool ieee80211_is_back(__le16 fc)
/**
* ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a PS-poll frame
*/
static inline bool ieee80211_is_pspoll(__le16 fc)
{
@@ -708,6 +680,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc)
/**
* ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an RTS frame
*/
static inline bool ieee80211_is_rts(__le16 fc)
{
@@ -718,6 +691,7 @@ static inline bool ieee80211_is_rts(__le16 fc)
/**
* ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CTS frame
*/
static inline bool ieee80211_is_cts(__le16 fc)
{
@@ -728,6 +702,7 @@ static inline bool ieee80211_is_cts(__le16 fc)
/**
* ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ACK frame
*/
static inline bool ieee80211_is_ack(__le16 fc)
{
@@ -738,6 +713,7 @@ static inline bool ieee80211_is_ack(__le16 fc)
/**
* ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end frame
*/
static inline bool ieee80211_is_cfend(__le16 fc)
{
@@ -748,6 +724,7 @@ static inline bool ieee80211_is_cfend(__le16 fc)
/**
* ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end-ack frame
*/
static inline bool ieee80211_is_cfendack(__le16 fc)
{
@@ -758,6 +735,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc)
/**
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc frame
*/
static inline bool ieee80211_is_nullfunc(__le16 fc)
{
@@ -768,6 +746,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc)
/**
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS nullfunc frame
*/
static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
{
@@ -778,6 +757,7 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
/**
* ieee80211_is_trigger - check if frame is trigger frame
* @fc: frame control field in little-endian byteorder
+ * Return: whether or not the frame is a trigger frame
*/
static inline bool ieee80211_is_trigger(__le16 fc)
{
@@ -788,6 +768,7 @@ static inline bool ieee80211_is_trigger(__le16 fc)
/**
* ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc or QoS nullfunc frame
*/
static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
@@ -797,6 +778,8 @@ static inline bool ieee80211_is_any_nullfunc(__le16 fc)
/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ * Return: whether or not the frame is the first fragment (also true if
+ * it's not fragmented at all)
*/
static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
{
@@ -806,6 +789,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
/**
* ieee80211_is_frag - check if a frame is a fragment
* @hdr: 802.11 header of the frame
+ * Return: whether or not the frame is a fragment
*/
static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
{
@@ -818,40 +802,6 @@ static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
}
-struct ieee80211s_hdr {
- u8 flags;
- u8 ttl;
- __le32 seqnum;
- u8 eaddr1[ETH_ALEN];
- u8 eaddr2[ETH_ALEN];
-} __packed __aligned(2);
-
-/* Mesh flags */
-#define MESH_FLAGS_AE_A4 0x1
-#define MESH_FLAGS_AE_A5_A6 0x2
-#define MESH_FLAGS_AE 0x3
-#define MESH_FLAGS_PS_DEEP 0x4
-
-/**
- * enum ieee80211_preq_flags - mesh PREQ element flags
- *
- * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
- */
-enum ieee80211_preq_flags {
- IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
-};
-
-/**
- * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
- *
- * @IEEE80211_PREQ_TO_FLAG: target only subfield
- * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
- */
-enum ieee80211_preq_target_flags {
- IEEE80211_PREQ_TO_FLAG = 1<<0,
- IEEE80211_PREQ_USN_FLAG = 1<<2,
-};
-
/**
* struct ieee80211_quiet_ie - Quiet element
* @count: Quiet Count
@@ -931,24 +881,6 @@ struct ieee80211_sec_chan_offs_ie {
} __packed;
/**
- * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
- * @mesh_ttl: Time To Live
- * @mesh_flags: Flags
- * @mesh_reason: Reason Code
- * @mesh_pre_value: Precedence Value
- *
- * This structure represents the payload of the "Mesh Channel Switch
- * Parameters element" as described in IEEE Std 802.11-2020 section
- * 9.4.2.102.
- */
-struct ieee80211_mesh_chansw_params_ie {
- u8 mesh_ttl;
- u8 mesh_flags;
- __le16 mesh_reason;
- __le16 mesh_pre_value;
-} __packed;
-
-/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
* @new_channel_width: New Channel Width
* @new_center_freq_seg0: New Channel Center Frequency Segment 0
@@ -988,137 +920,6 @@ struct ieee80211_tim_ie {
};
} __packed;
-/**
- * struct ieee80211_meshconf_ie - Mesh Configuration element
- * @meshconf_psel: Active Path Selection Protocol Identifier
- * @meshconf_pmetric: Active Path Selection Metric Identifier
- * @meshconf_congest: Congestion Control Mode Identifier
- * @meshconf_synch: Synchronization Method Identifier
- * @meshconf_auth: Authentication Protocol Identifier
- * @meshconf_form: Mesh Formation Info
- * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
- *
- * This structure represents the payload of the "Mesh Configuration
- * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
- */
-struct ieee80211_meshconf_ie {
- u8 meshconf_psel;
- u8 meshconf_pmetric;
- u8 meshconf_congest;
- u8 meshconf_synch;
- u8 meshconf_auth;
- u8 meshconf_form;
- u8 meshconf_cap;
-} __packed;
-
-/**
- * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
- *
- * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
- * additional mesh peerings with other mesh STAs
- * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
- * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
- * is ongoing
- * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
- * neighbors in deep sleep mode
- *
- * Enumerates the "Mesh Capability" as described in IEEE Std
- * 802.11-2020 section 9.4.2.97.7.
- */
-enum mesh_config_capab_flags {
- IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
- IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
- IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
-};
-
-#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
-
-/*
- * mesh channel switch parameters element's flag indicator
- *
- */
-#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
-#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
-#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
-
-/**
- * struct ieee80211_rann_ie - RANN (root announcement) element
- * @rann_flags: Flags
- * @rann_hopcount: Hop Count
- * @rann_ttl: Element TTL
- * @rann_addr: Root Mesh STA Address
- * @rann_seq: HWMP Sequence Number
- * @rann_interval: Interval
- * @rann_metric: Metric
- *
- * This structure represents the payload of the "RANN element" as
- * described in IEEE Std 802.11-2020 section 9.4.2.111.
- */
-struct ieee80211_rann_ie {
- u8 rann_flags;
- u8 rann_hopcount;
- u8 rann_ttl;
- u8 rann_addr[ETH_ALEN];
- __le32 rann_seq;
- __le32 rann_interval;
- __le32 rann_metric;
-} __packed;
-
-enum ieee80211_rann_flags {
- RANN_FLAG_IS_GATE = 1 << 0,
-};
-
-enum ieee80211_ht_chanwidth_values {
- IEEE80211_HT_CHANWIDTH_20MHZ = 0,
- IEEE80211_HT_CHANWIDTH_ANY = 1,
-};
-
-/**
- * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
- * (the NSS value is the value of this field + 1)
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
- * using a beamforming steering matrix
- */
-enum ieee80211_vht_opmode_bits {
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
- IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
- IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
- IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
- IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
-};
-
-/**
- * enum ieee80211_s1g_chanwidth
- * These are defined in IEEE802.11-2016ah Table 10-20
- * as BSS Channel Width
- *
- * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
- */
-enum ieee80211_s1g_chanwidth {
- IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
- IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
- IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
- IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
- IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
-};
-
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
@@ -1146,61 +947,6 @@ struct ieee80211_addba_ext_ie {
u8 data;
} __packed;
-/**
- * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
- * @compat_info: Compatibility Information
- * @beacon_int: Beacon Interval
- * @tsf_completion: TSF Completion
- *
- * This structure represents the payload of the "S1G Beacon
- * Compatibility element" as described in IEEE Std 802.11-2020 section
- * 9.4.2.196.
- */
-struct ieee80211_s1g_bcn_compat_ie {
- __le16 compat_info;
- __le16 beacon_int;
- __le32 tsf_completion;
-} __packed;
-
-/**
- * struct ieee80211_s1g_oper_ie - S1G Operation element
- * @ch_width: S1G Operation Information Channel Width
- * @oper_class: S1G Operation Information Operating Class
- * @primary_ch: S1G Operation Information Primary Channel Number
- * @oper_ch: S1G Operation Information Channel Center Frequency
- * @basic_mcs_nss: Basic S1G-MCS and NSS Set
- *
- * This structure represents the payload of the "S1G Operation
- * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
- */
-struct ieee80211_s1g_oper_ie {
- u8 ch_width;
- u8 oper_class;
- u8 primary_ch;
- u8 oper_ch;
- __le16 basic_mcs_nss;
-} __packed;
-
-/**
- * struct ieee80211_aid_response_ie - AID Response element
- * @aid: AID/Group AID
- * @switch_count: AID Switch Count
- * @response_int: AID Response Interval
- *
- * This structure represents the payload of the "AID Response element"
- * as described in IEEE Std 802.11-2020 section 9.4.2.194.
- */
-struct ieee80211_aid_response_ie {
- __le16 aid;
- u8 switch_count;
- __le16 response_int;
-} __packed;
-
-struct ieee80211_s1g_cap {
- u8 capab_info[10];
- u8 supp_mcs_nss[5];
-} __packed;
-
struct ieee80211_ext {
__le16 frame_control;
__le16 duration;
@@ -1209,84 +955,11 @@ struct ieee80211_ext {
u8 sa[ETH_ALEN];
__le32 timestamp;
u8 change_seq;
- u8 variable[0];
+ u8 variable[];
} __packed s1g_beacon;
- struct {
- u8 sa[ETH_ALEN];
- __le32 timestamp;
- u8 change_seq;
- u8 next_tbtt[3];
- u8 variable[0];
- } __packed s1g_short_beacon;
} u;
} __packed __aligned(2);
-#define IEEE80211_TWT_CONTROL_NDP BIT(0)
-#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
-#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
-#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
-#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
-
-#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
-#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
-#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
-#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
-#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
-#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
-#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
-#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
-
-enum ieee80211_twt_setup_cmd {
- TWT_SETUP_CMD_REQUEST,
- TWT_SETUP_CMD_SUGGEST,
- TWT_SETUP_CMD_DEMAND,
- TWT_SETUP_CMD_GROUPING,
- TWT_SETUP_CMD_ACCEPT,
- TWT_SETUP_CMD_ALTERNATE,
- TWT_SETUP_CMD_DICTATE,
- TWT_SETUP_CMD_REJECT,
-};
-
-struct ieee80211_twt_params {
- __le16 req_type;
- __le64 twt;
- u8 min_twt_dur;
- __le16 mantissa;
- u8 channel;
-} __packed;
-
-struct ieee80211_twt_setup {
- u8 dialog_token;
- u8 element_id;
- u8 length;
- u8 control;
- u8 params[];
-} __packed;
-
-#define IEEE80211_TTLM_MAX_CNT 2
-#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
-#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
-#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
-#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
-#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
-
-#define IEEE80211_TTLM_DIRECTION_DOWN 0
-#define IEEE80211_TTLM_DIRECTION_UP 1
-#define IEEE80211_TTLM_DIRECTION_BOTH 2
-
-/**
- * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
- *
- * Defined in section 9.4.2.314 in P802.11be_D4
- *
- * @control: the first part of control field
- * @optional: the second part of control field
- */
-struct ieee80211_ttlm_elem {
- u8 control;
- u8 optional[];
-} __packed;
-
/**
 * struct ieee80211_bss_load_elem - BSS Load element
*
@@ -1411,6 +1084,8 @@ struct ieee80211_mgmt {
__le16 status;
__le16 capab;
__le16 timeout;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_resp;
struct{
u8 action_code;
@@ -1441,7 +1116,7 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed tdls_discover_resp;
struct {
u8 action_code;
@@ -1490,12 +1165,27 @@ struct ieee80211_mgmt {
struct {
u8 action_code;
u8 dialog_token;
- u8 status_code;
+ __le16 status_code;
u8 variable[];
} __packed ttlm_res;
struct {
u8 action_code;
} __packed ttlm_tear_down;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ml_reconf_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 count;
+ u8 variable[];
+ } __packed ml_reconf_resp;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed epcs;
} u;
} __packed action;
DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -1506,16 +1196,18 @@ struct ieee80211_mgmt {
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
#define BSS_MEMBERSHIP_SELECTOR_GLK 125
-#define BSS_MEMBERSHIP_SELECTOR_EPS 124
+#define BSS_MEMBERSHIP_SELECTOR_EPD 124
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
+
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
-/* Management MIC information element (IEEE 802.11w) */
+/* Management MIC information element (IEEE 802.11w) for CMAC */
struct ieee80211_mmie {
u8 element_id;
u8 length;
@@ -1533,6 +1225,15 @@ struct ieee80211_mmie_16 {
u8 mic[16];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for all variants */
+struct ieee80211_mmie_var {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[]; /* 8 or 16 bytes */
+} __packed;
+
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1609,1663 +1310,39 @@ struct ieee80211_tdls_data {
struct {
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_req;
struct {
__le16 status_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_resp;
struct {
__le16 status_code;
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed setup_cfm;
struct {
__le16 reason_code;
- u8 variable[0];
+ u8 variable[];
} __packed teardown;
struct {
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed discover_req;
struct {
u8 target_channel;
u8 oper_class;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_req;
struct {
__le16 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_resp;
} u;
} __packed;
-/*
- * Peer-to-Peer IE attribute related definitions.
- */
-/*
- * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
- */
-enum ieee80211_p2p_attr_id {
- IEEE80211_P2P_ATTR_STATUS = 0,
- IEEE80211_P2P_ATTR_MINOR_REASON,
- IEEE80211_P2P_ATTR_CAPABILITY,
- IEEE80211_P2P_ATTR_DEVICE_ID,
- IEEE80211_P2P_ATTR_GO_INTENT,
- IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
- IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
- IEEE80211_P2P_ATTR_GROUP_BSSID,
- IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
- IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
- IEEE80211_P2P_ATTR_MANAGABILITY,
- IEEE80211_P2P_ATTR_CHANNEL_LIST,
- IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
- IEEE80211_P2P_ATTR_DEVICE_INFO,
- IEEE80211_P2P_ATTR_GROUP_INFO,
- IEEE80211_P2P_ATTR_GROUP_ID,
- IEEE80211_P2P_ATTR_INTERFACE,
- IEEE80211_P2P_ATTR_OPER_CHANNEL,
- IEEE80211_P2P_ATTR_INVITE_FLAGS,
- /* 19 - 220: Reserved */
- IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
-
- IEEE80211_P2P_ATTR_MAX
-};
-
-/* Notice of Absence attribute - described in P2P spec 4.1.14 */
-/* Typical max value used here */
-#define IEEE80211_P2P_NOA_DESC_MAX 4
-
-struct ieee80211_p2p_noa_desc {
- u8 count;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct ieee80211_p2p_noa_attr {
- u8 index;
- u8 oppps_ctwindow;
- struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
-} __packed;
-
-#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
-#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
-
-/**
- * struct ieee80211_bar - Block Ack Request frame format
- * @frame_control: Frame Control
- * @duration: Duration
- * @ra: RA
- * @ta: TA
- * @control: BAR Control
- * @start_seq_num: Starting Sequence Number (see Figure 9-37)
- *
- * This structure represents the "BlockAckReq frame format"
- * as described in IEEE Std 802.11-2020 section 9.3.1.7.
-*/
-struct ieee80211_bar {
- __le16 frame_control;
- __le16 duration;
- __u8 ra[ETH_ALEN];
- __u8 ta[ETH_ALEN];
- __le16 control;
- __le16 start_seq_num;
-} __packed;
-
-/* 802.11 BAR control masks */
-#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
-#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
-#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
-#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
-
-#define IEEE80211_HT_MCS_MASK_LEN 10
-
-/**
- * struct ieee80211_mcs_info - Supported MCS Set field
- * @rx_mask: RX mask
- * @rx_highest: highest supported RX rate. If non-zero, this is
- *	the highest supported RX data rate in units of 1 Mbps;
- *	if zero, this field does not indicate the highest
- *	supported RX data rate.
- * @tx_params: TX parameters
- * @reserved: Reserved bits
- *
- * This structure represents the "Supported MCS Set field" as
- * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
- */
-struct ieee80211_mcs_info {
- u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
- __le16 rx_highest;
- u8 tx_params;
- u8 reserved[3];
-} __packed;
-
-/* 802.11n HT capability MCS set */
-#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
-#define IEEE80211_HT_MCS_TX_DEFINED 0x01
-#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
-/* value 0 == 1 stream etc */
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
-#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
-
-#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
-
-/*
- * 802.11n D5.0 20.3.5 / 20.6 says:
- * - indices 0 to 7 and 32 are single spatial stream
- * - 8 to 31 are multiple spatial streams using equal modulation
- * [8..15 for two streams, 16..23 for three and 24..31 for four]
- * - remainder are multiple spatial streams using unequal modulation
- */
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
- (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
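/*
 * Illustrative use of IEEE80211_HT_MCS_CHAINS (a sketch, not part of
 * the patch above): per the index layout described in the comment,
 * MCS 0-7 and 32 need one chain, 8-15 two, 16-23 three, 24-31 four.
 */
static inline void ht_mcs_chains_example(void)
{
	int one = IEEE80211_HT_MCS_CHAINS(5);	/* single stream -> 1 */
	int two = IEEE80211_HT_MCS_CHAINS(12);	/* two streams   -> 2 */
	int four = IEEE80211_HT_MCS_CHAINS(31);	/* four streams  -> 4 */

	(void)one; (void)two; (void)four;
}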
-
-/**
- * struct ieee80211_ht_cap - HT capabilities element
- * @cap_info: HT Capability Information
- * @ampdu_params_info: A-MPDU Parameters
- * @mcs: Supported MCS Set
- * @extended_ht_cap_info: HT Extended Capabilities
- * @tx_BF_cap_info: Transmit Beamforming Capabilities
- * @antenna_selection_info: ASEL Capability
- *
- * This structure represents the payload of the "HT Capabilities
- * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
- */
-struct ieee80211_ht_cap {
- __le16 cap_info;
- u8 ampdu_params_info;
-
- /* 16 bytes MCS information */
- struct ieee80211_mcs_info mcs;
-
- __le16 extended_ht_cap_info;
- __le32 tx_BF_cap_info;
- u8 antenna_selection_info;
-} __packed;
-
-/* 802.11n HT capabilities masks (for cap_info) */
-#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
-#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
-#define IEEE80211_HT_CAP_SM_PS 0x000C
-#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
-#define IEEE80211_HT_CAP_GRN_FLD 0x0010
-#define IEEE80211_HT_CAP_SGI_20 0x0020
-#define IEEE80211_HT_CAP_SGI_40 0x0040
-#define IEEE80211_HT_CAP_TX_STBC 0x0080
-#define IEEE80211_HT_CAP_RX_STBC 0x0300
-#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
-#define IEEE80211_HT_CAP_DELAY_BA 0x0400
-#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
-#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
-#define IEEE80211_HT_CAP_RESERVED 0x2000
-#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
-#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
-
-/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
-#define IEEE80211_HT_EXT_CAP_PCO 0x0001
-#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
-#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
-#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
-#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
-#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
-#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
-
-/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
-#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
-#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
-#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
-
-/*
- * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_max_ampdu_length_exp {
- IEEE80211_HT_MAX_AMPDU_8K = 0,
- IEEE80211_HT_MAX_AMPDU_16K = 1,
- IEEE80211_HT_MAX_AMPDU_32K = 2,
- IEEE80211_HT_MAX_AMPDU_64K = 3
-};
-
-/*
- * Maximum length of AMPDU that the STA can receive in VHT.
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_vht_max_ampdu_length_exp {
- IEEE80211_VHT_MAX_AMPDU_8K = 0,
- IEEE80211_VHT_MAX_AMPDU_16K = 1,
- IEEE80211_VHT_MAX_AMPDU_32K = 2,
- IEEE80211_VHT_MAX_AMPDU_64K = 3,
- IEEE80211_VHT_MAX_AMPDU_128K = 4,
- IEEE80211_VHT_MAX_AMPDU_256K = 5,
- IEEE80211_VHT_MAX_AMPDU_512K = 6,
- IEEE80211_VHT_MAX_AMPDU_1024K = 7
-};
-
-#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
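/*
 * Worked example of the A-MPDU length formula quoted above (a sketch,
 * not part of the patch; the helper name is hypothetical):
 * length = 2^(13 + exp) - 1 octets, so exp 0..3 yields the 8K..64K HT
 * limits and exp 0..7 the 8K..1024K VHT limits.
 */
static inline u32 max_ampdu_len_from_exp(u8 max_ampdu_length_exp)
{
	return (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
		       max_ampdu_length_exp)) - 1;
}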
-
-/* Minimum MPDU start spacing */
-enum ieee80211_min_mpdu_spacing {
- IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
- IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
- IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
- IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
- IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
- IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
- IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
- IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
-};
-
-/**
- * struct ieee80211_ht_operation - HT operation IE
- * @primary_chan: Primary Channel
- * @ht_param: HT Operation Information parameters
- * @operation_mode: HT Operation Information operation mode
- * @stbc_param: HT Operation Information STBC params
- * @basic_set: Basic HT-MCS Set
- *
- * This structure represents the payload of the "HT Operation
- * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
- */
-struct ieee80211_ht_operation {
- u8 primary_chan;
- u8 ht_param;
- __le16 operation_mode;
- __le16 stbc_param;
- u8 basic_set[16];
-} __packed;
-
-/* for ht_param */
-#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
-#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
-#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
-#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
-#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
-#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
-
-/* for operation_mode */
-#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
-#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
-#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
-#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
-#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
-#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
-
-/* for stbc_param */
-#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
-#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
-#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
-#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
-#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
-#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
-
-
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
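/*
 * Example of decoding the ADDBA Block Ack Parameter Set with the masks
 * above (a sketch, not part of the patch; the shift values are derived
 * from the masks, and the helper name is hypothetical).
 */
static inline void addba_params_decode(u16 params, u16 *tid, u16 *buf_size)
{
	*tid = (params & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	*buf_size = (params & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
}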
-
-/*
- * A-MPDU buffer sizes
- * In HT, the buffer size varies from 8 to 64 frames.
- * HE adds the ability to have up to 256 frames.
- * EHT adds the ability to have up to 1K frames.
- */
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
-#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
-#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
-
-
-/* Spatial Multiplexing Power Save Modes (for capability) */
-#define WLAN_HT_CAP_SM_PS_STATIC 0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
-#define WLAN_HT_CAP_SM_PS_INVALID 2
-#define WLAN_HT_CAP_SM_PS_DISABLED 3
-
-/* for SM power control field lower two bits */
-#define WLAN_HT_SMPS_CONTROL_DISABLED 0
-#define WLAN_HT_SMPS_CONTROL_STATIC 1
-#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
-
-/**
- * struct ieee80211_vht_mcs_info - VHT MCS information
- * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
- * @rx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can receive. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest RX data rate supported.
- *	The top 3 bits of this field indicate the Maximum NSTS, Total
- *	(a beamformee capability).
- * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
- * @tx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can transmit. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest TX data rate supported.
- * The top 2 bits of this field are reserved, the
- *	3rd bit from the top indicates VHT Extended NSS BW
- * Capability.
- */
-struct ieee80211_vht_mcs_info {
- __le16 rx_mcs_map;
- __le16 rx_highest;
- __le16 tx_mcs_map;
- __le16 tx_highest;
-} __packed;
-
-/* for rx_highest */
-#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
-#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
-
-/* for tx_highest */
-#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
-
-/**
- * enum ieee80211_vht_mcs_support - VHT MCS support definitions
- * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
- * number of streams
- * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
- * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
- * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
- *
- * These definitions are used in each 2-bit subfield of the @rx_mcs_map
- * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
- * both split into 8 subfields by number of streams. These values indicate
- * which MCSes are supported for the number of streams the value appears
- * for.
- */
-enum ieee80211_vht_mcs_support {
- IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
- IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
- IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
- IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
-};
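/*
 * Example of reading one 2-bit subfield of a VHT MCS map (a sketch, not
 * part of the patch; the helper name is hypothetical): @nss is 1-based,
 * matching the capability fields.
 */
static inline enum ieee80211_vht_mcs_support
vht_mcs_support_for_nss(__le16 mcs_map, int nss)
{
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
}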
-
-/**
- * struct ieee80211_vht_cap - VHT capabilities
- *
- * This structure is the "VHT capabilities element" as
- * described in 802.11ac D3.0 8.4.2.160
- * @vht_cap_info: VHT capability info
- * @supp_mcs: VHT MCS supported rates
- */
-struct ieee80211_vht_cap {
- __le32 vht_cap_info;
- struct ieee80211_vht_mcs_info supp_mcs;
-} __packed;
-
-/**
- * enum ieee80211_vht_chanwidth - VHT channel width
- * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
- * determine the channel width (20 or 40 MHz)
- * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
- */
-enum ieee80211_vht_chanwidth {
- IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
- IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
- IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
- IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
-};
-
-/**
- * struct ieee80211_vht_operation - VHT operation IE
- *
- * This structure is the "VHT operation element" as
- * described in 802.11ac D3.0 8.4.2.161
- * @chan_width: Operating channel width
- * @center_freq_seg0_idx: center freq segment 0 index
- * @center_freq_seg1_idx: center freq segment 1 index
- * @basic_mcs_set: VHT Basic MCS rate set
- */
-struct ieee80211_vht_operation {
- u8 chan_width;
- u8 center_freq_seg0_idx;
- u8 center_freq_seg1_idx;
- __le16 basic_mcs_set;
-} __packed;
-
-/**
- * struct ieee80211_he_cap_elem - HE capabilities element
- * @mac_cap_info: HE MAC Capabilities Information
- * @phy_cap_info: HE PHY Capabilities Information
- *
- * This structure represents the fixed fields of the payload of the
- * "HE capabilities element" as described in IEEE Std 802.11ax-2021
- * sections 9.4.2.248.2 and 9.4.2.248.3.
- */
-struct ieee80211_he_cap_elem {
- u8 mac_cap_info[6];
- u8 phy_cap_info[11];
-} __packed;
-
-#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
-
-/**
- * enum ieee80211_he_mcs_support - HE MCS support definitions
- * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
- * number of streams
- * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
- * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
- * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
- *
- * These definitions are used in each 2-bit subfield of the rx_mcs_*
- * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
- * both split into 8 subfields by number of streams. These values indicate
- * which MCSes are supported for the number of streams the value appears
- * for.
- */
-enum ieee80211_he_mcs_support {
- IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
- IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
- IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
- IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
-};
-
-/**
- * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
- *
- * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
- * described in P802.11ax_D2.0 section 9.4.2.237.4
- *
- * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
- * widths less than 80MHz.
- * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
- * widths less than 80MHz.
- * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
- * width 160MHz.
- * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
- * width 160MHz.
- * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
- * channel width 80p80MHz.
- * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
- * channel width 80p80MHz.
- */
-struct ieee80211_he_mcs_nss_supp {
- __le16 rx_mcs_80;
- __le16 tx_mcs_80;
- __le16 rx_mcs_160;
- __le16 tx_mcs_160;
- __le16 rx_mcs_80p80;
- __le16 tx_mcs_80p80;
-} __packed;
-
-/**
- * struct ieee80211_he_operation - HE Operation element
- * @he_oper_params: HE Operation Parameters + BSS Color Information
- * @he_mcs_nss_set: Basic HE-MCS And NSS Set
- * @optional: Optional fields VHT Operation Information, Max Co-Hosted
- * BSSID Indicator, and 6 GHz Operation Information
- *
- * This structure represents the payload of the "HE Operation
- * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
- */
-struct ieee80211_he_operation {
- __le32 he_oper_params;
- __le16 he_mcs_nss_set;
- u8 optional[];
-} __packed;
-
-/**
- * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
- * @he_sr_control: SR Control
- * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
- * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
- * Bitmap, and SRG Partial BSSID Bitmap
- *
- * This structure represents the payload of the "Spatial Reuse
- * Parameter Set element" as described in IEEE Std 802.11ax-2021
- * section 9.4.2.252.
- */
-struct ieee80211_he_spr {
- u8 he_sr_control;
- u8 optional[];
-} __packed;
-
-/**
- * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
- * @aifsn: ACI/AIFSN
- * @ecw_min_max: ECWmin/ECWmax
- * @mu_edca_timer: MU EDCA Timer
- *
- * This structure represents the "MU AC Parameter Record" as described
- * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
- */
-struct ieee80211_he_mu_edca_param_ac_rec {
- u8 aifsn;
- u8 ecw_min_max;
- u8 mu_edca_timer;
-} __packed;
-
-/**
- * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
- * @mu_qos_info: QoS Info
- * @ac_be: MU AC_BE Parameter Record
- * @ac_bk: MU AC_BK Parameter Record
- * @ac_vi: MU AC_VI Parameter Record
- * @ac_vo: MU AC_VO Parameter Record
- *
- * This structure represents the payload of the "MU EDCA Parameter Set
- * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
- */
-struct ieee80211_mu_edca_param_set {
- u8 mu_qos_info;
- struct ieee80211_he_mu_edca_param_ac_rec ac_be;
- struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
- struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
- struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
-} __packed;
-
-#define IEEE80211_EHT_MCS_NSS_RX 0x0f
-#define IEEE80211_EHT_MCS_NSS_TX 0xf0
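/*
 * Example of splitting one EHT MCS/NSS byte with the masks above (a
 * sketch, not part of the patch; the helper name is hypothetical).
 */
static inline void eht_mcs_nss_split(u8 rx_tx_nss, u8 *rx_nss, u8 *tx_nss)
{
	*rx_nss = rx_tx_nss & IEEE80211_EHT_MCS_NSS_RX;
	*tx_nss = (rx_tx_nss & IEEE80211_EHT_MCS_NSS_TX) >> 4;
}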
-
-/**
- * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
- * supported NSS for per MCS.
- *
- * For each field below, bits 0 - 3 indicate the maximal number of spatial
- * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
- * for Tx.
- *
- * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 0 - 7.
- * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 8 - 9.
- * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 10 - 11.
- * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 12 - 13.
- * @rx_tx_max_nss: array of the previous fields for easier loop access
- */
-struct ieee80211_eht_mcs_nss_supp_20mhz_only {
- union {
- struct {
- u8 rx_tx_mcs7_max_nss;
- u8 rx_tx_mcs9_max_nss;
- u8 rx_tx_mcs11_max_nss;
- u8 rx_tx_mcs13_max_nss;
- };
- u8 rx_tx_max_nss[4];
- };
-};
-
-/**
- * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
- * 20MHz only stations).
- *
- * For each field below, bits 0 - 3 indicate the maximal number of spatial
- * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
- * for Tx.
- *
- * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 0 - 9.
- * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 10 - 11.
- * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
- * supported for reception and the maximum number of spatial streams
- * supported for transmission for MCS 12 - 13.
- * @rx_tx_max_nss: array of the previous fields for easier loop access
- */
-struct ieee80211_eht_mcs_nss_supp_bw {
- union {
- struct {
- u8 rx_tx_mcs9_max_nss;
- u8 rx_tx_mcs11_max_nss;
- u8 rx_tx_mcs13_max_nss;
- };
- u8 rx_tx_max_nss[3];
- };
-};
-
-/**
- * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
- *
- * This structure is the "EHT Capabilities element" fixed fields as
- * described in P802.11be_D2.0 section 9.4.2.313.
- *
- * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
- * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
- */
-struct ieee80211_eht_cap_elem_fixed {
- u8 mac_cap_info[2];
- u8 phy_cap_info[9];
-} __packed;
-
-/**
- * struct ieee80211_eht_cap_elem - EHT capabilities element
- * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
- * @optional: optional parts
- */
-struct ieee80211_eht_cap_elem {
- struct ieee80211_eht_cap_elem_fixed fixed;
-
- /*
- * Followed by:
- * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
- * EHT PPE Thresholds field: variable length.
- */
- u8 optional[];
-} __packed;
-
-#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
-#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
-#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
-#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
-#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
-
-/**
- * struct ieee80211_eht_operation - eht operation element
- *
- * This structure is the "EHT Operation Element" fields as
- * described in P802.11be_D2.0 section 9.4.2.311
- *
- * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
- * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
- * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
- * receive.
- * @optional: optional parts
- */
-struct ieee80211_eht_operation {
- u8 params;
- struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
- u8 optional[];
-} __packed;
-
-/**
- * struct ieee80211_eht_operation_info - eht operation information
- *
- * @control: EHT operation information control.
- * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
- * EHT BSS.
- * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
- * @optional: optional parts
- */
-struct ieee80211_eht_operation_info {
- u8 control;
- u8 ccfs0;
- u8 ccfs1;
- u8 optional[];
-} __packed;
-
-/* 802.11ac VHT Capabilities */
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
-#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
-#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
-#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
-#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
-#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
-#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
-#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
-#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
-#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
-#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
-#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
-#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
-#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
- (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
- (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
-#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
-#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
-#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
-#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
- (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
-#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
-#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
-#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
-#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
-
-/**
- * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
- * @cap: VHT capabilities of the peer
- * @bw: bandwidth to use
- * @mcs: MCS index to use
- * @ext_nss_bw_capable: indicates whether or not the local transmitter
- * (rate scaling algorithm) can deal with the new logic
- * (dot11VHTExtendedNSSBWCapable)
- * @max_vht_nss: current maximum NSS as advertised by the STA in
- * operating mode notification, can be 0 in which case the
- * capability data will be used to derive this (from MCS support)
- *
- * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
- * vary for a given BW/MCS. This function parses the data.
- *
- * Note: This function is exported by cfg80211.
- */
-int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
- enum ieee80211_vht_chanwidth bw,
- int mcs, bool ext_nss_bw_capable,
- unsigned int max_vht_nss);
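/*
 * Hypothetical call (a sketch, not part of the patch): query the peer's
 * maximum NSS for MCS 9 at 160 MHz, passing max_vht_nss == 0 so the
 * value is derived from the capability data.
 */
static inline int vht_max_nss_example(struct ieee80211_vht_cap *cap)
{
	return ieee80211_get_vht_max_nss(cap, IEEE80211_VHT_CHANWIDTH_160MHZ,
					 9, true, 0);
}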
-
-/**
- * enum ieee80211_ap_reg_power - regulatory power for an Access Point
- *
- * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
- * @IEEE80211_REG_LPI_AP: Indoor Access Point
- * @IEEE80211_REG_SP_AP: Standard power Access Point
- * @IEEE80211_REG_VLP_AP: Very low power Access Point
- * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
- * @IEEE80211_REG_AP_POWER_MAX: maximum value
- */
-enum ieee80211_ap_reg_power {
- IEEE80211_REG_UNSET_AP,
- IEEE80211_REG_LPI_AP,
- IEEE80211_REG_SP_AP,
- IEEE80211_REG_VLP_AP,
- IEEE80211_REG_AP_POWER_AFTER_LAST,
- IEEE80211_REG_AP_POWER_MAX =
- IEEE80211_REG_AP_POWER_AFTER_LAST - 1,
-};
-
-/**
- * enum ieee80211_client_reg_power - regulatory power for a client
- *
- * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode
- * @IEEE80211_REG_DEFAULT_CLIENT: Default Client
- * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client
- * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal
- * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value
- */
-enum ieee80211_client_reg_power {
- IEEE80211_REG_UNSET_CLIENT,
- IEEE80211_REG_DEFAULT_CLIENT,
- IEEE80211_REG_SUBORDINATE_CLIENT,
- IEEE80211_REG_CLIENT_POWER_AFTER_LAST,
- IEEE80211_REG_CLIENT_POWER_MAX =
- IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1,
-};
-
-/* 802.11ax HE MAC capabilities */
-#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
-#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
-#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
-
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
-
-/* Link adaptation is split between bytes HE_MAC_CAP1 and
- * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
- * is set, in which case the following values apply:
- * 0 = No feedback.
- * 1 = Reserved.
- * 2 = Unsolicited feedback.
- * 3 = Both unsolicited and solicited feedback.
- * (A sketch decoding the split field follows the HE_MAC_CAP2 defines below.)
- */
-#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
-
-#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
-#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
-#define IEEE80211_HE_MAC_CAP2_TRS 0x04
-#define IEEE80211_HE_MAC_CAP2_BSR 0x08
-#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
-#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
-#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
-#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
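/*
 * Sketch referenced in the link adaptation comment above (not part of
 * the patch; the helper name is hypothetical). It assumes the CAP1 bit
 * is the low-order bit of the split 2-bit field.
 */
static inline u8 he_link_adaptation(const struct ieee80211_he_cap_elem *he_cap)
{
	u8 val = 0;

	if (he_cap->mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION)
		val |= 1;
	if (he_cap->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)
		val |= 2;

	return val;
}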
-
-#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
-#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
-
-/* The maximum length of an A-MPDU is defined by the combination of the Maximum
- * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
- * same field in the HE capabilities.
- */
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
-#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
-#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
-#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
-
-#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
-#define IEEE80211_HE_MAC_CAP4_QTP 0x02
-#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
-#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
-#define IEEE80211_HE_MAC_CAP4_OPS 0x20
-#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
-/* Multi TID agg TX is split between byte #4 and #5
- * The value is a combination of B39,B40,B41
- */
-#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
-
-#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
-#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
-#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
-#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
-#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
-#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
-#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
-#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
-
-#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
-#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
-#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
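/*
 * A minimal sketch (not part of the patch) of how the extension field
 * above combines with a band-dependent base factor; the capping
 * behaviour is an assumption here, and the helper name is hypothetical.
 */
static inline u64 he_max_ampdu_len(u8 base_factor, u8 ext, u8 factor_cap)
{
	u8 factor = min_t(u8, base_factor + ext, factor_cap);

	return (1ULL << factor) - 1;
}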
-
-/* 802.11ax HE PHY capabilities */
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
-
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
-
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
-#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
-#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
-#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
-/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
-#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
-
-#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
-#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
-#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
-#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
-#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
-#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
-
-/* Note that the meaning of UL MU below is different between an AP and a non-AP
- * sta, where in the AP case it indicates support for Rx and in the non-AP sta
- * case it indicates support for Tx.
- */
-#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
-#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
-
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
-#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
-#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
-
-#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
-#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
-
-/* Minimal allowed value of Max STS under 80MHz is 3 */
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
-
-/* Minimal allowed value of Max STS above 80MHz is 3 */
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
-
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
-
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
-
-#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
-#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
-
-#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
-#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
-#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
-#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
-#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
-#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
-#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
-#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
-
-#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
-#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
-#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
-#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
-#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
-
-#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
-#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
-#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
-#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
-#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
-#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
-
-#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
-#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
-#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
-#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
-#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
-#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
-#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
-
-#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
-
-/* 802.11ax HE TX/RX MCS NSS Support */
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
-
-/* TX/RX HE MCS Support field Highest MCS subfield encoding */
-enum ieee80211_he_highest_mcs_supported_subfield_enc {
- HIGHEST_MCS_SUPPORTED_MCS7 = 0,
- HIGHEST_MCS_SUPPORTED_MCS8,
- HIGHEST_MCS_SUPPORTED_MCS9,
- HIGHEST_MCS_SUPPORTED_MCS10,
- HIGHEST_MCS_SUPPORTED_MCS11,
-};
-
-/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
-static inline u8
-ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
-{
- u8 count = 4;
-
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
- count += 4;
-
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- count += 4;
-
- return count;
-}
-
-/* 802.11ax HE PPE Thresholds */
-#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
-#define IEEE80211_PPE_THRES_NSS_POS (0)
-#define IEEE80211_PPE_THRES_NSS_MASK (7)
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
- (BIT(5) | BIT(6))
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
-#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
-#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
-
-/*
- * Calculate 802.11ax HE capabilities IE PPE field size
- * Input: the header (first) byte of ppe_thres, and the HE capability
- * IE's PHY capability bytes
- */
-static inline u8
-ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
-{
- u8 n;
-
- if ((phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
- return 0;
-
- n = hweight8(ppe_thres_hdr &
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
- n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
- IEEE80211_PPE_THRES_NSS_POS));
-
- /*
- * Each pair is 6 bits, and we need to add the 7 "header" bits to the
- * total size.
- */
- n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
- n = DIV_ROUND_UP(n, 8);
-
- return n;
-}
-
-static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
-{
- const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
- u8 needed = sizeof(*he_cap_ie_elem);
-
- if (len < needed)
- return false;
-
- needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
- if (len < needed)
- return false;
-
- if (he_cap_ie_elem->phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- if (len < needed + 1)
- return false;
- needed += ieee80211_he_ppe_size(data[needed],
- he_cap_ie_elem->phy_cap_info);
- }
-
- return len >= needed;
-}
-
-/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
-#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
-#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
-#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
-#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
-
-#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
-#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
-#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
-#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4
-
-/**
- * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
- * @primary: primary channel
- * @control: control flags
- * @ccfs0: channel center frequency segment 0
- * @ccfs1: channel center frequency segment 1
- * @minrate: minimum rate (in 1 Mbps units)
- */
-struct ieee80211_he_6ghz_oper {
- u8 primary;
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
-#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
-#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
- u8 control;
- u8 ccfs0;
- u8 ccfs1;
- u8 minrate;
-} __packed;
-
-/*
- * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
- * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation
- * subfield encoding", and two category for each type in "Table E-12-Regulatory
- * Info subfield encoding in the United States".
- * So it it totally max 8 Transmit Power Envelope element.
- */
-#define IEEE80211_TPE_MAX_IE_COUNT 8
-/*
- * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
- * of "IEEE Std 802.11ax™‐2021", the max power level is 8.
- */
-#define IEEE80211_MAX_NUM_PWR_LEVEL 8
-
-#define IEEE80211_TPE_MAX_POWER_COUNT 8
-
-/* transmit power interpretation type of transmit power envelope element */
-enum ieee80211_tx_power_intrpt_type {
- IEEE80211_TPE_LOCAL_EIRP,
- IEEE80211_TPE_LOCAL_EIRP_PSD,
- IEEE80211_TPE_REG_CLIENT_EIRP,
- IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
-};
-
-/**
- * struct ieee80211_tx_pwr_env - Transmit Power Envelope
- * @tx_power_info: Transmit Power Information field
- * @tx_power: Maximum Transmit Power field
- *
- * This structure represents the payload of the "Transmit Power
- * Envelope element" as described in IEEE Std 802.11ax-2021 section
- * 9.4.2.161
- */
-struct ieee80211_tx_pwr_env {
- u8 tx_power_info;
- s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
-} __packed;
-
-#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
-#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
-#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
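/*
 * Example of decoding tx_power_info with the masks above (a sketch, not
 * part of the patch; shifts are derived from the masks and the helper
 * name is hypothetical).
 */
static inline void tx_pwr_env_decode(const struct ieee80211_tx_pwr_env *env,
				     u8 *count, u8 *interpret, u8 *category)
{
	*count = env->tx_power_info & IEEE80211_TX_PWR_ENV_INFO_COUNT;
	*interpret = (env->tx_power_info &
		      IEEE80211_TX_PWR_ENV_INFO_INTERPRET) >> 3;
	*category = (env->tx_power_info &
		     IEEE80211_TX_PWR_ENV_INFO_CATEGORY) >> 6;
}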
-
-/*
- * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
- * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
- * after the ext ID byte. It is assumed that he_oper_ie has at least
- * sizeof(struct ieee80211_he_operation) bytes, the caller must have
- * validated this.
- * @return the actual size of the IE data (not including header), or 0 on error
- */
-static inline u8
-ieee80211_he_oper_size(const u8 *he_oper_ie)
-{
- const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
- u8 oper_len = sizeof(struct ieee80211_he_operation);
- u32 he_oper_params;
-
- /* Make sure the input is not NULL */
- if (!he_oper_ie)
- return 0;
-
- /* Calc required length */
- he_oper_params = le32_to_cpu(he_oper->he_oper_params);
- if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
- oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
- oper_len++;
- if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
- oper_len += sizeof(struct ieee80211_he_6ghz_oper);
-
- /* Add the first byte (extension ID) to the total length */
- oper_len++;
-
- return oper_len;
-}
-
-/**
- * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
- * @he_oper: HE operation element (must be pre-validated for size)
- * but may be %NULL
- *
- * Return: a pointer to the 6 GHz operation field, or %NULL
- */
-static inline const struct ieee80211_he_6ghz_oper *
-ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
-{
- const u8 *ret;
- u32 he_oper_params;
-
- if (!he_oper)
- return NULL;
-
- ret = (const void *)&he_oper->optional;
-
- he_oper_params = le32_to_cpu(he_oper->he_oper_params);
-
- if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
- return NULL;
- if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
- ret += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
- ret++;
-
- return (const void *)ret;
-}
-
-/* HE Spatial Reuse defines */
-#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
-#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
-#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
-#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
-#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
-
-/*
- * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
- * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
- * after the ext ID byte. It is assumed that he_spr_ie has at least
- * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
- * this
- * @return the actual size of the IE data (not including header), or 0 on error
- */
-static inline u8
-ieee80211_he_spr_size(const u8 *he_spr_ie)
-{
- const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
- u8 spr_len = sizeof(struct ieee80211_he_spr);
- u8 he_spr_params;
-
- /* Make sure the input is not NULL */
- if (!he_spr_ie)
- return 0;
-
- /* Calc required length */
- he_spr_params = he_spr->he_sr_control;
- if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- spr_len++;
- if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
- spr_len += 18;
-
- /* Add the first byte (extension ID) to the total length */
- spr_len++;
-
- return spr_len;
-}
-
-/* S1G Capabilities Information field */
-#define IEEE80211_S1G_CAPABILITY_LEN 15
-
-#define S1G_CAP0_S1G_LONG BIT(0)
-#define S1G_CAP0_SGI_1MHZ BIT(1)
-#define S1G_CAP0_SGI_2MHZ BIT(2)
-#define S1G_CAP0_SGI_4MHZ BIT(3)
-#define S1G_CAP0_SGI_8MHZ BIT(4)
-#define S1G_CAP0_SGI_16MHZ BIT(5)
-#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
-
-#define S1G_SUPP_CH_WIDTH_2 0
-#define S1G_SUPP_CH_WIDTH_4 1
-#define S1G_SUPP_CH_WIDTH_8 2
-#define S1G_SUPP_CH_WIDTH_16 3
-#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
- cap[0])) << 1)
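/*
 * Example (a sketch, not part of the patch; the helper name is
 * hypothetical): S1G_SUPP_CH_WIDTH_MAX maps the 2-bit subfield directly
 * to the widest supported channel width.
 */
static inline u8 s1g_max_width_mhz(const u8 *s1g_capab)
{
	return S1G_SUPP_CH_WIDTH_MAX(s1g_capab);	/* 2, 4, 8 or 16 MHz */
}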
-
-#define S1G_CAP1_RX_LDPC BIT(0)
-#define S1G_CAP1_TX_STBC BIT(1)
-#define S1G_CAP1_RX_STBC BIT(2)
-#define S1G_CAP1_SU_BFER BIT(3)
-#define S1G_CAP1_SU_BFEE BIT(4)
-#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
-
-#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
-#define S1G_CAP2_MU_BFER BIT(3)
-#define S1G_CAP2_MU_BFEE BIT(4)
-#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
-#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
-
-#define S1G_CAP3_RD_RESPONDER BIT(0)
-#define S1G_CAP3_HT_DELAYED_BA BIT(1)
-#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
-#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
-#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
-
-#define S1G_CAP4_UPLINK_SYNC BIT(0)
-#define S1G_CAP4_DYNAMIC_AID BIT(1)
-#define S1G_CAP4_BAT BIT(2)
-#define S1G_CAP4_TIME_ADE BIT(3)
-#define S1G_CAP4_NON_TIM BIT(4)
-#define S1G_CAP4_GROUP_AID BIT(5)
-#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
-
-#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
-#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
-#define S1G_CAP5_AMSDU BIT(2)
-#define S1G_CAP5_AMPDU BIT(3)
-#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
-#define S1G_CAP5_FLOW_CONTROL BIT(5)
-#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
-
-#define S1G_CAP6_OBSS_MITIGATION BIT(0)
-#define S1G_CAP6_FRAGMENT_BA BIT(1)
-#define S1G_CAP6_NDP_PS_POLL BIT(2)
-#define S1G_CAP6_RAW_OPERATION BIT(3)
-#define S1G_CAP6_PAGE_SLICING BIT(4)
-#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
-#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
-
-#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
-#define S1G_CAP7_DUP_1MHZ BIT(1)
-#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
-#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
-#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
-#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
-#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
-#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
-
-#define S1G_CAP8_TWT_GROUPING BIT(0)
-#define S1G_CAP8_BDT BIT(1)
-#define S1G_CAP8_COLOR GENMASK(4, 2)
-#define S1G_CAP8_TWT_REQUEST BIT(5)
-#define S1G_CAP8_TWT_RESPOND BIT(6)
-#define S1G_CAP8_PV1_FRAME BIT(7)
-
-#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
-
-#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
-#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
-
-/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
-#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
-#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
-#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
-#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
-#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
-#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
-#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
-#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
-#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
-#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
-
-#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
-
-/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
-#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
-#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
-#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
-#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
-#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
-#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
-
-/* EHT beamformee number of spatial streams <= 80MHz is split */
-#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
-#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
-
-#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
-#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
-
-#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
-#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
-
-/* EHT number of sounding dimensions for 320MHz is split */
-#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
-#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
-#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
-#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
-#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
-#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
-#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
-#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
-#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
-
-#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
-#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
-#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
-#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
-#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
-
-#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
-#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
-#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
-#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
-#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
-#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
-#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
-#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
-#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
-
-/* Maximum number of supported EHT LTF is split */
-#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
-#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
-#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
-
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
-#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
-#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
-
-#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
-#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
-#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
-#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
-#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
-#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
-#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
-#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
-
-#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
-#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
-
-/*
- * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
- */
-#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
-#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
-#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
-#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
-#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
-#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
-
-/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
-static inline u8
-ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
- const struct ieee80211_eht_cap_elem_fixed *eht_cap,
- bool from_ap)
-{
- u8 count = 0;
-
- /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
- return 3;
-
- /* on 2.4 GHz, these three bits are reserved, so should be 0 */
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
- count += 3;
-
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
- count += 3;
-
- if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
- count += 3;
-
- if (count)
- return count;
-
- return from_ap ? 3 : 4;
-}
-
-/* 802.11be EHT PPE Thresholds */
-#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
-#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
-#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
-#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
-#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
-
-/*
- * Calculate 802.11be EHT capabilities IE EHT field size
- */
-static inline u8
-ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
-{
- u32 n;
-
- if (!(phy_cap_info[5] &
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
- return 0;
-
- n = hweight16(ppe_thres_hdr &
- IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
- n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
-
- /*
- * Each pair is 6 bits, and we need to add the 9 "header" bits to the
- * total size.
- */
- n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
- IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
- return DIV_ROUND_UP(n, 8);
-}
-
-static inline bool
-ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
- bool from_ap)
-{
- const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
- u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
-
- if (len < needed || !he_capa)
- return false;
-
- needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
- (const void *)data,
- from_ap);
- if (len < needed)
- return false;
-
- if (elem->phy_cap_info[5] &
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
- u16 ppe_thres_hdr;
-
- if (len < needed + sizeof(ppe_thres_hdr))
- return false;
-
- ppe_thres_hdr = get_unaligned_le16(data + needed);
- needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
- elem->phy_cap_info);
- }
-
- return len >= needed;
-}
-
-static inline bool
-ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
-{
- const struct ieee80211_eht_operation *elem = (const void *)data;
- u8 needed = sizeof(*elem);
-
- if (len < needed)
- return false;
-
- if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
- needed += 3;
-
- if (elem->params &
- IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
- needed += 2;
- }
-
- return len >= needed;
-}
-
-/* must validate ieee80211_eht_oper_size_ok() first */
-static inline u16
-ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
-{
- const struct ieee80211_eht_operation_info *info =
- (const void *)eht_oper->optional;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
- return 0;
-
- if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
- return 0;
-
- return get_unaligned_le16(info->optional);
-}
-
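A hedged usage sketch for the accessor above; the caller must have validated the element with ieee80211_eht_oper_size_ok() first, and bit N of the bitmap marks the N-th 20 MHz subchannel of the BSS bandwidth as punctured:

static inline bool
example_subchan_is_punctured(const struct ieee80211_eht_operation *eht_oper,
			     unsigned int subchan)
{
	return ieee80211_eht_oper_dis_subchan_bitmap(eht_oper) & BIT(subchan);
}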
-#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
-
-struct ieee80211_bandwidth_indication {
- u8 params;
- struct ieee80211_eht_operation_info info;
-} __packed;
-
-static inline bool
-ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
-{
- const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
-
- if (len < sizeof(*bwi))
- return false;
-
- if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
- len < sizeof(*bwi) + 2)
- return false;
-
- return true;
-}
-
-#define LISTEN_INT_USF GENMASK(15, 14)
-#define LISTEN_INT_UI GENMASK(13, 0)
-
-#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
-#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
-
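A minimal decoding sketch, assuming the 802.11ah unified scaling factors {1, 10, 1000, 10000} selected by the USF subfield (the factors are not spelled out in this header):

static inline u32 example_listen_int_bi(__le16 listen_interval)
{
	static const u32 usf_scale[] = { 1, 10, 1000, 10000 };
	u16 li = le16_to_cpu(listen_interval);

	/* listen interval in beacon intervals = UI * scale[USF] */
	return u16_get_bits(li, LISTEN_INT_UI) *
	       usf_scale[u16_get_bits(li, LISTEN_INT_USF)];
}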
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -3416,8 +1493,8 @@ enum ieee80211_statuscode {
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
/* 802.11ai */
- WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
- WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 112,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 113,
WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
WLAN_STATUS_SAE_PK = 127,
WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
@@ -3698,6 +1775,7 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_DH_PARAMETER = 32,
WLAN_EID_EXT_HE_CAPABILITY = 35,
WLAN_EID_EXT_HE_OPERATION = 36,
WLAN_EID_EXT_UORA = 37,
@@ -3721,6 +1799,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_EHT_CAPABILITY = 108,
WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
+ WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
+ WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
};
/* Action category code */
@@ -3761,25 +1841,6 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
-/* HT action codes */
-enum ieee80211_ht_actioncode {
- WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
- WLAN_HT_ACTION_SMPS = 1,
- WLAN_HT_ACTION_PSMP = 2,
- WLAN_HT_ACTION_PCO_PHASE = 3,
- WLAN_HT_ACTION_CSI = 4,
- WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
- WLAN_HT_ACTION_COMPRESSED_BF = 6,
- WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
-};
-
-/* VHT action codes */
-enum ieee80211_vht_actioncode {
- WLAN_VHT_ACTION_COMPRESSED_BF = 0,
- WLAN_VHT_ACTION_GROUPID_MGMT = 1,
- WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
-};
-
/* Self Protected Action codes */
enum ieee80211_self_protected_actioncode {
WLAN_SP_RESERVED = 0,
@@ -3790,34 +1851,12 @@ enum ieee80211_self_protected_actioncode {
WLAN_SP_MGK_ACK = 5,
};
-/* Mesh action codes */
-enum ieee80211_mesh_actioncode {
- WLAN_MESH_ACTION_LINK_METRIC_REPORT,
- WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
- WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
- WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
- WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
- WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
- WLAN_MESH_ACTION_MCCA_TEARDOWN,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
-};
-
/* Unprotected WNM action codes */
enum ieee80211_unprotected_wnm_actioncode {
WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
};
-/* Protected EHT action codes */
-enum ieee80211_protected_eht_actioncode {
- WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
- WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
- WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
-};
-
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
@@ -3834,18 +1873,14 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
-enum ieee80211_s1g_actioncode {
- WLAN_S1G_AID_SWITCH_REQUEST,
- WLAN_S1G_AID_SWITCH_RESPONSE,
- WLAN_S1G_SYNC_CONTROL,
- WLAN_S1G_STA_INFO_ANNOUNCE,
- WLAN_S1G_EDCA_PARAM_SET,
- WLAN_S1G_EL_OPERATION,
- WLAN_S1G_TWT_SETUP,
- WLAN_S1G_TWT_TEARDOWN,
- WLAN_S1G_SECT_GROUP_ID_LIST,
- WLAN_S1G_SECT_ID_FEEDBACK,
- WLAN_S1G_TWT_INFORMATION = 11,
+/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */
+enum ieee80211_radio_measurement_actioncode {
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0,
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5,
};
#define IEEE80211_WEP_IV_LEN 4
@@ -3863,6 +1898,9 @@ enum ieee80211_s1g_actioncode {
#define IEEE80211_GCMP_HDR_LEN 8
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define IEEE80211_CMAC_128_MIC_LEN 8
+#define IEEE80211_CMAC_256_MIC_LEN 16
+#define IEEE80211_GMAC_MIC_LEN 16
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
@@ -3988,71 +2026,15 @@ enum ieee80211_tdls_actioncode {
/* Defines support for enhanced multi-bssid advertisement */
#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
+/* Enable Beacon Protection */
+#define WLAN_EXT_CAPA11_BCN_PROTECT BIT(4)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
/* BSS Coex IE information field bits */
#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
-/**
- * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
- *
- * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
- * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
- * that will be specified in a vendor specific information element
- */
-enum ieee80211_mesh_sync_method {
- IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
- IEEE80211_SYNC_METHOD_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
- *
- * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
- * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
- * be specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_protocol {
- IEEE80211_PATH_PROTOCOL_HWMP = 1,
- IEEE80211_PATH_PROTOCOL_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
- *
- * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
- * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
- * specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_metric {
- IEEE80211_PATH_METRIC_AIRTIME = 1,
- IEEE80211_PATH_METRIC_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
- *
- * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode
- *
- * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
- * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
- * this value
- * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA supports
- * the proactive PREQ with proactive PREP subfield set to 0
- * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA
- * supports the proactive PREQ with proactive PREP subfield set to 1
- * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA supports
- * the proactive RANN
- */
-enum ieee80211_root_mode_identifier {
- IEEE80211_ROOTMODE_NO_ROOT = 0,
- IEEE80211_ROOTMODE_ROOT = 1,
- IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
- IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
- IEEE80211_PROACTIVE_RANN = 4,
-};
-
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
*
@@ -4145,7 +2127,7 @@ enum ieee80211_idle_options {
};
/**
- * struct ieee80211_bss_max_idle_period_ie
+ * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct
*
* This structure refers to "BSS Max idle period element"
*
@@ -4160,19 +2142,6 @@ struct ieee80211_bss_max_idle_period_ie {
u8 idle_options;
} __packed;
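The max idle period is expressed in units of 1000 TUs, so one unit is 1000 * 1024 us (about 1.024 s). A hedged sketch, assuming the element's __le16 max_idle_period field:

static inline unsigned long
example_max_idle_usec(const struct ieee80211_bss_max_idle_period_ie *ie)
{
	return 1024UL * 1000 * le16_to_cpu(ie->max_idle_period);
}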
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
-};
-
/* SA Query action */
enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_REQUEST = 0,
@@ -4180,7 +2149,7 @@ enum ieee80211_sa_query_action {
};
/**
- * struct ieee80211_bssid_index
+ * struct ieee80211_bssid_index - multiple BSSID index element structure
*
* This structure refers to "Multiple BSSID-index element"
*
@@ -4195,7 +2164,8 @@ struct ieee80211_bssid_index {
};
/**
- * struct ieee80211_multiple_bssid_configuration
+ * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration
+ * element structure
*
* This structure refers to "Multiple BSSID Configuration element"
*
@@ -4305,27 +2275,10 @@ struct ieee80211_tspec_ie {
__le16 medium_time;
} __packed;
-struct ieee80211_he_6ghz_capa {
- /* uses IEEE80211_HE_6GHZ_CAP_* below */
- __le16 capa;
-} __packed;
-
-/* HE 6 GHz band capabilities */
-/* uses enum ieee80211_min_mpdu_spacing values */
-#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
-/* uses enum ieee80211_vht_max_ampdu_length_exp values */
-#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
-/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
-#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
-/* WLAN_HT_CAP_SM_PS_* values */
-#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
-#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
-#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
-#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
-
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
+ * Return: a pointer to the QoS control field in the frame header
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
* and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
@@ -4348,6 +2301,7 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_tid - get qos TID
* @hdr: the frame
+ * Return: the TID from the QoS control field
*/
static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
{
@@ -4359,6 +2313,7 @@ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
+ * Return: a pointer to the source address (SA)
*
* Given an 802.11 frame, this function returns the offset
* to the source address (SA). It does not verify that the
@@ -4378,6 +2333,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_DA - get pointer to DA
* @hdr: the frame
+ * Return: a pointer to the destination address (DA)
*
* Given an 802.11 frame, this function returns the offset
* to the destination address (DA). It does not verify that
@@ -4396,6 +2352,7 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
* @skb: the skb to check, starting with the 802.11 header
+ * Return: whether or not the MMPDU is bufferable
*/
static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
{
@@ -4434,6 +2391,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
@@ -4470,6 +2428,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
@@ -4482,6 +2441,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
+ * Return: whether or not the frame is a public action frame
*/
static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
size_t len)
@@ -4527,8 +2487,9 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @hdr: the frame
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
{
@@ -4544,8 +2505,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
{
@@ -4557,20 +2519,15 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
/**
* ieee80211_tu_to_usec - convert time units (TU) to microseconds
* @tu: the TUs
+ * Return: the time value converted to microseconds
*/
static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
{
return 1024 * tu;
}
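Since one TU is exactly 1024 microseconds, the conversion is a single multiply; e.g. the common 100 TU beacon interval:

static inline unsigned long example_beacon_interval_usec(void)
{
	return ieee80211_tu_to_usec(100);	/* 102400 us, ~102.4 ms */
}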
-/**
- * ieee80211_check_tim - check if AID bit is set in TIM
- * @tim: the TIM IE
- * @tim_len: length of the TIM IE
- * @aid: the AID to look for
- */
-static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
- u8 tim_len, u16 aid)
+static inline bool __ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
{
u8 mask;
u8 index, indexn1, indexn2;
@@ -4594,8 +2551,10 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
}
/**
- * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
+ * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action
+ * frame
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
@@ -4635,6 +2594,7 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
+ * Return: %true if the frame contains a TPC element, %false otherwise
*
* This function checks if it's either TPC report action frame or Link
* Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5
@@ -4679,6 +2639,11 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+/**
+ * ieee80211_is_timing_measurement - check if frame is timing measurement response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid timing measurement response
+ */
static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
@@ -4698,6 +2663,11 @@ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
return false;
}
+/**
+ * ieee80211_is_ftm - check if frame is FTM response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid FTM response action frame
+ */
static inline bool ieee80211_is_ftm(struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *)skb->data;
@@ -4752,6 +2722,7 @@ struct element {
* @element: element pointer after for_each_element() or friends
* @data: same data pointer as passed to for_each_element() or friends
* @datalen: same data length as passed to for_each_element() or friends
+ * Return: %true if all elements were iterated, %false otherwise; see notes
*
* This function returns %true if all the data was parsed or considered
* while walking the elements. Only use this if your for_each_element()
@@ -4845,596 +2816,28 @@ struct ieee80211_tbtt_info_ge_11 {
struct ieee80211_rnr_mld_params mld_params;
} __packed;
-/* multi-link device */
-#define IEEE80211_MLD_MAX_NUM_LINKS 15
-
-#define IEEE80211_ML_CONTROL_TYPE 0x0007
-#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
-#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
-#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
-#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
-#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
-#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
-
-struct ieee80211_multi_link_elem {
- __le16 control;
- u8 variable[];
-} __packed;
-
-#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
-#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
-#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
-#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
-#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
-#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
-
-#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
-#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
-#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
-
-/*
- * Described in P802.11be_D3.0
- * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
- * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
- * dot11MSDTXOPMAX defaults to 1
- */
-#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
-
-#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
-#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
-#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
-#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
-#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
-#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
-#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
-#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
-#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
-#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
-#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
-
-#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
-#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
-#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
-#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
-#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
-#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
-#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
-#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
-#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
-
-struct ieee80211_mle_basic_common_info {
- u8 len;
- u8 mld_mac_addr[ETH_ALEN];
- u8 variable[];
-} __packed;
-
-#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
-
-struct ieee80211_mle_preq_common_info {
- u8 len;
- u8 variable[];
-} __packed;
-
-#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
-
-/* no fixed fields in RECONF */
-
-struct ieee80211_mle_tdls_common_info {
- u8 len;
- u8 ap_mld_mac_addr[ETH_ALEN];
-} __packed;
-
-#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
-
-/* no fixed fields in PRIO_ACCESS */
-
-/**
- * ieee80211_mle_common_size - check multi-link element common size
- * @data: multi-link element, must already be checked for size using
- * ieee80211_mle_size_ok()
- */
-static inline u8 ieee80211_mle_common_size(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- u8 common = 0;
-
- switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- case IEEE80211_ML_CONTROL_TYPE_PREQ:
- case IEEE80211_ML_CONTROL_TYPE_TDLS:
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- /*
- * The length is the first octet pointed by mle->variable so no
- * need to add anything
- */
- break;
- case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
- if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
- common += ETH_ALEN;
- return common;
- default:
- WARN_ON(1);
- return 0;
- }
-
- return sizeof(*mle) + common + mle->variable[0];
-}
-
-/**
- * ieee80211_mle_get_link_id - returns the link ID
- * @data: the basic multi link element
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the BSS link ID can't be found, -1 will be returned
- */
-static inline int ieee80211_mle_get_link_id(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /* common points now at the beginning of ieee80211_mle_basic_common_info */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
- return -1;
-
- return *common;
-}
-
-/**
- * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
- * @data: pointer to the basic multi link element
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the BSS parameter change count value can't be found (the presence bit
- * for it is clear), -1 will be returned.
- */
-static inline int
-ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /* common points now at the beginning of ieee80211_mle_basic_common_info */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
- return -1;
-
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
-
- return *common;
-}
-
-/**
- * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
- * @data: pointer to the multi link EHT IE
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the medium synchronization is not present, then the default value is
- * returned.
- */
-static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /* common points now at the beginning of ieee80211_mle_basic_common_info */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
- return IEEE80211_MED_SYNC_DELAY_DEFAULT;
-
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
- common += 1;
-
- return get_unaligned_le16(common);
-}
-
-/**
- * ieee80211_mle_get_eml_cap - returns the EML capability
- * @data: pointer to the multi link EHT IE
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the EML capability is not present, 0 will be returned.
- */
-static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /* common points now at the beginning of ieee80211_mle_basic_common_info */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
- return 0;
-
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
- common += 2;
-
- return get_unaligned_le16(common);
-}
+#include "ieee80211-ht.h"
+#include "ieee80211-vht.h"
+#include "ieee80211-he.h"
+#include "ieee80211-eht.h"
+#include "ieee80211-mesh.h"
+#include "ieee80211-s1g.h"
+#include "ieee80211-p2p.h"
+#include "ieee80211-nan.h"
/**
- * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
- * @data: pointer to the multi link EHT IE
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the MLD capabilities and operations field is not present, 0 will be
- * returned.
- */
-static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /*
- * common points now at the beginning of
- * ieee80211_mle_basic_common_info
- */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
- return 0;
-
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
- common += 2;
-
- return get_unaligned_le16(common);
-}
-
-/**
- * ieee80211_mle_get_mld_id - returns the MLD ID
- * @data: pointer to the multi link element
- *
- * The element is assumed to be of the correct type (BASIC) and big enough,
- * this must be checked using ieee80211_mle_type_ok().
- *
- * If the MLD ID is not present, 0 will be returned.
- */
-static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control = le16_to_cpu(mle->control);
- const u8 *common = mle->variable;
-
- /*
- * common points now at the beginning of
- * ieee80211_mle_basic_common_info
- */
- common += sizeof(struct ieee80211_mle_basic_common_info);
-
- if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
- return 0;
-
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
- common += 2;
-
- return *common;
-}
-
-/**
- * ieee80211_mle_size_ok - validate multi-link element size
- * @data: pointer to the element data
- * @len: length of the containing element
- */
-static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u8 fixed = sizeof(*mle);
- u8 common = 0;
- bool check_common_len = false;
- u16 control;
-
- if (!data || len < fixed)
- return false;
-
- control = le16_to_cpu(mle->control);
-
- switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
- case IEEE80211_ML_CONTROL_TYPE_BASIC:
- common += sizeof(struct ieee80211_mle_basic_common_info);
- check_common_len = true;
- if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
- common += 1;
- if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
- common += 2;
- if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
- common += 1;
- break;
- case IEEE80211_ML_CONTROL_TYPE_PREQ:
- common += sizeof(struct ieee80211_mle_preq_common_info);
- if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
- common += 1;
- check_common_len = true;
- break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
- common += ETH_ALEN;
- break;
- case IEEE80211_ML_CONTROL_TYPE_TDLS:
- common += sizeof(struct ieee80211_mle_tdls_common_info);
- check_common_len = true;
- break;
- case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
- if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
- common += ETH_ALEN;
- break;
- default:
- /* we don't know this type */
- return true;
- }
-
- if (len < fixed + common)
- return false;
-
- if (!check_common_len)
- return true;
-
- /* if present, common length is the first octet there */
- return mle->variable[0] >= common;
-}
-
-/**
- * ieee80211_mle_type_ok - validate multi-link element type and size
- * @data: pointer to the element data
- * @type: expected type of the element
- * @len: length of the containing element
- */
-static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
-{
- const struct ieee80211_multi_link_elem *mle = (const void *)data;
- u16 control;
-
- if (!ieee80211_mle_size_ok(data, len))
- return false;
-
- control = le16_to_cpu(mle->control);
-
- if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
- return true;
-
- return false;
-}
-
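A hedged usage sketch tying the accessors above together; data/len describe an already-located Multi-Link element body, and type and size must be validated before any accessor is called:

static inline void example_parse_basic_mle(const u8 *data, size_t len)
{
	int link_id;
	u16 med_sync_delay;

	if (!ieee80211_mle_type_ok(data, IEEE80211_ML_CONTROL_TYPE_BASIC, len))
		return;

	link_id = ieee80211_mle_get_link_id(data);	/* -1 if absent */
	med_sync_delay = ieee80211_mle_get_eml_med_sync_delay(data);
	/* ... use link_id / med_sync_delay ... */
}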
-enum ieee80211_mle_subelems {
- IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
- IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
-};
-
-#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
-#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
-#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
-#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
-#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
-#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
-#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
-#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
-#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
-
-struct ieee80211_mle_per_sta_profile {
- __le16 control;
- u8 sta_info_len;
- u8 variable[];
-} __packed;
-
-/**
- * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
- * profile size
- * @data: pointer to the sub element data
- * @len: length of the containing sub element
- */
-static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
- size_t len)
-{
- const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
- u16 control;
- u8 fixed = sizeof(*prof);
- u8 info_len = 1;
-
- if (len < fixed)
- return false;
-
- control = le16_to_cpu(prof->control);
-
- if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
- info_len += 6;
- if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
- info_len += 2;
- if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
- info_len += 8;
- if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
- info_len += 2;
- if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
- control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
- if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
- info_len += 2;
- else
- info_len += 1;
- }
- if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
- info_len += 1;
-
- return prof->sta_info_len >= info_len &&
- fixed + prof->sta_info_len - 1 <= len;
-}
-
-/**
- * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
- * parameter change count
- * @prof: the per-STA profile, having been checked with
- * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
- *
- * Return: The BSS parameter change count value if present, 0 otherwise.
- */
-static inline u8
-ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
-{
- u16 control = le16_to_cpu(prof->control);
- const u8 *pos = prof->variable;
-
- if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
- return 0;
-
- if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
- pos += 6;
- if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
- pos += 2;
- if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
- pos += 8;
- if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
- pos += 2;
- if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
- control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
- if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
- pos += 2;
- else
- pos += 1;
- }
-
- return *pos;
-}
-
-#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
-#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
-#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
-#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
-#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780
-#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
-
-/**
- * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
- * element sta profile size.
- * @data: pointer to the sub element data
- * @len: length of the containing sub element
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ * @s1g: whether the TIM is from an S1G PPDU
+ * Return: whether or not traffic is indicated in the TIM for the given AID
*/
-static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
- size_t len)
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid, bool s1g)
{
- const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
- u16 control;
- u8 fixed = sizeof(*prof);
- u8 info_len = 1;
-
- if (len < fixed)
- return false;
-
- control = le16_to_cpu(prof->control);
-
- if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
- info_len += ETH_ALEN;
- if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
- info_len += 2;
- if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
- info_len += 2;
-
- return prof->sta_info_len >= info_len &&
- fixed + prof->sta_info_len - 1 <= len;
+ return s1g ? ieee80211_s1g_check_tim(tim, tim_len, aid) :
+ __ieee80211_check_tim(tim, tim_len, aid);
}
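A hedged usage sketch for the new wrapper, for a regular (non-S1G) beacon, hence s1g == false:

static inline bool
example_sta_has_buffered_frames(const struct ieee80211_tim_ie *tim,
				u8 tim_len, u16 aid)
{
	return ieee80211_check_tim(tim, tim_len, aid, false);
}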
-static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
-{
- const struct ieee80211_ttlm_elem *t2l = (const void *)data;
- u8 control, fixed = sizeof(*t2l), elem_len = 0;
-
- if (len < fixed)
- return false;
-
- control = t2l->control;
-
- if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
- elem_len += 2;
- if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
- elem_len += 3;
-
- if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
- u8 bm_size;
-
- elem_len += 1;
- if (len < fixed + elem_len)
- return false;
-
- if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
- bm_size = 1;
- else
- bm_size = 2;
-
- elem_len += hweight8(t2l->optional[0]) * bm_size;
- }
-
- return len >= fixed + elem_len;
-}
-
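A hedged worked example of the length rule above (the element is hypothetical): with control = SWITCH_TIME_PRESENT | LINK_MAP_SIZE, a non-default mapping, and a link mapping presence indicator of 0x05 (TIDs 0 and 2 mapped, 1-octet maps), elem_len = 2 (switch time) + 1 (indicator) + 2 * 1 (maps) = 5 octets beyond the fixed part.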
-#define for_each_mle_subelement(_elem, _data, _len) \
- if (ieee80211_mle_size_ok(_data, _len)) \
- for_each_element(_elem, \
- _data + ieee80211_mle_common_size(_data),\
- _len - ieee80211_mle_common_size(_data))
-
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 3ff96ae31bf6..c5fe3b2a53e8 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -65,11 +65,9 @@ struct br_ip_list {
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
struct net_bridge;
-void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
- unsigned int cmd, struct ifreq *ifr,
+void brioctl_set(int (*hook)(struct net *net, unsigned int cmd,
void __user *uarg));
-int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
- struct ifreq *ifr, void __user *uarg);
+int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 8a9792a6427a..61b7335aa037 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -19,6 +19,9 @@
#include <linux/skbuff.h>
#include <uapi/linux/if_ether.h>
+/* XX:XX:XX:XX:XX:XX */
+#define MAC_ADDR_STR_LEN (3 * ETH_ALEN - 1)
+
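MAC_ADDR_STR_LEN counts the 17 printable characters for ETH_ALEN == 6 and excludes the trailing NUL, so buffers need one extra byte. A minimal sketch using the kernel's %pM format specifier:

static inline void example_format_mac(char buf[MAC_ADDR_STR_LEN + 1],
				      const u8 *addr)
{
	snprintf(buf, MAC_ADDR_STR_LEN + 1, "%pM", addr);
}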
static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
index 0404f5bf4f30..f4cf2dd36d19 100644
--- a/include/linux/if_hsr.h
+++ b/include/linux/if_hsr.h
@@ -13,6 +13,15 @@ enum hsr_version {
PRP_V1,
};
+enum hsr_port_type {
+ HSR_PT_NONE = 0, /* Must be 0, used by framereg */
+ HSR_PT_SLAVE_A,
+ HSR_PT_SLAVE_B,
+ HSR_PT_INTERLINK,
+ HSR_PT_MASTER,
+ HSR_PT_PORTS, /* This must be the last item in the enum */
+};
+
/* HSR Tag.
* As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
* path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
@@ -32,6 +41,10 @@ struct hsr_tag {
#if IS_ENABLED(CONFIG_HSR)
extern bool is_hsr_master(struct net_device *dev);
extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt);
+int hsr_get_port_type(struct net_device *hsr_dev, struct net_device *dev,
+ enum hsr_port_type *type);
#else
static inline bool is_hsr_master(struct net_device *dev)
{
@@ -42,6 +55,19 @@ static inline int hsr_get_version(struct net_device *dev,
{
return -EINVAL;
}
+
+static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int hsr_get_port_type(struct net_device *hsr_dev,
+ struct net_device *dev,
+ enum hsr_port_type *type)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_HSR */
#endif /*_LINUX_IF_HSR_H_*/
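A hedged usage sketch for the new port accessors (the function is illustrative, not from the patch); the stubs return ERR_PTR(-EINVAL) or -EINVAL when CONFIG_HSR is off, so callers must handle both paths:

static inline int example_hsr_slave_a_mtu(struct net_device *hsr_dev)
{
	struct net_device *slave;

	slave = hsr_get_port_ndev(hsr_dev, HSR_PT_SLAVE_A);
	if (IS_ERR(slave))
		return PTR_ERR(slave);

	/* a real caller would also follow the accessor's reference rules */
	return slave->mtu;
}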
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
deleted file mode 100644
index 4cc1c0b77870..000000000000
--- a/include/linux/if_ltalk.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_LTALK_H
-#define __LINUX_LTALK_H
-
-#include <uapi/linux/if_ltalk.h>
-
-extern struct net_device *alloc_ltalkdev(int sizeof_priv);
-#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 523025106a64..0f7281e3e448 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -59,8 +59,10 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);
-extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+struct rtnl_newlink_params;
+
+extern int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ff3beda1312c..db45d6f1c4f4 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -43,7 +43,7 @@ struct pppox_sock {
/* struct sock must be the first member of pppox_sock */
struct sock sk;
struct ppp_channel chan;
- struct pppox_sock *next; /* for hash table */
+ struct pppox_sock __rcu *next; /* for hash table */
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 839d1e48b85e..c44bf6e80ecb 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -42,7 +42,7 @@ struct rmnet_map_ul_csum_header {
/* csum_info field:
* OFFSET: where (offset in bytes) to insert computed checksum
- * UDP: 1 = UDP checksum (zero checkum means no checksum)
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
* ENABLED: 1 = checksum computation requested
*/
#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index cdc684e04a2f..ce97d891cf72 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -191,8 +191,6 @@ struct team {
const struct header_ops *header_ops_cache;
- struct mutex lock; /* used for overall locking, e.g. port lists write */
-
/*
* List of enabled ports and their count
*/
@@ -223,7 +221,6 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
- struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 043d442994b0..80166eb62f41 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,11 +19,6 @@ struct tun_msg_ctl {
void *ptr;
};
-struct tun_xdp_hdr {
- int buflen;
- struct virtio_net_hdr gso;
-};
-
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c1645c86eed9..f7f34eb15e06 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -79,11 +79,6 @@ static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
@@ -136,7 +131,7 @@ struct vlan_pcpu_stats {
u32 tx_dropped;
};
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
@@ -176,6 +171,7 @@ struct netpoll;
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
+ * @netpoll: netpoll instance "propagated" down to @real_dev
*/
struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
@@ -199,6 +195,11 @@ struct vlan_dev_priv {
#endif
};
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
@@ -236,6 +237,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return false;
+}
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
@@ -253,19 +259,19 @@ vlan_for_each(struct net_device *dev,
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
@@ -310,7 +316,7 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
* eth_type_vlan - check for valid vlan ether type.
* @ethertype: ether type to check
*
- * Returns true if the ether type is a vlan ether type.
+ * Returns: true if the ether type is a vlan ether type.
*/
static inline bool eth_type_vlan(__be16 ethertype)
{
@@ -341,24 +347,25 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci,
unsigned int mac_len)
{
+ const u8 meta_len = mac_len > ETH_TLEN ? skb_metadata_len(skb) : 0;
struct vlan_ethhdr *veth;
- if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ if (skb_cow_head(skb, meta_len + VLAN_HLEN) < 0)
return -ENOMEM;
skb_push(skb, VLAN_HLEN);
/* Move the mac header sans proto to the beginning of the new header. */
if (likely(mac_len > ETH_TLEN))
- memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
+ skb_postpush_data_move(skb, VLAN_HLEN, mac_len - ETH_TLEN);
if (skb_mac_header_was_set(skb))
skb->mac_header -= VLAN_HLEN;
@@ -390,9 +397,9 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -414,6 +421,8 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto,
@@ -443,6 +452,8 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -461,6 +472,8 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
@@ -533,7 +546,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not of VLAN type
+ * Returns: error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -551,7 +564,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if @skb->vlan_tci is not set correctly
+ * Returns: error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
@@ -570,7 +583,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not VLAN tagged
+ * Returns: error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -582,16 +595,19 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
/**
- * vlan_get_protocol - get protocol EtherType.
+ * __vlan_get_protocol_offset() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
+ * @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
- int *depth)
+static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
@@ -610,7 +626,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
do {
struct vlan_hdr vhdr, *vh;
- vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
+ sizeof(vhdr), &vhdr);
if (unlikely(!vh || !--parse_depth))
return 0;
@@ -625,11 +642,17 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
return type;
}
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ return __vlan_get_protocol_offset(skb, type, 0, depth);
+}
+
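The zero-offset wrapper keeps the old entry point intact; a minimal sketch of the common call pattern, which reports the accumulated eth plus vlan header depth through @depth:

static inline __be16 example_l3_proto(const struct sk_buff *skb, int *depth)
{
	return __vlan_get_protocol(skb, skb->protocol, depth);
}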
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
@@ -709,25 +732,23 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*
* Expects the skb to contain a VLAN tag in the payload, and to have skb->data
* pointing at the MAC header.
- *
- * Returns a new pointer to skb->data, or NULL on failure to pull.
*/
-static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
+static inline void vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
*vlan_tci = ntohs(vhdr->h_vlan_TCI);
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
vlan_set_encap_proto(skb, vhdr);
- return __skb_pull(skb, VLAN_HLEN);
+ __skb_pull(skb, VLAN_HLEN);
+ skb_postpull_data_move(skb, VLAN_HLEN, 2 * ETH_ALEN);
}
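A hedged usage sketch; the caller has verified that skb->data points at the MAC header and that a VLAN tag follows it in the payload:

static inline u16 example_strip_outer_tag(struct sk_buff *skb)
{
	u16 vlan_tci;

	vlan_remove_tag(skb, &vlan_tci);
	return vlan_tci;
}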
/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged, regardless of whether it is hardware
+ * Returns: true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
@@ -743,7 +764,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * Returns: true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
@@ -774,7 +795,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
* @skb: skbuff to query
* @features: features to be checked
*
- * Returns features without unsafe ones if the skb has multiple tags.
+ * Returns: features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
@@ -798,9 +819,11 @@ static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
* @h1: Pointer to vlan header
* @h2: Pointer to vlan header
*
- * Compare two vlan headers, returns 0 if equal.
+ * Compare two vlan headers.
*
* Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
+ *
+ * Return: 0 if equal, arbitrary non-zero value if not equal.
*/
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 5171231f70a8..073b30a9b850 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -87,6 +87,8 @@ struct ip_mc_list {
char loaded;
unsigned char gsquery; /* check source marks? */
unsigned char crcount;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
struct rcu_head rcu;
};
diff --git a/include/linux/iio/adc-helpers.h b/include/linux/iio/adc-helpers.h
new file mode 100644
index 000000000000..56b092a2a4c4
--- /dev/null
+++ b/include/linux/iio/adc-helpers.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * The industrial I/O ADC firmware property parsing helpers
+ *
+ * Copyright (c) 2025 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef _INDUSTRIAL_IO_ADC_HELPERS_H_
+#define _INDUSTRIAL_IO_ADC_HELPERS_H_
+
+#include <linux/property.h>
+
+struct device;
+struct iio_chan_spec;
+
+static inline int iio_adc_device_num_channels(struct device *dev)
+{
+ return device_get_named_child_node_count(dev, "channel");
+}
+
+int devm_iio_adc_device_alloc_chaninfo_se(struct device *dev,
+ const struct iio_chan_spec *template,
+ int max_chan_id,
+ struct iio_chan_spec **cs);
+
+#endif /* _INDUSTRIAL_IO_ADC_HELPERS_H_ */
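A hedged usage sketch for the new helper (the template and the channel-ID limit are hypothetical), assuming it returns the number of parsed "channel" firmware nodes or a negative errno:

static int example_parse_adc_channels(struct device *dev,
				      struct iio_chan_spec **cs)
{
	static const struct iio_chan_spec template = {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	};

	return devm_iio_adc_device_alloc_chaninfo_se(dev, &template, 7, cs);
}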
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 383614ebd760..6e70a412e218 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -29,7 +29,10 @@ struct ad_sd_calib_data {
struct ad_sigma_delta;
struct device;
+struct gpio_desc;
struct iio_dev;
+struct spi_offload;
+struct spi_offload_trigger;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
@@ -37,10 +40,19 @@ struct iio_dev;
* @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
* @disable_all: Will be called to disable all channels, may be NULL.
+ * @disable_one: Will be called to disable a single channel after
+ * ad_sigma_delta_single_conversion(), may be NULL.
+ * Usage of this callback expects iio_chan_spec.address to contain
+ * the value required for the driver to identify the channel.
* @postprocess_sample: Is called for each sampled data word, can be used to
* modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
+ * @has_named_irqs: Set to true if there is more than one IRQ line.
+ * @supports_spi_offload: Set to true if the driver supports SPI offload. Often
+ * special considerations are needed for scan_type and other channel
+ * info, so individual drivers have to set this to let the core
+ * code know that it can use SPI offload if it is available.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
* @status_ch_mask: Mask for the channel number stored in status register.
@@ -48,22 +60,25 @@ struct iio_dev;
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
* @num_slots: Number of sequencer slots
- * @irq_line: IRQ for reading conversions. If 0, spi->irq will be used
+ * @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip.
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
int (*disable_all)(struct ad_sigma_delta *);
+ int (*disable_one)(struct ad_sigma_delta *, unsigned int chan);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
+ bool has_named_irqs;
+ bool supports_spi_offload;
unsigned int addr_shift;
unsigned int read_mask;
unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
unsigned int num_slots;
- int irq_line;
+ unsigned int num_resetclks;
};
/**
@@ -80,22 +95,28 @@ struct ad_sigma_delta {
/* private: */
struct completion completion;
+ spinlock_t irq_lock; /* protects .irq_dis and irq en/disable state */
bool irq_dis;
bool bus_locked;
bool keep_cs_asserted;
- uint8_t comm;
+ u8 comm;
const struct ad_sigma_delta_info *info;
unsigned int active_slots;
unsigned int current_slot;
unsigned int num_slots;
- int irq_line;
+ struct gpio_desc *rdy_gpiod;
+ int irq_line;
bool status_appended;
/* map slots to channels in order to know what to expect from devices */
unsigned int *slots;
- uint8_t *samples_buf;
+ struct spi_message sample_msg;
+ struct spi_transfer sample_xfer[2];
+ u8 *samples_buf;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -104,10 +125,16 @@ struct ad_sigma_delta {
* 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
* rounded to 16 bytes to take into account padding.
*/
- uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN);
- uint8_t rx_buf[16] __aligned(8);
+ u8 tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ u8 rx_buf[16] __aligned(8);
+ u8 sample_addr;
};
+static inline bool ad_sigma_delta_has_spi_offload(struct ad_sigma_delta *sd)
+{
+ return sd->offload != NULL;
+}
+
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
unsigned int channel)
{
@@ -140,6 +167,15 @@ static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
return 0;
}
+static inline int ad_sigma_delta_disable_one(struct ad_sigma_delta *sd,
+ unsigned int chan)
+{
+ if (sd->info->disable_one)
+ return sd->info->disable_one(sd, chan);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -158,14 +194,13 @@ static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd,
return 0;
}
-void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm);
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, u8 comm);
int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int val);
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
-int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
- unsigned int reset_length);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta);
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
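
A hedged sketch of how a sigma-delta driver could wire up the new @disable_one callback; MY_REG_CHANNEL() is an invented register accessor, and the core passes @chan from iio_chan_spec.address as the kernel-doc above notes:

/* Sketch only: disables the channel selected via iio_chan_spec.address */
static int my_sd_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
        return ad_sd_write_reg(sd, MY_REG_CHANNEL(chan), 2, 0);
}

static const struct ad_sigma_delta_info my_sd_info = {
        .disable_one = my_sd_disable_one,
        .has_registers = true,
        .num_resetclks = 64,    /* per-chip value, illustrative */
};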
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
index aa21b032e861..3bf4c49726a7 100644
--- a/include/linux/iio/adc/qcom-vadc-common.h
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -83,27 +83,27 @@ struct vadc_linear_graph {
/**
* enum vadc_scale_fn_type - Scaling function to convert ADC code to
* physical scaled units for the channel.
- * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
- * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * @SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * @SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
* Uses a mapping table with 100K pullup.
- * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
- * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
- * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
- * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * @SCALE_PMIC_THERM: Returns result in millidegrees Celsius.
+ * @SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * @SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * @SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
* voltage (uV) with hardware applied offset/slope values to adc code.
- * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * @SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
* lookup table. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
+ * @SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
* 100k pullup. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * @SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
* lookup table for PMIC7. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * @SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Celsius.
* The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * @SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Celsius.
* The hardware applies offset/slope to adc code. This is for PMIC7.
- * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * @SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
* charger temperature.
- * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * @SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
* SMB1390 temperature.
*/
enum vadc_scale_fn_type {
@@ -120,6 +120,7 @@ enum vadc_scale_fn_type {
SCALE_HW_CALIB_PMIC_THERM_PM7,
SCALE_HW_CALIB_PM5_CHG_TEMP,
SCALE_HW_CALIB_PM5_SMB_TEMP,
+ /* private: */
SCALE_HW_CALIB_INVALID,
};
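
Purely illustrative, not part of the patch: a helper that maps a few of the scale types to the output units documented above, e.g. for debug logging:

static const char *vadc_scale_unit(enum vadc_scale_fn_type type)
{
        switch (type) {
        case SCALE_DEFAULT:
        case SCALE_HW_CALIB_DEFAULT:
                return "uV";
        case SCALE_PMIC_THERM:
        case SCALE_HW_CALIB_PMIC_THERM:
                return "mdegC";
        default:
                return "other";
        }
}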
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
index 8099759d7242..7f815f3fed6a 100644
--- a/include/linux/iio/backend.h
+++ b/include/linux/iio/backend.h
@@ -3,6 +3,7 @@
#define _IIO_BACKEND_H_
#include <linux/types.h>
+#include <linux/iio/iio.h>
struct iio_chan_spec;
struct fwnode_handle;
@@ -13,15 +14,19 @@ struct iio_dev;
enum iio_backend_data_type {
IIO_BACKEND_TWOS_COMPLEMENT,
IIO_BACKEND_OFFSET_BINARY,
+ IIO_BACKEND_DATA_UNSIGNED,
IIO_BACKEND_DATA_TYPE_MAX
};
enum iio_backend_data_source {
- IIO_BACKEND_INTERNAL_CONTINUOS_WAVE,
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
IIO_BACKEND_EXTERNAL,
+ IIO_BACKEND_INTERNAL_RAMP_16BIT,
IIO_BACKEND_DATA_SOURCE_MAX
};
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
/**
* IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
* @_name: Attribute name
@@ -54,6 +59,8 @@ enum iio_backend_test_pattern {
IIO_BACKEND_NO_TEST_PATTERN,
/* modified prbs9 */
IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
IIO_BACKEND_TEST_PATTERN_MAX
};
@@ -63,6 +70,20 @@ enum iio_backend_sample_trigger {
IIO_BACKEND_SAMPLE_TRIGGER_MAX
};
+enum iio_backend_interface_type {
+ IIO_BACKEND_INTERFACE_SERIAL_LVDS,
+ IIO_BACKEND_INTERFACE_SERIAL_CMOS,
+ IIO_BACKEND_INTERFACE_MAX
+};
+
+enum iio_backend_filter_type {
+ IIO_BACKEND_FILTER_TYPE_DISABLED,
+ IIO_BACKEND_FILTER_TYPE_SINC1,
+ IIO_BACKEND_FILTER_TYPE_SINC5,
+ IIO_BACKEND_FILTER_TYPE_SINC5_PLUS_COMP,
+ IIO_BACKEND_FILTER_TYPE_MAX
+};
+
/**
* struct iio_backend_ops - operations structure for an iio_backend
* @enable: Enable backend.
@@ -71,6 +92,7 @@ enum iio_backend_sample_trigger {
* @chan_disable: Disable one channel.
* @data_format_set: Configure the data format for a specific channel.
* @data_source_set: Configure the data source for a specific channel.
+ * @data_source_get: Data source getter for a specific channel.
* @set_sample_rate: Configure the sampling rate for a specific channel.
* @test_pattern_set: Configure a test pattern.
* @chan_status: Get the channel status.
@@ -81,6 +103,20 @@ enum iio_backend_sample_trigger {
* @extend_chan_spec: Extend an IIO channel.
* @ext_info_set: Extended info setter.
* @ext_info_get: Extended info getter.
+ * @interface_type_get: Get the hardware interface type.
+ * @data_size_set: Set the data size.
+ * @oversampling_ratio_set: Set the oversampling ratio.
+ * @read_raw: Read a channel attribute from a backend device.
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
+ * @filter_type_set: Set filter type.
+ * @interface_data_align: Perform the data alignment process.
+ * @num_lanes_set: Set the number of lanes enabled.
+ * @ddr_enable: Enable interface DDR (Double Data Rate) mode.
+ * @ddr_disable: Disable interface DDR (Double Data Rate) mode.
+ * @data_stream_enable: Enable data stream.
+ * @data_stream_disable: Disable data stream.
+ * @data_transfer_addr: Set data address.
**/
struct iio_backend_ops {
int (*enable)(struct iio_backend *back);
@@ -91,6 +127,8 @@ struct iio_backend_ops {
const struct iio_backend_data_fmt *data);
int (*data_source_set)(struct iio_backend *back, unsigned int chan,
enum iio_backend_data_source data);
+ int (*data_source_get)(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
int (*set_sample_rate)(struct iio_backend *back, unsigned int chan,
u64 sample_rate_hz);
int (*test_pattern_set)(struct iio_backend *back,
@@ -113,15 +151,51 @@ struct iio_backend_ops {
const char *buf, size_t len);
int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
+ int (*interface_type_get)(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+ int (*data_size_set)(struct iio_backend *back, unsigned int size);
+ int (*oversampling_ratio_set)(struct iio_backend *back,
+ unsigned int chan, unsigned int ratio);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*filter_type_set)(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+ int (*interface_data_align)(struct iio_backend *back, u32 timeout_us);
+ int (*num_lanes_set)(struct iio_backend *back, unsigned int num_lanes);
+ int (*ddr_enable)(struct iio_backend *back);
+ int (*ddr_disable)(struct iio_backend *back);
+ int (*data_stream_enable)(struct iio_backend *back);
+ int (*data_stream_disable)(struct iio_backend *back);
+ int (*data_transfer_addr)(struct iio_backend *back, u32 address);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
};
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
const struct iio_backend_data_fmt *data);
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
enum iio_backend_data_source data);
+int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
u64 sample_rate_hz);
int iio_backend_test_pattern_set(struct iio_backend *back,
@@ -136,22 +210,61 @@ int iio_backend_data_sample_trigger(struct iio_backend *back,
int devm_iio_backend_request_buffer(struct device *dev,
struct iio_backend *back,
struct iio_dev *indio_dev);
+int iio_backend_filter_type_set(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us);
+int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes);
+int iio_backend_ddr_enable(struct iio_backend *back);
+int iio_backend_ddr_disable(struct iio_backend *back);
+int iio_backend_data_stream_enable(struct iio_backend *back);
+int iio_backend_data_stream_disable(struct iio_backend *back);
+int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address);
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan,
const char *buf, size_t len);
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
const struct iio_chan_spec *chan, char *buf);
-
-int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
- struct iio_backend *back,
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size);
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int chan,
+ unsigned int ratio);
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
struct iio_chan_spec *chan);
void *iio_backend_get_priv(const struct iio_backend *conv);
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
struct fwnode_handle *fwnode);
int devm_iio_backend_register(struct device *dev,
- const struct iio_backend_ops *ops, void *priv);
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
#endif
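
With devm_iio_backend_register() now taking a struct iio_backend_info, registration looks roughly like the sketch below; the ops, state struct, and name are placeholders:

static const struct iio_backend_ops my_backend_ops = {
        .enable = my_backend_enable,            /* placeholder */
        .chan_enable = my_backend_chan_enable,  /* placeholder */
};

static const struct iio_backend_info my_backend_info = {
        .name = "my-axi-backend",
        .ops = &my_backend_ops,
};

static int my_backend_probe(struct platform_device *pdev)
{
        struct my_backend_state *st;

        st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        return devm_iio_backend_register(&pdev->dev, &my_backend_info, st);
}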
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 6e27e47077d5..4f33e6a39797 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -7,6 +7,7 @@
#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
@@ -16,6 +17,9 @@
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
+struct dma_buf_attachment;
+struct dma_fence;
+struct sg_table;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
@@ -41,6 +45,10 @@ enum iio_block_state {
* @queue: Parent DMA buffer queue
* @kref: kref used to manage the lifetime of block
* @state: Current state of the block
+ * @cyclic: True if this is a cyclic buffer
+ * @fileio: True if this buffer is used for fileio mode
+ * @sg_table: DMA table for the transfer when transferring a DMABUF
+ * @fence: DMA fence to be signaled when a DMABUF transfer is complete
*/
struct iio_dma_buffer_block {
/* May only be accessed by the owner of the block */
@@ -63,6 +71,12 @@ struct iio_dma_buffer_block {
* queue->list_lock if the block is not owned by the core.
*/
enum iio_block_state state;
+
+ bool cyclic;
+ bool fileio;
+
+ struct sg_table *sg_table;
+ struct dma_fence *fence;
};
/**
@@ -72,6 +86,7 @@ struct iio_dma_buffer_block {
* @pos: Read offset in the active block
* @block_size: Size of each block
* @next_dequeue: index of next block that will be dequeued
+ * @enabled: Whether the buffer is operating in fileio mode
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
@@ -80,6 +95,7 @@ struct iio_dma_buffer_queue_fileio {
size_t block_size;
unsigned int next_dequeue;
+ bool enabled;
};
/**
@@ -95,6 +111,7 @@ struct iio_dma_buffer_queue_fileio {
* the DMA controller
* @incoming: List of buffers on the incoming queue
* @active: Whether the buffer is currently active
+ * @num_dmabufs: Total number of DMABUFs attached to this queue
* @fileio: FileIO state
*/
struct iio_dma_buffer_queue {
@@ -107,6 +124,7 @@ struct iio_dma_buffer_queue {
struct list_head incoming;
bool active;
+ atomic_t num_dmabufs;
struct iio_dma_buffer_queue_fileio fileio;
};
@@ -144,4 +162,18 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence,
+ struct sg_table *sgt,
+ size_t size, bool cyclic);
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
+struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
+
#endif
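
The helpers declared above are intended to back the new DMABUF hooks in struct iio_buffer_access_funcs (see the buffer_impl.h hunk further down). A DMA buffer implementation would presumably plumb them through as follows; the pre-existing mandatory ops are elided:

static const struct iio_buffer_access_funcs my_dma_buffer_ops = {
        /* .read, .data_available, etc. omitted for brevity */
        .attach_dmabuf = iio_dma_buffer_attach_dmabuf,
        .detach_dmabuf = iio_dma_buffer_detach_dmabuf,
        .enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
        .lock_queue = iio_dma_buffer_lock_queue,
        .unlock_queue = iio_dma_buffer_unlock_queue,
        .get_dma_dev = iio_dma_buffer_get_dma_dev,
};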
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 81d9a19aeb91..37f27545f69f 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -11,8 +11,9 @@
struct iio_dev;
struct device;
+struct dma_chan;
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
@@ -26,6 +27,10 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir);
#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
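
A short sketch of the new *_with_handle variant, for drivers that already own a DMA channel (e.g. one shared with an SPI offload) rather than looking it up by name; my_setup_rx_buffer() is illustrative:

/* Sketch: "chan" was obtained elsewhere, e.g. via dma_request_chan() */
static int my_setup_rx_buffer(struct device *dev, struct iio_dev *indio_dev,
                              struct dma_chan *chan)
{
        return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, chan,
                                                           IIO_BUFFER_DIRECTION_IN);
}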
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 418b1307d3f2..d37f82678f71 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -26,18 +26,14 @@ int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
* @data: sample data
* @timestamp: timestamp for the sample data
*
- * Pushes data to the IIO device's buffers. If timestamps are enabled for the
- * device the function will store the supplied timestamp as the last element in
- * the sample data buffer before pushing it to the device buffers. The sample
- * data buffer needs to be large enough to hold the additional timestamp
- * (usually the buffer should be indio->scan_bytes bytes large).
+ * DEPRECATED: Use iio_push_to_buffers_with_ts() instead.
*
* Returns 0 on success, a negative error code otherwise.
*/
static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
void *data, int64_t timestamp)
{
- if (indio_dev->scan_timestamp) {
+ if (ACCESS_PRIVATE(indio_dev, scan_timestamp)) {
size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
((int64_t *)data)[ts_offset] = timestamp;
}
@@ -45,6 +41,34 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+/**
+ * iio_push_to_buffers_with_ts() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: Pointer to sample data buffer.
+ * @data_total_len: The size of @data in bytes.
+ * @timestamp: Timestamp for the sample data.
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be at least indio_dev->scan_bytes bytes large).
+ *
+ * Context: Any context.
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_ts(struct iio_dev *indio_dev,
+ void *data, size_t data_total_len,
+ s64 timestamp)
+{
+ if (unlikely(data_total_len < indio_dev->scan_bytes)) {
+ dev_err(&indio_dev->dev, "Undersized storage pushed to buffer\n");
+ return -ENOSPC;
+ }
+
+ return iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+}
+
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
const void *data, size_t data_sz,
int64_t timestamp);
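
Typical use of the new length-checked push from a trigger handler; the scan layout below is illustrative:

struct my_scan {
        __le16 data[2];
        s64 timestamp __aligned(8);
};

static irqreturn_t my_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct my_scan scan = { };

        /* device readout into scan.data elided */
        iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
                                    iio_get_time_ns(indio_dev));
        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}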
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 89c3fd7c29ca..c0b0e0992a85 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -9,8 +9,12 @@
#include <uapi/linux/iio/buffer.h>
#include <linux/iio/buffer.h>
+struct dma_buf_attachment;
+struct dma_fence;
struct iio_dev;
+struct iio_dma_buffer_block;
struct iio_buffer;
+struct sg_table;
/**
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
@@ -20,7 +24,8 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @store_to: actually store stuff to the buffer
+ * @store_to: actually store stuff to the buffer - must be safe to
+ * call from any context (e.g. must not sleep).
* @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
@@ -39,6 +44,17 @@ struct iio_buffer;
 * device stops sampling. Calls are balanced with @enable.
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @attach_dmabuf: called from userspace via ioctl to attach one external
+ * DMABUF.
+ * @detach_dmabuf: called from userspace via ioctl to detach one previously
+ * attached DMABUF.
+ * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
+ * object to this buffer. Requires a valid DMABUF fd that
+ * was previously attached to this buffer.
+ * @get_dma_dev: called to get the DMA device associated with this buffer.
+ * @lock_queue: called when the core needs to lock the buffer queue;
+ * it is used when enqueueing DMABUF objects.
+ * @unlock_queue: used to unlock a previously locked buffer queue
* @modes: Supported operating modes by this buffer type
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
*
@@ -68,6 +84,18 @@ struct iio_buffer_access_funcs {
void (*release)(struct iio_buffer *buffer);
+ struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+ void (*detach_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+ int (*enqueue_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence, struct sg_table *sgt,
+ size_t size, bool cyclic);
+ struct device * (*get_dma_dev)(struct iio_buffer *buffer);
+ void (*lock_queue)(struct iio_buffer *buffer);
+ void (*unlock_queue)(struct iio_buffer *buffer);
+
unsigned int modes;
unsigned int flags;
};
@@ -136,6 +164,12 @@ struct iio_buffer {
/* @ref: Reference count of the buffer. */
struct kref ref;
+
+ /* @dmabufs: List of DMABUF attachments */
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+
+ /* @dmabufs_mutex: Protects dmabufs */
+ struct mutex dmabufs_mutex;
};
/**
@@ -159,6 +193,8 @@ void iio_buffer_init(struct iio_buffer *buffer);
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
void iio_buffer_put(struct iio_buffer *buffer);
+void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret);
+
#else /* CONFIG_IIO_BUFFER */
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
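
On the completion side, a DMA driver is expected to signal the fence stored in the block once a DMABUF transfer finishes; a minimal sketch under that assumption:

/* Sketch: called from the DMA completion path of a DMABUF transfer */
static void my_dma_block_done(struct iio_dma_buffer_block *block, int ret)
{
        if (block->fence)
                iio_buffer_signal_dmabuf_done(block->fence, ret);
}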
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index e72167b96d27..bb966abcde53 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -126,5 +126,6 @@ extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
/* List of extended channel specification for all sensors. */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_limited_info[];
#endif /* __CROS_EC_SENSORS_CORE_H */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index e9910b41d48e..5039558267e4 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -131,7 +131,8 @@ struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
* @dev: Pointer to client device.
- * @cb: Callback function.
+ * @cb: Callback function. Must be safe to call from any context
+ * (e.g. must not sleep).
* @private: Private data passed to callback.
*
* NB right now we have no ability to mux data from multiple devices.
@@ -382,6 +383,24 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
int *val2);
/**
+ * iio_multiply_value() - Multiply an IIO value
+ * @result: Destination pointer for the multiplication result
+ * @multiplier: Multiplier.
+ * @type: One of the IIO_VAL_* constants. This decides how the @val and
+ * @val2 parameters are interpreted.
+ * @val: Value being multiplied.
+ * @val2: Second component of the value being multiplied; its use depends on @type.
+ *
+ * Multiply an IIO value with a s64 multiplier storing the result as
+ * IIO_VAL_INT. This is typically used for scaling.
+ *
+ * Returns:
+ * IIO_VAL_INT on success or a negative error code on failure.
+ */
+int iio_multiply_value(int *result, s64 multiplier,
+ unsigned int type, int val, int val2);
+
+/**
* iio_convert_raw_to_processed() - Converts a raw value to a processed value
* @chan: The channel being queried
* @raw: The raw IIO to convert
@@ -418,7 +437,7 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
* @chan: The channel being queried.
* @attr: The ext_info attribute to read.
* @buf: Where to store the attribute value. Assumed to hold
- * at least PAGE_SIZE bytes.
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
*
* Returns the number of bytes written to buf (perhaps w/o zero termination;
* it need not even be a string), or an error code.
@@ -441,4 +460,14 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
const char *buf, size_t len);
+/**
+ * iio_read_channel_label() - read label for a given channel
+ * @chan: The channel being queried.
+ * @buf: Where to store the attribute value. Assumed to hold
+ * at least PAGE_SIZE bytes and to be aligned at PAGE_SIZE.
+ *
+ * Returns the number of bytes written to buf, or an error code.
+ */
+ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf);
+
#endif
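
A sketch of a consumer combining the new iio_multiply_value() with an existing scale read; my_read_scaled() is not part of this patch and error handling is condensed:

static int my_read_scaled(struct iio_channel *chan, int *result)
{
        int raw, scale, scale2, type, ret;

        ret = iio_read_channel_raw(chan, &raw);
        if (ret < 0)
                return ret;

        type = iio_read_channel_scale(chan, &scale, &scale2);
        if (type < 0)
                return type;

        /* result = raw * scale, with scale interpreted per its IIO_VAL_* type */
        return iio_multiply_value(result, raw, type, scale, scale2);
}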
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 7a157ed218f6..7f8b55551ed0 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -18,7 +18,7 @@ struct iio_map;
* @map: array of mappings specifying association of channel with client
*/
int iio_map_array_register(struct iio_dev *indio_dev,
- struct iio_map *map);
+ const struct iio_map *map);
/**
* iio_map_array_unregister() - tell the core to remove consumer mappings for
@@ -38,6 +38,7 @@ int iio_map_array_unregister(struct iio_dev *indio_dev);
* handle de-registration of the IIO map object when the device's refcount goes to
* zero.
*/
-int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps);
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
+ const struct iio_map *maps);
#endif
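
Since the map arrays are now taken as const, they can live in rodata; an illustrative mapping (struct iio_map is declared in linux/iio/machine.h, the names below are invented):

static const struct iio_map my_adc_maps[] = {
        {
                .consumer_dev_name = "my-battery",
                .consumer_channel = "vbat",
                .adc_channel_label = "CH0",
        },
        { }     /* sentinel */
};

/* in probe: */
ret = devm_iio_map_array_register(dev, indio_dev, my_adc_maps);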
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index a4558c45a548..72062a0c7c87 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -10,7 +10,7 @@
#include <uapi/linux/iio/events.h>
/**
- * IIO_EVENT_CODE() - create event identifier
+ * _IIO_EVENT_CODE() - create event identifier
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
 * @diff: Whether the event is for a differential channel or not.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -19,10 +19,13 @@
* @chan: Channel number for non-differential channels.
* @chan1: First channel number for differential channels.
* @chan2: Second channel number for differential channels.
+ *
+ * Drivers should use the specialized macros below instead of using this one
+ * directly.
*/
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
+#define _IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
(((u64)type << 56) | ((u64)diff << 55) | \
((u64)direction << 48) | ((u64)modifier << 40) | \
((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
@@ -30,7 +33,8 @@
/**
- * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -40,10 +44,11 @@
#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
type, direction) \
- IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
/**
- * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @type: Type of the event. Should be one of enum iio_event_type.
@@ -51,6 +56,18 @@
*/
#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
- IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+/**
+ * IIO_DIFF_EVENT_CODE() - create event identifier for differential channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_DIFF_EVENT_CODE(chan_type, chan1, chan2, type, direction) \
+ _IIO_EVENT_CODE(chan_type, 1, 0, direction, type, 0, chan1, chan2)
#endif
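
Example use of the new differential macro, e.g. pushing a rising-threshold event on a differential voltage channel between inputs 0 and 1 (a sketch, not from this patch):

iio_push_event(indio_dev,
               IIO_DIFF_EVENT_CODE(IIO_VOLTAGE, 0, 1,
                                   IIO_EV_TYPE_THRESH,
                                   IIO_EV_DIR_RISING),
               iio_get_time_ns(indio_dev));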
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index de45cf2ee1e4..ce2086f97e3f 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -51,7 +51,7 @@
/* REG3 Bit Definitions */
#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15)
#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
index 9cb6c80dea71..66f830ab9b49 100644
--- a/include/linux/iio/iio-gts-helper.h
+++ b/include/linux/iio/iio-gts-helper.h
@@ -188,6 +188,9 @@ int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
int scale_int, int scale_nano,
int *gain_sel);
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel);
int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
int *scale_nano);
int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
@@ -196,11 +199,15 @@ int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
int old_time, int new_time,
int *new_gain);
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range);
int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
int *length);
int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
int *length);
int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
const int **vals, int *type, int *length);
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time);
#endif
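
A rough sketch of a write_raw(IIO_CHAN_INFO_SCALE) path built on the new combined lookup; the two register writers are placeholders:

static int my_set_scale(struct my_chip *chip, int val, int val2)
{
        int gain_sel, time_sel, ret;

        ret = iio_gts_find_gain_time_sel_for_scale(&chip->gts, val, val2,
                                                   &gain_sel, &time_sel);
        if (ret)
                return ret;

        ret = my_write_gain_sel(chip, gain_sel);        /* placeholder */
        if (ret)
                return ret;

        return my_write_time_sel(chip, time_sel);       /* placeholder */
}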
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 5aec3945555b..4247497f3f8b 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -28,7 +28,7 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
- * @bounce_buffer: for devices that call iio_push_to_buffers_with_timestamp_unaligned()
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
 * @bounce_buffer_size: size of the currently allocated bounce buffer
* @scan_index_timestamp: cache of the index to the timestamp
* @clock_id: timestamping clock posix identifier
@@ -70,7 +70,7 @@ struct iio_dev_opaque {
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
+ unsigned int cached_reg_addr;
char read_buf[20];
unsigned int read_buf_len;
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 55e2b22086a1..872ebdf0dd77 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -7,9 +7,11 @@
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
+#include <linux/align.h>
#include <linux/device.h>
#include <linux/cdev.h>
-#include <linux/cleanup.h>
+#include <linux/compiler_types.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/iio/types.h>
/* IIO TODO LIST */
@@ -174,6 +176,27 @@ struct iio_event_spec {
};
/**
+ * struct iio_scan_type - specification for channel data format in buffer
+ * @sign: 's' or 'u' to specify signed or unsigned
+ * @realbits: Number of valid bits of data
+ * @storagebits: Realbits + padding
+ * @shift: Shift right by this before masking out realbits.
+ * @repeat: Number of times real/storage bits repeats. When the
+ * repeat element is more than 1, then the type element in
+ * sysfs will show a repeat value. Otherwise, the number
+ * of repetitions is omitted.
+ * @endianness: little or big endian
+ */
+struct iio_scan_type {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+};
+
+/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
* @channel: What number do we wish to assign the channel.
@@ -183,18 +206,13 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: struct describing the scan type
- * @scan_type.sign: 's' or 'u' to specify signed or unsigned
- * @scan_type.realbits: Number of valid bits of data
- * @scan_type.storagebits: Realbits + padding
- * @scan_type.shift: Shift right by this before masking out
- * realbits.
- * @scan_type.repeat: Number of times real/storage bits repeats.
- * When the repeat element is more than 1, then
- * the type element in sysfs will show a repeat
- * value. Otherwise, the number of repetitions
- * is omitted.
- * @scan_type.endianness: little or big endian
+ * @scan_type: struct describing the scan type - mutually exclusive
+ * with ext_scan_type.
+ * @ext_scan_type: Used in rare cases where there is more than one scan
+ * format for a channel. When this is used, the flag
+ * has_ext_scan_type must be set and the driver must
+ * implement get_current_scan_type in struct iio_info.
+ * @num_ext_scan_type: Number of elements in ext_scan_type.
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
@@ -238,6 +256,7 @@ struct iio_event_spec {
* attributes but not for event codes.
* @output: Channel is output.
* @differential: Channel is differential.
+ * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type.
*/
struct iio_chan_spec {
enum iio_chan_type type;
@@ -245,31 +264,31 @@ struct iio_chan_spec {
int channel2;
unsigned long address;
int scan_index;
- struct {
- char sign;
- u8 realbits;
- u8 storagebits;
- u8 shift;
- u8 repeat;
- enum iio_endian endianness;
- } scan_type;
- long info_mask_separate;
- long info_mask_separate_available;
- long info_mask_shared_by_type;
- long info_mask_shared_by_type_available;
- long info_mask_shared_by_dir;
- long info_mask_shared_by_dir_available;
- long info_mask_shared_by_all;
- long info_mask_shared_by_all_available;
+ union {
+ struct iio_scan_type scan_type;
+ struct {
+ const struct iio_scan_type *ext_scan_type;
+ unsigned int num_ext_scan_type;
+ };
+ };
+ unsigned long info_mask_separate;
+ unsigned long info_mask_separate_available;
+ unsigned long info_mask_shared_by_type;
+ unsigned long info_mask_shared_by_type_available;
+ unsigned long info_mask_shared_by_dir;
+ unsigned long info_mask_shared_by_dir_available;
+ unsigned long info_mask_shared_by_all;
+ unsigned long info_mask_shared_by_all_available;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
const char *extend_name;
const char *datasheet_name;
- unsigned modified:1;
- unsigned indexed:1;
- unsigned output:1;
- unsigned differential:1;
+ unsigned int modified:1;
+ unsigned int indexed:1;
+ unsigned int output:1;
+ unsigned int differential:1;
+ unsigned int has_ext_scan_type:1;
};
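
A channel with two possible wire formats can now publish both through the new union; a sketch, with the driver's get_current_scan_type() callback (documented in the next hunk) returning the active index:

static const struct iio_scan_type my_scan_types[] = {
        { .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_BE },
        { .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_BE },
};

static const struct iio_chan_spec my_channel = {
        .type = IIO_VOLTAGE,
        .indexed = 1,
        .scan_index = 0,
        .ext_scan_type = my_scan_types,
        .num_ext_scan_type = ARRAY_SIZE(my_scan_types),
        .has_ext_scan_type = 1,
};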
@@ -432,6 +451,9 @@ struct iio_trigger; /* forward declaration */
* for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
+ * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
+ * in the channel spec to return the index of the currently
+ * active ext_scan_type for a channel.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
@@ -494,7 +516,7 @@ struct iio_info {
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
- int state);
+ bool state);
int (*read_event_value)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
@@ -516,16 +538,18 @@ struct iio_info {
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*get_current_scan_type)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
int (*fwnode_xlate)(struct iio_dev *indio_dev,
const struct fwnode_reference_args *iiospec);
- int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
+ int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
- unsigned count);
+ unsigned int count);
};
/**
@@ -587,9 +611,9 @@ struct iio_dev {
int scan_bytes;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned int __private masklength;
const unsigned long *active_scan_mask;
- bool scan_timestamp;
+ bool __private scan_timestamp;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -602,7 +626,7 @@ struct iio_dev {
const struct iio_info *info;
const struct iio_buffer_setup_ops *setup_ops;
- void *priv;
+ void *__private priv;
};
int iio_device_id(struct iio_dev *indio_dev);
@@ -637,34 +661,31 @@ void iio_device_unregister(struct iio_dev *indio_dev);
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
-int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
-void iio_device_release_direct_mode(struct iio_dev *indio_dev);
+bool __iio_device_claim_direct(struct iio_dev *indio_dev);
+void __iio_device_release_direct(struct iio_dev *indio_dev);
/*
- * This autocleanup logic is normally used via
- * iio_device_claim_direct_scoped().
+ * Helper functions that allow claim and release of direct mode
+ * in a fashion that doesn't generate many false positives from sparse.
+ * Note this must remain static inline in the header so that sparse
+ * can see the __acquire() marking. Revisit when sparse supports
+ * __cond_acquires()
*/
-DEFINE_GUARD(iio_claim_direct, struct iio_dev *, iio_device_claim_direct_mode(_T),
- iio_device_release_direct_mode(_T))
+static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
+{
+ if (!__iio_device_claim_direct(indio_dev))
+ return false;
-DEFINE_GUARD_COND(iio_claim_direct, _try, ({
- struct iio_dev *dev;
- int d = iio_device_claim_direct_mode(_T);
+ __acquire(iio_dev);
- if (d < 0)
- dev = NULL;
- else
- dev = _T;
- dev;
- }))
+ return true;
+}
-/**
- * iio_device_claim_direct_scoped() - Scoped call to iio_device_claim_direct.
- * @fail: What to do on failure to claim device.
- * @iio_dev: Pointer to the IIO devices structure
- */
-#define iio_device_claim_direct_scoped(fail, iio_dev) \
- scoped_cond_guard(iio_claim_direct_try, fail, iio_dev)
+static inline void iio_device_release_direct(struct iio_dev *indio_dev)
+{
+ __iio_device_release_direct(indio_dev);
+ __release(indio_dev);
+}
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
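
The resulting call pattern in a converted driver looks like this sketch (returning -EBUSY on a failed claim follows the existing convention; my_read_channel() is a placeholder):

static int my_read_raw(struct iio_dev *indio_dev,
                       const struct iio_chan_spec *chan,
                       int *val, int *val2, long mask)
{
        int ret;

        if (!iio_device_claim_direct(indio_dev))
                return -EBUSY;

        ret = my_read_channel(indio_dev, chan, val);    /* placeholder */
        iio_device_release_direct(indio_dev);

        return ret;
}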
@@ -756,14 +777,51 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
* to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
* must not share cachelines with the rest of the structure, thus making
* them safe for use with non-coherent DMA.
+ *
+ * A number of drivers also use this on buffers that include a 64-bit timestamp
+ * that is used with iio_push_to_buffers_with_ts(). Therefore, in the case where
+ * DMA alignment is not sufficient for proper timestamp alignment, we align to
+ * 8 bytes instead.
*/
-#define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN
+#define IIO_DMA_MINALIGN MAX(ARCH_DMA_MINALIGN, sizeof(s64))
+
+#define __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ type name[ALIGN((count), sizeof(s64) / sizeof(type)) + sizeof(s64) / sizeof(type)]
+
+/**
+ * IIO_DECLARE_BUFFER_WITH_TS() - Declare a buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Declares a buffer that is safe to use with iio_push_to_buffers_with_ts(). In
+ * addition to allocating enough space for @count elements of @type, it also
+ * allocates space for a s64 timestamp at the end of the buffer and ensures
+ * proper alignment of the timestamp.
+ */
+#define IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(sizeof(s64))
+
+/**
+ * IIO_DECLARE_DMA_BUFFER_WITH_TS() - Declare a DMA-aligned buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Same as IIO_DECLARE_BUFFER_WITH_TS(), but it uses __aligned(IIO_DMA_MINALIGN)
+ * to ensure that the buffer doesn't share cachelines with anything that comes
+ * before it in a struct. This should not be used for stack-allocated buffers
+ * as stack memory cannot generally be used for DMA.
+ */
+#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
+
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return indio_dev->priv;
+ return ACCESS_PRIVATE(indio_dev, priv);
}
void iio_device_free(struct iio_dev *indio_dev);
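
Declaring a DMA-safe push buffer with the new macro, as a sketch: four 16-bit samples plus the aligned timestamp slot, later pushed with iio_push_to_buffers_with_ts():

struct my_state {
        struct spi_device *spi;
        /* 4 x 16-bit samples + s64 timestamp, cacheline-isolated for DMA */
        IIO_DECLARE_DMA_BUFFER_WITH_TS(__le16, buf, 4);
};

/* later, in the trigger handler: */
iio_push_to_buffers_with_ts(indio_dev, st->buf, sizeof(st->buf), timestamp);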
@@ -788,10 +846,28 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
}
#endif
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Return 0 if successful, negative otherwise
+ */
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Return 0 if successful, negative otherwise
+ */
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
+
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation,
char *acpi_method);
+const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
#else
static inline bool iio_read_acpi_mount_matrix(struct device *dev,
struct iio_mount_matrix *orientation,
@@ -799,7 +875,68 @@ static inline bool iio_read_acpi_mount_matrix(struct device *dev,
{
return false;
}
+static inline const char *
+iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
+{
+ return NULL;
+}
#endif
+static inline const char *iio_get_acpi_device_name(struct device *dev)
+{
+ return iio_get_acpi_device_name_and_data(dev, NULL);
+}
+
+/**
+ * iio_get_current_scan_type - Get the current scan type for a channel
+ * @indio_dev: the IIO device to get the scan type for
+ * @chan: the channel to get the scan type for
+ *
+ * Most devices only have one scan type per channel and can just access it
+ * directly without calling this function. Core IIO code and drivers that
+ * implement ext_scan_type in the channel spec should use this function to
+ * get the current scan type for a channel.
+ *
+ * Returns: the current scan type for the channel, or an ERR_PTR() on error.
+ */
+static inline const struct iio_scan_type
+*iio_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret;
+
+ if (chan->has_ext_scan_type) {
+ ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (ret >= chan->num_ext_scan_type)
+ return ERR_PTR(-EINVAL);
+
+ return &chan->ext_scan_type[ret];
+ }
+
+ return &chan->scan_type;
+}
+
+/**
+ * iio_get_masklength - Get length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel - Iterate over active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
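
With masklength now private, iteration goes through the new macro; a sketch:

static void my_log_active_channels(struct iio_dev *indio_dev)
{
        unsigned int chan;

        iio_for_each_active_channel(indio_dev, chan)
                dev_dbg(&indio_dev->dev, "channel %u active\n", chan);
}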
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 8898966bc0f0..bfb6df68e6c9 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -9,6 +9,7 @@
#ifndef __IIO_ADIS_H__
#define __IIO_ADIS_H__
+#include <linux/cleanup.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
@@ -21,6 +22,7 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
+struct iio_dev_attr;
/**
* struct adis_timeouts - ADIS chip variant timeouts
@@ -42,6 +44,8 @@ struct adis_timeout {
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
+ * @diag_stat_size: Length (in bytes) of the DIAG_STAT register. If 0, the
+ *	default length of 2 bytes is used.
* @prod_id_reg: Register address of the PROD_ID register
* @prod_id: Product ID code that should be expected when reading @prod_id_reg
* @self_test_mask: Bitmask of supported self-test operations
@@ -53,6 +57,7 @@ struct adis_timeout {
* @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
* @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
* @has_paging: True if ADIS device has paged registers
+ * @has_fifo: True if ADIS device has a hardware FIFO
* @burst_reg_cmd: Register command that triggers burst
* @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
* this should be the minimum size supported by the device.
@@ -68,6 +73,7 @@ struct adis_data {
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int diag_stat_size;
unsigned int prod_id_reg;
unsigned int prod_id;
@@ -84,6 +90,7 @@ struct adis_data {
bool unmasked_drdy;
bool has_paging;
+ bool has_fifo;
unsigned int burst_reg_cmd;
unsigned int burst_len;
@@ -92,13 +99,28 @@ struct adis_data {
};
/**
+ * struct adis_ops - Custom ops for adis devices.
+ * @write: Custom spi write implementation.
+ * @read: Custom spi read implementation.
+ * @reset: Custom sw reset implementation. The custom implementation does not
+ * need to sleep after the reset. It's done by the library already.
+ */
+struct adis_ops {
+ int (*write)(struct adis *adis, unsigned int reg, unsigned int value,
+ unsigned int size);
+ int (*read)(struct adis *adis, unsigned int reg, unsigned int *value,
+ unsigned int size);
+ int (*reset)(struct adis *adis);
+};
+
+/**
* struct adis - ADIS device instance data
* @spi: Reference to SPI device which owns this ADIS IIO device
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
- * @burst: ADIS burst transfer information
* @burst_extra_len: Burst extra length. Should only be used by devices that can
* dynamically change their burst mode length.
+ * @ops: ops struct for custom read and write functions
* @state_lock: Lock used by the device to protect state
* @msg: SPI message object
* @xfer: SPI transfer objects to be used for a @msg
@@ -114,7 +136,8 @@ struct adis {
const struct adis_data *data;
unsigned int burst_extra_len;
- /**
+ const struct adis_ops *ops;
+ /*
* The state_lock is meant to be used during operations that require
* a sequence of SPI R/W in order to protect the SPI transfer
* information (fields 'xfer', 'msg' & 'current_page') between
@@ -144,17 +167,12 @@ int __adis_reset(struct adis *adis);
* adis_reset() - Reset the device
* @adis: The adis device
*
- * Returns 0 on success, a negative error code otherwise
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_reset(struct adis *adis)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_reset(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_reset(adis);
}
int __adis_write_reg(struct adis *adis, unsigned int reg,
@@ -166,36 +184,42 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
* __adis_write_reg_8() - Write single byte to a register (unlocked)
* @adis: The adis device
* @reg: The address of the register to be written
- * @value: The value to write
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
u8 val)
{
- return __adis_write_reg(adis, reg, val, 1);
+ return adis->ops->write(adis, reg, val, 1);
}
/**
* __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
u16 val)
{
- return __adis_write_reg(adis, reg, val, 2);
+ return adis->ops->write(adis, reg, val, 2);
}
/**
* __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
* @adis: The adis device
* @reg: The address of the lower of the four register
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
u32 val)
{
- return __adis_write_reg(adis, reg, val, 4);
+ return adis->ops->write(adis, reg, val, 4);
}
/**
@@ -203,6 +227,8 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
u16 *val)
@@ -210,7 +236,7 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 2);
+ ret = adis->ops->read(adis, reg, &tmp, 2);
if (ret == 0)
*val = tmp;
@@ -222,6 +248,8 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
u32 *val)
@@ -229,7 +257,7 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 4);
+ ret = adis->ops->read(adis, reg, &tmp, 4);
if (ret == 0)
*val = tmp;
@@ -240,19 +268,16 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
* adis_write_reg() - write N bytes to register
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: The value to write to device (up to 4 bytes)
+ * @val: The value to write to device (up to 4 bytes)
* @size: The size of the @value (in bytes)
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_write_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->write(adis, reg, val, size);
}
/**
@@ -261,24 +286,23 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
* @size: The size of the @val buffer
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static int adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_read_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->read(adis, reg, val, size);
}
/**
* adis_write_reg_8() - Write single byte to a register
* @adis: The adis device
* @reg: The address of the register to be written
- * @value: The value to write
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
u8 val)
@@ -290,7 +314,9 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* adis_write_reg_16() - Write 2 bytes to a pair of registers
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
u16 val)
@@ -302,7 +328,9 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* adis_write_reg_32() - write 4 bytes to four registers
* @adis: The adis device
* @reg: The address of the lower of the four register
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
u32 val)
@@ -315,6 +343,8 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
u16 *val)
@@ -334,6 +364,8 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
u32 *val)
@@ -359,16 +391,14 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
* @size: Size of the register to update
*
* Updates the desired bits of @reg in accordance with @mask and @val.
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
const u32 mask, const u32 val, u8 size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_update_bits_base(adis, reg, mask, val, size);
- mutex_unlock(&adis->state_lock);
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_update_bits_base(adis, reg, mask, val, size);
}
/**
@@ -409,35 +439,19 @@ int __adis_enable_irq(struct adis *adis, bool enable);
static inline int adis_enable_irq(struct adis *adis, bool enable)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_enable_irq(adis, enable);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_enable_irq(adis, enable);
}
static inline int adis_check_status(struct adis *adis)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_check_status(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
-}
-
-static inline void adis_dev_lock(struct adis *adis)
-{
- mutex_lock(&adis->state_lock);
+ guard(mutex)(&adis->state_lock);
+ return __adis_check_status(adis);
}
-static inline void adis_dev_unlock(struct adis *adis)
-{
- mutex_unlock(&adis->state_lock);
-}
+#define adis_dev_auto_lock(adis) guard(mutex)(&(adis)->state_lock)
+#define adis_dev_auto_scoped_lock(adis) \
+ scoped_guard(mutex, &(adis)->state_lock)
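As a usage sketch (not part of this patch; ret, reg_lo, lo and hi are illustrative names), a driver that must hold the lock across several unlocked accesses would now write:

	adis_dev_auto_scoped_lock(adis) {
		ret = __adis_read_reg_16(adis, reg_lo, &lo);
		if (ret)
			return ret;
		ret = __adis_read_reg_16(adis, reg_hi, &hi);
		if (ret)
			return ret;
	}

The mutex is dropped automatically on the early returns as well as at the end of the block, which the removed adis_dev_lock()/adis_dev_unlock() pair could not guarantee.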
int adis_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
@@ -515,11 +529,19 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+#define devm_adis_setup_buffer_and_trigger(adis, indio_dev, trigger_handler) \
+ devm_adis_setup_buffer_and_trigger_with_attrs((adis), (indio_dev), \
+ (trigger_handler), NULL, \
+ NULL)
+
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler);
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
@@ -529,8 +551,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
static inline int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler)
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs)
{
return 0;
}
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
index a34dcf6a6001..ce3cf0addb2e 100644
--- a/include/linux/iio/timer/stm32-lptim-trigger.h
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -14,6 +14,15 @@
#define LPTIM1_OUT "lptim1_out"
#define LPTIM2_OUT "lptim2_out"
#define LPTIM3_OUT "lptim3_out"
+#define LPTIM4_OUT "lptim4_out"
+#define LPTIM5_OUT "lptim5_out"
+
+#define LPTIM1_CH1 "lptim1_ch1"
+#define LPTIM1_CH2 "lptim1_ch2"
+#define LPTIM2_CH1 "lptim2_ch1"
+#define LPTIM2_CH2 "lptim2_ch2"
+#define LPTIM3_CH1 "lptim3_ch1"
+#define LPTIM4_CH1 "lptim4_ch1"
#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
bool is_stm32_lptim_trigger(struct iio_trigger *trig);
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index 37572e4dc73a..1ee237b56183 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -72,6 +72,12 @@
#define TIM17_OC1 "tim17_oc1"
+#define TIM20_OC1 "tim20_oc1"
+#define TIM20_OC2 "tim20_oc2"
+#define TIM20_OC3 "tim20_oc3"
+#define TIM20_TRGO "tim20_trgo"
+#define TIM20_TRGO2 "tim20_trgo2"
+
#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
bool is_stm32_timer_trigger(struct iio_trigger *trig);
#else
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index d89982c98368..34eebad12d2c 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -69,6 +69,8 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_CALIBAMBIENT,
IIO_CHAN_INFO_ZEROPOINT,
IIO_CHAN_INFO_TROUGH,
+ IIO_CHAN_INFO_CONVDELAY,
+ IIO_CHAN_INFO_POWERFACTOR,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0bae61a15b60..8e29cb4e6a01 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -32,6 +32,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 0777a21cbf86..403f926d33d8 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -18,6 +18,13 @@
#include <uapi/linux/in6.h>
+/* Large enough to hold both sockaddr_in and sockaddr_in6. */
+struct sockaddr_inet {
+ unsigned short sa_family;
+ char sa_data[sizeof(struct sockaddr_in6) -
+ sizeof(unsigned short)];
+};
+
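The comment above is the whole contract; a compile-time restatement of it (illustrative only, not part of the patch):

	static_assert(sizeof(struct sockaddr_inet) >= sizeof(struct sockaddr_in));
	static_assert(sizeof(struct sockaddr_inet) >= sizeof(struct sockaddr_in6));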
/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553
* NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
* in network byte order, not in host byte order as are the IPv4 equivalents
diff --git a/include/linux/inet.h b/include/linux/inet.h
index bd8276e96e60..9158772f3559 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -55,6 +55,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
-extern bool inet_addr_is_any(struct sockaddr *addr);
+bool inet_addr_is_any(struct sockaddr_storage *addr);
#endif /* _LINUX_INET_H */
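The tightened inet_addr_is_any() prototype drops a cast at every call site; a hypothetical caller after the change:

	struct sockaddr_storage ss;

	/* ... fill ss from config or netlink ... */
	if (inet_addr_is_any(&ss))
		return -EINVAL;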
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index a9033696b0aa..704fd415c2b4 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -24,9 +24,6 @@ struct inet_diag_handler {
bool net_admin,
struct sk_buff *skb);
- size_t (*idiag_get_aux_size)(struct sock *sk,
- bool net_admin);
-
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
@@ -41,6 +38,11 @@ struct inet_diag_dump_data {
#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
struct bpf_sk_storage_diag *bpf_stg_diag;
+ bool mark_needed; /* INET_DIAG_BC_MARK_COND present. */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ bool cgroup_needed; /* INET_DIAG_BC_CGROUP_COND present. */
+#endif
+ bool userlocks_needed; /* INET_DIAG_BC_AUTO present. */
};
struct inet_connection_sock;
@@ -48,18 +50,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
u16 nlmsg_flags, bool net_admin);
-void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r);
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *req);
-
-struct sock *inet_diag_find_one_icsk(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct inet_diag_req_v2 *req);
-int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+int inet_diag_bc_sk(const struct inet_diag_dump_data *cb_data, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index cb5280e6cc21..5730ba6b1cfa 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -141,7 +141,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ARP_EVICT_NOCARRIER)
struct in_ifaddr {
- struct hlist_node hash;
+ struct hlist_node addr_lst;
struct in_ifaddr __rcu *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
@@ -226,6 +226,10 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr)
for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \
ifa = rtnl_dereference(ifa->ifa_next))
+#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \
+ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \
+ ifa = rtnl_net_dereference(net, ifa->ifa_next))
+
#define in_dev_for_each_ifa_rcu(ifa, in_dev) \
for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \
ifa = rcu_dereference(ifa->ifa_next))
@@ -252,6 +256,11 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip_ptr);
+}
+
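An illustrative walk combining the two additions (hypothetical driver code, assuming the caller holds the per-net RTNL for dev_net(dev)):

	struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
	struct in_ifaddr *ifa;

	if (in_dev)
		in_dev_for_each_ifa_rtnl_net(dev_net(dev), ifa, in_dev)
			pr_info("addr %pI4\n", &ifa->ifa_local);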
/* called with rcu_read_lock or rtnl held */
static inline bool ip_ignore_linkdown(const struct net_device *dev)
{
diff --git a/include/linux/init.h b/include/linux/init.h
index 58cef4c2e59a..40331923b9f4 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -7,13 +7,6 @@
#include <linux/stringify.h>
#include <linux/types.h>
-/* Built-in __init functions needn't be compiled with retpoline */
-#if defined(__noretpoline) && !defined(MODULE)
-#define __noinitretpoline __noretpoline
-#else
-#define __noinitretpoline
-#endif
-
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
@@ -49,7 +42,8 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline
+#define __init __section(".init.text") __cold __latent_entropy \
+ __no_kstack_erase
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
@@ -84,11 +78,15 @@
#define __exit __section(".exit.text") __exitused __cold notrace
-/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(".meminit.text") __cold notrace \
- __latent_entropy
-#define __meminitdata __section(".meminit.data")
-#define __meminitconst __section(".meminit.rodata")
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __meminitconst
+#else
+#define __meminit __init
+#define __meminitdata __initdata
+#define __meminitconst __initconst
+#endif
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -99,10 +97,6 @@
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
-#define __MEMINIT .section ".meminit.text", "ax"
-#define __MEMINITDATA .section ".meminit.data", "aw"
-#define __MEMINITRODATA .section ".meminit.rodata", "a"
-
/* silence warnings when references are OK */
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
@@ -206,12 +200,13 @@ extern struct module __this_module;
/* Format: <modname>__<counter>_<line>_<fn> */
#define __initcall_id(fn) \
+ __PASTE(kmod_, \
__PASTE(__KBUILD_MODNAME, \
__PASTE(__, \
__PASTE(__COUNTER__, \
__PASTE(_, \
__PASTE(__LINE__, \
- __PASTE(_, fn))))))
+ __PASTE(_, fn)))))))
/* Format: __<prefix>__<iid><id> */
#define __initcall_name(prefix, __iid, id) \
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index bccb3f1f6262..a6cb241ea00c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,7 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
-extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
diff --git a/include/linux/input.h b/include/linux/input.h
index c22ac465254b..7d7cb0593a63 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -275,7 +275,8 @@ struct input_handle;
* it may not sleep
* @events: event sequence handler. This method is being called by
* input core with interrupts disabled and dev->event_lock
- * spinlock held and so it may not sleep
+ *	spinlock held and so it may not sleep. The method must return
+ *	the number of events passed to it.
* @filter: similar to @event; separates normal event handlers from
* "filters".
* @match: called after comparing device's id with handler's id_table
@@ -285,6 +286,10 @@ struct input_handle;
* @start: starts handler for given handle. This function is called by
* input core right after connect() method and also when a process
* that "grabbed" a device releases it
+ * @passive_observer: set to %true by drivers only interested in observing
+ *	the data stream from devices if there are other users present. Such
+ *	drivers will not result in starting the underlying hardware device
+ * when input_open_device() is called for their handles
* @legacy_minors: set to %true by drivers using legacy minor ranges
* @minor: beginning of range of 32 legacy minors for devices this driver
* can provide
@@ -312,14 +317,15 @@ struct input_handler {
void *private;
void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
- void (*events)(struct input_handle *handle,
- const struct input_value *vals, unsigned int count);
+ unsigned int (*events)(struct input_handle *handle,
+ struct input_value *vals, unsigned int count);
bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
bool (*match)(struct input_handler *handler, struct input_dev *dev);
int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
void (*disconnect)(struct input_handle *handle);
void (*start)(struct input_handle *handle);
+ bool passive_observer;
bool legacy_minors;
int minor;
const char *name;
@@ -338,12 +344,16 @@ struct input_handler {
* @name: name given to the handle by handler that created it
* @dev: input device the handle is attached to
* @handler: handler that works with the device through this handle
+ * @handle_events: event sequence handler. It is set up by the input core
+ *	according to the event handling method specified in @handler. See
+ * input_handle_setup_event_handler().
+ * This method is being called by the input core with interrupts disabled
+ * and dev->event_lock spinlock held and so it may not sleep.
* @d_node: used to put the handle on device's list of attached handles
* @h_node: used to put the handle on handler's list of handles from which
* it gets events
*/
struct input_handle {
-
void *private;
int open;
@@ -352,6 +362,10 @@ struct input_handle {
struct input_dev *dev;
struct input_handler *handler;
+ unsigned int (*handle_events)(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count);
+
struct list_head d_node;
struct list_head h_node;
};
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index b8d8d69eba29..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -34,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers reporesenting colums
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that use used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -88,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 2cf89a538b18..d30286298a00 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -17,6 +17,7 @@
#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */
#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */
#define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */
+#define INPUT_MT_TOTAL_FORCE 0x0020 /* calculate total force from slots pressure */
/**
* struct input_mt_slot - represents the state of an input MT slot
diff --git a/include/linux/input/touch-overlay.h b/include/linux/input/touch-overlay.h
new file mode 100644
index 000000000000..0253e554d3cd
--- /dev/null
+++ b/include/linux/input/touch-overlay.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
+ */
+
+#ifndef _TOUCH_OVERLAY
+#define _TOUCH_OVERLAY
+
+#include <linux/types.h>
+
+struct input_dev;
+
+int touch_overlay_map(struct list_head *list, struct input_dev *input);
+
+void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y);
+
+bool touch_overlay_mapped_touchscreen(struct list_head *list);
+
+bool touch_overlay_process_contact(struct list_head *list,
+ struct input_dev *input,
+ struct input_mt_pos *pos, int slot);
+
+void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input);
+
+#endif
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index bc7babe91b2e..bf675a8aef8a 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -4,14 +4,14 @@
#ifdef CONFIG_NOINSTR_VALIDATION
+#include <linux/objtool.h>
#include <linux/stringify.h>
/* Begin/end of an instrumentation safe region */
#define __instrumentation_begin(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
- ".pushsection .discard.instr_begin\n\t" \
- ".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
+ ANNOTATE_INSTR_BEGIN(__ASM_BREF(c)) \
+ : : "i" (c)); \
})
#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
@@ -48,9 +48,8 @@
*/
#define __instrumentation_end(c) ({ \
asm volatile(__stringify(c) ": nop\n\t" \
- ".pushsection .discard.instr_end\n\t" \
- ".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
+ ANNOTATE_INSTR_END(__ASM_BREF(c)) \
+ : : "i" (c)); \
})
#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else /* !CONFIG_NOINSTR_VALIDATION */
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 771622650247..2cd4f65aaa37 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -87,6 +87,8 @@ bool ishtp_wait_resume(struct ishtp_device *dev);
ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA access */
struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+/* Get the ISHTP workqueue */
+struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device);
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
void ishtp_cl_free(struct ishtp_cl *cl);
@@ -100,7 +102,6 @@ void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
void *ishtp_get_client_data(struct ishtp_cl *cl);
void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
@@ -108,6 +109,7 @@ struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+int ishtp_get_connection_state(struct ishtp_cl *cl);
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
void ishtp_put_device(struct ishtp_cl_device *cl_dev);
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+ const char *name;
+};
+
+struct intel_dg_nvm_dev {
+ struct auxiliary_device aux_dev;
+ bool writable_override;
+ bool non_posted_erase;
+ struct resource bar;
+ struct resource bar2;
+ const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
diff --git a/include/linux/intel_pmt_features.h b/include/linux/intel_pmt_features.h
new file mode 100644
index 000000000000..53573a4a49b7
--- /dev/null
+++ b/include/linux/intel_pmt_features.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FEATURES_H
+#define _FEATURES_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Common masks */
+#define PMT_CAP_TELEM BIT(0)
+#define PMT_CAP_WATCHER BIT(1)
+#define PMT_CAP_CRASHLOG BIT(2)
+#define PMT_CAP_STREAMING BIT(3)
+#define PMT_CAP_THRESHOLD BIT(4)
+#define PMT_CAP_WINDOW BIT(5)
+#define PMT_CAP_CONFIG BIT(6)
+#define PMT_CAP_TRACING BIT(7)
+#define PMT_CAP_INBAND BIT(8)
+#define PMT_CAP_OOB BIT(9)
+#define PMT_CAP_SECURED_CHAN BIT(10)
+
+#define PMT_CAP_PMT_SP BIT(11)
+#define PMT_CAP_PMT_SP_POLICY GENMASK(17, 12)
+
+/* Per Core Performance Telemetry (PCPT) specific masks */
+#define PMT_CAP_PCPT_CORE_PERF BIT(18)
+#define PMT_CAP_PCPT_CORE_C0_RES BIT(19)
+#define PMT_CAP_PCPT_CORE_ACTIVITY BIT(20)
+#define PMT_CAP_PCPT_CACHE_PERF BIT(21)
+#define PMT_CAP_PCPT_QUALITY_TELEM BIT(22)
+
+/* Per Core Environmental Telemetry (PCET) specific masks */
+#define PMT_CAP_PCET_WORKPOINT_HIST BIT(18)
+#define PMT_CAP_PCET_CORE_CURR_TEMP BIT(19)
+#define PMT_CAP_PCET_CORE_INST_RES BIT(20)
+#define PMT_CAP_PCET_QUALITY_TELEM BIT(21) /* Same as PMT_CAP_PCPT */
+#define PMT_CAP_PCET_CORE_CDYN_LVL BIT(22)
+#define PMT_CAP_PCET_CORE_STRESS_LVL BIT(23)
+#define PMT_CAP_PCET_CORE_DAS BIT(24)
+#define PMT_CAP_PCET_FIVR_HEALTH BIT(25)
+#define PMT_CAP_PCET_ENERGY BIT(26)
+#define PMT_CAP_PCET_PEM_STATUS BIT(27)
+#define PMT_CAP_PCET_CORE_C_STATE BIT(28)
+
+/* Per RMID Performance Telemetry specific masks */
+#define PMT_CAP_RMID_CORES_PERF BIT(18)
+#define PMT_CAP_RMID_CACHE_PERF BIT(19)
+#define PMT_CAP_RMID_PERF_QUAL BIT(20)
+
+/* Accelerator Telemetry specific masks */
+#define PMT_CAP_ACCEL_CPM_TELEM BIT(18)
+#define PMT_CAP_ACCEL_TIP_TELEM BIT(19)
+
+/* Uncore Telemetry specific masks */
+#define PMT_CAP_UNCORE_IO_CA_TELEM BIT(18)
+#define PMT_CAP_UNCORE_RMID_TELEM BIT(19)
+#define PMT_CAP_UNCORE_D2D_ULA_TELEM BIT(20)
+#define PMT_CAP_UNCORE_PKGC_TELEM BIT(21)
+
+/* Crash Log specific masks */
+#define PMT_CAP_CRASHLOG_MAN_TRIG BIT(11)
+#define PMT_CAP_CRASHLOG_CORE BIT(12)
+#define PMT_CAP_CRASHLOG_UNCORE BIT(13)
+#define PMT_CAP_CRASHLOG_TOR BIT(14)
+#define PMT_CAP_CRASHLOG_S3M BIT(15)
+#define PMT_CAP_CRASHLOG_PERSISTENCY BIT(16)
+#define PMT_CAP_CRASHLOG_CLIP_GPIO BIT(17)
+#define PMT_CAP_CRASHLOG_PRE_RESET BIT(18)
+#define PMT_CAP_CRASHLOG_POST_RESET BIT(19)
+
+/* PeTe Log specific masks */
+#define PMT_CAP_PETE_MAN_TRIG BIT(11)
+#define PMT_CAP_PETE_ENCRYPTION BIT(12)
+#define PMT_CAP_PETE_PERSISTENCY BIT(13)
+#define PMT_CAP_PETE_REQ_TOKENS BIT(14)
+#define PMT_CAP_PETE_PROD_ENABLED BIT(15)
+#define PMT_CAP_PETE_DEBUG_ENABLED BIT(16)
+
+/* TPMI control specific masks */
+#define PMT_CAP_TPMI_MAILBOX BIT(11)
+#define PMT_CAP_TPMI_LOCK BIT(12)
+
+/* Tracing specific masks */
+#define PMT_CAP_TRACE_SRAR BIT(11)
+#define PMT_CAP_TRACE_CORRECTABLE BIT(12)
+#define PMT_CAP_TRACE_MCTP BIT(13)
+#define PMT_CAP_TRACE_MRT BIT(14)
+
+/* Per RMID Energy Telemetry specific masks */
+#define PMT_CAP_RMID_ENERGY BIT(18)
+#define PMT_CAP_RMID_ACTIVITY BIT(19)
+#define PMT_CAP_RMID_ENERGY_QUAL BIT(20)
+
+enum pmt_feature_id {
+ FEATURE_INVALID = 0x0,
+ FEATURE_PER_CORE_PERF_TELEM = 0x1,
+ FEATURE_PER_CORE_ENV_TELEM = 0x2,
+ FEATURE_PER_RMID_PERF_TELEM = 0x3,
+ FEATURE_ACCEL_TELEM = 0x4,
+ FEATURE_UNCORE_TELEM = 0x5,
+ FEATURE_CRASH_LOG = 0x6,
+ FEATURE_PETE_LOG = 0x7,
+ FEATURE_TPMI_CTRL = 0x8,
+ FEATURE_RESERVED = 0x9,
+ FEATURE_TRACING = 0xA,
+ FEATURE_PER_RMID_ENERGY_TELEM = 0xB,
+ FEATURE_MAX = 0xB,
+};
+
+enum feature_layout {
+ LAYOUT_RMID,
+ LAYOUT_WATCHER,
+ LAYOUT_COMMAND,
+ LAYOUT_CAPS_ONLY,
+};
+
+struct pmt_cap {
+ u32 mask;
+ const char *name;
+};
+
+extern const char * const pmt_feature_names[];
+extern enum feature_layout feature_layout[];
+extern struct pmt_cap pmt_cap_common[];
+extern struct pmt_cap pmt_cap_pcpt[];
+extern struct pmt_cap *pmt_caps_pcpt[];
+extern struct pmt_cap pmt_cap_pcet[];
+extern struct pmt_cap *pmt_caps_pcet[];
+extern struct pmt_cap pmt_cap_rmid_perf[];
+extern struct pmt_cap *pmt_caps_rmid_perf[];
+extern struct pmt_cap pmt_cap_accel[];
+extern struct pmt_cap *pmt_caps_accel[];
+extern struct pmt_cap pmt_cap_uncore[];
+extern struct pmt_cap *pmt_caps_uncore[];
+extern struct pmt_cap pmt_cap_crashlog[];
+extern struct pmt_cap *pmt_caps_crashlog[];
+extern struct pmt_cap pmt_cap_pete[];
+extern struct pmt_cap *pmt_caps_pete[];
+extern struct pmt_cap pmt_cap_tpmi[];
+extern struct pmt_cap *pmt_caps_tpmi[];
+extern struct pmt_cap pmt_cap_s3m[];
+extern struct pmt_cap *pmt_caps_s3m[];
+extern struct pmt_cap pmt_cap_tracing[];
+extern struct pmt_cap *pmt_caps_tracing[];
+extern struct pmt_cap pmt_cap_rmid_energy[];
+extern struct pmt_cap *pmt_caps_rmid_energy[];
+
+static inline bool pmt_feature_id_is_valid(enum pmt_feature_id id)
+{
+ if (id > FEATURE_MAX)
+ return false;
+
+ if (id == FEATURE_INVALID || id == FEATURE_RESERVED)
+ return false;
+
+ return true;
+}
+#endif
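A sketch of the intended validation pattern for an ID read from hardware (the register name is invented for illustration):

	u32 raw = readl(base + PMT_FEATURE_ID_REG);	/* hypothetical register */
	enum pmt_feature_id id = (enum pmt_feature_id)raw;

	if (!pmt_feature_id_is_valid(id))
		return -EINVAL;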
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index c0397423d3a8..e9ade2ff4af6 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -152,7 +152,7 @@ struct rapl_if_priv {
union rapl_reg reg_unit;
union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
int limits[RAPL_DOMAIN_MAX];
- int (*read_raw)(int id, struct reg_action *ra);
+ int (*read_raw)(int id, struct reg_action *ra, bool atomic);
int (*write_raw)(int id, struct reg_action *ra);
void *defaults;
void *rpi;
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
index 8ff8eabb4a98..fa788817acfc 100644
--- a/include/linux/intel_tcc.h
+++ b/include/linux/intel_tcc.h
@@ -14,5 +14,6 @@ int intel_tcc_get_tjmax(int cpu);
int intel_tcc_get_offset(int cpu);
int intel_tcc_set_offset(int cpu, int offset);
int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+u32 intel_tcc_get_offset_mask(void);
#endif /* __INTEL_TCC_H__ */
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index 1e880cb0f454..94c06bf214fb 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
+struct oobmsm_plat_info;
+
#define TPMI_VERSION_INVALID 0xff
#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
@@ -21,36 +23,15 @@ enum intel_tpmi_id {
TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */
TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
};
-/**
- * struct intel_tpmi_plat_info - Platform information for a TPMI device instance
- * @cdie_mask: Mask of all compute dies in the partition
- * @package_id: CPU Package id
- * @partition: Package partition id when multiple VSEC PCI devices per package
- * @segment: PCI segment ID
- * @bus_number: PCI bus number
- * @device_number: PCI device number
- * @function_number: PCI function number
- *
- * Structure to store platform data for a TPMI device instance. This
- * struct is used to return data via tpmi_get_platform_data().
- */
-struct intel_tpmi_plat_info {
- u16 cdie_mask;
- u8 package_id;
- u8 partition;
- u8 segment;
- u8 bus_number;
- u8 device_number;
- u8 function_number;
-};
-
-struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
+struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
bool *write_blocked);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev);
#endif
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
new file mode 100644
index 000000000000..53f6fe88e369
--- /dev/null
+++ b/include/linux/intel_vsec.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_VSEC_H
+#define _INTEL_VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/intel_pmt_features.h>
+
+/*
+ * VSEC_CAP_UNUSED is reserved. It exists to prevent zero-initialized
+ * intel_vsec devices from being automatically set to a known
+ * capability with ID 0.
+ */
+#define VSEC_CAP_UNUSED BIT(0)
+#define VSEC_CAP_TELEMETRY BIT(1)
+#define VSEC_CAP_WATCHER BIT(2)
+#define VSEC_CAP_CRASHLOG BIT(3)
+#define VSEC_CAP_SDSI BIT(4)
+#define VSEC_CAP_TPMI BIT(5)
+#define VSEC_CAP_DISCOVERY BIT(6)
+#define VSEC_FEATURE_COUNT 7
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_DISCOVERY = 12,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/**
+ * struct pmt_callbacks - Callback infrastructure for PMT devices
+ * ->read_telem(): when specified, called by the client driver to access PMT
+ * data (instead of a direct copy).
+ * @pdev: PCI device reference for the callback's use
+ * @guid: ID of data to access
+ * @data: buffer for the data to be copied
+ * @off: offset into the requested buffer
+ * @count: size of buffer
+ */
+struct pmt_callbacks {
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count);
+};
+
+struct vsec_feature_dependency {
+ unsigned long feature;
+ unsigned long supplier_bitmap;
+};
+
+/**
+ * struct intel_vsec_platform_info - Platform specific data
+ * @parent: parent device in the auxbus chain
+ * @headers: list of headers to define the PMT client devices to create
+ * @deps: array of feature dependencies
+ * @priv_data: private data, usable by parent devices, currently a callback
+ * @caps: bitmask of PMT capabilities for the given headers
+ * @quirks: bitmask of VSEC device quirks
+ * @base_addr: allow a base address to be specified (rather than derived)
+ * @num_deps: number of entries in @deps
+ */
+struct intel_vsec_platform_info {
+ struct device *parent;
+ struct intel_vsec_header **headers;
+ const struct vsec_feature_dependency *deps;
+ void *priv_data;
+ unsigned long caps;
+ unsigned long quirks;
+ u64 base_addr;
+ int num_deps;
+};
+
+/**
+ * struct intel_vsec_device - Auxbus specific device information
+ * @auxdev: auxbus device struct for auxbus access
+ * @pcidev: pci device associated with the device
+ * @resource: any resources shared by the parent
+ * @ida: id reference
+ * @num_resources: number of resources
+ * @id: xarray id
+ * @priv_data: any private data needed
+ * @priv_data_size: size of the private data
+ * @quirks: specified quirks
+ * @base_addr: base address of entries (if specified)
+ * @cap_id: the enumerated id of the vsec feature
+ */
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ int num_resources;
+ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+ unsigned long quirks;
+ u64 base_addr;
+ unsigned long cap_id;
+};
+
+/**
+ * struct oobmsm_plat_info - Platform information for a device instance
+ * @cdie_mask: Mask of all compute dies in the partition
+ * @package_id: CPU Package id
+ * @partition: Package partition id when multiple VSEC PCI devices per package
+ * @segment: PCI segment ID
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
+ * @function_number: PCI function number
+ *
+ * Structure to store platform data for an OOBMSM device instance.
+ */
+struct oobmsm_plat_info {
+ u16 cdie_mask;
+ u8 package_id;
+ u8 partition;
+ u8 segment;
+ u8 bus_number;
+ u8 device_number;
+ u8 function_number;
+};
+
+struct telemetry_region {
+ struct oobmsm_plat_info plat_info;
+ void __iomem *addr;
+ size_t size;
+ u32 guid;
+ u32 num_rmids;
+};
+
+struct pmt_feature_group {
+ enum pmt_feature_id id;
+ int count;
+ struct kref kref;
+ struct telemetry_region regions[];
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+
+#if IS_ENABLED(CONFIG_INTEL_VSEC)
+int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info);
+int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev);
+struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev);
+#else
+static inline int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ return -ENODEV;
+}
+static inline int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev)
+{
+ return -ENODEV;
+}
+static inline struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY)
+struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id);
+
+void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group);
+#else
+static inline struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void
+intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) {}
+#endif
+
+#endif
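A hypothetical consumer of the kref-counted feature-group API introduced here:

	struct pmt_feature_group *fg;

	fg = intel_pmt_get_regions_by_feature(FEATURE_PER_CORE_PERF_TELEM);
	if (IS_ERR_OR_NULL(fg))
		return -ENODEV;
	/* ... use fg->count and fg->regions[] ... */
	intel_pmt_put_feature_group(fg);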
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
index 0cd80112bea5..9bcee3e9c56c 100644
--- a/include/linux/interconnect-clk.h
+++ b/include/linux/interconnect-clk.h
@@ -11,12 +11,16 @@ struct device;
struct icc_clk_data {
struct clk *clk;
const char *name;
+ unsigned int master_id;
+ unsigned int slave_id;
};
struct icc_provider *icc_clk_register(struct device *dev,
unsigned int first_id,
unsigned int num_clocks,
const struct icc_clk_data *data);
+int devm_icc_clk_register(struct device *dev, unsigned int first_id,
+ unsigned int num_clocks, const struct icc_clk_data *data);
void icc_clk_unregister(struct icc_provider *provider);
#endif
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index f5aef8784692..8a2f652a05ff 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -116,8 +116,11 @@ struct icc_node {
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+struct icc_node *icc_node_create_dyn(void);
struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name);
+int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node);
int icc_link_create(struct icc_node *node, const int dst_id);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
@@ -136,6 +139,11 @@ static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
return -ENOTSUPP;
}
+static inline struct icc_node *icc_node_create_dyn(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct icc_node *icc_node_create(int id)
{
return ERR_PTR(-ENOTSUPP);
@@ -145,6 +153,17 @@ static inline void icc_node_destroy(int id)
{
}
+static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider,
+ const char *name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int icc_link_create(struct icc_node *node, const int dst_id)
{
return -ENOTSUPP;
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index 97ac253df62c..4b12821528a6 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -16,10 +16,13 @@
#define MBps_to_icc(x) ((x) * 1000)
#define GBps_to_icc(x) ((x) * 1000 * 1000)
#define bps_to_icc(x) (1)
-#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
+#define kbps_to_icc(x) (((x) + 7) / 8)
#define Mbps_to_icc(x) ((x) * 1000 / 8)
#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+/* macro to indicate dynamic id allocation */
+#define ICC_ALLOC_DYN_ID -1
+
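The kbps_to_icc() change swaps the explicit remainder test for the usual round-up-division idiom; both forms compute DIV_ROUND_UP(x, 8). For example:

	kbps_to_icc(8);		/* old: 8/8 + 0 = 1; new: (8 + 7)/8 = 1 */
	kbps_to_icc(9);		/* old: 9/8 + 1 = 2; new: (9 + 7)/8 = 2 */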
struct icc_path;
struct device;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5c9bdd3ffccc..266f2b39213a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -5,13 +5,14 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
-#include <linux/cpumask.h>
+#include <linux/cleanup.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
+#include <linux/cpumask_types.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>
@@ -108,6 +109,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @name: name of the device
* @dev_id: cookie to identify the device
* @percpu_dev_id: cookie to identify the device
+ * @affinity: CPUs this irqaction is allowed to run on
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @flags: flags (see IRQF_* above)
@@ -120,8 +122,11 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
*/
struct irqaction {
irq_handler_t handler;
- void *dev_id;
- void __percpu *percpu_dev_id;
+ union {
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ };
+ const struct cpumask *affinity;
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
@@ -139,7 +144,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id);
/*
* If a (PCI) device interrupt is not connected we set dev->irq to
* IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
- * can distingiush that case from other error returns.
+ * can distinguish that case from other error returns.
*
* 0x80000000 is guaranteed to be outside the available range of interrupts
* and easy to distinguish from other possible incorrect values.
@@ -168,7 +173,7 @@ static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
- return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+ return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
extern int __must_check
@@ -178,7 +183,7 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *percpu_dev_id);
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -189,12 +194,21 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
return __request_percpu_irq(irq, handler, 0,
- devname, percpu_dev_id);
+ devname, NULL, percpu_dev_id);
+}
+
+static inline int __must_check
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *percpu_dev_id)
+{
+ return __request_percpu_irq(irq, handler, 0,
+ devname, affinity, percpu_dev_id);
}
extern int __must_check
-request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *devname, void __percpu *dev);
+request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id);
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
@@ -235,6 +249,9 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+DEFINE_LOCK_GUARD_1(disable_irq, int,
+ disable_irq(*_T->lock), enable_irq(*_T->lock))
+
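The new guard class pairs disable_irq()/enable_irq() automatically; a minimal sketch (struct my_dev and MY_CFG are invented for illustration):

	static void my_update_config(struct my_dev *dev)
	{
		guard(disable_irq)(&dev->irq);
		writel(dev->cfg, dev->base + MY_CFG);
	}

The interrupt is re-enabled on every exit path from the scope.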
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
@@ -272,7 +289,7 @@ struct irq_affinity_notify {
#define IRQ_AFFINITY_MAX_SETS 4
/**
- * struct irq_affinity - Description for automatic irq affinity assignements
+ * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
@@ -444,7 +461,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_disable();
#endif
}
@@ -452,22 +469,14 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_save(*flags);
#endif
}
-static inline void disable_irq_lockdep(unsigned int irq)
-{
- disable_irq(irq);
-#ifdef CONFIG_LOCKDEP
- local_irq_disable();
-#endif
-}
-
static inline void enable_irq_lockdep(unsigned int irq)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_enable();
#endif
enable_irq(irq);
@@ -475,7 +484,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_restore(*flags);
#endif
enable_irq(irq);
@@ -590,7 +599,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
@@ -605,13 +614,60 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused)
}
#endif
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/*
+ * With forced-threaded interrupts enabled a raised softirq is deferred to
+ * ksoftirqd unless it can be handled within the threaded interrupt. This
+ * affects timer_list timers and hrtimers which are explicitly marked with
+ * HRTIMER_MODE_SOFT.
+ * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing
+ * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD.
+ * Userspace-controlled timers (like the clock_nanosleep() interface) are
+ * divided into two categories: tasks with an elevated scheduling policy,
+ * i.e. SCHED_{FIFO|RR|DL}, and everything else. Tasks with an elevated
+ * scheduling policy are woken up directly from the HARDIRQ, while all
+ * other wake ups are delayed to softirq and so to ksoftirqd.
+ *
+ * ksoftirqd runs at SCHED_OTHER policy, and it should remain there since
+ * it handles the softirq in an overloaded situation (it did not manage to
+ * handle everything within its last run).
+ * If the timers were handled at SCHED_OTHER priority they would compete
+ * with all other SCHED_OTHER tasks for CPU resources and could be delayed.
+ * Moving the timer softirqs to a low-priority SCHED_FIFO thread instead
+ * ensures that the timers are handled before any SCHED_OTHER thread is
+ * scheduled.
+ */
+DECLARE_PER_CPU(struct task_struct *, ktimerd);
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
+void raise_ktimers_thread(unsigned int nr);
+
+static inline unsigned int local_timers_pending_force_th(void)
+{
+ return __this_cpu_read(pending_timer_softirq);
+}
+
+static inline void raise_timer_softirq(unsigned int nr)
+{
+ lockdep_assert_in_irq();
+ if (force_irqthreads())
+ raise_ktimers_thread(nr);
+ else
+ __raise_softirq_irqoff(nr);
+}
+
+static inline unsigned int local_timers_pending(void)
+{
+ if (force_irqthreads())
+ return local_timers_pending_force_th();
+ else
+ return local_softirq_pending();
+}
+
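A sketch of the intended call site, from hard interrupt context only (lockdep_assert_in_irq() enforces this); the expiry check is hypothetical:

	if (my_timers_expired())	/* hypothetical */
		raise_timer_softirq(TIMER_SOFTIRQ);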
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 2b8026a39906..9d5791e9f737 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -20,6 +20,10 @@ interval_tree_remove(struct interval_tree_node *node,
struct rb_root_cached *root);
extern struct interval_tree_node *
+interval_tree_subtree_search(struct interval_tree_node *node,
+ unsigned long start, unsigned long last);
+
+extern struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index aaa8a0767aa3..c5a2fed49eb0 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -77,7 +77,7 @@ ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
* Cond2: start <= ITLAST(node) \
*/ \
\
-static ITSTRUCT * \
+ITSTATIC ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
@@ -104,12 +104,8 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
- if (node->ITRB.rb_right) { \
- node = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB); \
- if (start <= node->ITSUBTREE) \
- continue; \
- } \
+ node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+ continue; \
} \
return NULL; /* No match */ \
} \
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index f32522bb3aa5..d3eade7cf663 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -101,22 +101,38 @@ static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
#ifndef ioread64
#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_hi_lo
+#else
#define ioread64 ioread64_hi_lo
#endif
+#endif
#ifndef iowrite64
#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_hi_lo
+#else
#define iowrite64 iowrite64_hi_lo
#endif
+#endif
#ifndef ioread64be
#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_hi_lo
+#else
#define ioread64be ioread64be_hi_lo
#endif
+#endif
#ifndef iowrite64be
#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_hi_lo
+#else
#define iowrite64be iowrite64be_hi_lo
#endif
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 448a21435dba..94e676ec3d3f 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -101,22 +101,38 @@ static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
#ifndef ioread64
#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_lo_hi
+#else
#define ioread64 ioread64_lo_hi
#endif
+#endif
#ifndef iowrite64
#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_lo_hi
+#else
#define iowrite64 iowrite64_lo_hi
#endif
+#endif
#ifndef ioread64be
#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_lo_hi
+#else
#define ioread64be ioread64be_lo_hi
#endif
+#endif
#ifndef iowrite64be
#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_lo_hi
+#else
#define iowrite64be iowrite64be_lo_hi
#endif
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 7376c1df9c90..c16353cc6e3c 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -225,7 +225,4 @@ io_mapping_free(struct io_mapping *iomap)
kfree(iomap);
}
-int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size);
-
#endif /* _LINUX_IO_MAPPING_H */
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 86cf1f7ae389..7a1516011ccf 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,8 +15,6 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S2,
ARM_V7S,
ARM_MALI_LPAE,
- AMD_IOMMU_V1,
- AMD_IOMMU_V2,
APPLE_DART,
APPLE_DART2,
IO_PGTABLE_NUM_FMTS,
@@ -85,6 +83,16 @@ struct io_pgtable_cfg {
*
* IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
* attributes set in the TCR for a non-coherent page-table walker.
+ *
+ * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
+ *
+ * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits
+ *
+ * IO_PGTABLE_QUIRK_NO_WARN: Do not WARN_ON() on conflicting
+ * mappings, but silently return -EEXIST. Normally an attempt
+ * to map over an existing mapping would indicate some sort of
+ * kernel bug, which would justify the WARN_ON(). But for GPU
+ * drivers, this could be under the control of userspace, which
+ * deserves an error return but should not spam dmesg.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
@@ -92,6 +100,9 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ #define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
+ #define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8)
+ #define IO_PGTABLE_QUIRK_NO_WARN BIT(9)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -167,16 +178,31 @@ struct io_pgtable_cfg {
struct {
u64 ttbr[4];
u32 n_ttbrs;
+ u32 n_levels;
} apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
};
};
/**
+ * struct arm_lpae_io_pgtable_walk_data - information from a pgtable walk
+ *
+ * @ptes: The recorded PTE values from the walk
+ */
+struct arm_lpae_io_pgtable_walk_data {
+ u64 ptes[4];
+};
+
+/**
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
* @map_pages: Map a physically contiguous range of pages of the same size.
* @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
+ * @pgtable_walk: (optional) Perform a page table walk for a given iova.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
@@ -190,6 +216,7 @@ struct io_pgtable_ops {
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd);
int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
diff --git a/include/linux/io.h b/include/linux/io.h
index 59ec5eea696c..0642c7ee41db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -9,13 +9,10 @@
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
-struct resource;
#ifndef __iowrite32_copy
void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
@@ -65,8 +62,6 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
}
#endif
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
@@ -188,4 +183,25 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
resource_size_t size);
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
#endif /* _LINUX_IO_H */
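As a usage sketch for the now-shared range_is_allowed() helper, a /dev/mem-style driver would gate physical mappings on it before handing pages to userspace; the calling function is hypothetical:

	/* Reject mmap() of physical ranges the kernel wants to keep private. */
	static int foo_mmap_phys(struct vm_area_struct *vma)
	{
		size_t size = vma->vm_end - vma->vm_start;

		if (!range_is_allowed(vma->vm_pgoff, size))
			return -EPERM;
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       size, vma->vm_page_prot);
	}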
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index e123d5e17b52..85fe4e6b275c 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
{
- if (current->io_uring) {
- io_uring_unreg_ringfd();
+ if (current->io_uring)
__io_uring_cancel(false);
- }
}
static inline void io_uring_task_cancel(void)
{
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index 447fbfd32215..375fd048c4cb 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -4,18 +4,20 @@
#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
+#include <linux/blk-mq.h>
/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE (1U << 30)
+/* io_uring_cmd is being issued again */
+#define IORING_URING_CMD_REISSUE (1U << 31)
struct io_uring_cmd {
struct file *file;
const struct io_uring_sqe *sqe;
- /* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
u32 cmd_op;
u32 flags;
u8 pdu[32]; /* available inline for free use */
+ u8 unused[8];
};
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
@@ -23,9 +25,25 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
return sqe->cmd;
}
+static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
+}
+#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
+ io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
+ ((pdu_type *)&(cmd)->pdu) \
+)
+
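To illustrate the size-checked pdu accessor, a minimal sketch of a driver stashing per-command state inline; struct foo_cmd_pdu and the handler are hypothetical:

	struct foo_cmd_pdu {
		u32 tag;
		u32 flags;
	};

	static void foo_cmd_prep(struct io_uring_cmd *cmd)
	{
		/* BUILD_BUG_ON() fires at build time if the pdu outgrows cmd->pdu. */
		struct foo_cmd_pdu *pdu = io_uring_cmd_to_pdu(cmd, struct foo_cmd_pdu);

		pdu->tag = 0;
		pdu->flags = 0;
	}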
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd);
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags);
/*
* Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
@@ -34,11 +52,11 @@ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
* Note: the caller should never hard code @issue_flags and is only allowed
* to pass the mask provided by the core io_uring code.
*/
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
- unsigned issue_flags);
+void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32);
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ io_req_tw_func_t task_work_cb,
unsigned flags);
/*
@@ -48,54 +66,119 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
+/* Execute the request from a blocking context */
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+
+/*
+ * Select a buffer from the provided buffer group for multishot uring_cmd.
+ * Returns the selected buffer address and size.
+ */
+struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
+ unsigned buf_group, size_t *len,
+ unsigned int issue_flags);
+
+/*
+ * Complete a multishot uring_cmd event. This will post a CQE to the completion
+ * queue and update the provided buffer.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags);
+
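A hedged sketch of the multishot flow these two helpers enable; the payload size, the copy step, and the exact termination semantics of io_uring_mshot_cmd_post_cqe() are assumptions for illustration:

	static int foo_push_event(struct io_uring_cmd *cmd, unsigned buf_group,
				  unsigned int issue_flags)
	{
		size_t len = 64;	/* hypothetical payload size */
		struct io_br_sel sel;

		sel = io_uring_cmd_buffer_select(cmd, buf_group, &len, issue_flags);
		if (!sel.addr)
			return sel.val;		/* selection failed, e.g. -ENOBUFS */
		/* ... copy up to len bytes of event payload to sel.addr ... */
		if (!io_uring_mshot_cmd_post_cqe(cmd, &sel, issue_flags))
			return -ECANCELED;	/* assumed: multishot terminated */
		return 0;
	}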
#else
-static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+static inline int
+io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
{
return -EOPNOTSUPP;
}
-static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- ssize_t ret2, unsigned issue_flags)
+static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
+ u64 ret2, unsigned issue_flags, bool is_cqe32)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned),
- unsigned flags)
+ io_req_tw_func_t task_work_cb, unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
}
+static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+}
+static inline struct io_br_sel
+io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
+ size_t *len, unsigned int issue_flags)
+{
+ return (struct io_br_sel) { .val = -EOPNOTSUPP };
+}
+static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ return true;
+}
#endif
-/*
- * Polled completions must ensure they are coming from a poll queue, and
- * hence are completed inside the usual poll handling loops.
- */
-static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
- ssize_t ret, ssize_t res2)
+static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
{
- lockdep_assert(in_task());
- io_uring_cmd_done(ioucmd, ret, res2, 0);
+ return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd);
}
+/* task_work executor checks the deferred list completion */
+#define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER
+
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ io_req_tw_func_t task_work_cb)
{
__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}
static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ io_req_tw_func_t task_work_cb)
{
__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
- return cmd_to_io_kiocb(cmd)->task;
+ return cmd_to_io_kiocb(cmd)->tctx->task;
}
+/*
+ * Return uring_cmd's context reference as its context handle for driver to
+ * track per-context resource, such as registered kernel IO buffer
+ */
+static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->ctx;
+}
+
+static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
+ unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
+}
+
+static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
+}
+
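Tying the new pieces together, a minimal sketch of completing a command from task work using only helpers declared in this header; the driver callback is hypothetical:

	static void foo_cmd_tw(struct io_tw_req tw_req, io_tw_token_t tw)
	{
		struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);

		/* Post a normal (non-CQE32) completion with result 0. */
		io_uring_cmd_done(cmd, 0, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
	}

	/* Queued from elsewhere in the driver, e.g. an IRQ handler: */
	/*	io_uring_cmd_complete_in_task(cmd, foo_cmd_tw);	*/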
+int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
+ void (*release)(void *), unsigned int index,
+ unsigned int issue_flags);
+int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
+ unsigned int issue_flags);
+
#endif /* _LINUX_IO_URING_CMD_H */
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 7a6b190c7da7..e1adb0d20a0a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -26,6 +26,8 @@ enum io_uring_cmd_flags {
IO_URING_F_MULTISHOT = 4,
/* executed by io-wq */
IO_URING_F_IOWQ = 8,
+ /* executed inline from syscall */
+ IO_URING_F_INLINE = 16,
/* int's last bit, sign checks are usually faster than a bit test */
IO_URING_F_NONBLOCK = INT_MIN,
@@ -50,24 +52,23 @@ struct io_wq_work_list {
struct io_wq_work {
struct io_wq_work_node list;
- unsigned flags;
+ atomic_t flags;
/* place it here instead of io_kiocb as it fills padding and saves 4B */
int cancel_seq;
};
-struct io_fixed_file {
- /* file * with additional FFS_* flags */
- unsigned long file_ptr;
+struct io_rsrc_data {
+ unsigned int nr;
+ struct io_rsrc_node **nodes;
};
struct io_file_table {
- struct io_fixed_file *files;
+ struct io_rsrc_data data;
unsigned long *bitmap;
unsigned int alloc_hint;
};
struct io_hash_bucket {
- spinlock_t lock;
struct hlist_head list;
} ____cacheline_aligned_in_smp;
@@ -76,6 +77,32 @@ struct io_hash_table {
unsigned hash_bits;
};
+struct io_mapped_region {
+ struct page **pages;
+ void *ptr;
+ unsigned nr_pages;
+ unsigned flags;
+};
+
+/*
+ * Return value from io_buffer_list selection, to avoid stashing it in
+ * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
+ * across execution contexts are fine. But for ring provided buffers, the
+ * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
+ * persists, it's better to just keep the buffer local for those cases.
+ */
+struct io_br_sel {
+ struct io_buffer_list *buf_list;
+ /*
+ * Some selection parts return the user address, others return an error.
+ */
+ union {
+ void __user *addr;
+ ssize_t val;
+ };
+};
+
/*
* Arbitrary limit, can be raised if need be
*/
@@ -85,6 +112,7 @@ struct io_uring_task {
/* submission side */
int cached_refs;
const struct io_ring_ctx *last;
+ struct task_struct *task;
struct io_wq *io_wq;
struct file *registered_rings[IO_RINGFD_REG_MAX];
@@ -100,6 +128,14 @@ struct io_uring_task {
} ____cacheline_aligned_in_smp;
};
+struct iou_vec {
+ union {
+ struct iovec *iovec;
+ struct bio_vec *bvec;
+ };
+ unsigned nr; /* number of struct iovec it can hold */
+};
+
struct io_uring {
u32 head;
u32 tail;
@@ -207,23 +243,15 @@ struct io_submit_state {
bool need_plug;
bool cq_flush;
unsigned short submit_nr;
- unsigned int cqes_count;
struct blk_plug plug;
};
-struct io_ev_fd {
- struct eventfd_ctx *cq_ev_fd;
- unsigned int eventfd_async: 1;
- struct rcu_head rcu;
- atomic_t refs;
- atomic_t ops;
-};
-
struct io_alloc_cache {
void **entries;
unsigned int nr_cached;
unsigned int max_cached;
- size_t elem_size;
+ unsigned int elem_size;
+ unsigned int init_clear;
};
struct io_ring_ctx {
@@ -248,6 +276,9 @@ struct io_ring_ctx {
struct io_rings *rings;
struct percpu_ref refs;
+ clockid_t clockid;
+ enum tk_offsets clock_offset;
+
enum task_work_notify_mode notify_method;
unsigned sq_thread_idle;
} ____cacheline_aligned_in_smp;
@@ -276,7 +307,6 @@ struct io_ring_ctx {
* Fixed resources fast path, should be accessed only under
* uring_lock, and updated through io_uring_register(2)
*/
- struct io_rsrc_node *rsrc_node;
atomic_t cancel_seq;
/*
@@ -289,25 +319,35 @@ struct io_ring_ctx {
struct io_wq_work_list iopoll_list;
struct io_file_table file_table;
- struct io_mapped_ubuf **user_bufs;
- unsigned nr_user_files;
- unsigned nr_user_bufs;
+ struct io_rsrc_data buf_table;
+ struct io_alloc_cache node_cache;
+ struct io_alloc_cache imu_cache;
struct io_submit_state submit_state;
+ /*
+ * Modifications are protected by ->uring_lock and ->mmap_lock.
+ * The buffer list's io mapped region should be stable once
+ * published.
+ */
struct xarray io_bl_xa;
- struct io_hash_table cancel_table_locked;
+ struct io_hash_table cancel_table;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
struct io_alloc_cache rw_cache;
- struct io_alloc_cache uring_cache;
+ struct io_alloc_cache cmd_cache;
/*
* Any cancelable uring_cmd is added to this list in
* ->uring_cmd() by io_uring_cmd_insert_cancelable()
*/
struct hlist_head cancelable_uring_cmd;
+ /*
+ * For hybrid IOPOLL, the runtime spent in hybrid polling,
+ * excluding scheduling time
+ */
+ u64 hybrid_poll_time;
} ____cacheline_aligned_in_smp;
struct {
@@ -321,7 +361,9 @@ struct io_ring_ctx {
unsigned cached_cq_tail;
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
- unsigned cq_extra;
+
+ void *cq_wait_arg;
+ size_t cq_wait_size;
} ____cacheline_aligned_in_smp;
/*
@@ -330,6 +372,7 @@ struct io_ring_ctx {
*/
struct {
struct llist_head work_llist;
+ struct llist_head retry_llist;
unsigned long check_cq;
atomic_t cq_wait_nr;
atomic_t cq_timeouts;
@@ -338,7 +381,7 @@ struct io_ring_ctx {
/* timeouts */
struct {
- spinlock_t timeout_lock;
+ raw_spinlock_t timeout_lock;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned cq_last_tm_flush;
@@ -346,9 +389,7 @@ struct io_ring_ctx {
spinlock_t completion_lock;
- struct list_head io_buffers_comp;
struct list_head cq_overflow_list;
- struct io_hash_table cancel_table;
struct hlist_head waitid_list;
@@ -366,22 +407,12 @@ struct io_ring_ctx {
unsigned int file_alloc_start;
unsigned int file_alloc_end;
- struct list_head io_buffers_cache;
-
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
- /* slow path rsrc auxilary data, used by update/register */
- struct io_mapped_ubuf *dummy_ubuf;
- struct io_rsrc_data *file_data;
- struct io_rsrc_data *buf_data;
-
- /* protected by ->uring_lock */
- struct list_head rsrc_ref_list;
- struct io_alloc_cache rsrc_node_cache;
- struct wait_queue_head rsrc_quiesce_wq;
- unsigned rsrc_quiesce;
+ /* Stores zcrx object pointers of type struct io_zcrx_ifq */
+ struct xarray zcrx_ctxs;
u32 pers_next;
struct xarray personalities;
@@ -405,34 +436,47 @@ struct io_ring_ctx {
struct callback_head poll_wq_task_work;
struct list_head defer_list;
+ unsigned nr_drained;
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
/* napi busy poll default timeout */
- unsigned int napi_busy_poll_to;
+ ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
- bool napi_enabled;
+ u8 napi_track_mode;
DECLARE_HASHTABLE(napi_ht, 4);
#endif
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+ unsigned nr_req_allocated;
/*
- * If IORING_SETUP_NO_MMAP is used, then the below holds
- * the gup'ed pages for the two rings, and the sqes.
+ * Protection for resize vs mmap races - both the mmap and resize
+ * side will need to grab this lock, to prevent either side from
+ * being run concurrently with the other.
*/
- unsigned short n_ring_pages;
- unsigned short n_sqe_pages;
- struct page **ring_pages;
- struct page **sqe_pages;
+ struct mutex mmap_lock;
+
+ struct io_mapped_region sq_region;
+ struct io_mapped_region ring_region;
+ /* used for optimised request parameter and wait argument passing */
+ struct io_mapped_region param_region;
};
+/*
+ * Token indicating function is called in task work context:
+ * ctx->uring_lock is held and any completions generated will be flushed.
+ * ONLY core io_uring.c should instantiate this struct.
+ */
struct io_tw_state {
+ bool cancel;
};
+/* Alias to use in code that doesn't instantiate struct io_tw_state */
+typedef struct io_tw_state io_tw_token_t;
enum {
REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
@@ -451,6 +495,7 @@ enum {
REQ_F_LINK_TIMEOUT_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_POLLED_BIT,
+ REQ_F_HYBRID_IOPOLL_STATE_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_BUFFER_RING_BIT,
REQ_F_REISSUE_BIT,
@@ -461,18 +506,21 @@ enum {
REQ_F_SKIP_LINK_CQES_BIT,
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_MULTISHOT_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
- REQ_F_HASH_LOCKED_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
REQ_F_POLL_NO_LAZY_BIT,
- REQ_F_CANCEL_SEQ_BIT,
REQ_F_CAN_POLL_BIT,
REQ_F_BL_EMPTY_BIT,
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_NODE_BIT,
+ REQ_F_HAS_METADATA_BIT,
+ REQ_F_IMPORT_BUFFER_BIT,
+ REQ_F_SQE_COPIED_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -511,6 +559,8 @@ enum {
REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
/* already went through poll handler */
REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
+ /* every req only blocks once in hybrid poll */
+ REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
/* buffer already selected */
REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
/* buffer selected from ring, needs commit */
@@ -535,16 +585,14 @@ enum {
REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
/* double poll may active */
REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
+ /* request posts multiple completions, should be set at prep time */
+ REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
/* fast poll multishot mode */
REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
- /* hashed into ->cancel_hash_locked, protected by ->uring_lock */
- REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
/* don't use lazy poll wake for this request */
REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
- /* cancel sequence is set and valid */
- REQ_F_CANCEL_SEQ = IO_REQ_FLAG(REQ_F_CANCEL_SEQ_BIT),
/* file is pollable */
REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
/* buffer list was empty after selection of buffer */
@@ -553,9 +601,24 @@ enum {
REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
/* buffer ring head needs incrementing on put */
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* buf node is valid */
+ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+ /* request has read/write metadata assigned */
+ REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
+ /*
+ * For vectored fixed buffers, resolve iovec to registered buffers.
+ * For SEND_ZC, whether to import buffers (i.e. the first issue).
+ */
+ REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
+ /* ->sqe_copy() has been called, if necessary */
+ REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
+};
+
+struct io_tw_req {
+ struct io_kiocb *req;
};
-typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
+typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);
struct io_task_work {
struct llist_node node;
@@ -590,7 +653,11 @@ static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
((cmd_type *)&(req)->cmd) \
)
-#define cmd_to_io_kiocb(ptr) ((struct io_kiocb *) ptr)
+
+static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
+{
+ return ptr;
+}
struct io_kiocb {
union {
@@ -609,8 +676,7 @@ struct io_kiocb {
u8 iopoll_completed;
/*
* Can be either a fixed buffer index, or used with provided buffers.
- * For the latter, before issue it points to the buffer group ID,
- * and after selection it points to the buffer ID itself.
+ * For the latter, it points to the selected buffer ID.
*/
u16 buf_index;
@@ -622,20 +688,13 @@ struct io_kiocb {
struct io_cqe cqe;
struct io_ring_ctx *ctx;
- struct task_struct *task;
+ struct io_uring_task *tctx;
union {
- /* store used ubuf, so we can prevent reloading */
- struct io_mapped_ubuf *imu;
-
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
- /*
- * stores buffer ID for ring provided buffers, valid IFF
- * REQ_F_BUFFER_RING is set.
- */
- struct io_buffer_list *buf_list;
+ struct io_rsrc_node *buf_node;
};
union {
@@ -645,24 +704,34 @@ struct io_kiocb {
__poll_t apoll_events;
};
- struct io_rsrc_node *rsrc_node;
+ struct io_rsrc_node *file_node;
atomic_t refs;
- atomic_t poll_refs;
+ bool cancel_seq_set;
struct io_task_work io_task_work;
- /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- struct hlist_node hash_node;
+ union {
+ /*
+ * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
+ * poll
+ */
+ struct hlist_node hash_node;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ /* for private io_kiocb freeing */
+ struct rcu_head rcu_head;
+ };
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
void *async_data;
/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
struct io_kiocb *link;
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
- struct {
+ struct io_big_cqe {
u64 extra1;
u64 extra2;
} big_cqe;
@@ -672,5 +741,4 @@ struct io_overflow_cqe {
struct list_head list;
struct io_uring_cqe cqe;
};
-
#endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 14f7eaf1b443..079d8773790c 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -118,8 +118,8 @@ struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
-static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+int __copy_io(u64 clone_flags, struct task_struct *tsk);
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
{
if (!current->io_context)
return 0;
@@ -129,7 +129,7 @@ static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
{
return 0;
}
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6fc1c858013d..520e967cb501 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
+#include <linux/pagevec.h>
struct address_space;
struct fiemap_extent_info;
@@ -16,6 +17,7 @@ struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
+struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -53,6 +55,16 @@ struct vm_fault;
*
* IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
* rather than a file data extent.
+ *
+ * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must
+ * never be merged with the mapping before it.
+ *
+ * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
+ * assigned to it yet and the file system will do that in the bio submission
+ * handler, splitting the I/O as needed.
+ *
+ * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
+ * bio, i.e. set REQ_ATOMIC.
*/
#define IOMAP_F_NEW (1U << 0)
#define IOMAP_F_DIRTY (1U << 1)
@@ -64,6 +76,14 @@ struct vm_fault;
#define IOMAP_F_BUFFER_HEAD 0
#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR (1U << 5)
+#define IOMAP_F_BOUNDARY (1U << 6)
+#define IOMAP_F_ANON_WRITE (1U << 7)
+#define IOMAP_F_ATOMIC_BIO (1U << 8)
+
+/*
+ * Flag reserved for file system specific usage
+ */
+#define IOMAP_F_PRIVATE (1U << 12)
/*
* Flags set by the core iomap code during operations:
@@ -75,22 +95,14 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
-#define IOMAP_F_SIZE_CHANGED (1U << 8)
-#define IOMAP_F_STALE (1U << 9)
-
-/*
- * Flags from 0x1000 up are for file system specific usage:
- */
-#define IOMAP_F_PRIVATE (1U << 12)
-
+#define IOMAP_F_SIZE_CHANGED (1U << 14)
+#define IOMAP_F_STALE (1U << 15)
/*
* Magic value for addr:
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_folio_ops;
-
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
@@ -101,12 +113,13 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_folio_ops *folio_ops;
u64 validity_cookie; /* used with .iomap_valid() */
};
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
+ if (iomap->flags & IOMAP_F_ANON_WRITE)
+ return U64_MAX; /* invalid */
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
@@ -129,16 +142,11 @@ static inline bool iomap_inline_data_valid(const struct iomap *iomap)
}
/*
- * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio
- * and put_folio will be called for each folio written to. This only applies
- * to buffered writes as unbuffered writes will not typically have folios
- * associated with them.
- *
* When get_folio succeeds, put_folio will always be called to do any
* cleanup work necessary. put_folio is responsible for unlocking and putting
* @folio.
*/
-struct iomap_folio_ops {
+struct iomap_write_ops {
struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
unsigned len);
void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
@@ -160,6 +168,16 @@ struct iomap_folio_ops {
* locked by the iomap code.
*/
bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+
+ /*
+ * Optional if the filesystem wishes to provide a custom handler for
+ * reading in the contents of a folio, otherwise iomap will default to
+ * submitting a bio read request.
+ *
+ * The read must be done synchronously.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len);
};
/*
@@ -178,6 +196,8 @@ struct iomap_folio_ops {
#else
#define IOMAP_DAX 0
#endif /* CONFIG_FS_DAX */
+#define IOMAP_ATOMIC (1 << 9) /* torn-write protection */
+#define IOMAP_DONTCACHE (1 << 10)
struct iomap_ops {
/*
@@ -206,8 +226,10 @@ struct iomap_ops {
* calls to iomap_iter(). Treat as read-only in the body.
* @len: The remaining length of the file segment we're operating on.
* It is updated at the same time as @pos.
- * @processed: The number of bytes processed by the body in the most recent
- * iteration, or a negative errno. 0 causes the iteration to stop.
+ * @iter_start_pos: The original start pos for the current iomap. Used for
+ * incremental iter advance.
+ * @status: Status of the most recent iteration. Zero on success or a negative
+ * errno on error.
* @flags: Zero or more of the iomap_begin flags above.
* @iomap: Map describing the I/O iteration
* @srcmap: Source map for COW operations
@@ -216,28 +238,54 @@ struct iomap_iter {
struct inode *inode;
loff_t pos;
u64 len;
- s64 processed;
+ loff_t iter_start_pos;
+ int status;
unsigned flags;
struct iomap iomap;
struct iomap srcmap;
+ struct folio_batch *fbatch;
void *private;
};
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+int iomap_iter_advance(struct iomap_iter *iter, u64 count);
/**
- * iomap_length - length of the current iomap iteration
+ * iomap_length_trim - trimmed length of the current iomap iteration
* @iter: iteration structure
+ * @pos: File position to trim from.
+ * @len: Length of the mapping to trim to.
*
- * Returns the length that the operation applies to for the current iteration.
+ * Returns a trimmed length that the operation applies to for the current
+ * iteration.
*/
-static inline u64 iomap_length(const struct iomap_iter *iter)
+static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
+ u64 len)
{
u64 end = iter->iomap.offset + iter->iomap.length;
if (iter->srcmap.type != IOMAP_HOLE)
end = min(end, iter->srcmap.offset + iter->srcmap.length);
- return min(iter->len, end - iter->pos);
+ return min(len, end - pos);
+}
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ return iomap_length_trim(iter, iter->pos, iter->len);
+}
+
+/**
+ * iomap_iter_advance_full - advance by the full length of current map
+ * @iter: iteration structure
+ */
+static inline int iomap_iter_advance_full(struct iomap_iter *iter)
+{
+ return iomap_iter_advance(iter, iomap_length(iter));
}
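Under the new status/advance contract, an iteration body advances the iter itself and returns a status instead of a processed byte count. A minimal sketch, with the body function hypothetical:

	static int foo_iomap_op(struct iomap_iter *iter)
	{
		u64 length = iomap_length(iter);

		/* ... operate on [iter->pos, iter->pos + length) ... */

		/* Advance and continue; a short advance is also allowed. */
		return iomap_iter_advance(iter, length);
	}

	/* Driven from the usual loop:
	 *	while ((ret = iomap_iter(&iter, ops)) > 0)
	 *		iter.status = foo_iomap_op(&iter);
	 */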
/**
@@ -256,27 +304,70 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
return &i->iomap;
}
-ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
- struct iomap *iomap, loff_t pos, loff_t length, ssize_t written,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length));
+/*
+ * Return the file offset for the first unchanged block after a short write.
+ *
+ * If nothing was written, round @pos down to point at the first block in
+ * the range, else round up to include the partially written block.
+ */
+static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
+ ssize_t written)
+{
+ if (unlikely(!written))
+ return round_down(pos, i_blocksize(inode));
+ return round_up(pos + written, i_blocksize(inode));
+}
+
+/*
+ * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
+ * operation.
+ *
+ * Don't bother with blocks that are not shared to start with; or mappings that
+ * cannot be shared, such as inline data, delalloc reservations, holes or
+ * unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
+ * requires providing a separate source map, and the presence of one is a good
+ * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
+ * for any data that goes into the COW fork for XFS.
+ */
+static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
+{
+ return (iter->iomap.flags & IOMAP_F_SHARED) &&
+ iter->srcmap.type == IOMAP_MAPPED;
+}
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops);
+loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
+ loff_t length);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
- bool *did_zero, const struct iomap_ops *ops);
+ bool *did_zero, const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops);
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private);
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
@@ -287,16 +378,44 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
/*
+ * Flags for iomap_ioend->io_flags.
+ */
+/* shared COW extent */
+#define IOMAP_IOEND_SHARED (1U << 0)
+/* unwritten extent */
+#define IOMAP_IOEND_UNWRITTEN (1U << 1)
+/* don't merge into previous ioend */
+#define IOMAP_IOEND_BOUNDARY (1U << 2)
+/* is direct I/O */
+#define IOMAP_IOEND_DIRECT (1U << 3)
+/* is DONTCACHE I/O */
+#define IOMAP_IOEND_DONTCACHE (1U << 4)
+
+/*
+ * Flags that if set on either ioend prevent the merge of two ioends.
+ * (IOMAP_IOEND_BOUNDARY also prevents merges, but only one-way)
+ */
+#define IOMAP_IOEND_NOMERGE_FLAGS \
+ (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT | \
+ IOMAP_IOEND_DONTCACHE)
+
+/*
* Structure for writeback I/O completions.
+ *
+ * File systems can split a bio generated by iomap. In that case the parent
+ * ioend it was split from is recorded in ioend->io_parent.
*/
struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
- u16 io_type;
- u16 io_flags; /* IOMAP_F_* */
+ u16 io_flags; /* IOMAP_IOEND_* */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
+ atomic_t io_remaining; /* completion defer count */
+ int io_error; /* stashed away status */
+ struct iomap_ioend *io_parent; /* parent for completions */
loff_t io_offset; /* offset in the file */
sector_t io_sector; /* start sector of ioend */
+ void *io_private; /* file system private data */
struct bio io_bio; /* MUST BE LAST! */
};
@@ -307,48 +426,90 @@ static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
struct iomap_writeback_ops {
/*
- * Required, maps the blocks so that writeback can be performed on
- * the range starting at offset.
+ * Performs writeback on the passed in range
*
- * Can return arbitrarily large regions, but we need to call into it at
+ * Can map arbitrarily large regions, but we need to call into it at
* least once per folio to allow the file systems to synchronize with
* the write path that could be invalidating mappings.
*
* An existing mapping from a previous call to this method can be reused
* by the file system if it is still valid.
+ *
+ * If this succeeds, iomap_finish_folio_write() must be called once
+ * writeback completes for the range, regardless of whether the
+ * writeback succeeded or failed.
+ *
+ * Returns the number of bytes processed or a negative errno.
*/
- int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
- loff_t offset, unsigned len);
-
- /*
- * Optional, allows the file systems to perform actions just before
- * submitting the bio and/or override the bio end_io handler for complex
- * operations like copy on write extent manipulation or unwritten extent
- * conversions.
- */
- int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, unsigned int len,
+ u64 end_pos);
/*
- * Optional, allows the file system to discard state on a page where
- * we failed to submit any I/O.
+ * Submit a writeback context previously build up by ->writeback_range.
+ *
+ * Returns 0 if the context was successfully submitted, or a negative
+ * error code if not. If @error is non-zero a failure occurred, and
+ * the writeback context should be completed with an error.
*/
- void (*discard_folio)(struct folio *folio, loff_t pos);
+ int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
};
struct iomap_writepage_ctx {
struct iomap iomap;
- struct iomap_ioend *ioend;
+ struct inode *inode;
+ struct writeback_control *wbc;
const struct iomap_writeback_ops *ops;
u32 nr_folios; /* folios added to the ioend */
+ void *wb_ctx; /* pending writeback context */
};
+struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
+ loff_t file_offset, u16 ioend_flags);
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append);
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
-int iomap_writepages(struct address_space *mapping,
- struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
+ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
+ loff_t pos, loff_t end_pos, unsigned int dirty_len);
+int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
+
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len);
+
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
+int iomap_writepages(struct iomap_writepage_ctx *wpc);
+
+struct iomap_read_folio_ctx {
+ const struct iomap_read_ops *ops;
+ struct folio *cur_folio;
+ struct readahead_control *rac;
+ void *read_ctx;
+};
+
+struct iomap_read_ops {
+ /*
+ * Read in a folio range.
+ *
+ * If this succeeds, iomap_finish_folio_read() must be called after the
+ * range is read in, regardless of whether the read succeeded or failed.
+ *
+ * Returns 0 on success or a negative error on failure.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t len);
+
+ /*
+ * Submit any pending read requests.
+ *
+ * This is optional.
+ */
+ void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+};
/*
* Flags for direct I/O ->end_io:
@@ -393,6 +554,14 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_PARTIAL (1 << 2)
+/*
+ * Ensure each bio is aligned to fs block size.
+ *
+ * For filesystems which need to calculate/verify the checksum of each fs
+ * block. Otherwise they may not be able to handle unaligned bios.
+ */
+#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags, void *private, size_t done_before);
@@ -413,4 +582,32 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
#endif /* CONFIG_SWAP */
+extern struct bio_set iomap_ioend_bioset;
+
+#ifdef CONFIG_BLOCK
+extern const struct iomap_read_ops iomap_bio_read_ops;
+
+static inline void iomap_bio_read_folio(struct folio *folio,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .cur_folio = folio,
+ };
+
+ iomap_read_folio(ops, &ctx);
+}
+
+static inline void iomap_bio_readahead(struct readahead_control *rac,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .rac = rac,
+ };
+
+ iomap_readahead(ops, &ctx);
+}
+#endif /* CONFIG_BLOCK */
+
#endif /* LINUX_IOMAP_H */
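With the context-based read API, a block-backed filesystem can wire its aops straight to the bio helpers above; a hedged sketch with hypothetical filesystem names:

	static int foo_read_folio(struct file *file, struct folio *folio)
	{
		iomap_bio_read_folio(folio, &foo_iomap_ops);
		return 0;
	}

	static void foo_readahead(struct readahead_control *rac)
	{
		iomap_bio_readahead(rac, &foo_iomap_ops);
	}

	static const struct address_space_operations foo_aops = {
		.read_folio	= foo_read_folio,
		.readahead	= foo_readahead,
		/* ... */
	};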
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..a92b3ff9b934
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+ vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bc8dff7cf6d..8c66284a91a8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
+#include <uapi/linux/iommufd.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -41,7 +42,12 @@ struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
+struct iommu_dma_msi_cookie;
struct iommu_fault_param;
+struct iommufd_ctx;
+struct iommufd_viommu;
+struct msi_desc;
+struct msi_msg;
#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
@@ -124,12 +130,16 @@ struct iopf_fault {
struct iopf_group {
struct iopf_fault last_fault;
struct list_head faults;
+ size_t fault_count;
/* list node for iommu_fault_param::faults */
struct list_head pending_node;
struct work_struct work;
- struct iommu_domain *domain;
+ struct iommu_attach_handle *attach_handle;
/* The device's fault data parameter. */
struct iommu_fault_param *fault_param;
+ /* Used by handler provider to hook the group on its own lists. */
+ struct list_head node;
+ u32 cookie;
};
/**
@@ -157,6 +167,15 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+enum iommu_domain_cookie_type {
+ IOMMU_COOKIE_NONE,
+ IOMMU_COOKIE_DMA_IOVA,
+ IOMMU_COOKIE_DMA_MSI,
+ IOMMU_COOKIE_FAULT_HANDLER,
+ IOMMU_COOKIE_SVA,
+ IOMMU_COOKIE_IOMMUFD,
+};
+
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
@@ -203,15 +222,18 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
+ enum iommu_domain_cookie_type cookie_type;
const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
- struct iommu_dma_cookie *iova_cookie;
int (*iopf_handler)(struct iopf_group *group);
- void *fault_data;
- union {
+
+ union { /* cookie */
+ struct iommu_dma_cookie *iova_cookie;
+ struct iommu_dma_msi_cookie *msi_cookie;
+ struct iommufd_hw_pagetable *iommufd_hwpt;
struct {
iommu_fault_handler_t handler;
void *handler_token;
@@ -295,28 +317,26 @@ struct iommu_iort_rmr_data {
u32 num_sids;
};
-/**
- * enum iommu_dev_features - Per device IOMMU features
- * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
- * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
- * enabling %IOMMU_DEV_FEAT_SVA requires
- * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
- * Faults themselves instead of relying on the IOMMU. When
- * supported, this feature must be enabled before and
- * disabled after %IOMMU_DEV_FEAT_SVA.
- *
- * Device drivers enable a feature using iommu_dev_enable_feature().
- */
-enum iommu_dev_features {
- IOMMU_DEV_FEAT_SVA,
- IOMMU_DEV_FEAT_IOPF,
-};
-
#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/*
+ * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
+ * list using iommu_pages_list_add(). Note: ONLY pages from
+ * iommu_alloc_pages_node_sz() can be used this way!
+ */
+struct iommu_pages_list {
+ struct list_head pages;
+};
+
+#define IOMMU_PAGES_LIST_INIT(name) \
+ ((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
+
#ifdef CONFIG_IOMMU_API
/**
@@ -339,7 +359,7 @@ struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
- struct list_head freelist;
+ struct iommu_pages_list freelist;
bool queued;
};
@@ -353,9 +373,6 @@ struct iommu_dirty_bitmap {
struct iommu_iotlb_gather *gather;
};
-/* Read but do not clear any dirty bits */
-#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
-
/**
* struct iommu_dirty_ops - domain specific dirty tracking operations
* @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
@@ -419,10 +436,10 @@ static inline int __iommu_copy_struct_from_user(
void *dst_data, const struct iommu_user_data *src_data,
unsigned int data_type, size_t data_len, size_t min_len)
{
- if (src_data->type != data_type)
- return -EINVAL;
if (WARN_ON(!dst_data || !src_data))
return -EINVAL;
+ if (src_data->type != data_type)
+ return -EINVAL;
if (src_data->len < min_len || data_len < src_data->len)
return -EINVAL;
return copy_struct_from_user(dst_data, data_len, src_data->uptr,
@@ -435,8 +452,8 @@ static inline int __iommu_copy_struct_from_user(
* include/uapi/linux/iommufd.h
* @user_data: Pointer to a struct iommu_user_data for user space data info
* @data_type: The data type of the @kdst. Must match with @user_data->type
- * @min_last: The last memember of the data structure @kdst points in the
- * initial version.
+ * @min_last: The last member of the data structure @kdst points in the initial
+ * version.
* Return 0 for success, otherwise -error.
*/
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
@@ -487,7 +504,9 @@ static inline int __iommu_copy_struct_from_user_array(
* @index: Index to the location in the array to copy user data from
* @min_last: The last member of the data structure @kdst points in the
* initial version.
- * Return 0 for success, otherwise -error.
+ *
+ * Copy a single entry from a user array. Return 0 for success, otherwise
+ * -error.
*/
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
min_last) \
@@ -496,28 +515,113 @@ static inline int __iommu_copy_struct_from_user_array(
offsetofend(typeof(*(kdst)), min_last))
/**
+ * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
+ * space data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @kdst_entry_size: sizeof(*kdst)
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ *
+ * Copy the entire user array. kdst must have room for kdst_entry_size *
+ * user_array->entry_num bytes. Return 0 for success, otherwise -error.
+ */
+static inline int
+iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
+ struct iommu_user_data_array *user_array,
+ unsigned int data_type)
+{
+ unsigned int i;
+ int ret;
+
+ if (user_array->type != data_type)
+ return -EINVAL;
+ if (!user_array->entry_num)
+ return -EINVAL;
+ if (likely(user_array->entry_len == kdst_entry_size)) {
+ if (copy_from_user(kdst, user_array->uptr,
+ user_array->entry_num *
+ user_array->entry_len))
+ return -EFAULT;
+ return 0;
+ }
+
+ /* Copy item by item */
+ for (i = 0; i != user_array->entry_num; i++) {
+ ret = copy_struct_from_user(
+ kdst + kdst_entry_size * i, kdst_entry_size,
+ user_array->uptr + user_array->entry_len * i,
+ user_array->entry_len);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
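A hedged sketch of the intended call pattern for the full-array copy, e.g. from a cache-invalidation path; the entry type and data-type constant are hypothetical, and @array is assumed to be a struct iommu_user_data_array * in scope:

	struct foo_inv_entry *entries;	/* hypothetical per-entry layout */
	int rc;

	entries = kcalloc(array->entry_num, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;
	rc = iommu_copy_struct_from_full_user_array(entries, sizeof(*entries),
						    array, FOO_INV_DATA_TYPE);
	if (!rc) {
		/* ... handle array->entry_num entries ... */
	}
	kfree(entries);
	return rc;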
+/**
+ * __iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @dst_data: Pointer to a struct iommu_user_data for user space data location
+ * @src_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @src_data. Must match with @dst_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int
+__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
+ void *src_data, unsigned int data_type,
+ size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (dst_data->type != data_type)
+ return -EINVAL;
+ if (dst_data->len < min_len || data_len < dst_data->len)
+ return -EINVAL;
+ return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
+ data_len, NULL);
+}
+
+/**
+ * iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @user_data: Pointer to a struct iommu_user_data for user space data location
+ * @ksrc: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @ksrc. Must match with @user_data->type
+ * @min_last: The last member of the data structure @ksrc points in the initial
+ * version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \
+ __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
+ offsetofend(typeof(*ksrc), min_last))
+
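And the reporting direction, e.g. from a driver path handed a struct iommu_user_data *user_data to fill; the struct, its type constant, and the last member name are hypothetical:

	struct iommu_hw_info_foo info = {
		.flags	= 0,
		.caps	= FOO_HW_CAP_DIRTY,	/* hypothetical capability bit */
	};

	return iommu_copy_struct_to_user(user_data, &info,
					 IOMMU_HW_INFO_TYPE_FOO, caps);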
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @hw_info: report iommu hardware information. The data buffer returned by this
* op is allocated in the iommu driver and freed by the caller after
- * use. The information type is one of enum iommu_hw_info_type defined
- * in include/uapi/linux/iommufd.h.
- * @domain_alloc: allocate and return an iommu domain if success. Otherwise
- * NULL is returned. The domain is not fully initialized until
- * the caller iommu_domain_alloc() returns.
- * @domain_alloc_user: Allocate an iommu domain corresponding to the input
- * parameters as defined in include/uapi/linux/iommufd.h.
- * Unlike @domain_alloc, it is called only by IOMMUFD and
- * must fully initialize the new domain before return.
- * Upon success, if the @user_data is valid and the @parent
- * points to a kernel-managed domain, the new domain must be
- * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
- * NULL while the @user_data can be optionally provided, the
- * new domain must support __IOMMU_DOMAIN_PAGING.
- * Upon failure, ERR_PTR must be returned.
+ * use. @type can input a requested type and output a supported type.
+ * use. Driver should reject an unsupported data @type input.
+ * @domain_alloc: Do not use in new drivers
+ * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
+ * use identity_domain instead. This should only be used
+ * if dynamic logic is necessary.
+ * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
+ * input parameters as defined in
+ * include/uapi/linux/iommufd.h. The @user_data can be
+ * optionally provided, the new domain must support
+ * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
+ * returned.
* @domain_alloc_paging: Allocate an iommu_domain that can be used for
- * UNMANAGED, DMA, and DMA_FQ domain types.
+ * UNMANAGED, DMA, and DMA_FQ domain types. This is the
+ * same as invoking domain_alloc_paging_flags() with
+ * @flags=0, @user_data=NULL. A driver should implement
+ * only one of the two ops.
* @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
+ * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -527,18 +631,22 @@ static inline int __iommu_copy_struct_from_user_array(
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
- * @dev_enable/disable_feat: per device entries to enable/disable
- * iommu specific features.
* @page_response: handle page request response
* @def_domain_type: device default domain type, return value:
* - IOMMU_DOMAIN_IDENTITY: must use an identity domain
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
- * @remove_dev_pasid: Remove any translation configurations of a specific
- * pasid, so that any DMA transactions with this pasid
- * will be blocked by the hardware.
- * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given
+ * @dev corresponding to @viommu_type. The driver should return
+ * 0 if the vIOMMU type is not supported. The driver must use
+ * the VIOMMU_STRUCT_SIZE macro to sanitize the driver-level
+ * vIOMMU structure relative to the core one
+ * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
+ * IOMMU instance @viommu->iommu_dev, as the set of virtualization
+ * resources shared/passed to user space IOMMU instance. Associate
+ * it with a nesting @parent_domain. The driver must set
+ * @viommu->ops to point to its own viommu_ops
* @owner: Driver module providing these ops
* @identity_domain: An always available, always attachable identity
* translation.
@@ -547,19 +655,30 @@ static inline int __iommu_copy_struct_from_user_array(
* @default_domain: If not NULL this will always be set as the default domain.
* This should be an IDENTITY/BLOCKED/PLATFORM domain.
* Do not use in new drivers.
+ * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
+ * no user domain for each PASID and the I/O page faults are
+ * forwarded through the user domain attached to the device
+ * RID.
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
- void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
+ void *(*hw_info)(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
/* Domain allocation and freeing by the iommu driver */
+#if IS_ENABLED(CONFIG_FSL_PAMU)
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- struct iommu_domain *(*domain_alloc_user)(
- struct device *dev, u32 flags, struct iommu_domain *parent,
+#endif
+ struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_paging_flags)(
+ struct device *dev, u32 flags,
const struct iommu_user_data *user_data);
struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
struct mm_struct *mm);
+ struct iommu_domain *(*domain_alloc_nested)(
+ struct device *dev, struct iommu_domain *parent, u32 flags,
+ const struct iommu_user_data *user_data);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
@@ -573,23 +692,24 @@ struct iommu_ops {
bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
- int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
- int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
-
void (*page_response)(struct device *dev, struct iopf_fault *evt,
struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
- void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
- struct iommu_domain *domain);
+
+ size_t (*get_viommu_size)(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+ int (*viommu_init)(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
const struct iommu_domain_ops *default_domain_ops;
- unsigned long pgsize_bitmap;
struct module *owner;
struct iommu_domain *identity_domain;
struct iommu_domain *blocked_domain;
struct iommu_domain *release_domain;
struct iommu_domain *default_domain;
+ u8 user_pasid_table:1;
};
/**
@@ -607,7 +727,8 @@ struct iommu_ops {
* * EBUSY - device is attached to a domain and cannot be changed
* * ENODEV - device specific errors, not able to be attached
* * <others> - treated as ENODEV by the caller. Use is discouraged
- * @set_dev_pasid: set an iommu domain to a pasid of device
+ * @set_dev_pasid: set or replace an iommu domain for a PASID of a device. On
+ *                 failure, the PASID of the device must be left in its old
+ *                 configuration.
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
* @unmap_pages: unmap a number of pages of the same size from an iommu domain
@@ -626,14 +747,14 @@ struct iommu_ops {
* @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
* including no-snoop TLPs on PCIe or other platform
* specific mechanisms.
- * @enable_nesting: Enable nesting
* @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
* @free: Release the domain after use.
*/
struct iommu_domain_ops {
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old);
int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
- ioasid_t pasid);
+ ioasid_t pasid, struct iommu_domain *old);
int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
@@ -654,7 +775,6 @@ struct iommu_domain_ops {
dma_addr_t iova);
bool (*enforce_cache_coherency)(struct iommu_domain *domain);
- int (*enable_nesting)(struct iommu_domain *domain);
int (*set_pgtable_quirks)(struct iommu_domain *domain,
unsigned long quirks);
@@ -669,6 +789,7 @@ struct iommu_domain_ops {
* @dev: struct device for sysfs handling
* @singleton_group: Used internally for drivers that have only one group
* @max_pasids: number of supported PASIDs
+ * @ready: set once iommu_device_register() has completed successfully
*/
struct iommu_device {
struct list_head list;
@@ -677,6 +798,7 @@ struct iommu_device {
struct device *dev;
struct iommu_group *singleton_group;
u32 max_pasids;
+ bool ready;
};
/**
@@ -771,26 +893,30 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
- .freelist = LIST_HEAD_INIT(gather->freelist),
+ .freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
};
}
-extern int bus_iommu_probe(const struct bus_type *bus);
-extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
-extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+ return iommu_paging_domain_alloc_flags(dev, 0);
+}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
-extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -836,7 +962,6 @@ extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
unsigned long quirks);
@@ -968,7 +1093,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
- * @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
* @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
@@ -979,7 +1103,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev);
* consumers.
*/
struct iommu_fwspec {
- const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
u32 flags;
unsigned int num_ids;
@@ -988,28 +1111,37 @@ struct iommu_fwspec {
/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+/* CANWBS is supported */
+#define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1)
+
+/*
+ * An iommu attach handle represents a relationship between an iommu domain
+ * and a PASID or RID of a device. It is allocated and managed by the component
+ * that manages the domain and is stored in the iommu group during the time the
+ * domain is attached.
+ */
+struct iommu_attach_handle {
+ struct iommu_domain *domain;
+};
/**
* struct iommu_sva - handle to a device-mm bond
*/
struct iommu_sva {
+ struct iommu_attach_handle handle;
struct device *dev;
- struct iommu_domain *domain;
- struct list_head handle_item;
refcount_t users;
};
struct iommu_mm_data {
u32 pasid;
+ struct mm_struct *mm;
struct list_head sva_domains;
- struct list_head sva_handles;
+ struct list_head mm_list_elm;
};
-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops);
-void iommu_fwspec_free(struct device *dev);
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
@@ -1038,9 +1170,6 @@ void dev_iommu_priv_set(struct device *dev, void *priv);
extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-
int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);
@@ -1052,12 +1181,10 @@ int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);
int iommu_attach_device_pasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
-struct iommu_domain *
-iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
- unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
@@ -1071,19 +1198,20 @@ struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};
-static inline bool iommu_present(const struct bus_type *bus)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
-static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
+static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+ unsigned int flags)
{
- return false;
+ return ERR_PTR(-ENODEV);
}
-static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_domain_free(struct iommu_domain *domain)
@@ -1315,40 +1443,17 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
}
static inline int iommu_fwspec_init(struct device *dev,
- struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *iommu_fwnode)
{
return -ENODEV;
}
-static inline void iommu_fwspec_free(struct device *dev)
-{
-}
-
static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
int num_ids)
{
return -ENODEV;
}
-static inline
-const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline int
-iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return -ENODEV;
-}
-
-static inline int
-iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return -ENODEV;
-}
-
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
@@ -1388,7 +1493,8 @@ static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
}
static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid)
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle)
{
return -ENODEV;
}
@@ -1398,13 +1504,6 @@ static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
{
}
-static inline struct iommu_domain *
-iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
- unsigned int type)
-{
- return NULL;
-}
-
static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
return IOMMU_PASID_INVALID;
@@ -1413,6 +1512,18 @@ static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
+#ifdef CONFIG_IRQ_MSI_IOMMU
+#ifdef CONFIG_IOMMU_API
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+#else
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
+ phys_addr_t msi_addr)
+{
+ return 0;
+}
+#endif /* CONFIG_IOMMU_API */
+#endif /* CONFIG_IRQ_MSI_IOMMU */
+
#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
@@ -1446,32 +1557,12 @@ static inline void iommu_debugfs_setup(void) {}
#endif
#ifdef CONFIG_IOMMU_DMA
-#include <linux/msi.h>
-
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
-void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
-
#else /* CONFIG_IOMMU_DMA */
-
-struct msi_desc;
-struct msi_msg;
-
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
return -ENODEV;
}
-
-static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
-{
- return 0;
-}
-
-static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
-{
-}
-
#endif /* CONFIG_IOMMU_DMA */
/*
@@ -1527,13 +1618,12 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
- struct mm_struct *mm);
+void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
@@ -1553,12 +1643,7 @@ static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
}
static inline void mm_pasid_drop(struct mm_struct *mm) {}
-
-static inline struct iommu_domain *
-iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
-{
- return NULL;
-}
+static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {}
#endif /* CONFIG_IOMMU_SVA */
#ifdef CONFIG_IOMMU_IOPF
@@ -1569,7 +1654,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
enum iommu_page_response_code status);
#else
@@ -1607,9 +1692,10 @@ static inline void iopf_free_group(struct iopf_group *group)
{
}
-static inline void
+static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
+ return -ENODEV;
}
static inline void iopf_group_response(struct iopf_group *group,
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index ffc3a949f837..6e7efe83bc5d 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -6,25 +6,67 @@
#ifndef __LINUX_IOMMUFD_H
#define __LINUX_IOMMUFD_H
-#include <linux/types.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/linux/iommufd.h>
struct device;
-struct iommufd_device;
-struct page;
-struct iommufd_ctx;
-struct iommufd_access;
struct file;
struct iommu_group;
+struct iommu_user_data;
+struct iommu_user_data_array;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct iommufd_viommu_ops;
+struct page;
+
+enum iommufd_object_type {
+ IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_DEVICE,
+ IOMMUFD_OBJ_HWPT_PAGING,
+ IOMMUFD_OBJ_HWPT_NESTED,
+ IOMMUFD_OBJ_IOAS,
+ IOMMUFD_OBJ_ACCESS,
+ IOMMUFD_OBJ_FAULT,
+ IOMMUFD_OBJ_VIOMMU,
+ IOMMUFD_OBJ_VDEVICE,
+ IOMMUFD_OBJ_VEVENTQ,
+ IOMMUFD_OBJ_HW_QUEUE,
+#ifdef CONFIG_IOMMUFD_TEST
+ IOMMUFD_OBJ_SELFTEST,
+#endif
+ IOMMUFD_OBJ_MAX,
+};
+
+/* Base struct for all objects with a userspace ID handle. */
+struct iommufd_object {
+	/*
+	 * Destroy will sleep and wait for wait_cnt to go to zero. This allows
+	 * concurrent users of the ID to reliably avoid causing a spurious
+	 * destroy failure. An increment of this count should either be short
+	 * lived or be revoked and blocked during pre_destroy().
+	 */
+ refcount_t wait_cnt;
+ refcount_t users;
+ enum iommufd_object_type type;
+ unsigned int id;
+};
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);
-int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
-int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
-void iommufd_device_detach(struct iommufd_device *idev);
+int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid);
struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
u32 iommufd_device_to_id(struct iommufd_device *idev);
@@ -54,6 +96,107 @@ void iommufd_access_detach(struct iommufd_access *access);
void iommufd_ctx_get(struct iommufd_ctx *ictx);
+struct iommufd_viommu {
+ struct iommufd_object obj;
+ struct iommufd_ctx *ictx;
+ struct iommu_device *iommu_dev;
+ struct iommufd_hwpt_paging *hwpt;
+
+ const struct iommufd_viommu_ops *ops;
+
+ struct xarray vdevs;
+ struct list_head veventqs;
+ struct rw_semaphore veventqs_rwsem;
+
+ enum iommu_viommu_type type;
+};
+
+struct iommufd_vdevice {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_device *idev;
+
+ /*
+ * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of
+ * AMD IOMMU, and vRID of Intel VT-d
+ */
+ u64 virt_id;
+
+ /* Clean up all driver-specific parts of an iommufd_vdevice */
+ void (*destroy)(struct iommufd_vdevice *vdev);
+};
+
+struct iommufd_hw_queue {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+
+ u64 base_addr; /* in guest physical address space */
+ size_t length;
+
+ enum iommu_hw_queue_type type;
+
+ /* Clean up all driver-specific parts of an iommufd_hw_queue */
+ void (*destroy)(struct iommufd_hw_queue *hw_queue);
+};
+
+/**
+ * struct iommufd_viommu_ops - vIOMMU specific operations
+ * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
+ *           of the vIOMMU will be freed by the iommufd core after calling this op
+ * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
+ *                       nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
+ *                       must be defined in include/uapi/linux/iommufd.h.
+ *                       It must fully initialize the new iommu_domain before
+ *                       returning. Upon failure, ERR_PTR must be returned.
+ * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
+ * any IOMMU hardware specific cache: TLB and device cache.
+ * The @array passes in the cache invalidation requests, in
+ * form of a driver data structure. A driver must update the
+ * array->entry_num to report the number of handled requests.
+ * The data structure of the array entry must be defined in
+ * include/uapi/linux/iommufd.h
+ * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
+ * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, and
+ *                any related HW procedure. @vdev is already initialized by the
+ *                iommufd core: the vdev->dev and vdev->viommu pointers are set;
+ *                vdev->virt_id carries a per-vIOMMU virtual ID (refer to struct
+ *                iommu_vdevice_alloc in include/uapi/linux/iommufd.h).
+ *                If the driver has a deinit function that reverts what the
+ *                vdevice_init op does, it should assign it to the @vdev->destroy
+ *                function pointer
+ * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
+ *                     given @viommu corresponding to @queue_type. The driver
+ *                     should return 0 if the HW queue type isn't supported. The
+ *                     driver is required to use the HW_QUEUE_STRUCT_SIZE macro
+ *                     to sanitize the driver-level HW queue structure related
+ *                     to the core one
+ * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue
+ *                      whose core-level structure already holds all the info
+ *                      about the guest queue memory.
+ *                      A driver providing this op indicates that the HW
+ *                      accesses the guest queue memory via physical addresses.
+ *                      @index carries the logical HW QUEUE ID per vIOMMU in a
+ *                      guest VM, for a multi-queue model. @base_addr_pa carries
+ *                      the physical location of the guest queue.
+ *                      If the driver has a deinit function that reverts what
+ *                      this op does, it should assign it to the
+ *                      @hw_queue->destroy pointer
+ */
+struct iommufd_viommu_ops {
+ void (*destroy)(struct iommufd_viommu *viommu);
+ struct iommu_domain *(*alloc_domain_nested)(
+ struct iommufd_viommu *viommu, u32 flags,
+ const struct iommu_user_data *user_data);
+ int (*cache_invalidate)(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array);
+ const size_t vdevice_size;
+ int (*vdevice_init)(struct iommufd_vdevice *vdev);
+ size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type);
+ /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
+ int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa);
+};
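
For illustration, a driver might wire up these ops roughly as sketched below. The structure and function names (my_viommu, my_viommu_alloc_domain_nested) are hypothetical and not part of this patch; only the op names and signatures come from the structure above.

	struct my_viommu {
		struct iommufd_viommu core;	/* must be the first member */
		/* driver-private state follows */
	};

	static struct iommu_domain *
	my_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,
				      const struct iommu_user_data *user_data)
	{
		struct my_viommu *mv = container_of(viommu, struct my_viommu, core);

		/* Fully initialize a nested domain here; on failure return ERR_PTR() */
		return ERR_PTR(-EOPNOTSUPP);
	}

	static const struct iommufd_viommu_ops my_viommu_ops = {
		.alloc_domain_nested	= my_viommu_alloc_domain_nested,
	};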
+
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
@@ -95,8 +238,9 @@ static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
{
}
-static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
- void *data, size_t len, unsigned int flags)
+static inline int iommufd_access_rw(struct iommufd_access *access,
+ unsigned long iova, void *data, size_t len,
+ unsigned int flags)
{
return -EOPNOTSUPP;
}
@@ -111,4 +255,146 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
return -EOPNOTSUPP;
}
#endif /* CONFIG_IOMMUFD */
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev);
+struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
+ unsigned long vdev_id);
+int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev, unsigned long *vdev_id);
+int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type, void *event_data,
+ size_t data_len);
+#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
+static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+_iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+}
+
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
+{
+}
+
+static inline struct device *
+iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return NULL;
+}
+
+static inline struct device *
+iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
+{
+ return NULL;
+}
+
+static inline int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev,
+ unsigned long *vdev_id)
+{
+ return -ENOENT;
+}
+
+static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type,
+ void *event_data, size_t data_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
+
+#define VIOMMU_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \
+ ((drv_struct *)NULL)->member)))
+
+#define VDEVICE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \
+ ((drv_struct *)NULL)->member)))
+
+#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \
+ ((drv_struct *)NULL)->member)))
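
Continuing the hypothetical my_viommu sketch above: the size-reporting op in struct iommu_ops would use VIOMMU_STRUCT_SIZE() so the core can verify the embedding. IOMMU_VIOMMU_TYPE_DEFAULT stands in for whichever uapi type the driver actually supports.

	static size_t my_get_viommu_size(struct device *dev,
					 enum iommu_viommu_type viommu_type)
	{
		if (viommu_type != IOMMU_VIOMMU_TYPE_DEFAULT)
			return 0;	/* type not supported */
		return VIOMMU_STRUCT_SIZE(struct my_viommu, core);
	}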
+
+/*
+ * Helpers for IOMMU driver to build/destroy a dependency between two sibling
+ * structures created by one of the allocators above
+ */
+#define iommufd_hw_queue_depend(dependent, depended, member) \
+ ({ \
+ int ret = -EINVAL; \
+ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ if (!WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu)) \
+ ret = _iommufd_object_depend(&dependent->member.obj, \
+ &depended->member.obj); \
+ ret; \
+ })
+
+#define iommufd_hw_queue_undepend(dependent, depended, member) \
+ ({ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu); \
+ _iommufd_object_undepend(&dependent->member.obj, \
+ &depended->member.obj); \
+ })
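
As a hedged usage sketch: given two sibling driver structures that each embed a struct iommufd_hw_queue named core (vcmdq0/vcmdq1 are invented names), the helpers pair up as:

	int ret = iommufd_hw_queue_depend(vcmdq1, vcmdq0, core);
	if (ret)
		return ret;
	/* ... on teardown ... */
	iommufd_hw_queue_undepend(vcmdq1, vcmdq0, core);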
+
+/*
+ * Helpers for an IOMMU driver to alloc/destroy an mmappable area for a
+ * structure.
+ *
+ * To support an mmappable MMIO region, a kernel driver must first register it
+ * with the iommufd core to allocate an @offset, during driver-structure
+ * initialization (e.g. the viommu_init op). Then, it should report this
+ * @offset and the @length of the MMIO region to user space for the mmap
+ * syscall.
+ */
+static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
+ phys_addr_t mmio_addr,
+ size_t length,
+ unsigned long *offset)
+{
+ return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
+ length, offset);
+}
+
+static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
+ unsigned long offset)
+{
+ _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
+}
#endif
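
A minimal sketch of that registration flow inside a hypothetical viommu_init op (MY_MMIO_BASE_PA and MY_MMIO_LEN are invented; my_viommu_ops is from the sketch further above):

	static int my_viommu_init(struct iommufd_viommu *viommu,
				  struct iommu_domain *parent_domain,
				  const struct iommu_user_data *user_data)
	{
		unsigned long offset;
		int ret;

		ret = iommufd_viommu_alloc_mmap(viommu, MY_MMIO_BASE_PA,
						MY_MMIO_LEN, &offset);
		if (ret)
			return ret;

		/* report offset and MY_MMIO_LEN to user space for mmap() */
		viommu->ops = &my_viommu_ops;
		return 0;
	}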
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 19a7b00baff4..bdd2e0652bc3 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -14,65 +14,64 @@
#include <linux/io.h>
/**
- * read_poll_timeout - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
- * @timeout_us: Timeout in us, 0 means never timeout
- * @sleep_before_read: if it is true, sleep @sleep_us before read.
- * @args: arguments for @op poll
+ * poll_timeout_us - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
+ * @op: Operation
+ * @cond: Break condition
+ * @sleep_us: Maximum time to sleep between operations in us (0 tight-loops).
+ * Please read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_op: if it is true, sleep @sleep_us before operation.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
-#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
+#define poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __sleep_us = (sleep_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int ___ret; \
might_sleep_if((__sleep_us) != 0); \
- if (sleep_before_read && __sleep_us) \
+ if ((sleep_before_op) && __sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
if (__sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
cpu_relax(); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
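
For example (not part of this patch), polling a made-up MMIO status register until a ready bit is set, sleeping up to 10us between reads with a 1ms timeout:

	u32 val;
	int ret;

	ret = poll_timeout_us(val = readl(base + MY_STATUS),
			      val & MY_READY_BIT,
			      10, 1000, false);
	if (ret)	/* -ETIMEDOUT */
		return ret;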
/**
- * read_poll_timeout_atomic - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
- * @timeout_us: Timeout in us, 0 means never timeout
- * @delay_before_read: if it is true, delay @delay_us before read.
- * @args: arguments for @op poll
+ * poll_timeout_us_atomic - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val.
+ * @op: Operation
+ * @cond: Break condition
+ * @delay_us: Time to udelay between operations in us (0 tight-loops).
+ * Please read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_op: if it is true, delay @delay_us before operation.
*
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
@@ -80,25 +79,33 @@
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout.
*/
-#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
- delay_before_read, args...) \
+#define poll_timeout_us_atomic(op, cond, delay_us, timeout_us, \
+ delay_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
unsigned long __delay_us = (delay_us); \
u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
- if (delay_before_read && __delay_us) { \
+ int ___ret; \
+ if ((delay_before_op) && __delay_us) { \
udelay(__delay_us); \
if (__timeout_us) \
__left_ns -= __delay_ns; \
} \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && __left_ns < 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && __left_ns < 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
if (__delay_us) { \
@@ -110,26 +117,77 @@
if (__timeout_us) \
__left_ns--; \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
/**
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if it is true, sleep @sleep_us before read.
+ * @args: arguments for @op poll
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
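
The same made-up poll written with this compatibility wrapper, which expands to the poll_timeout_us() form above:

	ret = read_poll_timeout(readl, val, val & MY_READY_BIT,
				10, 1000, false, base + MY_STATUS);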
+
+/**
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if it is true, delay @delay_us before read.
+ * @args: arguments for @op poll
+ *
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
+ */
+#define read_poll_timeout_atomic(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+
+/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
@@ -140,16 +198,16 @@
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
*/
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index db7fe25f3370..9afa30f9346f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -154,15 +154,20 @@ enum {
};
/* helpers to define resources */
-#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc) \
(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
- .desc = IORES_DESC_NONE, \
+ .desc = (_desc), \
}
+#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+ DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
+#define DEFINE_RES(_start, _size, _flags) \
+ DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
@@ -188,6 +193,42 @@ enum {
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
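
Illustrative uses of the new helpers (the addresses and names are invented):

	static struct resource my_mmio =
		DEFINE_RES(0xfed40000, 0x1000, IORESOURCE_MEM);
	static struct resource my_named =
		DEFINE_RES_NAMED_DESC(0xfed50000, 0x1000, "my-dev",
				      IORESOURCE_MEM, IORES_DESC_RESERVED);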
+/**
+ * typedef resource_alignf - Resource alignment callback
+ * @data: Private data used by the callback
+ * @res: Resource candidate range (an empty resource space)
+ * @size: The minimum size of the empty space
+ * @align: Alignment from the constraints
+ *
+ * Callback allows calculating resource placement and alignment beyond min,
+ * max, and align fields in the struct resource_constraint.
+ *
+ * Return: Start address for the resource.
+ */
+typedef resource_size_t (*resource_alignf)(void *data,
+ const struct resource *res,
+ resource_size_t size,
+ resource_size_t align);
+
+/**
+ * struct resource_constraint - constraints to be met while searching empty
+ * resource space
+ * @min: The minimum address for the memory range
+ * @max: The maximum address for the memory range
+ * @align: Alignment for the start address of the empty space
+ * @alignf: Additional alignment constraints callback
+ * @alignf_data: Data provided for @alignf callback
+ *
+ * Contains the range and alignment constraints that have to be met during
+ * find_resource_space(). @alignf can be NULL indicating no alignment beyond
+ * @align is necessary.
+ */
+struct resource_constraint {
+ resource_size_t min, max, align;
+ resource_alignf alignf;
+ void *alignf_data;
+};
+
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
@@ -207,15 +248,44 @@ extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
+
+/**
+ * resource_set_size - Calculate resource end address from size and start
+ * @res: Resource descriptor
+ * @size: Size of the resource
+ *
+ * Calculate the end address for @res based on @size.
+ *
+ * Note: The start address of @res must be set when calling this function.
+ * Prefer resource_set_range() if setting both the start address and @size.
+ */
+static inline void resource_set_size(struct resource *res, resource_size_t size)
+{
+ res->end = res->start + size - 1;
+}
+
+/**
+ * resource_set_range - Set resource start and end addresses
+ * @res: Resource descriptor
+ * @start: Start address for the resource
+ * @size: Size of the resource
+ *
+ * Set @res start address and calculate the end address based on @size.
+ */
+static inline void resource_set_range(struct resource *res,
+ resource_size_t start,
+ resource_size_t size)
+{
+ res->start = start;
+ resource_set_size(res, size);
+}
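
For instance, the two helpers together:

	struct resource res = { .flags = IORESOURCE_MEM };

	resource_set_range(&res, 0x1000, 0x100);
	/* now res.start == 0x1000 and res.end == 0x10ff */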
+
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
@@ -264,6 +334,18 @@ static inline bool resource_union(const struct resource *r1, const struct resour
return true;
}
+/*
+ * Check if this resource is added to a resource tree or detached. Caller is
+ * responsible for not racing assignment.
+ */
+static inline bool resource_assigned(struct resource *res)
+{
+ return res->parent;
+}
+
+int find_resource_space(struct resource *root, struct resource *new,
+ resource_size_t size, struct resource_constraint *constraint);
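
A hedged sketch of a constraint-based search (the range and sizes are arbitrary; no extra @alignf callback is used):

	struct resource_constraint constraint = {
		.min	= 0x80000000,
		.max	= 0xffffffff,
		.align	= 0x100000,	/* 1 MiB alignment */
	};
	struct resource new = { .flags = IORESOURCE_MEM };
	int ret;

	ret = find_resource_space(&iomem_resource, &new, 0x10000, &constraint);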
+
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index db1249cd9692..5210e8371238 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task_is_realtime(task))
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
@@ -60,7 +60,8 @@ static inline int __get_task_ioprio(struct task_struct *p)
int prio;
if (!ioc)
- return IOPRIO_DEFAULT;
+ return IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
if (p != current)
lockdep_assert_held(&p->alloc_lock);
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
index f0e99fc7dd8b..2bd1661fe9ad 100644
--- a/include/linux/ioremap.h
+++ b/include/linux/ioremap.h
@@ -4,6 +4,7 @@
#include <linux/kasan.h>
#include <asm/pgtable.h>
+#include <asm/vmalloc.h>
#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
/*
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index 4696abfd311c..3e85afe794c0 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
*/
static inline void iosys_map_clear(struct iosys_map *map)
{
- if (map->is_iomem) {
- map->vaddr_iomem = NULL;
- map->is_iomem = false;
- } else {
- map->vaddr = NULL;
- }
+ memset(map, 0, sizeof(*map));
}
/**
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index 270454a6703d..f9a17fbbd398 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -10,6 +10,7 @@
#include <linux/uio.h>
#include <linux/bvec.h>
+#include <linux/folio_queue.h>
typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
void *priv, void *priv2);
@@ -141,6 +142,62 @@ size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
}
/*
+ * Handle ITER_FOLIOQ.
+ */
+static __always_inline
+size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ if (slot == folioq_nr_slots(folioq)) {
+ /* The iterator may have been extended. */
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t part, remain = 0, consumed;
+ size_t fsize;
+ void *base;
+
+ if (!folio)
+ break;
+
+ fsize = folioq_folio_size(folioq, slot);
+ if (skip < fsize) {
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ }
+ if (skip >= fsize) {
+ skip = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->folioq_slot = slot;
+ iter->folioq = folioq;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
* Handle ITER_XARRAY.
*/
static __always_inline
@@ -249,6 +306,8 @@ size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
return iterate_bvec(iter, len, priv, priv2, step);
if (iov_iter_is_kvec(iter))
return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
if (iov_iter_is_xarray(iter))
return iterate_xarray(iter, len, priv, priv2, step);
return iterate_discard(iter, len, priv, priv2, step);
@@ -271,4 +330,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
}
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A step function, @step, must be provided; it is given the mapped kernel
+ * address of each segment.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and the value of @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
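
An illustrative step function honoring the iov_step_f contract above (copy_step, buf and want are invented; returning 0 means the whole segment was consumed):

	static size_t copy_step(void *base, size_t progress, size_t len,
				void *priv, void *priv2)
	{
		memcpy((char *)priv + progress, base, len);
		return 0;	/* nothing left unprocessed in this segment */
	}

	/* ... */
	size_t copied = iterate_and_advance_kernel(iter, want, buf, NULL,
						   copy_step);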
#endif /* _LINUX_IOV_ITER_H */
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 2c6936b8371f..455f6c2a1903 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -70,15 +70,13 @@ enum ipack_space {
IPACK_SPACE_COUNT,
};
-/**
- */
struct ipack_region {
phys_addr_t start;
size_t size;
};
/**
- * struct ipack_device
+ * struct ipack_device - subsystem representation of an IPack device
*
* @slot: Slot where the device is plugged in the carrier board
* @bus: ipack_bus_device where the device is plugged to.
@@ -89,7 +87,7 @@ struct ipack_region {
*
* Warning: Direct access to mapped memory is possible but the endianness
* is not the same with PCI carrier or VME carrier. The endianness is managed
- * by the carrier board throught bus->ops.
+ * by the carrier board through bus->ops.
*/
struct ipack_device {
unsigned int slot;
@@ -124,6 +122,7 @@ struct ipack_driver_ops {
* struct ipack_driver -- Specific data to each ipack device driver
*
* @driver: Device driver kernel representation
+ * @id_table: Device ID table for this driver
* @ops: Callbacks provided by the IPack device driver
*/
struct ipack_driver {
@@ -161,7 +160,7 @@ struct ipack_bus_ops {
};
/**
- * struct ipack_bus_device
+ * struct ipack_bus_device - IPack bus representation
*
* @dev: pointer to carrier device
* @slots: number of slots available
@@ -185,6 +184,8 @@ struct ipack_bus_device {
*
* The carrier board device should call this function to register itself as
* available bus device in ipack.
+ *
+ * Return: %NULL on error or &struct ipack_bus_device on success
*/
struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
const struct ipack_bus_ops *ops,
@@ -192,6 +193,8 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
/**
* ipack_bus_unregister -- unregister an ipack bus
+ *
+ * Return: %0
*/
int ipack_bus_unregister(struct ipack_bus_device *bus);
@@ -200,6 +203,8 @@ int ipack_bus_unregister(struct ipack_bus_device *bus);
*
* Called by a ipack driver to register itself as a driver
* that can manage ipack devices.
+ *
+ * Return: zero on success or error code on failure.
*/
int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
const char *name);
@@ -215,7 +220,7 @@ void ipack_driver_unregister(struct ipack_driver *edrv);
* function. The rest of the fields will be allocated and populated
 * during initialization.
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -230,7 +235,7 @@ int ipack_device_init(struct ipack_device *dev);
* Add a new IPack device. The call is done by the carrier driver
* after calling ipack_device_init().
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -266,9 +271,11 @@ void ipack_put_device(struct ipack_device *dev);
.device = (dev)
/**
- * ipack_get_carrier - it increase the carrier ref. counter of
+ * ipack_get_carrier - try to increase the carrier ref. counter of
* the carrier module
* @dev: mezzanine device which wants to get the carrier
+ *
+ * Return: true on success.
*/
static inline int ipack_get_carrier(struct ipack_device *dev)
{
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e8240cf2611a..12faca29bbb9 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -129,20 +129,25 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif
#if defined(CONFIG_IPC_NS)
-extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct ipc_namespace, ns);
+}
+
+extern struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns);
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
if (ns) {
- if (refcount_inc_not_zero(&ns->ns.count))
+ if (ns_ref_get(ns))
return ns;
}
@@ -151,7 +156,7 @@ static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
-static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (flags & CLONE_NEWIPC)
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index a1c9c0d48ebf..7da6602eab71 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -93,7 +93,8 @@ struct ipmi_user_hndl {
/*
* Called when the interface detects a watchdog pre-timeout. If
- * this is NULL, it will be ignored for the user.
+ * this is NULL, it will be ignored for the user. Note that you
+ * can't make any IPMI calls from here; it's called with locks held.
*/
void (*ipmi_watchdog_pretimeout)(void *handler_data);
@@ -126,7 +127,7 @@ int ipmi_create_user(unsigned int if_num,
* the users before you destroy the callback structures, it should be
* safe, too.
*/
-int ipmi_destroy_user(struct ipmi_user *user);
+void ipmi_destroy_user(struct ipmi_user *user);
/* Get the IPMI version of the BMC we are talking to. */
int ipmi_get_version(struct ipmi_user *user,
@@ -343,4 +344,14 @@ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
/* Helper function for computing the IPMB checksum of some data. */
unsigned char ipmb_checksum(unsigned char *data, int size);
+/*
+ * For things that must send messages at panic time, like the IPMI watchdog
+ * driver that extends the reset time on a panic, use this to send messages
+ * from panic context. Note that this puts the driver into a mode that
+ * only works at panic time, so only use it then.
+ */
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg);
+
#endif /* __LINUX_IPMI_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 5d69820d8b02..892e2d656e1e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -109,8 +109,9 @@ struct ipmi_smi_msg {
enum ipmi_smi_msg_type type;
- long msgid;
- void *user_data;
+ long msgid;
+ /* Response to this message, will be NULL if not from a user request. */
+ struct ipmi_recv_msg *recv_msg;
int data_size;
unsigned char data[IPMI_MAX_MSG_LENGTH];
@@ -168,9 +169,11 @@ struct ipmi_smi_handlers {
* are held when this is run. Message are delivered one at
* a time by the message handler, a new message will not be
* delivered until the previous message is returned.
+ *
+ * This can return an error if the SMI is not in a state where it
+ * can send a message.
*/
- void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg);
+ int (*sender)(void *send_info, struct ipmi_smi_msg *msg);
/*
* Called by the upper layer to request that we try to get
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 383a0ea2ab91..7294e4e89b79 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -17,6 +17,7 @@ struct ipv6_devconf {
__s32 hop_limit;
__s32 mtu6;
__s32 forwarding;
+ __s32 force_forwarding;
__s32 disable_policy;
__s32 proxy_ndp;
__cacheline_group_end(ipv6_devconf_read_txrx);
@@ -89,6 +90,7 @@ struct ipv6_devconf {
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;
__u8 ra_honor_pio_life;
+ __u8 ra_honor_pio_pflag;
struct ctl_table_header *sysctl_header;
};
@@ -155,6 +157,7 @@ struct inet6_skb_parm {
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
#define IP6SKB_MULTIPATH 1024
+#define IP6SKB_MCROUTE 2048
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -206,22 +209,26 @@ struct inet6_cork {
struct ipv6_txoptions *opt;
u8 hop_limit;
u8 tclass;
+ u8 dontfrag:1;
};
/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
+ /* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
- struct in6_pktinfo sticky_pktinfo;
- const struct in6_addr *daddr_cache;
+ __be32 flow_label;
+ u32 dst_cookie;
+ struct ipv6_txoptions __rcu *opt;
+ s16 hop_limit;
+ u8 pmtudisc;
+ u8 tclass;
#ifdef CONFIG_IPV6_SUBTREES
- const struct in6_addr *saddr_cache;
+ bool saddr_cache;
#endif
+ bool daddr_cache;
- __be32 flow_label;
- __u32 frag_size;
-
- s16 hop_limit;
u8 mcast_hops;
+ u32 frag_size;
int ucast_oif;
int mcast_oif;
@@ -229,7 +236,7 @@ struct ipv6_pinfo {
/* pktoption flags */
union {
struct {
- __u16 srcrt:1,
+ u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
@@ -246,29 +253,24 @@ struct ipv6_pinfo {
recvfragsize:1;
/* 1 bits hole */
} bits;
- __u16 all;
+ u16 all;
} rxopt;
/* sockopt flags */
- __u8 srcprefs; /* 001: prefer temporary address
+ u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- __u8 pmtudisc;
- __u8 min_hopcount;
- __u8 tclass;
+ u8 min_hopcount;
__be32 rcv_flowinfo;
+ struct in6_pktinfo sticky_pktinfo;
- __u32 dst_cookie;
-
- struct ipv6_mc_socklist __rcu *ipv6_mc_list;
- struct ipv6_ac_socklist *ipv6_ac_list;
- struct ipv6_fl_socklist __rcu *ipv6_fl_list;
-
- struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
+
+ struct ipv6_mc_socklist __rcu *ipv6_mc_list;
+ struct ipv6_ac_socklist *ipv6_ac_list;
};
/* We currently use available bits from inet_sk(sk)->inet_flags,
@@ -291,7 +293,7 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
-
+ struct numa_drop_counters drop_counters;
struct ipv6_pinfo inet6;
};
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
new file mode 100644
index 000000000000..6ab913e57da0
--- /dev/null
+++ b/include/linux/irq-entry-common.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IRQENTRYCOMMON_H
+#define __LINUX_IRQENTRYCOMMON_H
+
+#include <linux/context_tracking.h>
+#include <linux/kmsan.h>
+#include <linux/rseq_entry.h>
+#include <linux/static_call_types.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+#include <linux/unwind_deferred.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
+ _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+ * @regs: Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
+
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
+#endif
+
+/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ * states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also, with NO_HZ_FULL, RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
+
+/**
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal_or_restart(struct pt_regs *regs);
+
+/* Handle pending TIF work */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
+
+/**
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly; use the syscall_/irqentry_ prefixed variants below
+ */
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+}
+
+static __always_inline void __exit_to_user_mode_validate(void)
+{
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_exit_to_user_mode_legacy();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_syscall_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_irqentry_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state still has
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ unwind_reset_info();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing).
+ */
+static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ rseq_note_user_irq_entry();
+}
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit is not invoking #1 which is the syscall specific one time
+ * work.
+ */
+static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ irqentry_exit_to_user_mode_prepare(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
+
+#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+#endif
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries, RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
+ *
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking ct_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
+#endif
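The arch hooks in this new header all follow the same override pattern: the generic inline is compiled only when the architecture has not already provided a macro of the same name. A minimal sketch of how a hypothetical architecture could supply arch_exit_to_user_mode() from its asm/entry-common.h (foo_spec_barrier() is a made-up placeholder for a speculation mitigation):

	/* arch/foo/include/asm/entry-common.h (hypothetical) */
	static __always_inline void arch_exit_to_user_mode(void)
	{
		/* last-minute, non-instrumentable work; no locking allowed here */
		foo_spec_barrier();
	}
	#define arch_exit_to_user_mode arch_exit_to_user_mode

The #define is what suppresses the weak default above; the same pattern applies to arch_enter_from_user_mode(), arch_in_rcu_eqs() and the local_irq_*_exit_to_user() variants.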
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a217e1029c1d..4a9f1d7b08c3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -64,7 +64,6 @@ enum irqchip_irq_state;
* IRQ_NOAUTOEN - Interrupt is not automatically enabled in
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
- * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_THREAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
@@ -93,7 +92,6 @@ enum {
IRQ_NOREQUEST = (1 << 11),
IRQ_NOAUTOEN = (1 << 12),
IRQ_NO_BALANCING = (1 << 13),
- IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
@@ -105,7 +103,7 @@ enum {
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
@@ -201,8 +199,6 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
- * context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
@@ -233,7 +229,6 @@ enum {
IRQD_AFFINITY_SET = BIT(12),
IRQD_LEVEL = BIT(13),
IRQD_WAKEUP_STATE = BIT(14),
- IRQD_MOVE_PCNTXT = BIT(15),
IRQD_IRQ_DISABLED = BIT(16),
IRQD_IRQ_MASKED = BIT(17),
IRQD_IRQ_INPROGRESS = BIT(18),
@@ -338,11 +333,6 @@ static inline bool irqd_is_wakeup_set(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
-static inline bool irqd_can_move_in_process_context(struct irq_data *d)
-{
- return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
-}
-
static inline bool irqd_irq_disabled(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
@@ -496,6 +486,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @ipi_send_mask: send an IPI to destination cpus in cpumask
* @irq_nmi_setup: function called from core code before enabling an NMI
* @irq_nmi_teardown: function called from core code after disabling an NMI
+ * @irq_force_complete_move: optional function to force complete pending irq move
* @flags: chip specific flags
*/
struct irq_chip {
@@ -547,6 +538,8 @@ struct irq_chip {
int (*irq_nmi_setup)(struct irq_data *data);
void (*irq_nmi_teardown)(struct irq_data *data);
+ void (*irq_force_complete_move)(struct irq_data *data);
+
unsigned long flags;
};
@@ -567,6 +560,7 @@ struct irq_chip {
* in the suspend path if they are in disabled state
* IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
* IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
+ * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
@@ -581,6 +575,7 @@ enum {
IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
IRQCHIP_IMMUTABLE = (1 << 11),
+ IRQCHIP_MOVE_DEFERRED = (1 << 12),
};
#include <linux/irqdesc.h>
@@ -602,7 +597,6 @@ enum {
struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
@@ -620,6 +614,7 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+bool irq_can_move_in_process_context(struct irq_data *data);
void __irq_move_irq(struct irq_data *data);
static inline void irq_move_irq(struct irq_data *data)
{
@@ -627,11 +622,10 @@ static inline void irq_move_irq(struct irq_data *data)
__irq_move_irq(data);
}
void irq_move_masked_irq(struct irq_data *data);
-void irq_force_complete_move(struct irq_desc *desc);
#else
+static inline bool irq_can_move_in_process_context(struct irq_data *data) { return true; }
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
-static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
extern int no_irq_affinity;
@@ -661,7 +655,6 @@ extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern void handle_fasteoi_nmi(struct irq_desc *desc);
-extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
@@ -675,6 +668,8 @@ extern int irq_chip_set_parent_state(struct irq_data *data,
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
@@ -694,6 +689,9 @@ extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif
+/* Disable or mask interrupts during a kernel kexec */
+extern void machine_kexec_mask_interrupts(void);
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
@@ -702,7 +700,7 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
extern int noirqdebug_setup(char *str);
/* Checks whether the interrupt can be requested by request_irq(): */
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+extern bool can_request_irq(unsigned int irq, unsigned long irqflags);
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
@@ -720,10 +718,6 @@ static inline void irq_set_chip_and_handler(unsigned int irq,
}
extern int irq_set_percpu_devid(unsigned int irq);
-extern int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity);
-extern int irq_get_percpu_devid_partition(unsigned int irq,
- struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
@@ -979,10 +973,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY
-void irq_init_desc(unsigned int irq);
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
@@ -991,7 +981,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
- * @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -1000,7 +989,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
- unsigned long polarity;
};
/**
@@ -1040,8 +1028,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
- * @type_cache: Cached type register
- * @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as an wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -1068,8 +1054,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
- u32 type_cache;
- u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
@@ -1106,6 +1090,7 @@ enum irq_gc_flags {
* @irq_flags_to_set: IRQ* flags to set on irq setup
* @irq_flags_to_clear: IRQ* flags to clear on irq setup
* @gc_flags: Generic chip specific setup flags
+ * @exit: Function called on each chip when it is destroyed.
* @gc: Array of pointers to generic interrupt chips
*/
struct irq_domain_chip_generic {
@@ -1114,9 +1099,37 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
+ void (*exit)(struct irq_chip_generic *gc);
struct irq_chip_generic *gc[];
};
+/**
+ * struct irq_domain_chip_generic_info - Generic chip information structure
+ * @name: Name of the generic interrupt chip
+ * @handler: Interrupt handler used by the generic interrupt chip
+ * @irqs_per_chip: Number of interrupts each chip handles (max 32)
+ * @num_ct: Number of irq_chip_type instances associated with each
+ * chip
+ * @irq_flags_to_clear: IRQ_* bits to clear in the mapping function
+ * @irq_flags_to_set: IRQ_* bits to set in the mapping function
+ * @gc_flags: Generic chip specific setup flags
+ * @init: Function called on each chip when it is created.
+ * Allows additional chip initialisation.
+ * @exit: Function called on each chip when it is destroyed.
+ * Allows chip cleanup operations.
+ */
+struct irq_domain_chip_generic_info {
+ const char *name;
+ irq_flow_handler_t handler;
+ unsigned int irqs_per_chip;
+ unsigned int num_ct;
+ unsigned int irq_flags_to_clear;
+ unsigned int irq_flags_to_set;
+ enum irq_gc_flags gc_flags;
+ int (*init)(struct irq_chip_generic *gc);
+ void (*exit)(struct irq_chip_generic *gc);
+};
+
/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
@@ -1153,6 +1166,20 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+int irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info);
+void irq_domain_remove_generic_chips(struct irq_domain *d);
+#else
+static inline int
+irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info)
+{
+ return -EINVAL;
+}
+static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { }
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
+
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
@@ -1187,31 +1214,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
-#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
-{
- raw_spin_lock(&gc->lock);
-}
-
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
-{
- raw_spin_unlock(&gc->lock);
-}
-#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
-#endif
-
-/*
- * The irqsave variants are for usage in non interrupt code. Do not use
- * them in irq_chip callbacks. Use irq_gc_lock() instead.
- */
-#define irq_gc_lock_irqsave(gc, flags) \
- raw_spin_lock_irqsave(&(gc)->lock, flags)
-
-#define irq_gc_unlock_irqrestore(gc, flags) \
- raw_spin_unlock_irqrestore(&(gc)->lock, flags)
-
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index ab831e5ae748..89b4d8ff274b 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,11 +16,28 @@
* requested like normal irqs and enqueued from process context.
*/
+struct irq_sim_ops {
+ int (*irq_sim_irq_requested)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+ void (*irq_sim_irq_released)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+};
+
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
unsigned int num_irqs);
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
struct fwnode_handle *fwnode,
unsigned int num_irqs);
+struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
+struct irq_domain *
+devm_irq_domain_create_sim_full(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
void irq_domain_remove_sim(struct irq_domain *domain);
#endif /* _LINUX_IRQ_SIM_H */
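A short sketch of the extended simulator API above: the new ops let the creator veto or observe requests for individual simulated lines. Callback and variable names here are hypothetical:

	static int foo_sim_requested(struct irq_domain *domain,
				     irq_hw_number_t hwirq, void *data)
	{
		/* refuse lines the test bench does not model */
		return hwirq < 8 ? 0 : -EINVAL;
	}

	static const struct irq_sim_ops foo_sim_ops = {
		.irq_sim_irq_requested = foo_sim_requested,
	};

	domain = irq_domain_create_sim_full(fwnode, 8, &foo_sim_ops, priv);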
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 136f2980cba3..c5afd053ae32 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -2,8 +2,9 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
-#include <linux/smp_types.h>
+#include <linux/irq_work_types.h>
#include <linux/rcuwait.h>
+#include <linux/smp_types.h>
/*
* An entry can be in one of four states:
@@ -14,12 +15,6 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-struct irq_work {
- struct __call_single_node node;
- void (*func)(struct irq_work *);
- struct rcuwait irqwait;
-};
-
#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
.node = { .u_flags = (_flags), }, \
.func = (_func), \
diff --git a/include/linux/irq_work_types.h b/include/linux/irq_work_types.h
new file mode 100644
index 000000000000..73abec5bb06e
--- /dev/null
+++ b/include/linux/irq_work_types.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQ_WORK_TYPES_H
+#define _LINUX_IRQ_WORK_TYPES_H
+
+#include <linux/smp_types.h>
+#include <linux/types.h>
+
+struct irq_work {
+ struct __call_single_node node;
+ void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
+};
+
+#endif
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 9bdb2a781841..ede1fa938152 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
@@ -18,20 +19,22 @@ struct irq_bypass_consumer;
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N
+ * pairings are not supported.
*/
+struct irq_bypass_consumer;
+
/**
* struct irq_bypass_producer - IRQ bypass producer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @consumer: The connected consumer (NULL if no connection)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -43,8 +46,8 @@ struct irq_bypass_consumer;
* for a physical device assigned to a VM.
*/
struct irq_bypass_producer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_consumer *consumer;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
@@ -56,8 +59,8 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @producer: The connected producer (NULL if no connection)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -69,8 +72,9 @@ struct irq_bypass_producer {
* portions of the interrupt handling to the VM.
*/
struct irq_bypass_consumer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_producer *producer;
+
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
@@ -79,9 +83,11 @@ struct irq_bypass_consumer {
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
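With the opaque token replaced by an explicit eventfd and IRQ, a producer registration under the new signatures would look roughly as follows ('vdev' and its fields are hypothetical; error handling elided):

	/* pair the physical IRQ with the eventfd the consumer matches on */
	ret = irq_bypass_register_producer(&vdev->producer,
					   vdev->trigger_eventfd, vdev->irq);
	...
	irq_bypass_unregister_producer(&vdev->producer);

The manager then connects the producer to any consumer registered with the same eventfd_ctx, calling the @stop/@add_*/@start callbacks in the order described in the comment above.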
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index d5e6024cb2a8..bc4ddacd6ddc 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -17,12 +17,18 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+typedef int (*platform_irq_probe_t)(struct platform_device *, struct device_node *);
+
/* Undefined on purpose */
extern of_irq_init_cb_t typecheck_irq_init_cb;
+extern platform_irq_probe_t typecheck_irq_probe;
#define typecheck_irq_init_cb(fn) \
(__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+#define typecheck_irq_probe(fn) \
+ (__typecheck(typecheck_irq_probe, &fn) ? fn : fn)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
@@ -42,7 +48,7 @@ extern int platform_irqchip_probe(struct platform_device *pdev);
static const struct of_device_id drv_name##_irqchip_match_table[] = {
#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
- .data = typecheck_irq_init_cb(fn), },
+ .data = typecheck_irq_probe(fn), },
#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 1177f3a1aed5..fc0246cc05ac 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -10,10 +10,6 @@
#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
struct irq_domain;
struct fwnode_handle;
diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h
new file mode 100644
index 000000000000..44157c9abb78
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3-prio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+
+/*
+ * GIC priorities from the view of the PMR/RPR.
+ *
+ * These values are chosen to be valid in either the absolute priority space or
+ * the NS view of the priority space. The value programmed into the distributor
+ * and ITS will be chosen at boot time such that these values appear in the
+ * PMR/RPR.
+ *
+ * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both
+ * IRQs and pseudo-NMIs.
+ *
+ * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This
+ * can be written to the PMR to mask regular IRQs.
+ *
+ * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be
+ * written to the PMR to mask pseudo-NMIs.
+ *
+ * On arm64 some code sections either automatically switch back to PSR.I or
+ * explicitly require that priority masking not be used. If bit GICV3_PRIO_PSR_I_SET
+ * is included in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+#define GICV3_PRIO_UNMASKED 0xe0
+#define GICV3_PRIO_IRQ 0xc0
+#define GICV3_PRIO_NMI 0x80
+
+#define GICV3_PRIO_PSR_I_SET (1 << 4)
+
+#ifndef __ASSEMBLER__
+
+#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1))
+#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1))
+
+#define __gicv3_prio_valid_ns(p) \
+ (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p))
+
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI));
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ));
+
+static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ);
+static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED);
+
+static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET));
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */
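A worked round trip shows why GICV3_PRIO_IRQ passes the validity check above; these asserts are illustrative, not part of the header:

	/* 0xc0 << 1 = 0x180, masked to 0x80 in the NS view */
	static_assert(__gicv3_prio_to_ns(GICV3_PRIO_IRQ) == 0x80);
	/* 0x80 | (0x80 >> 1) = 0xc0 recovers the original priority */
	static_assert(__gicv3_ns_to_prio(0x80) == GICV3_PRIO_IRQ);

GICV3_PRIO_NMI (0x80) maps to NS value 0x00 and back the same way, so both values survive the NS shift-by-one view of the priority space.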
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 728691365464..70c0948f978e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -638,7 +638,7 @@ struct fwnode_handle;
int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *domain);
+ struct irq_domain *domain, u8 irq_prio);
int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 2c63375bbd43..0b0887099fd7 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -25,6 +25,14 @@ struct its_vm {
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
+ /*
+ * Ensures mutual exclusion between updates to vlpi_count[]
+ * and map/unmap when using the ITSList mechanism.
+ *
+ * The lock order for any sequence involving the ITSList is
+ * vmapp_lock -> vpe_lock -> vmovp_lock.
+ */
+ raw_spinlock_t vmapp_lock;
u32 vlpi_count[GICv4_ITS_LIST_MAX];
};
@@ -58,10 +66,12 @@ struct its_vpe {
bool enabled;
bool group;
} sgi_config[16];
- atomic_t vmapp_count;
};
};
+ /* Track the VPE being mapped */
+ atomic_t vmapp_count;
+
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
@@ -136,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
-int its_unmap_vlpi(int irq);
+void its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
new file mode 100644
index 000000000000..68ddcdb1cec5
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H
+#define __LINUX_IRQCHIP_ARM_GIC_V5_H
+
+#include <linux/iopoll.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+
+#define GICV5_IPIS_PER_CPU MAX_IPI
+
+/*
+ * INTID handling
+ */
+#define GICV5_HWIRQ_ID GENMASK(23, 0)
+#define GICV5_HWIRQ_TYPE GENMASK(31, 29)
+#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0)
+
+#define GICV5_HWIRQ_TYPE_PPI UL(0x1)
+#define GICV5_HWIRQ_TYPE_LPI UL(0x2)
+#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
+
+/*
+ * Tables attributes
+ */
+#define GICV5_NO_READ_ALLOC 0b0
+#define GICV5_READ_ALLOC 0b1
+#define GICV5_NO_WRITE_ALLOC 0b0
+#define GICV5_WRITE_ALLOC 0b1
+
+#define GICV5_NON_CACHE 0b00
+#define GICV5_WB_CACHE 0b01
+#define GICV5_WT_CACHE 0b10
+
+#define GICV5_NON_SHARE 0b00
+#define GICV5_OUTER_SHARE 0b10
+#define GICV5_INNER_SHARE 0b11
+
+/*
+ * IRS registers and tables structures
+ */
+#define GICV5_IRS_IDR1 0x0004
+#define GICV5_IRS_IDR2 0x0008
+#define GICV5_IRS_IDR5 0x0014
+#define GICV5_IRS_IDR6 0x0018
+#define GICV5_IRS_IDR7 0x001c
+#define GICV5_IRS_CR0 0x0080
+#define GICV5_IRS_CR1 0x0084
+#define GICV5_IRS_SYNCR 0x00c0
+#define GICV5_IRS_SYNC_STATUSR 0x00c4
+#define GICV5_IRS_SPI_SELR 0x0108
+#define GICV5_IRS_SPI_CFGR 0x0114
+#define GICV5_IRS_SPI_STATUSR 0x0118
+#define GICV5_IRS_PE_SELR 0x0140
+#define GICV5_IRS_PE_STATUSR 0x0144
+#define GICV5_IRS_PE_CR0 0x0148
+#define GICV5_IRS_IST_BASER 0x0180
+#define GICV5_IRS_IST_CFGR 0x0190
+#define GICV5_IRS_IST_STATUSR 0x0194
+#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
+#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000
+#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001
+#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010
+#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011
+#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100
+
+#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15)
+#define GICV5_IRS_IDR2_ISTMD BIT(14)
+#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11)
+#define GICV5_IRS_IDR2_IST_LEVELS BIT(10)
+#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6)
+#define GICV5_IRS_IDR2_LPI BIT(5)
+#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0)
+
+#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r))
+
+#define GICV5_IRS_CR0_IDLE BIT(1)
+#define GICV5_IRS_CR0_IRSEN BIT(0)
+
+#define GICV5_IRS_CR1_VPED_WA BIT(15)
+#define GICV5_IRS_CR1_VPED_RA BIT(14)
+#define GICV5_IRS_CR1_VMD_WA BIT(13)
+#define GICV5_IRS_CR1_VMD_RA BIT(12)
+#define GICV5_IRS_CR1_VPET_WA BIT(11)
+#define GICV5_IRS_CR1_VPET_RA BIT(10)
+#define GICV5_IRS_CR1_VMT_WA BIT(9)
+#define GICV5_IRS_CR1_VMT_RA BIT(8)
+#define GICV5_IRS_CR1_IST_WA BIT(7)
+#define GICV5_IRS_CR1_IST_RA BIT(6)
+#define GICV5_IRS_CR1_IC GENMASK(5, 4)
+#define GICV5_IRS_CR1_OC GENMASK(3, 2)
+#define GICV5_IRS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_IRS_SYNCR_SYNC BIT(31)
+
+#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_STATUSR_V BIT(1)
+#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0)
+
+#define GICV5_IRS_SPI_CFGR_TM BIT(0)
+
+#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0)
+
+#define GICV5_IRS_PE_STATUSR_V BIT(1)
+#define GICV5_IRS_PE_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_PE_CR0_DPS BIT(0)
+
+#define GICV5_IRS_IST_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16)
+#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7)
+#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5)
+#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0
+#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1
+
+#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00
+#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01
+#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10
+
+#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00
+#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01
+#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10
+
+#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6)
+#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0)
+
+#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0)
+
+#define GICV5_ISTL1E_VALID BIT_ULL(0)
+
+#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12)
+
+/*
+ * ITS registers and tables structures
+ */
+#define GICV5_ITS_IDR1 0x0004
+#define GICV5_ITS_IDR2 0x0008
+#define GICV5_ITS_CR0 0x0080
+#define GICV5_ITS_CR1 0x0084
+#define GICV5_ITS_DT_BASER 0x00c0
+#define GICV5_ITS_DT_CFGR 0x00d0
+#define GICV5_ITS_DIDR 0x0100
+#define GICV5_ITS_EIDR 0x0108
+#define GICV5_ITS_INV_EVENTR 0x010c
+#define GICV5_ITS_INV_DEVICER 0x0110
+#define GICV5_ITS_STATUSR 0x0120
+#define GICV5_ITS_SYNCR 0x0140
+#define GICV5_ITS_SYNC_STATUSR 0x0148
+
+#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8)
+#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7)
+#define GICV5_ITS_IDR1_DT_LEVELS BIT(6)
+#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r))
+
+#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5)
+#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0)
+
+#define GICV5_ITS_CR0_IDLE BIT(1)
+#define GICV5_ITS_CR0_ITSEN BIT(0)
+
+#define GICV5_ITS_CR1_ITT_RA BIT(7)
+#define GICV5_ITS_CR1_DT_RA BIT(6)
+#define GICV5_ITS_CR1_IC GENMASK(5, 4)
+#define GICV5_ITS_CR1_OC GENMASK(3, 2)
+#define GICV5_ITS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16)
+#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6)
+#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3)
+
+#define GICV5_ITS_INV_DEVICER_I BIT(31)
+#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1)
+#define GICV5_ITS_INV_DEVICER_L1 BIT(0)
+
+#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0)
+
+#define GICV5_ITS_INV_EVENTR_I BIT(31)
+#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1)
+#define GICV5_ITS_INV_EVENTR_L1 BIT(0)
+
+#define GICV5_ITS_STATUSR_IDLE BIT(0)
+
+#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63)
+#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32)
+#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_DTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_DTL2E_VALID BIT_ULL(0)
+#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57)
+#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58)
+#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59)
+
+#define GICV5_ITTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0)
+#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28)
+#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30)
+#define GICV5_ITTL2E_VALID BIT_ULL(31)
+#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32)
+
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10
+
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1
+
+#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0)
+#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32)
+
+/*
+ * IWB registers
+ */
+#define GICV5_IWB_IDR0 0x0000
+#define GICV5_IWB_CR0 0x0080
+#define GICV5_IWB_WENABLE_STATUSR 0x00c0
+#define GICV5_IWB_WENABLER 0x2000
+#define GICV5_IWB_WTMR 0x4000
+
+#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11)
+#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0)
+
+#define GICV5_IWB_CR0_IDLE BIT(1)
+#define GICV5_IWB_CR0_IWBEN BIT(0)
+
+#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+
+/*
+ * Global Data structures and functions
+ */
+struct gicv5_chip_data {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ppi_domain;
+ struct irq_domain *spi_domain;
+ struct irq_domain *lpi_domain;
+ struct irq_domain *ipi_domain;
+ u32 global_spi_count;
+ u8 cpuif_pri_bits;
+ u8 cpuif_id_bits;
+ u8 irs_pri_bits;
+ struct {
+ __le64 *l1ist_addr;
+ u32 l2_size;
+ u8 l2_bits;
+ bool l2;
+ } ist;
+};
+
+extern struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+struct gicv5_irs_chip_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ void __iomem *irs_base;
+ u32 flags;
+ u32 spi_min;
+ u32 spi_range;
+ raw_spinlock_t spi_config_lock;
+};
+
+static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask,
+ u32 *val)
+{
+ void __iomem *reg = addr + offset;
+ u32 tmp;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask)
+{
+ void __iomem *reg = addr + offset;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define gicv5_wait_for_op_atomic(base, reg, mask, val) \
+ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val)
+
+#define gicv5_wait_for_op(base, reg, mask) \
+ gicv5_wait_for_op_s(base, reg, #reg, mask)
+
+void __init gicv5_init_lpi_domain(void);
+void __init gicv5_free_lpi_domain(void);
+
+int gicv5_irs_of_probe(struct device_node *parent);
+void gicv5_irs_remove(void);
+int gicv5_irs_enable(void);
+void gicv5_irs_its_probe(void);
+int gicv5_irs_register_cpu(int cpuid);
+int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid);
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id);
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
+int gicv5_irs_iste_alloc(u32 lpi);
+void gicv5_irs_syncr(void);
+
+struct gicv5_its_devtab_cfg {
+ union {
+ struct {
+ __le64 *devtab;
+ } linear;
+ struct {
+ __le64 *l1devtab;
+ __le64 **l2ptrs;
+ } l2;
+ };
+ u32 cfgr;
+};
+
+struct gicv5_its_itt_cfg {
+ union {
+ struct {
+ __le64 *itt;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ __le64 *l1itt;
+ __le64 **l2ptrs;
+ unsigned int num_l1_ents;
+ u8 l2sz;
+ } l2;
+ };
+ u8 event_id_bits;
+ bool l2itt;
+};
+
+void gicv5_init_lpis(u32 max);
+void gicv5_deinit_lpis(void);
+
+int gicv5_alloc_lpi(void);
+void gicv5_free_lpi(u32 lpi);
+
+void __init gicv5_its_of_probe(struct device_node *parent);
+#endif
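A sketch of how the polling helpers above are intended to be used; the register and mask come from this header, while the surrounding context is hypothetical:

	/* wait for the IRS to go idle after setting IRSEN; the macro
	 * stringifies GICV5_IRS_CR0 for the ratelimited timeout message */
	ret = gicv5_wait_for_op(irs_base, GICV5_IRS_CR0, GICV5_IRS_CR0_IDLE);
	if (ret)
		return ret;

The _atomic variant is for contexts that cannot sleep and can additionally return the final register value through its *val parameter.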
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 2223f95079ce..d45fa19f9e47 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -86,7 +86,13 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_LRENPIE (1 << 2)
#define GICH_HCR_NPIE (1 << 3)
+#define GICH_HCR_VGrp0EIE (1 << 4)
+#define GICH_HCR_VGrp0DIE (1 << 5)
+#define GICH_HCR_VGrp1EIE (1 << 6)
+#define GICH_HCR_VGrp1DIE (1 << 7)
+#define GICH_HCR_EOICOUNT GENMASK(31, 27)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
index a75b2c7de69d..67d9d960273b 100644
--- a/include/linux/irqchip/arm-vgic-info.h
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -15,6 +15,8 @@ enum gic_type {
GIC_V2,
/* Full GICv3, optionally with v2 compat */
GIC_V3,
+ /* Full GICv5, optionally with v3 compat */
+ GIC_V5,
};
struct gic_kvm_info {
@@ -22,6 +24,8 @@ struct gic_kvm_info {
enum gic_type type;
/* Virtual CPU interface */
struct resource vcpu;
+ /* GICv2 GICC VA */
+ void __iomem *gicc_base;
/* Interrupt number */
unsigned int maint_irq;
/* No interrupt mask, no need to use the above field */
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
deleted file mode 100644
index ea4e087fac98..000000000000
--- a/include/linux/irqchip/irq-davinci-aintc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_AINTC_
-#define _LINUX_IRQ_DAVINCI_AINTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_aintc_config - configuration data for davinci-aintc driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- * @prios: an array of size num_irqs containing priority settings for
- * each interrupt
- */
-struct davinci_aintc_config {
- struct resource reg;
- unsigned int num_irqs;
- u8 *prios;
-};
-
-void davinci_aintc_init(const struct davinci_aintc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h
deleted file mode 100644
index 8d71ed5b5a61..000000000000
--- a/include/linux/irqchip/irq-davinci-cp-intc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_CP_INTC_
-#define _LINUX_IRQ_DAVINCI_CP_INTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_cp_intc_config - configuration data for davinci-cp-intc
- * driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- */
-struct davinci_cp_intc_config {
- struct resource reg;
- unsigned int num_irqs;
-};
-
-int davinci_cp_intc_init(const struct davinci_cp_intc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */
diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..224ac28e88d7
--- /dev/null
+++ b/include/linux/irqchip/irq-msi-lib.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _IRQCHIP_IRQ_MSI_LIB_H
+#define _IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+struct msi_domain_info;
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _IRQCHIP_IRQ_MSI_LIB_H */
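The two helpers above are meant to be wired into an irqchip driver's domain ops and MSI parent ops; a hedged sketch, assuming the usual .select and .init_dev_msi_info hook names (the msi_parent_ops fields are not shown in this diff):

	static const struct irq_domain_ops foo_domain_ops = {
		/* let the library answer bus-token based domain selection */
		.select	= msi_lib_irq_domain_select,
		/* .translate/.alloc/.free elided */
	};

	static const struct msi_parent_ops foo_msi_parent_ops = {
		.init_dev_msi_info = msi_lib_init_dev_msi_info,
	};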
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
deleted file mode 100644
index 2f6ae7551748..000000000000
--- a/include/linux/irqchip/irq-partition-percpu.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-
-#include <linux/fwnode.h>
-#include <linux/cpumask.h>
-#include <linux/irqdomain.h>
-
-struct partition_affinity {
- cpumask_t mask;
- void *partition_id;
-};
-
-struct partition_desc;
-
-#ifdef CONFIG_PARTITION_PERCPU
-int partition_translate_id(struct partition_desc *desc, void *partition_id);
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops);
-struct irq_domain *partition_get_domain(struct partition_desc *dsc);
-#else
-static inline int partition_translate_id(struct partition_desc *desc,
- void *partition_id)
-{
- return -EINVAL;
-}
-
-static inline
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- return NULL;
-}
-
-static inline
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- return NULL;
-}
-#endif
-
-#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */
diff --git a/include/linux/irqchip/irq-renesas-rzv2h.h b/include/linux/irqchip/irq-renesas-rzv2h.h
new file mode 100644
index 000000000000..618a60d2eac0
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzv2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/V2H(P) Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZV2H
+#define __LINUX_IRQ_RENESAS_RZV2H
+
+#include <linux/platform_device.h>
+
+#define RZV2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZV2H_ICU
+void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZV2H */
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
index faf0b800b1b0..7f3ff5c5ea53 100644
--- a/include/linux/irqchip/riscv-imsic.h
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -8,7 +8,8 @@
#include <linux/types.h>
#include <linux/bitops.h>
-#include <asm/csr.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
#define IMSIC_MMIO_PAGE_SHIFT 12
#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT)
@@ -84,4 +85,11 @@ static inline const struct imsic_global_config *imsic_get_global_config(void)
#endif
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_RISCV_IMSIC)
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode);
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev);
+#else
+static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; }
+#endif
+
#endif
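A sketch of the intended ACPI flow, assuming a hypothetical caller: resolve the IMSIC fwnode for the device, then probe the platform part of the irqchip against it.

	static int foo_imsic_acpi_setup(struct device *dev)
	{
		struct fwnode_handle *fwnode = imsic_acpi_get_fwnode(dev);

		if (!fwnode)
			return -ENODEV;	/* stub path without ACPI + IMSIC */

		return imsic_platform_acpi_probe(fwnode);
	}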
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fd091c35d572..37e0b5b5600a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -82,7 +82,6 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 21ecf582a0fe..952d3c8dd6b7 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,30 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * irq_domain - IRQ translation domains
+ * irq_domain - IRQ Translation Domains
*
- * Translation infrastructure between hw and linux irq numbers. This is
- * helpful for interrupt controllers to implement mapping between hardware
- * irq numbers and the Linux irq number space.
- *
- * irq_domains also have hooks for translating device tree or other
- * firmware interrupt representations into a hardware irq number that
- * can be mapped back to a Linux irq number without any extra platform
- * support code.
- *
- * Interrupt controller "domain" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The domain
- * structure is generally created by the PIC code for a given PIC instance
- * (though a domain can cover more than one PIC if they have a flat number
- * model). It's the domain callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures use a fwnode_handle pointer to
- * identify the domain. In some cases, and in order to preserve source
- * code compatibility, this fwnode pointer is "upgraded" to a DT
- * device_node. For those firmware infrastructures that do not provide
- * a unique identifier for an interrupt controller, the irq_domain
- * code offers a fwnode allocator.
+ * See Documentation/core-api/irq/irq-domain.rst for the details.
*/
#ifndef _LINUX_IRQDOMAIN_H
@@ -61,24 +39,57 @@ struct msi_parent_ops;
* pass a device-specific description of an interrupt.
*/
struct irq_fwspec {
- struct fwnode_handle *fwnode;
- int param_count;
- u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
+/**
+ * struct irq_fwspec_info - firmware-provided IRQ information structure
+ *
+ * @flags: Information validity flags
+ * @affinity: Affinity mask for this interrupt
+ *
+ * This structure reports firmware-specific information about an
+ * interrupt. Currently the only information conveyed is the affinity
+ * of a per-CPU interrupt, but the structure is designed to be extended
+ * as required.
+ */
+struct irq_fwspec_info {
+ unsigned long flags;
+ const struct cpumask *affinity;
+};
+
+#define IRQ_FWSPEC_INFO_AFFINITY_VALID BIT(0)
+
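A consumer-side sketch (foo_report_affinity() is illustrative; irq_populate_fwspec_info() is declared later in this patch):

	static void foo_report_affinity(struct irq_fwspec *fwspec)
	{
		struct irq_fwspec_info info = {};

		if (irq_populate_fwspec_info(fwspec, &info))
			return;	/* no firmware-provided information */

		if (info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID)
			pr_info("per-CPU irq, first CPU: %u\n",
				cpumask_first(info.affinity));
	}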
/* Conversion function from of_phandle_args fields to fwspec */
void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
unsigned int count, struct irq_fwspec *fwspec);
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @match: Match an interrupt controller device node to a host, returns
- * 1 on a match
- * @map: Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- * @unmap: Dispose of such a mapping
- * @xlate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a domain, returns
+ * 1 on a match
+ * @select: Match an interrupt controller fw specification. It is more generic
+ * than @match as it receives a complete struct irq_fwspec. Therefore,
+ * @select is preferred if provided. Returns 1 on a match.
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ * @alloc: Allocate @nr_irqs interrupts starting from @virq.
+ * @free: Free @nr_irqs interrupts starting from @virq.
+ * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
+ * reserve the vector. If unset, assign the vector (called from
+ * request_irq()).
+ * @deactivate: Disarm one interrupt (@irqd).
+ * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
+ * linux irq type value (@out_type). This is a generalised @xlate
+ * (over struct irq_fwspec) and is preferred if provided.
+ * @get_fwspec_info:
+ * Given @fwspec, report additional firmware-provided information in
+ * @info. Optional.
+ * @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
@@ -86,29 +97,30 @@ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
* to setup the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token);
- int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
- int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
- void (*unmap)(struct irq_domain *d, unsigned int virq);
- int (*xlate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
- int (*alloc)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg);
- void (*free)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs);
- int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
- void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
- int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+ int (*get_fwspec_info)(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- void (*debug_show)(struct seq_file *m, struct irq_domain *d,
- struct irq_data *irqd, int ind);
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
#endif
};
@@ -131,6 +143,9 @@ struct irq_domain_chip_generic;
* Optional elements:
* @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
* to swap it for the of_node via the irq_domain_get_of_node accessor
+ * @bus_token: @fwnode's device_node may be shared by several irq domains, but
+ * the pair (@fwnode, @bus_token) must be unique within a system.
* @gc: Pointer to a list of generic chips. There is a helper function for
* setting up one or more generic chips for interrupt controllers
* drivers using the generic chip library which uses this pointer.
@@ -141,9 +156,12 @@ struct irq_domain_chip_generic;
* purposes related to the irq domain.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains
* @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
+ * @exit: Function called when the domain is destroyed
*
* Revmap data, used internally by the irq domain code:
- * @revmap_size: Size of the linear map table @revmap[]
+ * @hwirq_max: Upper limit for the HW irq number, mainly to avoid
+ * conflicts/failures with reserved HW irqs. Can be ~0.
+ * @revmap_size: Size of the linear map table @revmap
* @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
* @revmap: Linear table of irq_data pointers
*/
@@ -169,6 +187,7 @@ struct irq_domain {
#ifdef CONFIG_GENERIC_MSI_IRQ
const struct msi_parent_ops *msi_parent_ops;
#endif
+ void (*exit)(struct irq_domain *d);
/* reverse map data. The linear map gets appended to the irq_domain */
irq_hw_number_t hwirq_max;
@@ -182,7 +201,7 @@ enum {
/* Irq domain is hierarchical */
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
- /* Irq domain name was allocated in __irq_domain_add() */
+ /* Irq domain name was allocated internally */
IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
@@ -208,6 +227,15 @@ enum {
/* Irq domain is a MSI device domain */
IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
+ /* Irq domain must destroy generic chips when removed */
+ IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
+
+ /* MSI address/data pair is immutable across irq_set_affinity() */
+ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+
+ /* IRQ domain requires parent fwnode matching */
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@@ -221,8 +249,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
-static inline void irq_domain_set_pm_device(struct irq_domain *d,
- struct device *dev)
+static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev)
{
if (d)
d->pm_dev = dev;
@@ -238,14 +265,12 @@ enum {
IRQCHIP_FWNODE_NAMED_ID,
};
-static inline
-struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}
-static inline
-struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
NULL);
@@ -257,53 +282,87 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
- irq_hw_number_t hwirq_max, int direct_max,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
-extern void irq_set_default_host(struct irq_domain *host);
-extern struct irq_domain *irq_get_default_host(void);
-extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node,
- const struct irq_affinity_desc *affinity);
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
+DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
+
+struct irq_domain_chip_generic_info;
+
+/**
+ * struct irq_domain_info - Domain information structure
+ * @fwnode: firmware node for the interrupt controller
+ * @domain_flags: Additional flags to add to the domain flags
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps; use ~0 for no limit and
+ * 0 for no direct mapping
+ * @hwirq_base: The first hardware interrupt number (legacy domains only)
+ * @virq_base: The first Linux interrupt number for legacy domains to
+ * immediately associate the interrupts after domain creation
+ * @bus_token: Domain bus token
+ * @name_suffix: Optional name suffix to avoid collisions when multiple
+ * domains are added using the same fwnode
+ * @ops: Domain operation callbacks
+ * @host_data: Controller private data pointer
+ * @dev: Device which creates the domain
+ * @dgc_info: Generic chip information structure pointer used to
+ * create generic chips for the domain if not NULL.
+ * @init: Function called when the domain is created.
+ * Allows additional domain initialisation.
+ * @exit: Function called when the domain is destroyed.
+ * Allows additional cleanup operations.
+ */
+struct irq_domain_info {
+ struct fwnode_handle *fwnode;
+ unsigned int domain_flags;
+ unsigned int size;
+ irq_hw_number_t hwirq_max;
+ int direct_max;
+ unsigned int hwirq_base;
+ unsigned int virq_base;
+ enum irq_domain_bus_token bus_token;
+ const char *name_suffix;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ struct device *dev;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /**
+ * @parent: Pointer to the parent irq domain used in a hierarchy domain
+ */
+ struct irq_domain *parent;
+#endif
+ struct irq_domain_chip_generic_info *dgc_info;
+ int (*init)(struct irq_domain *d);
+ void (*exit)(struct irq_domain *d);
+};
+
+struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
+struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
+ const struct irq_domain_info *info);
+
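To illustrate the new entry point, a minimal sketch of a driver creating a linear domain via irq_domain_instantiate(), mirroring the inline wrappers further down (the foo_* names and the size of 32 are illustrative):

	static struct irq_domain *foo_create_domain(struct fwnode_handle *fwnode,
						    const struct irq_domain_ops *ops,
						    void *priv)
	{
		const struct irq_domain_info info = {
			.fwnode		= fwnode,
			.size		= 32,	/* linear map for 32 interrupts */
			.hwirq_max	= 32,
			.ops		= ops,
			.host_data	= priv,
		};
		struct irq_domain *d = irq_domain_instantiate(&info);

		/* irq_domain_instantiate() returns ERR_PTR() on failure */
		return IS_ERR(d) ? NULL : d;
	}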
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq, irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+void irq_set_default_domain(struct irq_domain *domain);
+struct irq_domain *irq_get_default_domain(void);
+int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
+ const struct irq_affinity_desc *affinity);
extern const struct fwnode_operations irqchip_fwnode_ops;
-static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
+static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
{
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
-extern void irq_domain_update_bus_token(struct irq_domain *domain,
- enum irq_domain_bus_token bus_token);
+void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token);
-static inline
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_fwspec fwspec = {
.fwnode = fwnode,
@@ -315,7 +374,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+ return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
}
static inline struct irq_domain *irq_find_host(struct device_node *node)
@@ -329,88 +388,107 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
-static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
-}
-
-/**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
- */
-static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
-}
-
#ifdef CONFIG_IRQ_DOMAIN_NOMAP
-static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
- unsigned int max_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
+static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = max_irq,
+ .direct_max = max_irq,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+unsigned int irq_create_direct_mapping(struct irq_domain *domain);
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
-}
-
+/**
+ * irq_domain_create_linear - Allocate and register a linear revmap irq_domain.
+ * @fwnode: pointer to interrupt controller's FW node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Returns: Newly created irq_domain
+ */
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = ~0,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
-extern void irq_domain_remove(struct irq_domain *host);
+void irq_domain_remove(struct irq_domain *domain);
-extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq);
-extern void irq_domain_associate_many(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
+int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq);
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
-extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
- irq_hw_number_t hwirq,
- const struct irq_affinity_desc *affinity);
-extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
-extern void irq_dispose_mapping(unsigned int virq);
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
+void irq_dispose_mapping(unsigned int virq);
-static inline unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Only one mapping per hardware interrupt is permitted.
+ *
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ *
+ * Returns: Linux irq number or 0 on error
+ */
+static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq)
{
- return irq_create_mapping_affinity(host, hwirq, NULL);
+ return irq_create_mapping_affinity(domain, hwirq, NULL);
}
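Typical caller-side usage, sketched (foo_map_one() is illustrative; the trigger type is only an example):

	static int foo_map_one(struct irq_domain *domain, irq_hw_number_t hwirq)
	{
		unsigned int virq = irq_create_mapping(domain, hwirq);

		if (!virq)
			return -ENOMEM;	/* 0 means the mapping failed */

		/* Optionally set the trigger on the returned Linux irq */
		return irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
	}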
-extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq,
- unsigned int *irq);
+struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
+/**
+ * irq_resolve_mapping - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Interrupt descriptor
+ */
static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
@@ -419,8 +497,10 @@ static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
/**
* irq_find_mapping() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Linux irq number or 0 if not found
*/
static inline unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
@@ -433,108 +513,117 @@ static inline unsigned int irq_find_mapping(struct irq_domain *domain,
return 0;
}
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
-{
- return irq_find_mapping(domain, hwirq);
-}
-
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-
-int irq_domain_translate_twocell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
-
-int irq_domain_translate_onecell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
-extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
- unsigned int virq);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq);
+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq,
+ const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
- unsigned int flags, unsigned int size,
- struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops, void *host_data);
-
-static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct device_node *node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_hierarchy(parent, flags, size,
- of_node_to_fwnode(node),
- ops, host_data);
-}
-
-extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
- unsigned int nr_irqs, int node, void *arg,
- bool realloc,
- const struct irq_affinity_desc *affinity);
-extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
-extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
-
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
-{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
- NULL);
-}
-
-extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
- unsigned int virq,
- irq_hw_number_t hwirq,
- const struct irq_chip *chip,
- void *chip_data);
-extern void irq_domain_free_irqs_common(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs);
-extern void irq_domain_free_irqs_top(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs);
-
-extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
-extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
-
-extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-
-extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs);
-
-extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
- unsigned int virq);
+/**
+ * irq_domain_create_hierarchy - Add an irq domain into the hierarchy
+ * @parent: Parent irq domain to associate with the new domain
+ * @flags: Irq domain flags associated to the domain
+ * @size: Size of the domain. See below
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * If @size is 0 a tree domain is created, otherwise a linear domain.
+ *
+ * If successful the parent is associated to the new domain and the
+ * domain flags are set.
+ *
+ * Returns: A pointer to IRQ domain, or %NULL on failure.
+ */
+static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size ? : ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ .domain_flags = flags,
+ .parent = parent,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
+}
+
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs,
+ int node, void *arg, bool realloc,
+ const struct irq_affinity_desc *affinity);
+void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
+void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+/**
+ * irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ *
+ * See the documentation of __irq_domain_alloc_irqs().
+ */
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
+}
+
+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, const struct irq_chip *chip,
+ void *chip_data);
+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
+
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs);
+
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
+
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -543,8 +632,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
static inline bool irq_domain_is_ipi(struct irq_domain *domain)
{
- return domain->flags &
- (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+ return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
}
static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
@@ -572,15 +660,18 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
}
+static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
return -1;
}
-static inline void irq_domain_free_irqs(unsigned int virq,
- unsigned int nr_irqs) { }
+static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -617,11 +708,14 @@ static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
return false;
}
+static inline int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#ifdef CONFIG_GENERIC_MSI_IRQ
-int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
- unsigned int type);
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type);
void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
#else
static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
@@ -636,10 +730,44 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .hwirq_max = ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline struct irq_domain *irq_find_matching_fwnode(
- struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
return NULL;
}
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3f003d5fde53..57b074e0cfbb 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -18,6 +18,8 @@
#include <asm/irqflags.h>
#include <asm/percpu.h>
+struct task_struct;
+
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
@@ -25,12 +27,16 @@
extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
+ extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
+ static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle) {}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 3496baa0b07f..e97206c721a0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -5,30 +5,36 @@
#include <uapi/linux/irqnr.h>
-extern int nr_irqs;
+unsigned int irq_get_nr_irqs(void) __pure;
+unsigned int irq_set_nr_irqs(unsigned int nr);
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
- irq++, desc = irq_to_desc(irq)) \
- if (!desc) \
- ; \
- else
-
+#define for_each_irq_desc(irq, desc) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
- irq--, desc = irq_to_desc(irq)) \
+ for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \
+ irq >= 0; irq--, desc = irq_to_desc(irq)) \
if (!desc) \
; \
else
-# define for_each_active_irq(irq) \
- for (irq = irq_get_next_irq(0); irq < nr_irqs; \
- irq = irq_get_next_irq(irq + 1))
+#define for_each_active_irq(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \
+ irq = irq_get_next_irq(irq + 1))
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0; irq < __nr_irqs__; irq++)
#endif
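The rewritten macros rely on a small C idiom: an outer for loop that executes exactly once and only exists to give the inner loop a private, once-evaluated snapshot of the bound. The same trick in isolation (illustrative, not kernel code):

	#define for_each_below(i, bound_expr)				\
		for (unsigned int __b = (bound_expr); __b; __b = 0)	\
			for ((i) = 0; (i) < __b; (i)++)

Because the snapshot lives in the macro's own scope, callers no longer need direct access to the nr_irqs variable, which is what allows it to be hidden behind irq_get_nr_irqs().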
diff --git a/include/linux/ism.h b/include/linux/ism.h
index 5428edd90982..b7feb4dcd5a8 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -11,37 +11,25 @@
#include <linux/workqueue.h>
-struct ism_dmb {
- u64 dmb_tok;
- u64 rgid;
- u32 dmb_len;
- u32 sba_idx;
- u32 vlan_valid;
- u32 vlan_id;
- void *cpu_addr;
- dma_addr_t dma_addr;
-};
-
/* Unless we gain unexpected popularity, this limit should hold for a while */
#define MAX_CLIENTS 8
#define ISM_NR_DMBS 1920
struct ism_dev {
spinlock_t lock; /* protects the ism device */
+ spinlock_t cmd_lock; /* serializes cmds */
struct list_head list;
+ struct dibs_dev *dibs;
struct pci_dev *pdev;
struct ism_sba *sba;
dma_addr_t sba_dma_addr;
DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
- u8 *sba_client_arr; /* entries are indices into 'clients' array */
void *priv[MAX_CLIENTS];
struct ism_eq *ieq;
dma_addr_t ieq_dma_addr;
- struct device dev;
- u64 local_gid;
int ieq_idx;
struct ism_client *subs[MAX_CLIENTS];
@@ -57,14 +45,7 @@ struct ism_event {
struct ism_client {
const char *name;
- void (*add)(struct ism_dev *dev);
- void (*remove)(struct ism_dev *dev);
void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
- /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
- * via ism_move_data(). Callback function must handle all active bits
- * indicated by dmbemask.
- */
- void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
/* Private area - don't touch! */
u8 id;
};
@@ -81,12 +62,6 @@ static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
dev->priv[client->id] = priv;
}
-int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
- struct ism_client *client);
-int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
-int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
- unsigned int offset, void *data, unsigned int size);
-
const struct smcd_ops *ism_get_smcd_ops(void);
#endif /* _ISM_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ab04c1c27fae..f5eaf76198f3 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
#include <linux/blkdev.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#endif
#define journal_oom_retry 1
@@ -459,7 +459,6 @@ struct jbd2_revoke_table_s;
* @h_ref: Reference count on this handle.
* @h_err: Field for caller's use to track errors through large fs operations.
* @h_sync: Flag for sync-on-close.
- * @h_jdata: Flag to force data journaling.
* @h_reserved: Flag for handle for reserved credits.
* @h_aborted: Flag indicating fatal error on handle.
* @h_type: For handle statistics.
@@ -491,7 +490,6 @@ struct jbd2_journal_handle
/* Flags [no locking] */
unsigned int h_sync: 1;
- unsigned int h_jdata: 1;
unsigned int h_reserved: 1;
unsigned int h_aborted: 1;
unsigned int h_type: 8;
@@ -700,12 +698,6 @@ struct transaction_s
/* Disk flush needs to be sent to fs partition [no locking] */
int t_need_data_flush;
-
- /*
- * For use by the filesystem to store fs-specific data
- * structures associated with the transaction
- */
- struct list_head t_private_list;
};
struct transaction_run_stats_s {
@@ -1086,6 +1078,13 @@ struct journal_s
int j_revoke_records_per_block;
/**
+ * @j_transaction_overhead_buffers:
+ *
+ * Number of blocks each transaction needs for its own bookkeeping
+ */
+ int j_transaction_overhead_buffers;
+
+ /**
* @j_commit_interval:
*
* What is the maximum transaction lifetime before we begin a commit?
@@ -1235,13 +1234,6 @@ struct journal_s
void *j_private;
/**
- * @j_chksum_driver:
- *
- * Reference to checksum algorithm driver via cryptoapi.
- */
- struct crypto_shash *j_chksum_driver;
-
- /**
* @j_csum_seed:
*
* Precomputed journal UUID checksum for seeding other checksums.
@@ -1261,6 +1253,12 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+ /**
+ * @jbd2_trans_commit_key:
+ *
+ * "struct lock_class_key" for @j_trans_commit_map
+ */
+ struct lock_class_key jbd2_trans_commit_key;
/**
* @j_fc_cleanup_callback:
@@ -1388,9 +1386,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
#define JBD2_BARRIER 0x020 /* Use IDE barriers */
-#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
#define JBD2_CYCLE_RECORD 0x080 /* Journal cycled record log on
* clean and empty filesystem
* logging area */
@@ -1407,7 +1402,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
*/
/* Filing buffers */
-extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
@@ -1588,6 +1582,11 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
*/
extern struct kmem_cache *jbd2_handle_cache;
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce type safety.
+ */
#define jbd2_alloc_handle(_gfp_flags) \
((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags))
@@ -1602,6 +1601,11 @@ static inline void jbd2_free_handle(handle_t *handle)
*/
extern struct kmem_cache *jbd2_inode_cache;
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce type safety.
+ */
#define jbd2_alloc_inode(_gfp_flags) \
((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags))
@@ -1617,10 +1621,12 @@ extern void jbd2_journal_destroy_revoke_record_cache(void);
extern void jbd2_journal_destroy_revoke_table_cache(void);
extern int __init jbd2_journal_init_revoke_record_cache(void);
extern int __init jbd2_journal_init_revoke_table_cache(void);
+struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size);
+void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
-extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
extern void jbd2_journal_write_revoke_records(transaction_t *transaction,
struct list_head *log_bufs);
@@ -1658,12 +1664,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
-int jbd2_fc_release_bufs(journal_t *journal);
-
-static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
-{
- return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
-}
+void jbd2_fc_release_bufs(journal_t *journal);
/*
* is_journal_abort
@@ -1728,20 +1729,13 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
-extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_folio(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
-static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
-{
- return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j);
-}
-
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
- WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
- journal->j_chksum_driver == NULL);
-
- return journal->j_chksum_driver != NULL;
+ return jbd2_has_feature_csum2(journal) ||
+ jbd2_has_feature_csum3(journal);
}
static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
@@ -1778,28 +1772,9 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-/* JBD uses a CRC32 checksum */
-#define JBD_MAX_CHECKSUM_SIZE 4
-
-static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
- const void *address, unsigned int length)
+static inline u32 jbd2_chksum(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[JBD_MAX_CHECKSUM_SIZE];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
- JBD_MAX_CHECKSUM_SIZE);
-
- desc.shash.tfm = journal->j_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
/* Return most recent uncommitted transaction */
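With the crypto API indirection gone, chaining a checksum over several buffers is plain crc32c() arithmetic; a sketch of the pattern (buffer names and lengths are illustrative):

	static u32 foo_csum_two_bufs(const void *a, unsigned int alen,
				     const void *b, unsigned int blen)
	{
		u32 csum = ~0;

		csum = jbd2_chksum(csum, a, alen);	/* now just crc32c() */
		csum = jbd2_chksum(csum, b, blen);
		return csum;
	}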
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ab7f8c152b89..7c1c1821c694 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -24,14 +24,14 @@
* Jozsef
*/
#include <linux/bitops.h>
-#include <linux/unaligned/packed_struct.h>
+#include <linux/unaligned.h>
/* Best hash sizes are of power of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)
-/* __jhash_mix -- mix 3 32-bit values reversibly. */
+/* __jhash_mix - mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
@@ -60,7 +60,7 @@
/* jhash - hash an arbitrary key
* @k: sequence of bytes as key
* @length: the length of the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* The generic version, hashes an arbitrary sequence of bytes.
* No alignment or length assumptions are made about the input key.
@@ -77,9 +77,9 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* All but the last block: affect some 32 bits of (a,b,c) */
while (length > 12) {
- a += __get_unaligned_cpu32(k);
- b += __get_unaligned_cpu32(k + 4);
- c += __get_unaligned_cpu32(k + 8);
+ a += get_unaligned((u32 *)k);
+ b += get_unaligned((u32 *)(k + 4));
+ c += get_unaligned((u32 *)(k + 8));
__jhash_mix(a, b, c);
length -= 12;
k += 12;
@@ -110,7 +110,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* jhash2 - hash an array of u32's
* @k: the key which must be an array of u32's
* @length: the number of u32's in the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* Returns the hash value of the key.
*/
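Usage is unchanged by this cleanup; for reference, a small sketch (the key layout and seed are illustrative):

	struct foo_key {
		u32 saddr;
		u32 daddr;
		u16 dport;
	} __packed;

	static u32 foo_hash(const struct foo_key *key)
	{
		/* No alignment assumptions: jhash() now reads via get_unaligned() */
		return jhash(key, sizeof(*key), 0x12345678 /* arbitrary seed */);
	}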
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index d9f1435a5a13..fdef2c155c27 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -59,9 +59,9 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-extern int register_refined_jiffies(long clock_tick_rate);
+extern void register_refined_jiffies(long clock_tick_rate);
-/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+/* TICK_USEC is the time between ticks in usec */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
@@ -418,7 +418,7 @@ extern unsigned long preset_lpj;
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * The maximum jiffy value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
* so use the messy SH_DIV macro to do it. Still all constants.
*/
@@ -502,7 +502,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
- * for the details see __msecs_to_jiffies()
+ * for the details see _msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
@@ -526,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
}
}
+/**
+ * secs_to_jiffies - convert seconds to jiffies
+ * @_secs: time in seconds
+ *
+ * Conversion is done by simple multiplication with HZ
+ *
+ * secs_to_jiffies() is defined as a macro rather than a static inline
+ * function so it can be used in static initializers.
+ *
+ * Return: jiffies value
+ */
+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
+
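A sketch of the intended use, including the static-initializer case that motivated making it a macro rather than an inline function (names are illustrative):

	/* Legal in a static initializer, unlike a static inline helper */
	static unsigned long foo_timeout = secs_to_jiffies(5);

	static void foo_arm_timer(struct timer_list *t)
	{
		mod_timer(t, jiffies + secs_to_jiffies(30));
	}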
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
@@ -598,4 +611,16 @@ extern unsigned long nsecs_to_jiffies(u64 n);
#define TIMESTAMP_SIZE 30
+struct ctl_table;
+int proc_dointvec_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
#endif
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f5a2727ca4a9..fdb79dd1ebd8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -75,6 +75,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
extern bool static_key_initialized;
@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
#endif /* CONFIG_JUMP_LABEL */
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
+
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
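The new guard enables scope-based locking in jump-label internals; a hedged sketch of a hypothetical caller (the body is elided, and note that static_key_enable() and friends take the lock themselves, so they must not be called under the guard):

	static void foo_walk_jump_entries(void)
	{
		guard(jump_label_lock)();	/* unlocked automatically at scope exit */

		/* ... inspect jump entries with the mutex held ... */
	}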
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index c3f075e8f60c..d5dd54c53ace 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -55,9 +55,8 @@ static inline void *dereference_symbol_descriptor(void *ptr)
if (is_ksym_addr((unsigned long)ptr))
return ptr;
- preempt_disable();
+ guard(rcu)();
mod = __module_address((unsigned long)ptr);
- preempt_enable();
if (mod)
ptr = dereference_module_function_descriptor(mod, ptr);
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
index 6f612d69ea0c..9eca967d8526 100644
--- a/include/linux/kasan-enabled.h
+++ b/include/linux/kasan-enabled.h
@@ -4,32 +4,46 @@
#include <linux/static_key.h>
-#ifdef CONFIG_KASAN_HW_TAGS
-
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Global runtime flag for KASAN modes that need runtime control.
+ * Used by ARCH_DEFER_KASAN architectures and HW_TAGS mode.
+ */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+/*
+ * Runtime control for shadow memory initialization or HW_TAGS mode.
+ * Uses static key for architectures that need deferred KASAN or HW_TAGS.
+ */
static __always_inline bool kasan_enabled(void)
{
return static_branch_likely(&kasan_flag_enabled);
}
-static inline bool kasan_hw_tags_enabled(void)
+static inline void kasan_enable(void)
{
- return kasan_enabled();
+ static_branch_enable(&kasan_flag_enabled);
}
-
-#else /* CONFIG_KASAN_HW_TAGS */
-
-static inline bool kasan_enabled(void)
+#else
+/* For architectures that can enable KASAN early, use compile-time check. */
+static __always_inline bool kasan_enabled(void)
{
return IS_ENABLED(CONFIG_KASAN);
}
+static inline void kasan_enable(void) {}
+#endif /* CONFIG_ARCH_DEFER_KASAN || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+#else
static inline bool kasan_hw_tags_enabled(void)
{
return false;
}
-
#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* LINUX_KASAN_ENABLED_H */
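A sketch of how an ARCH_DEFER_KASAN architecture is expected to use the new hook (foo_setup_shadow_memory() is a hypothetical arch helper):

	void __init foo_arch_kasan_init(void)
	{
		foo_setup_shadow_memory();	/* map/initialize the shadow */

		/* Flip the static key: kasan_enabled() reports true from now on */
		kasan_enable();
	}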
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 70d6a8f6e25d..f335c1d7b61d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -29,6 +29,9 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
+#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
+
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#include <linux/pgtable.h>
@@ -150,7 +153,7 @@ static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
- * kasan_unpoison_new_object - Repoison a new slab object.
+ * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
* @object: Pointer to the object.
*
@@ -175,13 +178,61 @@ static __always_inline void * __must_check kasan_init_slab_obj(
return (void *)object;
}
-bool __kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip, bool init);
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+ unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+ void *object)
+{
+ if (kasan_enabled())
+ return __kasan_slab_pre_free(s, object, _RET_IP_);
+ return false;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
+ bool still_accessible, bool no_quarantine);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ * @still_accessible: Whether the object contents are still accessible.
+ *
+ * This function informs that a slab object has been freed and is not
+ * supposed to be accessed anymore, except when @still_accessible is set
+ * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
+ * grace period might not have passed yet).
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if KASAN took ownership of the object; false otherwise.
+ */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init)
+ void *object, bool init,
+ bool still_accessible,
+ bool no_quarantine)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, _RET_IP_, init);
+ return __kasan_slab_free(s, object, init, still_accessible,
+ no_quarantine);
return false;
}
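Putting the two hooks together, a sketch of the free-path sequence that the comments above describe (the real callers live in the slab allocator; this is illustrative only):

	static bool foo_free_hook(struct kmem_cache *s, void *object, bool init)
	{
		/* Report a double-free/invalid-free and bail out early */
		if (kasan_slab_pre_free(s, object))
			return false;

		/* Poison (and maybe initialize) the object. If KASAN
		 * quarantines it, KASAN owns the object from here on. */
		if (kasan_slab_free(s, object, init, false, false))
			return false;

		return true;	/* safe for the allocator to free for real */
	}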
@@ -371,7 +422,15 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ bool init, bool still_accessible,
+ bool no_quarantine)
{
return false;
}
@@ -435,7 +494,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
@@ -453,7 +511,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
@@ -489,6 +546,12 @@ void kasan_report_async(void);
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN_GENERIC
+void __init kasan_init_generic(void);
+#else
+static inline void kasan_init_generic(void) { }
+#endif
+
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
@@ -508,10 +571,27 @@ static inline void kasan_init_hw_tags(void) { }
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+ unsigned long size, gfp_t gfp_mask)
+{
+ if (kasan_enabled())
+ return __kasan_populate_vmalloc(addr, size, gfp_mask);
+ return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end);
+ unsigned long free_region_end,
+ unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags)
+{
+ if (kasan_enabled())
+ return __kasan_release_vmalloc(start, end, free_region_start,
+ free_region_end, flags);
+}
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
@@ -519,14 +599,15 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
- unsigned long size)
+ unsigned long size, gfp_t gfp_mask)
{
return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) { }
+ unsigned long free_region_end,
+ unsigned long flags) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
@@ -554,14 +635,15 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
static inline void kasan_populate_early_vm_area_shadow(void *start,
unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
- unsigned long size)
+ unsigned long size, gfp_t gfp_mask)
{
return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) { }
+ unsigned long free_region_end,
+ unsigned long flags) { }
static inline void *kasan_unpoison_vmalloc(const void *start,
unsigned long size,
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 86c0f1d18998..9a2fa013c91d 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -20,19 +20,6 @@ struct kcore_list {
int type;
};
-struct vmcore {
- struct list_head list;
- unsigned long long paddr;
- unsigned long long size;
- loff_t offset;
-};
-
-struct vmcoredd_node {
- struct list_head list; /* List of dumps */
- void *buf; /* Buffer containing device's dump */
- unsigned int size; /* Size of the buffer */
-};
-
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index b851ba415e03..0143358874b0 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -21,6 +21,8 @@ enum kcov_mode {
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
#define KCOV_IN_CTXSW (1 << 30)
@@ -62,13 +64,13 @@ static inline void kcov_remote_start_usb(u64 id)
static inline void kcov_remote_start_usb_softirq(u64 id)
{
- if (in_serving_softirq())
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_start_usb(id);
}
static inline void kcov_remote_stop_softirq(void)
{
- if (in_serving_softirq())
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_stop();
}
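
A usage sketch for a USB completion handler; with the added !in_hardirq() checks, the pair degrades to a no-op when the softirq is executed from hardirq context:

static void example_usb_complete(struct urb *urb)
{
	/* KCOV builds the remote handle from the bus number internally. */
	kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
	/* ... process the completed URB; coverage is attributed to it ... */
	kcov_remote_stop_softirq();
}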
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index f6c2ddb16b95..741c58e86431 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -14,6 +14,7 @@
*/
#include <linux/list.h>
+#include <linux/smp.h>
 /* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
@@ -104,7 +105,7 @@ extern int kdb_initial_cpu;
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
-#define KDB_ENVBUFFULL (-9)
+#define KDB_KMALLOCFAILED (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
@@ -140,9 +141,6 @@ extern const char *kdb_diemsg;
extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
-extern void kdb_save_flags(void);
-extern void kdb_restore_flags(void);
-
#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
@@ -210,11 +208,26 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
/* Dynamic kdb shell command registration */
extern int kdb_register(kdbtab_t *cmd);
extern void kdb_unregister(kdbtab_t *cmd);
+
+/* Return true when KDB has locked printing on this CPU. */
+static inline
+bool kdb_printf_on_this_cpu(void)
+{
+ /*
+ * We can use raw_smp_processor_id() here because the task cannot
+ * be migrated while KDB has locked printing on this CPU.
+ */
+ return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
+}
+
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(kdbtab_t *cmd) { return 0; }
static inline void kdb_unregister(kdbtab_t *cmd) {}
+
+static inline bool kdb_printf_on_this_cpu(void) { return false; }
+
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
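
A sketch of how a console driver might use this check, with emit_locked() and emit_unlocked() as hypothetical output paths:

static void example_console_write(const char *s, unsigned int n)
{
	/* Avoid self-deadlock when KDB is already printing on this CPU. */
	if (kdb_printf_on_this_cpu())
		emit_unlocked(s, n);
	else
		emit_locked(s, n);
}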
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 859f4b0c1b2b..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -10,12 +10,11 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
#define KPF_ARCH_3 42
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index be2e8c0a187e..5b46924fdff5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -33,6 +33,7 @@
#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
+#include <linux/util_macros.h>
#include <linux/wordpart.h>
#include <asm/byteorder.h>
@@ -41,19 +42,6 @@
#define STACK_MAGIC 0xdeadbeef
-/* generic data direction definitions */
-#define READ 0
-#define WRITE 1
-
-#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
-
-#define u64_to_user_ptr(x) ( \
-{ \
- typecheck(u64, (x)); \
- (void __user *)(uintptr_t)(x); \
-} \
-)
-
struct completion;
struct user;
@@ -176,11 +164,23 @@ extern int root_mountflags;
extern bool early_boot_irqs_disabled;
-/*
- * Values used for system_state. Ordering of the states must not be changed
+/**
+ * enum system_states - Values used for system_state.
+ *
+ * @SYSTEM_BOOTING: %0, no init needed
+ * @SYSTEM_SCHEDULING: system is ready for scheduling; OK to use RCU
+ * @SYSTEM_FREEING_INITMEM: system is freeing all of initmem; almost running
+ * @SYSTEM_RUNNING: system is up and running
+ * @SYSTEM_HALT: system entered clean system halt state
+ * @SYSTEM_POWER_OFF: system entered shutdown/clean power off state
+ * @SYSTEM_RESTART: system entered emergency power off or normal restart
+ * @SYSTEM_SUSPEND: system entered suspend or hibernate state
+ *
+ * Note:
+ * Ordering of the states must not be changed
* as code checks for <, <=, >, >= STATE.
*/
-extern enum system_states {
+enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
SYSTEM_FREEING_INITMEM,
@@ -189,7 +189,8 @@ extern enum system_states {
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
-} system_state;
+};
+extern enum system_states system_state;
/*
* General tracing related utility functions - trace_printk(),
@@ -385,9 +386,9 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
index 90451e2e12bd..d613a7b4dd35 100644
--- a/include/linux/kernel_read_file.h
+++ b/include/linux/kernel_read_file.h
@@ -14,6 +14,7 @@
id(KEXEC_INITRAMFS, kexec-initramfs) \
id(POLICY, security-policy) \
id(X509_CERTIFICATE, x509-certificate) \
+ id(MODULE_COMPRESSED, kernel-module-compressed) \
id(MAX_ID, )
#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 9c042c6384bb..b97ce2df376f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -5,7 +5,6 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
-#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 87c79d076d6d..b5a5f32fdfd1 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -147,6 +147,11 @@ enum kernfs_root_flag {
* Support user xattrs to be written to nodes rooted at this root.
*/
KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
+
+ /*
+ * Renames must not change the parent node.
+ */
+ KERNFS_ROOT_INVARIANT_PARENT = 0x0010,
};
/* type-specific structures for kernfs_node union members */
@@ -199,8 +204,8 @@ struct kernfs_node {
* never moved to a different parent, it is safe to access the
* parent directly.
*/
- struct kernfs_node *parent;
- const char *name;
+ struct kernfs_node __rcu *__parent;
+ const char __rcu *name;
struct rb_node rb;
@@ -395,7 +400,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+int kernfs_path_from_node(struct kernfs_node *kn_to, struct kernfs_node *kn_from,
char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
@@ -416,6 +421,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
+unsigned int kernfs_root_flags(struct kernfs_node *kn);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
@@ -514,6 +520,8 @@ kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+static inline unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{ return 0; }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index f0e9f8eda7a3..ff7e231b0485 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -25,6 +25,10 @@
extern note_buf_t __percpu *crash_notes;
+#ifdef CONFIG_CRASH_DUMP
+#include <linux/prandom.h>
+#endif
+
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/compat.h>
@@ -68,8 +72,6 @@ extern note_buf_t __percpu *crash_notes;
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
-
/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
@@ -77,6 +79,12 @@ extern note_buf_t __percpu *crash_notes;
typedef unsigned long kimage_entry_t;
+/*
+ * This is a copy of the UAPI struct kexec_segment and must be identical
+ * to it because it gets copied straight from user space into kernel
+ * memory. Do not modify this structure unless you change the way segments
+ * get ingested from user space.
+ */
struct kexec_segment {
/*
* This pointer can point to user memory if kexec_load() system
@@ -170,7 +178,9 @@ int kexec_image_post_load_cleanup_default(struct kimage *image);
* @buf_align: Minimum alignment needed.
* @buf_min: The buffer can't be placed below this address.
* @buf_max: The buffer can't be placed above this address.
+ * @cma: CMA page if the buffer is backed by CMA.
* @top_down: Allocate from top of memory.
+ * @random: Place the buffer at a random position.
*/
struct kexec_buf {
struct kimage *image;
@@ -181,9 +191,35 @@ struct kexec_buf {
unsigned long buf_align;
unsigned long buf_min;
unsigned long buf_max;
+ struct page *cma;
bool top_down;
+#ifdef CONFIG_CRASH_DUMP
+ bool random;
+#endif
};
+
+#ifdef CONFIG_CRASH_DUMP
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{
+ unsigned short i;
+
+ if (kbuf->random) {
+ get_random_bytes(&i, sizeof(unsigned short));
+ *temp_start = start + (end - start) / USHRT_MAX * i;
+ }
+}
+#else
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{}
+#endif
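
A worked example of the placement arithmetic, with illustrative addresses:

/*
 * With start = 0x10000000 and end = 0x20000000 the window spans
 * 0x10000000 bytes, so the step (end - start) / USHRT_MAX is about
 * 4 KiB. A random i of 0x8000 then lands temp_start roughly halfway
 * through the window, at start + step * 0x8000.
 */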
+
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
void *buf, unsigned int size,
@@ -205,6 +241,15 @@ static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
}
#endif
+#ifndef arch_check_excluded_range
+static inline int arch_check_excluded_range(struct kimage *image,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_KEXEC_SIG
#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
@@ -303,6 +348,7 @@ struct kimage {
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
+ struct page *segment_cma[KEXEC_SEGMENT_MAX];
struct list_head control_pages;
struct list_head dest_pages;
@@ -324,6 +370,7 @@ struct kimage {
*/
unsigned int hotplug_support:1;
#endif
+ unsigned int no_cma:1;
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -348,6 +395,9 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
+
+ /* Force carrying over the DTB from the current boot */
+ bool force_dtb;
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -362,12 +412,24 @@ struct kimage {
phys_addr_t ima_buffer_addr;
size_t ima_buffer_size;
+
+ unsigned long ima_segment_index;
+ bool is_ima_segment_index_set;
#endif
+ struct {
+ struct kexec_segment *scratch;
+ phys_addr_t fdt;
+ } kho;
+
/* Core ELF header buffer */
void *elf_headers;
unsigned long elf_headers_sz;
unsigned long elf_load_addr;
+
+ /* dm crypt keys buffer */
+ unsigned long dm_crypt_keys_addr;
+ unsigned long dm_crypt_keys_sz;
};
/* kexec interface functions */
@@ -401,7 +463,8 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG | \
+ KEXEC_FILE_NO_CMA | KEXEC_FILE_FORCE_DTB)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -467,13 +530,19 @@ extern bool kexec_file_dbg_print;
#define kexec_dprintk(fmt, arg...) \
do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
+struct kimage;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
new file mode 100644
index 000000000000..5f7b9de97e8d
--- /dev/null
+++ b/include/linux/kexec_handover.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_KEXEC_HANDOVER_H
+#define LINUX_KEXEC_HANDOVER_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct kho_scratch {
+ phys_addr_t addr;
+ phys_addr_t size;
+};
+
+struct folio;
+struct page;
+
+#define DECLARE_KHOSER_PTR(name, type) \
+ union { \
+ phys_addr_t phys; \
+ type ptr; \
+ } name
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ typeof(val) v = val; \
+ typecheck(typeof((dest).ptr), v); \
+ (dest).phys = virt_to_phys(v); \
+ })
+#define KHOSER_LOAD_PTR(src) \
+ ({ \
+ typeof(src) s = src; \
+ (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
+ })
+
+struct kho_vmalloc_chunk;
+struct kho_vmalloc {
+ DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+ unsigned int total_pages;
+ unsigned short flags;
+ unsigned short order;
+};
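
A sketch of the intended use of these serialization helpers, with struct example_ser as a hypothetical preserved structure:

struct example_ser {
	DECLARE_KHOSER_PTR(next, struct example_ser *);
};

static void example_save(struct example_ser *ser, struct example_ser *next)
{
	KHOSER_STORE_PTR(ser->next, next);	/* records virt_to_phys(next) */
}

static struct example_ser *example_load(struct example_ser *ser)
{
	return KHOSER_LOAD_PTR(ser->next);	/* NULL when phys == 0 */
}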
+
+#ifdef CONFIG_KEXEC_HANDOVER
+bool kho_is_enabled(void);
+bool is_kho_boot(void);
+
+int kho_preserve_folio(struct folio *folio);
+void kho_unpreserve_folio(struct folio *folio);
+int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
+void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
+void *kho_alloc_preserve(size_t size);
+void kho_unpreserve_free(void *mem);
+void kho_restore_free(void *mem);
+struct folio *kho_restore_folio(phys_addr_t phys);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
+int kho_add_subtree(const char *name, void *fdt);
+void kho_remove_subtree(void *fdt);
+int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
+
+void kho_memory_init(void);
+
+void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys,
+ u64 scratch_len);
+#else
+static inline bool kho_is_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_kho_boot(void)
+{
+ return false;
+}
+
+static inline int kho_preserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_folio(struct folio *folio) { }
+
+static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { }
+
+static inline int kho_preserve_vmalloc(void *ptr,
+ struct kho_vmalloc *preservation)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { }
+
+static inline void *kho_alloc_preserve(size_t size)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void kho_unpreserve_free(void *mem) { }
+static inline void kho_restore_free(void *mem) { }
+
+static inline struct folio *kho_restore_folio(phys_addr_t phys)
+{
+ return NULL;
+}
+
+static inline struct page *kho_restore_pages(phys_addr_t phys,
+ unsigned int nr_pages)
+{
+ return NULL;
+}
+
+static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+ return NULL;
+}
+
+static inline int kho_add_subtree(const char *name, void *fdt)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_remove_subtree(void *fdt) { }
+
+static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_memory_init(void) { }
+
+static inline void kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
+ phys_addr_t scratch_phys, u64 scratch_len)
+{
+}
+#endif /* CONFIG_KEXEC_HANDOVER */
+
+#endif /* LINUX_KEXEC_HANDOVER_H */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 5caf3ce82373..bb97bd3e5af4 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -107,11 +107,14 @@ struct key_type {
*/
int (*match_preparse)(struct key_match_data *match_data);
- /* Free preparsed match data (optional). This should be supplied it
- * ->match_preparse() is supplied. */
+ /*
+ * Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied.
+ */
void (*match_free)(struct key_match_data *match_data);
- /* clear some of the data from a key on revokation (optional)
+ /*
+ * Clear some of the data from a key on revocation (optional).
* - the key's semaphore will be write-locked by the caller
*/
void (*revoke)(struct key *key);
diff --git a/include/linux/key.h b/include/linux/key.h
index 943a432da3ae..81b8f05c6898 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -236,6 +236,7 @@ struct key {
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
+#define KEY_FLAG_USER_ALIVE 10 /* set if final put has not happened on key yet */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -436,9 +437,6 @@ extern key_ref_t keyring_search(key_ref_t keyring,
const char *description,
bool recurse);
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
-
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 88100cc9caba..0ad1ddbb8b99 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -124,7 +124,7 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp
if (!static_branch_likely(&kfence_allocation_key))
return NULL;
#endif
- if (likely(atomic_read(&kfence_allocation_gate)))
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
return NULL;
return __kfence_alloc(s, size, flags);
}
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 564868bdce89..8b81ac74829c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -37,7 +37,6 @@
*/
#include <linux/array_size.h>
-#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -371,6 +370,30 @@ __kfifo_int_must_check_helper( \
)
/**
+ * kfifo_alloc_node - dynamically allocates a new fifo buffer on a NUMA node
+ * @fifo: pointer to the fifo
+ * @size: the number of elements in the fifo, this must be a power of 2
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ * @node: NUMA node to allocate memory on
+ *
+ * This macro dynamically allocates a new fifo buffer with NUMA node awareness.
+ *
+ * The number of elements will be rounded up to a power of 2.
+ * The fifo will be released with kfifo_free().
+ * Return 0 if no error, otherwise an error code.
+ */
+#define kfifo_alloc_node(fifo, size, gfp_mask, node) \
+__kfifo_int_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __is_kfifo_ptr(__tmp) ? \
+ __kfifo_alloc_node(__kfifo, size, sizeof(*__tmp->type), gfp_mask, node) : \
+ -EINVAL; \
+}) \
+)
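
A usage sketch, allocating the fifo on the NUMA node of a hypothetical device:

static DECLARE_KFIFO_PTR(example_fifo, int);

static int example_init(struct device *dev)
{
	/* 256 is already a power of 2; freed later with kfifo_free(). */
	return kfifo_alloc_node(&example_fifo, 256, GFP_KERNEL,
				dev_to_node(dev));
}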
+
+/**
* kfifo_free - frees the fifo
* @fifo: the fifo to be freed
*/
@@ -900,8 +923,14 @@ __kfifo_uint_must_check_helper( \
)
-extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
- size_t esize, gfp_t gfp_mask);
+extern int __kfifo_alloc_node(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask, int node);
+
+static inline int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
+{
+ return __kfifo_alloc_node(fifo, size, esize, gfp_mask, NUMA_NO_NODE);
+}
extern void __kfifo_free(struct __kfifo *fifo);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 76e891ee9e37..5eebbe7a3545 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -257,7 +257,6 @@ extern void kgdb_arch_late(void);
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
- * @enable_nmi: Manage NMI-triggered entry to KGDB
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
@@ -270,8 +269,6 @@ struct kgdb_arch {
void (*disable_hw_break)(struct pt_regs *regs);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
-
- void (*enable_nmi)(bool on);
};
/**
@@ -306,16 +303,6 @@ extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-#ifdef CONFIG_SERIAL_KGDB_NMI
-extern int kgdb_register_nmi_console(void);
-extern int kgdb_unregister_nmi_console(void);
-extern bool kgdb_nmi_poll_knock(void);
-#else
-static inline int kgdb_register_nmi_console(void) { return 0; }
-static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return true; }
-#endif
-
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h
new file mode 100644
index 000000000000..bb099c92e469
--- /dev/null
+++ b/include/linux/kho/abi/luo.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+/**
+ * DOC: Live Update Orchestrator ABI
+ *
+ * This header defines the stable Application Binary Interface used by the
+ * Live Update Orchestrator to pass state from a pre-update kernel to a
+ * post-update kernel. The ABI is built upon the Kexec HandOver framework
+ * and uses a Flattened Device Tree to describe the preserved data.
+ *
+ * This interface is a contract. Any modification to the FDT structure, node
+ * properties, compatible strings, or the layout of the `__packed` serialization
+ * structures defined here constitutes a breaking change. Such changes require
+ * incrementing the version number in the relevant `_COMPATIBLE` string to
+ * prevent a new kernel from misinterpreting data from an old kernel.
+ *
+ * Changes are allowed provided the compatibility version is incremented;
+ * however, backward/forward compatibility is only guaranteed for kernels
+ * supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The entire LUO state is encapsulated within a single KHO entry named "LUO".
+ * This entry contains an FDT with the following layout:
+ *
+ * .. code-block:: none
+ *
+ * / {
+ * compatible = "luo-v1";
+ * liveupdate-number = <...>;
+ *
+ * luo-session {
+ * compatible = "luo-session-v1";
+ * luo-session-header = <phys_addr_of_session_header_ser>;
+ * };
+ * };
+ *
+ * Main LUO Node (/):
+ *
+ * - compatible: "luo-v1"
+ * Identifies the overall LUO ABI version.
+ * - liveupdate-number: u64
+ * A counter tracking the number of successful live updates performed.
+ *
+ * Session Node (luo-session):
+ * This node describes all preserved user-space sessions.
+ *
+ * - compatible: "luo-session-v1"
+ * Identifies the session ABI version.
+ * - luo-session-header: u64
+ * The physical address of a `struct luo_session_header_ser`. This structure
+ * is the header for a contiguous block of memory containing an array of
+ * `struct luo_session_ser`, one for each preserved session.
+ *
+ * Serialization Structures:
+ * The FDT properties point to memory regions containing arrays of simple,
+ * `__packed` structures. These structures contain the actual preserved state.
+ *
+ * - struct luo_session_header_ser:
+ * Header for the session array. Contains the total page count of the
+ * preserved memory block and the number of `struct luo_session_ser`
+ * entries that follow.
+ *
+ * - struct luo_session_ser:
+ * Metadata for a single session, including its name and a physical pointer
+ * to another preserved memory block containing an array of
+ * `struct luo_file_ser` for all files in that session.
+ *
+ * - struct luo_file_ser:
+ * Metadata for a single preserved file. Contains the `compatible` string to
+ * find the correct handler in the new kernel, a user-provided `token` for
+ * identification, and an opaque `data` handle for the handler to use.
+ */
+
+#ifndef _LINUX_KHO_ABI_LUO_H
+#define _LINUX_KHO_ABI_LUO_H
+
+#include <uapi/linux/liveupdate.h>
+
+/*
+ * The LUO FDT hooks all LUO state for sessions, fds, etc.
+ * In the root it also carries "liveupdate-number" 64-bit property that
+ * corresponds to the number of live-updates performed on this machine.
+ */
+#define LUO_FDT_SIZE PAGE_SIZE
+#define LUO_FDT_KHO_ENTRY_NAME "LUO"
+#define LUO_FDT_COMPATIBLE "luo-v1"
+#define LUO_FDT_LIVEUPDATE_NUM "liveupdate-number"
+
+#define LIVEUPDATE_HNDL_COMPAT_LENGTH 48
+
+/**
+ * struct luo_file_ser - Represents a serialized preserved file.
+ * @compatible: File handler compatible string.
+ * @data: Opaque private data handle for the file handler.
+ * @token: User-provided token for this file.
+ *
+ * If this structure is modified, LUO_FDT_SESSION_COMPATIBLE must be updated.
+ */
+struct luo_file_ser {
+ char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+ u64 data;
+ u64 token;
+} __packed;
+
+/**
+ * struct luo_file_set_ser - Represents the serialized metadata for a file set.
+ * @files: The physical address of a contiguous memory block that holds
+ * the serialized state of files (array of luo_file_ser) in this file
+ * set.
+ * @count: The total number of files that were part of this session during
+ * serialization. Used for iteration and validation during
+ * restoration.
+ */
+struct luo_file_set_ser {
+ u64 files;
+ u64 count;
+} __packed;
+
+/*
+ * LUO FDT session node
+ * LUO_FDT_SESSION_HEADER: is a u64 physical address of struct
+ * luo_session_header_ser
+ */
+#define LUO_FDT_SESSION_NODE_NAME "luo-session"
+#define LUO_FDT_SESSION_COMPATIBLE "luo-session-v2"
+#define LUO_FDT_SESSION_HEADER "luo-session-header"
+
+/**
+ * struct luo_session_header_ser - Header for the serialized session data block.
+ * @count: The number of `struct luo_session_ser` entries that immediately
+ * follow this header in the memory block.
+ *
+ * This structure is located at the beginning of a contiguous block of
+ * physical memory preserved across the kexec. It provides the necessary
+ * metadata to interpret the array of session entries that follow.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_header_ser {
+ u64 count;
+} __packed;
+
+/**
+ * struct luo_session_ser - Represents the serialized metadata for a LUO session.
+ * @name: The unique name of the session, provided by the userspace at
+ * the time of session creation.
+ * @file_set_ser: Serialized files belonging to this session.
+ *
+ * This structure is used to package session-specific metadata for transfer
+ * between kernels via Kexec Handover. An array of these structures (one per
+ * session) is created and passed to the new kernel, allowing it to reconstruct
+ * the session context.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_ser {
+ char name[LIVEUPDATE_SESSION_NAME_LENGTH];
+ struct luo_file_set_ser file_set_ser;
+} __packed;
+
+#endif /* _LINUX_KHO_ABI_LUO_H */
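
A sketch of how the next kernel might walk this layout once the "luo-session-header" property has been resolved to a physical address (error handling omitted):

static void example_walk_sessions(phys_addr_t header_phys)
{
	struct luo_session_header_ser *hdr = phys_to_virt(header_phys);
	struct luo_session_ser *ses = (struct luo_session_ser *)(hdr + 1);
	u64 i;

	for (i = 0; i < hdr->count; i++)
		pr_info("session %s: %llu files\n",
			ses[i].name, ses[i].file_set_ser.count);
}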
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
new file mode 100644
index 000000000000..da7d063474a1
--- /dev/null
+++ b/include/linux/kho/abi/memfd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * Copyright (C) 2025 Amazon.com Inc. or its affiliates.
+ * Pratyush Yadav <ptyadav@amazon.de>
+ */
+
+#ifndef _LINUX_KHO_ABI_MEMFD_H
+#define _LINUX_KHO_ABI_MEMFD_H
+
+#include <linux/types.h>
+#include <linux/kexec_handover.h>
+
+/**
+ * DOC: memfd Live Update ABI
+ *
+ * This header defines the ABI for preserving the state of a memfd across a
+ * kexec reboot using the LUO.
+ *
+ * The state is serialized into a packed structure `struct memfd_luo_ser`
+ * which is handed over to the next kernel via the KHO mechanism.
+ *
+ * This interface is a contract. Any modification to the structure layout
+ * constitutes a breaking change. Such changes require incrementing the
+ * version number in the MEMFD_LUO_FH_COMPATIBLE string.
+ */
+
+/**
+ * MEMFD_LUO_FOLIO_DIRTY - The folio is dirty.
+ *
+ * This flag indicates the folio contains data from user. A non-dirty folio is
+ * one that was allocated (say using fallocate(2)) but not written to.
+ */
+#define MEMFD_LUO_FOLIO_DIRTY BIT(0)
+
+/**
+ * MEMFD_LUO_FOLIO_UPTODATE - The folio is up-to-date.
+ *
+ * An up-to-date folio is one that has already been zeroed out. shmem zeroes
+ * folios on first use, so this flag records which folios can skip zeroing
+ * after restore.
+ */
+#define MEMFD_LUO_FOLIO_UPTODATE BIT(1)
+
+/**
+ * struct memfd_luo_folio_ser - Serialized state of a single folio.
+ * @pfn: The page frame number of the folio.
+ * @flags: Flags to describe the state of the folio.
+ * @index: The page offset (pgoff_t) of the folio within the original file.
+ */
+struct memfd_luo_folio_ser {
+ u64 pfn:52;
+ u64 flags:12;
+ u64 index;
+} __packed;
+
+/**
+ * struct memfd_luo_ser - Main serialization structure for a memfd.
+ * @pos: The file's current position (f_pos).
+ * @size: The total size of the file in bytes (i_size).
+ * @nr_folios: Number of folios in the folios array.
+ * @folios: KHO vmalloc descriptor pointing to the array of
+ * struct memfd_luo_folio_ser.
+ */
+struct memfd_luo_ser {
+ u64 pos;
+ u64 size;
+ u64 nr_folios;
+ struct kho_vmalloc folios;
+} __packed;
+
+/* The compatibility string for memfd file handler */
+#define MEMFD_LUO_FH_COMPATIBLE "memfd-v1"
+
+#endif /* _LINUX_KHO_ABI_MEMFD_H */
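
A sketch of how a restore path might interpret one serialized folio entry; the real memfd restore logic is more involved, this only illustrates the flag semantics:

static void example_restore_one(const struct memfd_luo_folio_ser *fser)
{
	struct folio *folio = kho_restore_folio(PFN_PHYS(fser->pfn));

	if (!folio)
		return;
	if (!(fser->flags & MEMFD_LUO_FOLIO_UPTODATE))
		folio_zero_range(folio, 0, folio_size(folio));
	if (fser->flags & MEMFD_LUO_FOLIO_DIRTY)
		folio_mark_dirty(folio);
	/* ... insert the folio at fser->index in the restored file ... */
}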
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index f68865e19b0b..eb1946a70cff 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,8 +2,9 @@
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H
-#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+#include <linux/mm.h>
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
@@ -13,29 +14,21 @@ extern int start_stop_khugepaged(void);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags);
+ vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
-#ifdef CONFIG_SHMEM
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
bool install_pmd);
-#else
-static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr, bool install_pmd)
-{
- return 0;
-}
-#endif
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
__khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
__khugepaged_exit(mm);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -46,7 +39,7 @@ static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
}
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a3cd1bf4680..fbd424b2abb1 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -26,7 +26,9 @@ extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_update_trace(const void *ptr) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_transient_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
@@ -93,6 +95,12 @@ static inline void kmemleak_update_trace(const void *ptr)
static inline void kmemleak_not_leak(const void *ptr)
{
}
+static inline void kmemleak_transient_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
static inline void kmemleak_ignore(const void *ptr)
{
}
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 68f69362d427..9a07c3215389 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -14,10 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sysctl.h>
-#define KMOD_PATH_LEN 256
-
#ifdef CONFIG_MODULES
-extern char modprobe_path[]; /* for sysctl */
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
extern __printf(2, 3)
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index e0c23a32cdf0..7da9fd506b39 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -133,6 +133,7 @@ void kmsan_kfree_large(const void *ptr);
* @prot: page protection flags used for vmap.
* @pages: array of pages.
* @page_shift: page_shift passed to vmap_range_noflush().
+ * @gfp_mask: gfp_mask to use internally.
*
* KMSAN maps shadow and origin pages of @pages into contiguous ranges in
* vmalloc metadata address range. Returns 0 on success, callers must check
@@ -142,7 +143,8 @@ int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
unsigned long end,
pgprot_t prot,
struct page **pages,
- unsigned int page_shift);
+ unsigned int page_shift,
+ gfp_t gfp_mask);
/**
* kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
@@ -182,8 +184,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
/**
* kmsan_handle_dma() - Handle a DMA data transfer.
- * @page: first page of the buffer.
- * @offset: offset of the buffer within the first page.
+ * @phys: physical address of the buffer.
* @size: buffer size.
* @dir: one of possible dma_data_direction values.
*
@@ -192,7 +193,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
* * initializes the buffer, if it is copied from device;
* * does both, if this is a DMA_BIDIRECTIONAL transfer.
*/
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir);
/**
@@ -230,6 +231,67 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out);
*/
void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+/**
+ * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins.
+ * @addr: kernel address.
+ * @is_origin: whether to return origins or shadow.
+ *
+ * Return NULL if metadata cannot be found.
+ */
+void *kmsan_get_metadata(void *addr, bool is_origin);
+
+/**
+ * kmsan_enable_current(): Enable KMSAN for the current task.
+ *
+ * Each kmsan_enable_current() call must be preceded by a
+ * kmsan_disable_current() call. These call pairs may be nested.
+ */
+void kmsan_enable_current(void);
+
+/**
+ * kmsan_disable_current(): Disable KMSAN for the current task.
+ *
+ * Each kmsan_disable_current() call must be followed by a
+ * kmsan_enable_current() call. These call pairs may be nested.
+ */
+void kmsan_disable_current(void);
+
+/**
+ * memset_no_sanitize_memory(): Fill memory without KMSAN instrumentation.
+ * @s: address of kernel memory to fill.
+ * @c: constant byte to fill the memory with.
+ * @n: number of bytes to fill.
+ *
+ * This is like memset(), but without KMSAN instrumentation.
+ */
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return __memset(s, c, n);
+}
+
+extern bool kmsan_enabled;
+extern int panic_on_kmsan;
+
+/*
+ * KMSAN performs a lot of consistency checks that are currently enabled by
+ * default. BUG_ON is normally discouraged in the kernel, unless used for
+ * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
+ * recover if something goes wrong.
+ */
+#define KMSAN_WARN_ON(cond) \
+ ({ \
+ const bool __cond = WARN_ON(cond); \
+ if (unlikely(__cond)) { \
+ WRITE_ONCE(kmsan_enabled, false); \
+ if (panic_on_kmsan) { \
+ /* Can't call panic() here because */ \
+ /* of uaccess checks. */ \
+ BUG(); \
+ } \
+ } \
+ __cond; \
+ })
+
#else
static inline void kmsan_init_shadow(void)
@@ -287,7 +349,7 @@ static inline void kmsan_kfree_large(const void *ptr)
static inline int __must_check kmsan_vmap_pages_range_noflush(
unsigned long start, unsigned long end, pgprot_t prot,
- struct page **pages, unsigned int page_shift)
+ struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
{
return 0;
}
@@ -311,8 +373,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start,
{
}
-static inline void kmsan_handle_dma(struct page *page, size_t offset,
- size_t size, enum dma_data_direction dir)
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
}
@@ -329,6 +391,21 @@ static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
}
+static inline void kmsan_enable_current(void)
+{
+}
+
+static inline void kmsan_disable_current(void)
+{
+}
+
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return memset(s, c, n);
+}
+
+#define KMSAN_WARN_ON WARN_ON
+
#endif
#endif /* _LINUX_KMSAN_H */
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
index 929287981afe..dfc59918b3c0 100644
--- a/include/linux/kmsan_types.h
+++ b/include/linux/kmsan_types.h
@@ -31,7 +31,7 @@ struct kmsan_context_state {
struct kmsan_ctx {
struct kmsan_context_state cstate;
int kmsan_in_runtime;
- bool allow_reporting;
+ unsigned int depth;
};
#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see kmsg_dump_reason.
+ * @description: optional short string, to provide additional information.
+ */
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
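
A sketch of a dumper written against the new detail-carrying callback; the description may be NULL when none was supplied:

static void example_dump(struct kmsg_dumper *dumper,
			 struct kmsg_dump_detail *detail)
{
	pr_info("dump: %s (%s)\n", kmsg_dump_reason_str(detail->reason),
		detail->description ?: "no description");
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
	.max_reason = KMSG_DUMP_PANIC,
};
/* registered once with kmsg_dump_register(&example_dumper) */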
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index be707748e7ce..150fe2ae1b6b 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -52,8 +52,6 @@ const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 5fcbc254d186..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -269,15 +269,6 @@ extern unsigned long __stop_kprobe_blacklist[];
extern struct kretprobe_blackpoint kretprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else /* !CONFIG_KPROBES_SANITY_TEST */
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
-
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..88e82ab1367c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -46,18 +46,18 @@ static inline void kref_get(struct kref *kref)
}
/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
+ * kref_put - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
*
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
+ * Decrement the refcount, and if 0, call @release. The caller may not
+ * pass NULL or kfree() as the release function.
+ *
+ * Return: 1 if this call removed the object, otherwise 0. Beware, if this
+ * function returns 0, another caller may have removed the object by the
+ * time this function returns. Only use the return value to learn whether
+ * this particular call released the object, not whether the object still
+ * exists.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
@@ -68,17 +68,37 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return 0;
}
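
The canonical pattern pairs kref_put() with a release callback that recovers the containing object via container_of(); a sketch:

struct example_obj {
	struct kref ref;
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, ref);

	kfree(obj);
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->ref, example_release);	/* frees on last reference */
}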
+/**
+ * kref_put_mutex - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @mutex: Mutex which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @mutex
+ * held. The @release function will release the mutex.
+ */
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
- struct mutex *lock)
+ struct mutex *mutex)
{
- if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+ if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
return 1;
}
return 0;
}
+/**
+ * kref_put_lock - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @lock: Spinlock which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @lock
+ * held. The @release function will release the lock.
+ */
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
@@ -94,8 +114,6 @@ static inline int kref_put_lock(struct kref *kref,
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
- * Return non-zero if the increment succeeded. Otherwise return 0.
- *
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
@@ -105,6 +123,8 @@ static inline int kref_put_lock(struct kref *kref,
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
+ *
+ * Return: non-zero if the increment succeeded. Otherwise return 0.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 52c63a9c5a9c..c982694c987b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -13,13 +13,12 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/sched/coredump.h>
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
-
-void ksm_add_vma(struct vm_area_struct *vma);
+ unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+ vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
@@ -33,27 +32,43 @@ void __ksm_exit(struct mm_struct *mm);
*/
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
+}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
if (is_ksm_zero_pte(pte)) {
- ksm_zero_pages--;
- mm->ksm_zero_pages--;
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
}
}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
- return __ksm_enter(mm);
+ return atomic_long_read(&mm->ksm_zero_pages);
+}
- return 0;
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ /* Adding mm to ksm is best effort on fork. */
+ if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
+ long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+ mm->ksm_merging_pages = 0;
+ mm->ksm_rmap_items = 0;
+ atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+ __ksm_enter(mm);
+ }
}
static inline int ksm_execve(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
return __ksm_enter(mm);
return 0;
@@ -61,7 +76,7 @@ static inline int ksm_execve(struct mm_struct *mm)
static inline void ksm_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ if (mm_flags_test(MMF_VM_MERGEABLE, mm))
__ksm_exit(mm);
}
@@ -81,14 +96,17 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline void ksm_add_vma(struct vm_area_struct *vma)
+static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vm_flags_t vm_flags)
{
+ return vm_flags;
}
static inline int ksm_disable(struct mm_struct *mm)
@@ -96,9 +114,8 @@ static inline int ksm_disable(struct mm_struct *mm)
return 0;
}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline int ksm_execve(struct mm_struct *mm)
@@ -114,14 +131,15 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}
-static inline void collect_procs_ksm(struct folio *folio, struct page *page,
- struct list_head *to_kill, int force_early)
+static inline void collect_procs_ksm(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
{
}
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
+ unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
diff --git a/include/linux/stackleak.h b/include/linux/kstack_erase.h
index 3be2cb564710..bf3bf1905557 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/kstack_erase.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_STACKLEAK_H
-#define _LINUX_STACKLEAK_H
+#ifndef _LINUX_KSTACK_ERASE_H
+#define _LINUX_KSTACK_ERASE_H
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
@@ -9,10 +9,10 @@
* Check that the poison value points to the unused hole in the
* virtual memory map for your platform.
*/
-#define STACKLEAK_POISON -0xBEEF
-#define STACKLEAK_SEARCH_DEPTH 128
+#define KSTACK_ERASE_POISON -0xBEEF
+#define KSTACK_ERASE_SEARCH_DEPTH 128
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
#include <asm/stacktrace.h>
#include <linux/linkage.h>
@@ -50,7 +50,7 @@ stackleak_task_high_bound(const struct task_struct *tsk)
static __always_inline unsigned long
stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
{
- const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ const unsigned int depth = KSTACK_ERASE_SEARCH_DEPTH / sizeof(unsigned long);
unsigned int poison_count = 0;
unsigned long poison_high = high;
unsigned long sp = high;
@@ -58,7 +58,7 @@ stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
while (sp > low && poison_count < depth) {
sp -= sizeof(unsigned long);
- if (*(unsigned long *)sp == STACKLEAK_POISON) {
+ if (*(unsigned long *)sp == KSTACK_ERASE_POISON) {
poison_count++;
} else {
poison_count = 0;
@@ -72,7 +72,7 @@ stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
static inline void stackleak_task_init(struct task_struct *t)
{
t->lowest_stack = stackleak_task_low_bound(t);
-# ifdef CONFIG_STACKLEAK_METRICS
+# ifdef CONFIG_KSTACK_ERASE_METRICS
t->prev_lowest_stack = t->lowest_stack;
# endif
}
@@ -80,9 +80,9 @@ static inline void stackleak_task_init(struct task_struct *t)
asmlinkage void noinstr stackleak_erase(void);
asmlinkage void noinstr stackleak_erase_on_task_stack(void);
asmlinkage void noinstr stackleak_erase_off_task_stack(void);
-void __no_caller_saved_registers noinstr stackleak_track_stack(void);
+void __no_caller_saved_registers noinstr __sanitizer_cov_stack_depth(void);
-#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
+#else /* !CONFIG_KSTACK_ERASE */
static inline void stackleak_task_init(struct task_struct *t) { }
#endif
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
index 7fcf29a4e0de..6ea897222af1 100644
--- a/include/linux/kstrtox.h
+++ b/include/linux/kstrtox.h
@@ -143,6 +143,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
*/
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern unsigned long simple_strntoul(const char *,char **,unsigned int,size_t);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
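A minimal sketch of the new length-bounded variant, assuming it parses at most the given number of characters (buffer and values are illustrative):

    /* Sketch: parse at most 4 chars of a buffer that is not NUL-terminated. */
    const char buf[5] = { '1', '2', '3', '4', 'X' };	/* illustrative input */
    char *end;
    unsigned long val = simple_strntoul(buf, &end, 10, 4);	/* val == 1234 */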
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index b11f53c1ba2e..8d27403888ce 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -85,6 +85,7 @@ kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
+int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
@@ -186,13 +187,58 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
int kthread_worker_fn(void *worker_ptr);
-__printf(2, 3)
-struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+__printf(3, 4)
+struct kthread_worker *kthread_create_worker_on_node(unsigned int flags,
+ int node,
+ const char namefmt[], ...);
+
+#define kthread_create_worker(flags, namefmt, ...) \
+	kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__)
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...) \
+({ \
+ struct kthread_worker *__kw \
+ = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+ if (!IS_ERR(__kw)) \
+ wake_up_process(__kw->task); \
+ __kw; \
+})
-__printf(3, 4) struct kthread_worker *
+struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
- const char namefmt[], ...);
+ const char namefmt[]);
+
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[])
+{
+ struct kthread_worker *kw;
+
+ kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+ if (!IS_ERR(kw))
+ wake_up_process(kw->task);
+
+ return kw;
+}
bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
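A minimal usage sketch of the reworked worker API (hypothetical caller; my_work_fn and the worker name are assumptions):

    static struct kthread_worker *worker;
    static struct kthread_work my_work;		/* hypothetical work item */

    static void my_work_fn(struct kthread_work *work)
    {
    	/* deferred work runs in the worker's kthread context */
    }

    static int my_init(void)
    {
    	/* kthread_run_worker() creates the worker and wakes its task */
    	worker = kthread_run_worker(0, "my-worker");
    	if (IS_ERR(worker))
    		return PTR_ERR(worker);

    	kthread_init_work(&my_work, my_work_fn);
    	kthread_queue_work(worker, &my_work);
    	return 0;
    }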
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 3a4e723eae0f..383ed9985802 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -222,6 +222,11 @@ static inline ktime_t ns_to_ktime(u64 ns)
return ns;
}
+static inline ktime_t us_to_ktime(u64 us)
+{
+ return us * NSEC_PER_USEC;
+}
+
static inline ktime_t ms_to_ktime(u64 ms)
{
return ms * NSEC_PER_MSEC;
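The new helper slots in beside ns_to_ktime()/ms_to_ktime(); a one-line sketch (value assumed):

    ktime_t timeout = us_to_ktime(1500);	/* 1500 us == 1,500,000 ns */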
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 4862c98d80d3..eb10d87adf7d 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -32,7 +32,7 @@ struct kvm_dirty_ring {
 * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
 * not be built either, so define these nop functions for the arch.
*/
-static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+static inline u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
return 0;
}
@@ -42,16 +42,17 @@ static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
return true;
}
-static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
+static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
int index, u32 size)
{
return 0;
}
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
- struct kvm_dirty_ring *ring)
+ struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
{
- return 0;
+ return -ENOENT;
}
static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
@@ -71,22 +72,14 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
#else /* CONFIG_HAVE_KVM_DIRTY_RING */
-int kvm_cpu_dirty_log_size(void);
+int kvm_cpu_dirty_log_size(struct kvm *kvm);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
-u32 kvm_dirty_ring_get_rsvd_entries(void);
-int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
-
-/*
- * called with kvm->slots_lock held, returns the number of
- * processed pages.
- */
-int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
-
-/*
- * returns =0: successfully pushed
- * <0: unable to push, need to wait
- */
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size);
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset);
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 692c01e41a18..d93f75b05ae2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2,7 +2,7 @@
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
-
+#include <linux/entry-virt.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
@@ -52,9 +52,10 @@
/*
* The bit 16 ~ bit 31 of kvm_userspace_memory_region::flags are internally
* used in kvm, other bits are visible for userspace which are defined in
- * include/linux/kvm_h.
+ * include/uapi/linux/kvm.h.
*/
-#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_GMEM_ONLY (1UL << 17)
/*
* Bit 63 of the memslot generation number is an "update in-progress flag",
@@ -97,6 +98,7 @@
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
/*
 * error pfns indicate that the gfn is in slot but failed to
@@ -153,13 +155,6 @@ static inline bool kvm_is_error_gpa(gpa_t gpa)
return gpa == INVALID_GPA;
}
-#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
-
-static inline bool is_error_page(struct page *page)
-{
- return IS_ERR(page);
-}
-
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
@@ -196,6 +191,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+#define KVM_PIT_IRQ_SOURCE_ID 2
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -211,6 +207,7 @@ struct kvm_io_range {
struct kvm_io_bus {
int dev_count;
int ioeventfd_count;
+ struct rcu_head rcu;
struct kvm_io_range range[];
};
@@ -219,6 +216,7 @@ enum kvm_bus {
KVM_PIO_BUS,
KVM_VIRTIO_CCW_NOTIFY_BUS,
KVM_FAST_MMIO_BUS,
+ KVM_IOCSR_BUS,
KVM_NR_BUSES
};
@@ -260,12 +258,19 @@ union kvm_mmu_notifier_arg {
unsigned long attributes;
};
+enum kvm_gfn_range_filter {
+ KVM_FILTER_SHARED = BIT(0),
+ KVM_FILTER_PRIVATE = BIT(1),
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
union kvm_mmu_notifier_arg arg;
+ enum kvm_gfn_range_filter attr_filter;
bool may_block;
+ bool lockless;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -279,21 +284,19 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
-#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
-
struct kvm_host_map {
/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it). When using the mem= kernel parameter, some
	 * memory can be used as guest memory but is not managed by the host
	 * kernel.
- * If 'pfn' is not managed by the host kernel, this field is
- * initialized to KVM_UNMAPPED_PAGE.
*/
+ struct page *pinned_page;
struct page *page;
void *hva;
kvm_pfn_t pfn;
kvm_pfn_t gfn;
+ bool writable;
};
/*
@@ -342,7 +345,8 @@ struct kvm_vcpu {
#ifndef __KVM_HAVE_ARCH_WQP
struct rcuwait wait;
#endif
- struct pid __rcu *pid;
+ struct pid *pid;
+ rwlock_t pid_lock;
int sigset_active;
sigset_t sigset;
unsigned int halt_poll_ns;
@@ -378,8 +382,10 @@ struct kvm_vcpu {
bool dy_eligible;
} spin_loop;
#endif
+ bool wants_to_run;
bool preempted;
bool ready;
+ bool scheduled_out;
struct kvm_vcpu_arch arch;
struct kvm_vcpu_stat stat;
char stats_id[KVM_STATS_NAME_SIZE];
@@ -483,7 +489,15 @@ static __always_inline void guest_state_enter_irqoff(void)
*/
static __always_inline void guest_context_exit_irqoff(void)
{
- context_tracking_guest_exit();
+ /*
+ * Guest mode is treated as a quiescent state, see
+ * guest_context_enter_irqoff() for more details.
+ */
+ if (!context_tracking_guest_exit()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
}
/*
@@ -590,15 +604,20 @@ struct kvm_memory_slot {
short id;
u16 as_id;
-#ifdef CONFIG_KVM_PRIVATE_MEM
+#ifdef CONFIG_KVM_GUEST_MEMFD
struct {
- struct file __rcu *file;
+ /*
+ * Writes protected by kvm->slots_lock. Acquiring a
+ * reference via kvm_gmem_get_file() is protected by
+ * either kvm->slots_lock or kvm->srcu.
+ */
+ struct file *file;
pgoff_t pgoff;
} gmem;
#endif
};
-static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
+static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
{
return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
}
@@ -702,17 +721,34 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
}
#endif
-/*
- * Arch code must define kvm_arch_has_private_mem if support for private memory
- * is enabled.
- */
-#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+#ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
{
return false;
}
#endif
+#ifdef CONFIG_KVM_GUEST_MEMFD
+bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
+
+static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
+{
+ u64 flags = GUEST_MEMFD_FLAG_MMAP;
+
+ if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
+ flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
+
+ return flags;
+}
+#endif
+
+#ifndef kvm_arch_has_readonly_mem
+static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
+{
+ return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
+}
+#endif
+
struct kvm_memslots {
u64 generation;
atomic_long_t last_used_slot;
@@ -836,7 +872,7 @@ struct kvm {
struct notifier_block pm_notifier;
#endif
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
- /* Protected by slots_locks (for writes) and RCU (for reads) */
+ /* Protected by slots_lock (for writes) and RCU (for reads) */
struct xarray mem_attr_array;
#endif
char stats_id[KVM_STATS_NAME_SIZE];
@@ -942,16 +978,29 @@ static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}
+/*
+ * Get a bus reference under the update-side lock. No long-term SRCU reader
+ * references are permitted, to avoid stale reads vs concurrent IO
+ * registrations.
+ */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
- return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock) ||
- !refcount_read(&kvm->users_count));
+ return rcu_dereference_protected(kvm->buses[idx],
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
int num_vcpus = atomic_read(&kvm->online_vcpus);
+
+ /*
+ * Explicitly verify the target vCPU is online, as the anti-speculation
+ * logic only limits the CPU's ability to speculate, e.g. given a "bad"
+ * index, clamping the index to 0 would return vCPU0, not NULL.
+ */
+ if (i >= num_vcpus)
+ return NULL;
+
i = array_index_nospec(i, num_vcpus);
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
@@ -959,9 +1008,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
- (atomic_read(&kvm->online_vcpus) - 1))
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ if (atomic_read(&kvm->online_vcpus)) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
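A sketch of how the hardened lookup and the guarded iterator compose (hypothetical caller):

    unsigned long idx;
    struct kvm_vcpu *vcpu;

    kvm_for_each_vcpu(idx, vcpu, kvm)	/* skipped entirely if no vCPUs are online */
    	kvm_vcpu_kick(vcpu);

    vcpu = kvm_get_vcpu(kvm, 0);	/* now returns NULL for an out-of-range index */
    if (!vcpu)
    	return;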
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
@@ -982,19 +1032,19 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
void kvm_destroy_vcpus(struct kvm *kvm);
+int kvm_trylock_all_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus(struct kvm *kvm);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
+
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
-void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
-static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
-}
#endif
#ifdef CONFIG_HAVE_KVM_IRQCHIP
@@ -1159,6 +1209,10 @@ static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_
kvm_memslot_iter_is_valid(iter, end); \
kvm_memslot_iter_next(iter))
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -1168,7 +1222,7 @@ static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_
* -- just change its flags
*
* Since flags can be changed by some of these operations, the following
- * differentiation is the best we can do for __kvm_set_memory_region():
+ * differentiation is the best we can do for kvm_set_memory_region():
*/
enum kvm_mr_change {
KVM_MR_CREATE,
@@ -1177,10 +1231,8 @@ enum kvm_mr_change {
KVM_MR_FLAGS_ONLY,
};
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem);
-int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region2 *mem);
+int kvm_set_internal_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1197,33 +1249,70 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- struct page **pages, int nr_pages);
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
+
+struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
+static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_page(kvm, gfn, true);
+}
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
+
+static inline void kvm_release_page_unused(struct page *page)
+{
+ if (!page)
+ return;
+
+ put_page(page);
+}
+
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
- bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool interruptible, bool *async,
- bool write_fault, bool *writable, hva_t *hva);
-
-void kvm_release_pfn_clean(kvm_pfn_t pfn);
-void kvm_release_pfn_dirty(kvm_pfn_t pfn);
-void kvm_set_pfn_dirty(kvm_pfn_t pfn);
-void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
+static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
+ bool unused, bool dirty)
+{
+ lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
+
+ if (!page)
+ return;
+
+ /*
+ * If the page that KVM got from the *primary MMU* is writable, and KVM
+ * installed or reused a SPTE, mark the page/folio dirty. Note, this
+ * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
+ * the GFN is write-protected. Folios can't be safely marked dirty
+ * outside of mmu_lock as doing so could race with writeback on the
+ * folio. As a result, KVM can't mark folios dirty in the fast page
+ * fault handler, and so KVM must (somewhat) speculatively mark the
+ * folio dirty if KVM could locklessly make the SPTE writable.
+ */
+ if (unused)
+ kvm_release_page_unused(page);
+ else if (dirty)
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+}
+
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+ unsigned int foll, bool *writable,
+ struct page **refcounted_page);
+
+static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+ bool write, bool *writable,
+ struct page **refcounted_page)
+{
+ return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
+ write ? FOLL_WRITE : 0, writable, refcounted_page);
+}
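A hedged sketch of the intended fault-path pairing (fault_gfn, fault_is_write and spte_writable are placeholders for arch-specific state):

    struct page *page;
    bool writable;
    kvm_pfn_t pfn;

    pfn = kvm_faultin_pfn(vcpu, fault_gfn, fault_is_write, &writable, &page);
    if (is_error_noslot_pfn(pfn))
    	return -EFAULT;

    /* ... install the SPTE under mmu_lock ... */

    kvm_release_faultin_page(vcpu->kvm, page, false, spte_writable);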
+
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -1287,19 +1376,28 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
})
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
-struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
-struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
+ bool writable);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
+
+static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, true);
+}
+
+static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, false);
+}
+
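Usage mirrors the old kvm_vcpu_map(), with writability now chosen up front; a sketch (gpa, buf and len are placeholders, and len must stay within the page):

    struct kvm_host_map map;

    if (kvm_vcpu_map_readonly(vcpu, gpa, &map))
    	return -EFAULT;
    memcpy(buf, map.hva + offset_in_page(gpa), len);
    kvm_vcpu_unmap(vcpu, &map);	/* dirtying is derived from map.writable */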
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -1424,7 +1522,16 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+#ifndef CONFIG_S390
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
+
+static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ __kvm_vcpu_kick(vcpu, false);
+}
+#endif
+
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
@@ -1450,6 +1557,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
@@ -1494,8 +1603,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
@@ -1514,16 +1621,30 @@ static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
#endif
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
-int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
void kvm_arch_create_vm_debugfs(struct kvm *kvm);
@@ -1587,24 +1708,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
-{
-}
-
-static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
-{
- return false;
-}
-#endif
static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
@@ -1655,13 +1758,9 @@ static inline void kvm_unregister_perf_callbacks(void) {}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
-void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn);
-bool kvm_is_zone_device_page(struct page *page);
-
struct kvm_irq_ack_notifier {
struct hlist_node link;
unsigned gsi;
@@ -1686,8 +1785,6 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-int kvm_request_irq_source_id(struct kvm *kvm);
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
@@ -1955,8 +2052,6 @@ struct _kvm_stats_desc {
HALT_POLL_HIST_COUNT), \
STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
-extern struct dentry *kvm_debugfs_dir;
-
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
const struct _kvm_stats_desc *desc,
void *stats, size_t size_stats,
@@ -2096,6 +2191,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
+int kvm_init_irq_routing(struct kvm *kvm);
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
@@ -2105,6 +2201,11 @@ void kvm_free_irq_routing(struct kvm *kvm);
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+static inline int kvm_init_irq_routing(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
@@ -2160,6 +2261,14 @@ static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
__kvm_make_request(req, vcpu);
}
+#ifndef CONFIG_S390
+static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
+{
+ kvm_make_request(req, vcpu);
+ __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
+}
+#endif
+
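The helper fuses the two calls arch code previously made back to back; a sketch (the request is chosen for illustration):

    /* equivalent to kvm_make_request(KVM_REQ_UNBLOCK, vcpu) followed by a
     * kick, waiting for the ack when the request carries KVM_REQUEST_WAIT */
    kvm_make_request_and_kick(KVM_REQ_UNBLOCK, vcpu);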
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
@@ -2192,6 +2301,7 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+extern bool enable_virt_at_load;
extern bool kvm_rebooting;
#endif
@@ -2290,7 +2400,9 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+struct kvm_kernel_irqfd;
+
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
@@ -2298,10 +2410,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
-bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
- struct kvm_kernel_irq_routing_entry *);
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -2328,18 +2439,6 @@ static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */
-#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
-long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
-#else
-static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl,
- unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
@@ -2351,19 +2450,24 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
-typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
-
-int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
- uintptr_t data, const char *name,
- struct task_struct **thread_ptr);
-
-#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu->stat.signal_exits++;
}
-#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
+{
+ int r = xfer_to_guest_mode_handle_work();
+
+ if (r) {
+ WARN_ON_ONCE(r != -EINTR);
+ kvm_handle_signal_exit(vcpu);
+ }
+ return r;
+}
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
/*
* If more than one page is being (un)accounted, @virt must be the address of
@@ -2403,6 +2507,14 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
}
+static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
+{
+ if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
+ return false;
+
+ return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
+}
+
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{
@@ -2410,7 +2522,7 @@ static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn
}
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
- unsigned long attrs);
+ unsigned long mask, unsigned long attrs);
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
@@ -2418,8 +2530,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
- return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
- kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+ return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
}
#else
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
@@ -2428,17 +2539,69 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
-#ifdef CONFIG_KVM_PRIVATE_MEM
+#ifdef CONFIG_KVM_GUEST_MEMFD
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+ gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+ int *max_order);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
- kvm_pfn_t *pfn, int *max_order)
+ kvm_pfn_t *pfn, struct page **page,
+ int *max_order)
{
KVM_BUG_ON(1, kvm);
return -EIO;
}
-#endif /* CONFIG_KVM_PRIVATE_MEM */
+#endif /* CONFIG_KVM_GUEST_MEMFD */
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
+int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
+#endif
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+/**
+ * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
+ *
+ * @kvm: KVM instance
+ * @gfn: starting GFN to be populated
+ * @src: userspace-provided buffer containing data to copy into GFN range
+ * (passed to @post_populate, and incremented on each iteration
+ * if not NULL)
+ * @npages: number of pages to copy from the userspace buffer
+ * @post_populate: callback to issue for each gmem page that backs the GPA
+ * range
+ * @opaque: opaque data to pass to @post_populate callback
+ *
+ * This is primarily intended for cases where a gmem-backed GPA range needs
+ * to be initialized with userspace-provided data prior to being mapped into
+ * the guest as a private page. This should be called with kvm->slots_lock
+ * held so that caller-enforced invariants regarding the expected memory
+ * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
+ *
+ * Returns the number of pages that were populated.
+ */
+typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ void __user *src, int order, void *opaque);
+
+long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
+ kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
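A hedged sketch of a @post_populate callback (the body stands in for arch-specific preparation, e.g. initial-state measurement or encryption; start_gfn, uaddr and npages are assumptions):

    static int my_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
    			    void __user *src, int order, void *opaque)
    {
    	/* copy/convert the just-populated gmem page here */
    	return 0;
    }

    /* caller, with kvm->slots_lock held */
    long ret = kvm_gmem_populate(kvm, start_gfn, uaddr, npages,
    			     my_post_populate, NULL);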
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+int kvm_enable_virtualization(void);
+void kvm_disable_virtualization(void);
+#else
+static inline int kvm_enable_virtualization(void) { return 0; }
+static inline void kvm_disable_virtualization(void) { }
+#endif
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 8ad43692e3bb..ef8c134ded8a 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -55,10 +55,13 @@ struct kvm_kernel_irqfd {
/* Used for setup/shutdown */
struct eventfd_ctx *eventfd;
struct list_head list;
- poll_table pt;
struct work_struct shutdown;
struct irq_bypass_consumer consumer;
struct irq_bypass_producer *producer;
+
+ struct kvm_vcpu *irq_bypass_vcpu;
+ struct list_head vcpu_list;
+ void *irq_bypass_data;
};
#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 827ecc0b7e10..a568d8e6f4e8 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -3,6 +3,37 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <asm/kvm_types.h>
+
+#ifdef KVM_SUB_MODULES
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
+#define EXPORT_SYMBOL_FOR_KVM(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES))
+#else
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
+/*
+ * Allow architectures to provide a custom EXPORT_SYMBOL_FOR_KVM, but only
+ * if there are no submodules, e.g. to allow suppressing exports when
+ * CONFIG_KVM=m but kvm.ko won't actually be built (for lack of at least one
+ * submodule).
+ */
+#ifndef EXPORT_SYMBOL_FOR_KVM
+#if IS_MODULE(CONFIG_KVM)
+#define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm")
+#else
+#define EXPORT_SYMBOL_FOR_KVM(symbol)
+#endif /* IS_MODULE(CONFIG_KVM) */
+#endif /* EXPORT_SYMBOL_FOR_KVM */
+#endif
+
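In virt/kvm/ this would replace a bare EXPORT_SYMBOL_GPL() to scope the export to KVM modules; a sketch (the helper name is hypothetical):

    void kvm_some_helper(struct kvm *kvm)
    {
    	/* ... */
    }
    EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_some_helper);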
+#ifndef __ASSEMBLER__
+
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
@@ -19,13 +50,6 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <linux/bits.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/spinlock_types.h>
-
-#include <asm/kvm_types.h>
-
/*
* Address types:
*
@@ -116,5 +140,6 @@ struct kvm_vcpu_stat_generic {
};
#define KVM_STATS_NAME_SIZE 48
+#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 68703a51dc53..d4fa03722b72 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -11,8 +11,11 @@
#include <linux/device.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/fb.h>
+
+#define LCD_POWER_ON (0)
+#define LCD_POWER_REDUCED (1) // deprecated; don't use in new code
+#define LCD_POWER_REDUCED_VSYNC_SUSPEND (2) // deprecated; don't use in new code
+#define LCD_POWER_OFF (4)
/* Notes on locking:
*
@@ -30,7 +33,6 @@
*/
struct lcd_device;
-struct fb_info;
struct lcd_properties {
/* The maximum value for contrast (read-only) */
@@ -47,11 +49,23 @@ struct lcd_ops {
int (*get_contrast)(struct lcd_device *);
/* Set LCD panel contrast */
int (*set_contrast)(struct lcd_device *, int contrast);
- /* Set LCD panel mode (resolutions ...) */
- int (*set_mode)(struct lcd_device *, struct fb_videomode *);
- /* Check if given framebuffer device is the one LCD is bound to;
- return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
- int (*check_fb)(struct lcd_device *, struct fb_info *);
+
+ /*
+ * Set LCD panel mode (resolutions ...)
+ */
+ int (*set_mode)(struct lcd_device *lcd, u32 xres, u32 yres);
+
+ /*
+ * Check if the LCD controls the given display device. This
+ * operation is optional and if not implemented it is assumed that
+ * the display is always the one controlled by the LCD.
+ *
+ * RETURNS:
+ *
+	 * If display_device is NULL or display_device matches the device
+	 * controlled by the LCD, return true. Otherwise return false.
+ */
+ bool (*controls_device)(struct lcd_device *lcd, struct device *display_device);
};
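A plausible implementation for a panel tied to one display device (sketch; struct my_lcd and its display_dev member are assumptions):

    static bool my_lcd_controls_device(struct lcd_device *lcd,
    				   struct device *display_device)
    {
    	struct my_lcd *priv = lcd_get_data(lcd);

    	/* NULL means "any display": the LCD always matches */
    	return !display_device || display_device == priv->display_dev;
    }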
struct lcd_device {
@@ -64,8 +78,11 @@ struct lcd_device {
const struct lcd_ops *ops;
/* Serialise access to set_power method */
struct mutex update_lock;
- /* The framebuffer notifier block */
- struct notifier_block fb_notif;
+
+ /**
+ * @entry: List entry of all registered lcd devices
+ */
+ struct list_head entry;
struct device dev;
};
@@ -110,6 +127,19 @@ extern void lcd_device_unregister(struct lcd_device *ld);
extern void devm_lcd_device_unregister(struct device *dev,
struct lcd_device *ld);
+#if IS_REACHABLE(CONFIG_LCD_CLASS_DEVICE)
+void lcd_notify_blank_all(struct device *display_dev, int power);
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height);
+#else
+static inline void lcd_notify_blank_all(struct device *display_dev, int power)
+{}
+
+static inline void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
+{}
+#endif
+
#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev)
static inline void * lcd_get_data(struct lcd_device *ld_dev)
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
new file mode 100644
index 000000000000..cfafe7a5e7b1
--- /dev/null
+++ b/include/linux/leafops.h
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Describes operations that can be performed on software-defined page table
+ * leaf entries. These are abstracted from the hardware page table entries
+ * themselves by the softleaf_t type, see mm_types.h.
+ */
+#ifndef _LINUX_LEAFOPS_H
+#define _LINUX_LEAFOPS_H
+
+#include <linux/mm_types.h>
+#include <linux/swapops.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_MMU
+
+/* Temporary until swp_entry_t eliminated. */
+#define LEAF_TYPE_SHIFT SWP_TYPE_SHIFT
+
+enum softleaf_type {
+ /* Fundamental types. */
+ SOFTLEAF_NONE,
+ SOFTLEAF_SWAP,
+ /* Migration types. */
+ SOFTLEAF_MIGRATION_READ,
+ SOFTLEAF_MIGRATION_READ_EXCLUSIVE,
+ SOFTLEAF_MIGRATION_WRITE,
+ /* Device types. */
+ SOFTLEAF_DEVICE_PRIVATE_READ,
+ SOFTLEAF_DEVICE_PRIVATE_WRITE,
+ SOFTLEAF_DEVICE_EXCLUSIVE,
+	/* H/W poison types. */
+ SOFTLEAF_HWPOISON,
+ /* Marker types. */
+ SOFTLEAF_MARKER,
+};
+
+/**
+ * softleaf_mk_none() - Create an empty ('none') leaf entry.
+ * Returns: empty leaf entry.
+ */
+static inline softleaf_t softleaf_mk_none(void)
+{
+ return ((softleaf_t) { 0 });
+}
+
+/**
+ * softleaf_from_pte() - Obtain a leaf entry from a PTE entry.
+ * @pte: PTE entry.
+ *
+ * If @pte is present or none (therefore not a leaf entry) the function
+ * returns an empty leaf entry. Otherwise, it decodes @pte into a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pte(pte_t pte)
+{
+ softleaf_t arch_entry;
+
+ if (pte_present(pte) || pte_none(pte))
+ return softleaf_mk_none();
+
+ pte = pte_swp_clear_flags(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+/**
+ * softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
+ * @entry: Leaf entry.
+ *
+ * This generates an architecture-specific PTE entry that can be utilised to
+ * encode the metadata the leaf entry encodes.
+ *
+ * Returns: Architecture-specific PTE entry encoding leaf entry.
+ */
+static inline pte_t softleaf_to_pte(softleaf_t entry)
+{
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry_to_pte(entry);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/**
+ * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
+ * @pmd: PMD entry.
+ *
+ * If @pmd is present or none (therefore not a leaf entry) the function
+ * returns an empty leaf entry. Otherwise, it decodes @pmd into a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ softleaf_t arch_entry;
+
+ if (pmd_present(pmd) || pmd_none(pmd))
+ return softleaf_mk_none();
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+#else
+
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ return softleaf_mk_none();
+}
+
+#endif
+
+/**
+ * softleaf_is_none() - Is the leaf entry empty?
+ * @entry: Leaf entry.
+ *
+ * Empty entries are typically the result of a 'none' page table leaf entry
+ * being converted to a leaf entry.
+ *
+ * Returns: true if the entry is empty, false otherwise.
+ */
+static inline bool softleaf_is_none(softleaf_t entry)
+{
+ return entry.val == 0;
+}
+
+/**
+ * softleaf_type() - Identify the type of leaf entry.
+ * @entry: Leaf entry.
+ *
+ * Returns: the leaf entry type associated with @entry.
+ */
+static inline enum softleaf_type softleaf_type(softleaf_t entry)
+{
+ unsigned int type_num;
+
+ if (softleaf_is_none(entry))
+ return SOFTLEAF_NONE;
+
+ type_num = entry.val >> LEAF_TYPE_SHIFT;
+
+ if (type_num < MAX_SWAPFILES)
+ return SOFTLEAF_SWAP;
+
+ switch (type_num) {
+#ifdef CONFIG_MIGRATION
+ case SWP_MIGRATION_READ:
+ return SOFTLEAF_MIGRATION_READ;
+ case SWP_MIGRATION_READ_EXCLUSIVE:
+ return SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+ case SWP_MIGRATION_WRITE:
+ return SOFTLEAF_MIGRATION_WRITE;
+#endif
+#ifdef CONFIG_DEVICE_PRIVATE
+ case SWP_DEVICE_WRITE:
+ return SOFTLEAF_DEVICE_PRIVATE_WRITE;
+ case SWP_DEVICE_READ:
+ return SOFTLEAF_DEVICE_PRIVATE_READ;
+ case SWP_DEVICE_EXCLUSIVE:
+ return SOFTLEAF_DEVICE_EXCLUSIVE;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ case SWP_HWPOISON:
+ return SOFTLEAF_HWPOISON;
+#endif
+ case SWP_PTE_MARKER:
+ return SOFTLEAF_MARKER;
+ }
+
+ /* Unknown entry type. */
+ VM_WARN_ON_ONCE(1);
+ return SOFTLEAF_NONE;
+}
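The intended pattern is one decode followed by one dispatch, replacing chains of is_swap_pte()/is_migration_entry() checks; a sketch (ptep is a placeholder):

    const softleaf_t entry = softleaf_from_pte(ptep_get(ptep));

    switch (softleaf_type(entry)) {
    case SOFTLEAF_SWAP:
    	/* a real swap entry: the offset indexes swap space */
    	break;
    case SOFTLEAF_MIGRATION_READ:
    case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
    case SOFTLEAF_MIGRATION_WRITE:
    	/* wait for migration to finish, then retry the fault */
    	break;
    default:
    	break;
    }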
+
+/**
+ * softleaf_is_swap() - Is this leaf entry a swap entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a swap entry, otherwise false.
+ */
+static inline bool softleaf_is_swap(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_SWAP;
+}
+
+/**
+ * softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a writable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
+}
+
+/**
+ * softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a readable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_read(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
+}
+
+/**
+ * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
+ * readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is an exclusive readable migration entry,
+ * otherwise false.
+ */
+static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_migration() - Is this leaf entry a migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a migration entry, otherwise false.
+ */
+static inline bool softleaf_is_migration(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_MIGRATION_READ:
+ case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
+ case SOFTLEAF_MIGRATION_WRITE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_private_write() - Is this leaf entry a device private
+ * writable entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private writable entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_device_private_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
+}
+
+/**
+ * softleaf_is_device_private() - Is this leaf entry a device private entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private entry, otherwise false.
+ */
+static inline bool softleaf_is_device_private(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_DEVICE_PRIVATE_WRITE:
+ case SOFTLEAF_DEVICE_PRIVATE_READ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
+ */
+static inline bool softleaf_is_device_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_hwpoison() - Is this leaf entry a hardware poison entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a hardware poison entry, otherwise false.
+ */
+static inline bool softleaf_is_hwpoison(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_HWPOISON;
+}
+
+/**
+ * softleaf_is_marker() - Is this leaf entry a marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a marker entry, otherwise false.
+ */
+static inline bool softleaf_is_marker(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MARKER;
+}
+
+/**
+ * softleaf_to_marker() - Obtain marker associated with leaf entry.
+ * @entry: Leaf entry, softleaf_is_marker(@entry) must return true.
+ *
+ * Returns: Marker associated with the leaf entry.
+ */
+static inline pte_marker softleaf_to_marker(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_marker(entry));
+
+ return swp_offset(entry) & PTE_MARKER_MASK;
+}
+
+/**
+ * softleaf_has_pfn() - Does this leaf entry encode a valid PFN number?
+ * @entry: Leaf entry.
+ *
+ * A pfn swap entry is a special type of swap entry that always has a pfn stored
+ * in the swap offset. Such entries can represent unaddressable device memory,
+ * restrict access to a page undergoing migration, or represent a pfn which has
+ * been hwpoisoned and unmapped.
+ *
+ * Returns: true if the leaf entry encodes a PFN, otherwise false.
+ */
+static inline bool softleaf_has_pfn(softleaf_t entry)
+{
+ /* Make sure the swp offset can always store the needed fields. */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ if (softleaf_is_migration(entry))
+ return true;
+ if (softleaf_is_device_private(entry))
+ return true;
+ if (softleaf_is_device_exclusive(entry))
+ return true;
+ if (softleaf_is_hwpoison(entry))
+ return true;
+
+ return false;
+}
+
+/**
+ * softleaf_to_pfn() - Obtain PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: The PFN associated with the leaf entry.
+ */
+static inline unsigned long softleaf_to_pfn(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
+/**
+ * softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct page associated with the leaf entry's PFN.
+ */
+static inline struct page *softleaf_to_page(softleaf_t entry)
+{
+ struct page *page = pfn_to_page(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+	 * corresponding page is locked.
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
+
+ return page;
+}
+
+/**
+ * softleaf_to_folio() - Obtains struct folio for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct folio associated with the leaf entry's PFN.
+ */
+static inline struct folio *softleaf_to_folio(softleaf_t entry)
+{
+ struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked.
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
+ !folio_test_locked(folio));
+
+ return folio;
+}
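For pfn-bearing entries the accessors pair with the type predicates like so (sketch):

    if (softleaf_is_migration(entry)) {
    	struct folio *folio = softleaf_to_folio(entry);

    	/* the folio is guaranteed locked while the migration entry exists */
    	VM_WARN_ON_ONCE(!folio_test_locked(folio));
    }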
+
+/**
+ * softleaf_is_poison_marker() - Is this leaf entry a poison marker?
+ * @entry: Leaf entry.
+ *
+ * The poison marker is set via UFFDIO_POISON. Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a poison marker, otherwise false.
+ */
+static inline bool softleaf_is_poison_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_POISONED;
+}
+
+/**
+ * softleaf_is_guard_marker() - Is this leaf entry a guard region marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a guard marker, otherwise false.
+ */
+static inline bool softleaf_is_guard_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_GUARD;
+}
+
+/**
+ * softleaf_is_uffd_wp_marker() - Is this leaf entry a userfaultfd write protect
+ * marker?
+ * @entry: Leaf entry.
+ *
+ * Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a UFFD WP marker, otherwise false.
+ */
+static inline bool softleaf_is_uffd_wp_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
+}
+
+#ifdef CONFIG_MIGRATION
+
+/**
+ * softleaf_is_migration_young() - Does this migration entry contain an accessed
+ * bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the accessed (or 'young') bit was set on the migrated page
+ * table entry.
+ *
+ * Returns: true if the entry contains an accessed bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+/**
+ * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the dirty bit was set on the migrated page table entry.
+ *
+ * Returns: true if the entry contains a dirty bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
+
+#else /* CONFIG_MIGRATION */
+
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ return false;
+}
+
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * pte_is_marker() - Does the PTE entry encode a marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_marker(pte_t pte)
+{
+ return softleaf_is_marker(softleaf_from_pte(pte));
+}
+
+/**
+ * pte_is_uffd_wp_marker() - Does this PTE entry encode a userfaultfd write
+ * protect marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a UFFD WP marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_uffd_wp_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ return softleaf_is_uffd_wp_marker(entry);
+}
+
+/**
+ * pte_is_uffd_marker() - Does this PTE entry encode a userfaultfd-specific
+ * marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * It's useful to be able to determine which leaf entries encode UFFD-specific
+ * markers so we can handle these correctly.
+ *
+ * Returns: true if this PTE entry is a UFFD-specific marker, otherwise false.
+ */
+static inline bool pte_is_uffd_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ /* UFFD WP, poisoned swap entries are UFFD-handled. */
+ if (softleaf_is_uffd_wp_marker(entry))
+ return true;
+ if (softleaf_is_poison_marker(entry))
+ return true;
+
+ return false;
+}
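In fault paths the PTE-level wrappers keep callers out of softleaf internals; a sketch:

    if (pte_is_uffd_wp_marker(pte)) {
    	/* route the write-protect fault to userspace */
    } else if (pte_is_uffd_marker(pte)) {
    	/* the remaining case is the poison marker: fail with hwpoison */
    }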
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+/**
+ * pmd_is_device_private_entry() - Check if PMD contains a device private swap
+ * entry.
+ * @pmd: The PMD to check.
+ *
+ * Used for zone device private pages that have been swapped out but still
+ * need special handling during various memory management operations.
+ *
+ * Returns: true if the PMD contains a device private entry, otherwise false.
+ */
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return softleaf_is_device_private(softleaf_from_pmd(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return false;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/**
+ * pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
+ * @pmd: PMD entry.
+ *
+ * Returns: true if the PMD encodes a migration entry, otherwise false.
+ */
+static inline bool pmd_is_migration_entry(pmd_t pmd)
+{
+ return softleaf_is_migration(softleaf_from_pmd(pmd));
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * @pmd: PMD entry.
+ *
+ * PMD leaf entries are valid only if they are device private or migration
+ * entries. This function asserts that a PMD leaf entry is valid in this
+ * respect.
+ *
+ * Returns: true if the PMD entry is a valid leaf entry, otherwise false.
+ */
+static inline bool pmd_is_valid_softleaf(pmd_t pmd)
+{
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ /* Only device private, migration entries valid for PMD. */
+ return softleaf_is_device_private(entry) ||
+ softleaf_is_migration(entry);
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _LINUX_LEAFOPS_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 36df927ec4b7..775a96217518 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -45,6 +45,8 @@ struct led_flash_ops {
int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
/* get the flash LED fault */
int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+ /* set flash duration */
+ int (*duration_set)(struct led_classdev_flash *fled_cdev, u32 duration);
};
/*
@@ -75,6 +77,9 @@ struct led_classdev_flash {
/* flash timeout value in microseconds along with its constraints */
struct led_flash_setting timeout;
+	/* flash duration value in microseconds along with its constraints */
+ struct led_flash_setting duration;
+
/* LED Flash class sysfs groups */
const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
@@ -192,7 +197,7 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
* @fled_cdev: the flash LED to set
* @timeout: the flash timeout to set it to
*
- * Set the flash strobe duration.
+ * Set the flash strobe timeout.
*
* Returns: 0 on success or negative error value on failure
*/
@@ -209,4 +214,15 @@ int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
*/
int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
+/**
+ * led_set_flash_duration - set flash LED duration
+ * @fled_cdev: the flash LED to set
+ * @duration: the flash duration to set
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration);
+
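And the matching setter call (sketch; the value is illustrative and subject to the LED's duration constraints):

    /* strobe for 100 us on the next flash */
    ret = led_set_flash_duration(fled_cdev, 100);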
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6300313c46b7..b16b803cc1ac 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -107,6 +107,8 @@ struct led_classdev {
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
#define LED_INIT_DEFAULT_TRIGGER BIT(23)
+#define LED_REJECT_NAME_CONFLICT BIT(24)
+#define LED_MULTI_COLOR BIT(25)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -169,6 +171,7 @@ struct led_classdev {
int new_blink_brightness;
void (*flash_resume)(struct led_classdev *led_cdev);
+ struct workqueue_struct *wq; /* LED workqueue */
struct work_struct set_brightness_work;
int delayed_set_value;
unsigned long delayed_delay_on;
@@ -236,7 +239,7 @@ struct led_classdev {
struct kernfs_node *brightness_hw_changed_kn;
#endif
- /* Ensures consistent access to the LED Flash Class device */
+ /* Ensures consistent access to the LED class device */
struct mutex led_access;
};
@@ -291,7 +294,6 @@ void led_remove_lookup(struct led_lookup_data *led_lookup);
struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
-extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
@@ -374,6 +376,25 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
+ * led_mc_set_brightness - set mc LED color intensity values and brightness
+ * @led_cdev: the LED to set
+ * @intensity_value: array of per-color intensity values to set
+ * @num_colors: number of entries in the intensity_value array
+ * @brightness: the brightness to set the LED to
+ *
+ * Set a multi-color LED's per color intensity values and brightness.
+ * If necessary, this cancels the software blink timer. This function is
+ * guaranteed not to sleep.
+ *
+ * Calling this function on a non-multi-color led_classdev or with the wrong
+ * num_colors value is an error. In this case an error will be logged once
+ * and the call will do nothing.
+ */
+void led_mc_set_brightness(struct led_classdev *led_cdev,
+ unsigned int *intensity_value, unsigned int num_colors,
+ unsigned int brightness);
+
+/**
* led_update_brightness - update LED brightness
* @led_cdev: the LED to query
*
@@ -428,6 +449,16 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
char *led_classdev_name);
/**
+ * led_get_color_name - get string representation of color ID
+ * @color_id: The LED_COLOR_ID_* constant
+ *
+ * Get the string name of a LED_COLOR_ID_* constant.
+ *
+ * Returns: A string constant or NULL on an invalid ID.
+ */
+const char *led_get_color_name(u8 color_id);
+
+/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
* @led_cdev: the LED to query
*
@@ -490,6 +521,9 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness);
void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
@@ -532,6 +566,9 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
unsigned long delay_on,
unsigned long delay_off) {}
@@ -574,6 +611,8 @@ enum led_trigger_netdev_modes {
TRIGGER_NETDEV_FULL_DUPLEX,
TRIGGER_NETDEV_TX,
TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
/* Keep last */
__TRIGGER_NETDEV_MAX,
@@ -600,6 +639,12 @@ static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
#endif
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_BACKLIGHT)
+void ledtrig_backlight_blank(bool blank);
+#else
+static inline void ledtrig_backlight_blank(bool blank) {}
+#endif
+
/*
* Generic LED platform data for describing LED names and default triggers.
*/
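
A hedged caller-side sketch for led_mc_set_brightness(), assuming an RGB device registered with LED_MULTI_COLOR and three color channels:

    /* Sketch only: drive an RGB LED to half-bright amber, atomic-safe. */
    unsigned int intensity[3] = { 255, 191, 0 };    /* R, G, B */

    led_mc_set_brightness(led_cdev, intensity, ARRAY_SIZE(intensity), LED_HALF);
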
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 13fb41d25da6..39534fafa36a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -41,20 +41,90 @@
*/
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-
-#define ata_print_version_once(dev, version) \
-({ \
- static bool __print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- ata_print_version(dev, version); \
- } \
-})
-
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
+/*
+ * Quirk flag bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+
+/*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
+enum {
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
+};
+
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -114,7 +184,6 @@ enum {
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
- ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
@@ -230,9 +299,7 @@ enum {
/* bits 24:31 of host->flags are reserved for LLD specific flags */
- /* various lengths of time */
- ATA_TMOUT_BOOT = 30000, /* heuristic */
- ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ /* Various lengths of time */
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
@@ -338,6 +405,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -362,43 +430,7 @@ enum {
*/
ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
- ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
- ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
- ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
- ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
- ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -471,16 +503,28 @@ enum ata_completion_errors {
};
/*
- * Link power management policy: If you alter this, you also need to
- * alter libata-sata.c (for the ascii descriptions)
+ * Link Power Management (LPM) policies.
+ *
+ * The default LPM policy to use for a device link is defined using these values
+ * with the CONFIG_SATA_MOBILE_LPM_POLICY config option and applied through the
+ * target_lpm_policy field of struct ata_port.
+ *
+ * If you alter this, you also need to alter the policy names used with the
+ * sysfs attribute link_power_management_policy defined in libata-sata.c.
*/
enum ata_lpm_policy {
+ /* Keep firmware settings */
ATA_LPM_UNKNOWN,
+ /* No power savings (maximum performance) */
ATA_LPM_MAX_POWER,
+ /* HIPM (Partial) */
ATA_LPM_MED_POWER,
- ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */
- ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */
- ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */
+ /* HIPM (Partial) and DIPM (Partial and Slumber) */
+ ATA_LPM_MED_POWER_WITH_DIPM,
+ /* HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) */
+ ATA_LPM_MIN_POWER_WITH_PARTIAL,
+ /* HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) */
+ ATA_LPM_MIN_POWER,
};
enum ata_lpm_hints {
@@ -505,6 +549,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
extern struct device_attribute dev_attr_unload_heads;
#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_supported;
extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
@@ -660,10 +705,25 @@ struct ata_cpr_log {
struct ata_cpr cpr[] __counted_by(nr_cpr);
};
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -706,6 +766,9 @@ struct ata_device {
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
} ____cacheline_aligned;
+ /* General Purpose Log Directory log page */
+ u8 gp_log_dir[ATA_SECT_SIZE] ____cacheline_aligned;
+
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
@@ -722,13 +785,16 @@ struct ata_device {
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
- /* Command Duration Limits log support */
- u8 cdl[ATA_LOG_CDL_SIZE];
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -814,7 +880,6 @@ struct ata_port {
/* Flags that change dynamically, protected by ap->lock */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int print_id; /* user visible unique port ID */
- unsigned int local_port_no; /* host local port num */
unsigned int port_no; /* 0 based port no. inside the host */
#ifdef CONFIG_ATA_SFF
@@ -875,9 +940,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 *ncq_sense_buf;
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -887,6 +949,13 @@ struct ata_port {
*/
#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
+struct ata_reset_operations {
+ ata_prereset_fn_t prereset;
+ ata_reset_fn_t softreset;
+ ata_reset_fn_t hardreset;
+ ata_postreset_fn_t postreset;
+};
+
struct ata_port_operations {
/*
* Command execution
@@ -913,14 +982,8 @@ struct ata_port_operations {
void (*freeze)(struct ata_port *ap);
void (*thaw)(struct ata_port *ap);
- ata_prereset_fn_t prereset;
- ata_reset_fn_t softreset;
- ata_reset_fn_t hardreset;
- ata_postreset_fn_t postreset;
- ata_prereset_fn_t pmp_prereset;
- ata_reset_fn_t pmp_softreset;
- ata_reset_fn_t pmp_hardreset;
- ata_postreset_fn_t pmp_postreset;
+ struct ata_reset_operations reset;
+ struct ata_reset_operations pmp_reset;
void (*error_handler)(struct ata_port *ap);
void (*lost_interrupt)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
@@ -1065,11 +1128,9 @@ static inline bool ata_port_is_frozen(const struct ata_port *ap)
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern void ata_host_get(struct ata_host *host);
@@ -1082,7 +1143,6 @@ extern int ata_host_activate(struct ata_host *host, int irq,
const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
-extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
#ifdef CONFIG_COMPAT
@@ -1131,7 +1191,6 @@ extern int ata_xfer_mode2shift(u8 xfer_mode);
extern const char *ata_mode_string(unsigned int xfer_mask);
extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1148,13 +1207,12 @@ extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
-extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
-int ata_scsi_device_configure(struct scsi_device *sdev,
- struct queue_limits *lim);
-extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+extern int ata_scsi_sdev_init(struct scsi_device *sdev);
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern void ata_scsi_sdev_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
@@ -1166,7 +1224,7 @@ extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
bool enable);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
-extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
@@ -1192,12 +1250,13 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
-extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
static inline const unsigned int *
@@ -1219,6 +1278,11 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
return -EOPNOTSUPP;
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
static inline int sata_link_hardreset(struct ata_link *link,
const unsigned int *timing,
unsigned long deadline,
@@ -1235,10 +1299,6 @@ static inline int sata_link_resume(struct ata_link *link,
{
return -EOPNOTSUPP;
}
-static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
-{
- return -EOPNOTSUPP;
-}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
@@ -1246,13 +1306,13 @@ extern int sata_link_debounce(struct ata_link *link,
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
extern void ata_port_probe(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
-int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim,
- struct ata_port *ap);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_port_free(struct ata_port *ap);
+extern int ata_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_tport_delete(struct ata_port *ap);
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1312,7 +1372,7 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
const struct ata_acpi_gtm *gtm);
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
+int ata_acpi_cbl_pata_type(struct ata_port *ap);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
{
@@ -1337,10 +1397,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
return 0;
}
-static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
- const struct ata_acpi_gtm *gtm)
+static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
- return 0;
+ return ATA_CBL_PATA40;
}
#endif
@@ -1359,9 +1418,6 @@ extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset);
extern void ata_std_error_handler(struct ata_port *ap);
extern void ata_std_sched_eh(struct ata_port *ap);
extern void ata_std_end_eh(struct ata_port *ap);
@@ -1408,8 +1464,8 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_alloc = ata_scsi_slave_alloc, \
- .slave_destroy = ata_scsi_slave_destroy, \
+ .sdev_init = ata_scsi_sdev_init, \
+ .sdev_destroy = ata_scsi_sdev_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
.max_sectors = ATA_MAX_SECTORS_LBA48
@@ -1417,14 +1473,14 @@ extern const struct attribute_group *ata_common_sdev_groups[];
#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
.can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
__ATA_BASE_SHT(drv_name), \
.can_queue = drv_qd, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
- .device_configure = ata_scsi_device_configure
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
@@ -1542,7 +1598,17 @@ do { \
#define ata_dev_dbg(dev, fmt, ...) \
ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
-void ata_print_version(const struct device *dev, const char *version);
+#define ata_dev_warn_once(dev, fmt, ...) \
+ pr_warn_once("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
+
+static inline void ata_print_version_once(const struct device *dev,
+ const char *version)
+{
+ dev_dbg_once(dev, "version %s\n", version);
+}
/*
* ata_eh_info helpers
@@ -1574,6 +1640,8 @@ static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
{
ata_port_desc(ap, "irq %d", irq);
ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ ata_port_desc(ap, "ext");
}
static inline bool ata_tag_internal(unsigned int tag)
@@ -1969,7 +2037,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
@@ -2094,6 +2161,12 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
return status;
}
+#else /* CONFIG_ATA_SFF */
+static inline int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ATA_SFF */
#endif /* __LINUX_LIBATA_H__ */
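
To make the horkage-to-quirks rename concrete, a hedged sketch of an LLD setting and honoring the new bits; the ->dev_config() body is invented, not taken from a real driver:

    /* Sketch only: apply and honor quirks from a driver hook. */
    static void foo_dev_config(struct ata_device *dev)
    {
            /* This bridge corrupts NCQ tags; force plain queuing. */
            dev->quirks |= ATA_QUIRK_NONCQ;

            if (dev->quirks & ATA_QUIRK_NOTRIM)
                    ata_dev_warn_once(dev, "TRIM disabled by quirk\n");
    }
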
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
index fc388da6a027..0d68f9d6a6a7 100644
--- a/include/linux/libgcc.h
+++ b/include/linux/libgcc.h
@@ -34,4 +34,8 @@ long long notrace __lshrdi3(long long u, word_type b);
long long notrace __muldi3(long long u, long long v);
word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b);
+#ifdef CONFIG_HAVE_ARCH_LIBGCC_H
+#include <asm/libgcc.h>
+#endif
+
#endif /* __ASM_LIBGCC_H */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index e772aae71843..28f086c4a187 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -6,12 +6,12 @@
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
-#include <linux/kernel.h>
+
+#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uuid.h>
-#include <linux/spinlock.h>
-#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -80,7 +80,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct attribute_group;
struct device_node;
+struct module;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
@@ -121,6 +123,8 @@ struct nd_mapping_desc {
int position;
};
+struct bio;
+struct resource;
struct nd_region;
struct nd_region_desc {
struct resource *res;
@@ -147,8 +151,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
-struct nvdimm_bus;
-
/*
* Note that separate bits for locked + unlocked are defined so that
* 'flags == 0' corresponds to an error / not-supported state.
@@ -238,6 +240,9 @@ struct nvdimm_fw_ops {
int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
};
+struct kobject;
+struct nvdimm_bus;
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5c8865bb59d9..b11660b706c5 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -134,10 +134,6 @@
.size name, .-name
#endif
-/* If symbol 'name' is treated as a subroutine (gets called, and returns)
- * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
- * static analysis tools such as stack depth analyzer.
- */
#ifndef ENDPROC
/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index d94bfd9ac8cc..3b9de09871f6 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -37,8 +37,9 @@ static inline bool linkmode_empty(const unsigned long *src)
return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2)
+static inline bool linkmode_andnot(unsigned long *dst,
+ const unsigned long *src1,
+ const unsigned long *src2)
{
return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
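
With the bool return, the result can gate a warning directly; a hedged sketch with invented mask names:

    /* Sketch only: flag advertised modes the link partner cannot handle. */
    static void foo_check_modes(const unsigned long *advertising,
                                const unsigned long *lp_supported)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(unsupported);

            if (linkmode_andnot(unsupported, advertising, lp_supported))
                    pr_warn("foo: peer lacks some advertised link modes\n");
    }
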
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index d4d5b93efe84..e37699b7e839 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -10,9 +10,6 @@
* Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au>
* Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de>
* Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org>
- *
- * Serial_console ascii image can be any size,
- * but should contain %s to display the version
*/
#include <linux/init.h>
diff --git a/include/linux/list.h b/include/linux/list.h
index 5f4b0a39cf46..00ea8e5fb88b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -20,8 +20,16 @@
* using the generic single-entry routines.
*/
+/**
+ * LIST_HEAD_INIT - initialize a &struct list_head's links to point to itself
+ * @name: name of the list_head
+ */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
+/**
+ * LIST_HEAD - definition of a &struct list_head with initialization values
+ * @name: name of the list_head
+ */
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
@@ -50,9 +58,9 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
* Performs the full set of list corruption checks before __list_add().
* On list corruption reports a warning, and returns false.
*/
-extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
+bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
/*
* Performs list corruption checks before __list_add(). Returns false if a
@@ -93,7 +101,7 @@ static __always_inline bool __list_add_valid(struct list_head *new,
* Performs the full set of list corruption checks before __list_del_entry().
* On list corruption reports a warning, and returns false.
*/
-extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
/*
* Performs list corruption checks before __list_del_entry(). Returns false if a
@@ -637,6 +645,20 @@ static inline void list_splice_tail_init(struct list_head *list,
})
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->prev); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -687,24 +709,6 @@ static inline void list_splice_tail_init(struct list_head *list,
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
- * list_for_each_reverse - iterate backwards over a list
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each_reverse(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
-
-/**
- * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
- * @pos: the &struct list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define list_for_each_rcu(pos, head) \
- for (pos = rcu_dereference((head)->next); \
- !list_is_head(pos, (head)); \
- pos = rcu_dereference(pos->next))
-
-/**
* list_for_each_continue - continue iteration over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
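
A hedged sketch of the new list_last_entry_or_null() accessor; struct foo_req and its node member are invented:

    /* Sketch only: peek at the most recently queued request, if any. */
    static struct foo_req *foo_peek_newest(struct list_head *reqs)
    {
            return list_last_entry_or_null(reqs, struct foo_req, node);
    }
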
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 792b67ceb631..fe739d35a864 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -32,6 +32,8 @@ struct list_lru_one {
struct list_head list;
/* may become negative during memcg reparenting */
long nr_items;
+ /* protects all fields above */
+ spinlock_t lock;
};
struct list_lru_memcg {
@@ -41,31 +43,41 @@ struct list_lru_memcg {
};
struct list_lru_node {
- /* protects all lists on the node, including per cgroup */
- spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
- long nr_items;
+ atomic_long_t nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct list_head list;
int shrinker_id;
bool memcg_aware;
struct xarray xa;
#endif
+#ifdef CONFIG_LOCKDEP
+ struct lock_class_key *key;
+#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key, struct shrinker *shrinker);
+ struct shrinker *shrinker);
#define list_lru_init(lru) \
- __list_lru_init((lru), false, NULL, NULL)
+ __list_lru_init((lru), false, NULL)
#define list_lru_init_memcg(lru, shrinker) \
- __list_lru_init((lru), true, NULL, shrinker)
+ __list_lru_init((lru), true, shrinker)
+
+static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_LOCKDEP
+ lru->key = key;
+#endif
+ return list_lru_init_memcg(lru, shrinker);
+}
int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
gfp_t gfp);
@@ -79,13 +91,24 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
- * nothing. Therefore the caller does not need to keep state about whether or
- * not the element already belongs in the list and is allowed to lazy update
- * it. Note however that this is valid for *a* list, not *this* list. If
- * the caller organize itself in a way that elements can be in more than
- * one type of list, it is up to the caller to fully remove the item from
- * the previous list (with list_lru_del() for instance) before moving it
- * to @lru.
+ * nothing. This means that it is not necessary to keep state about whether or
+ * not the element already belongs in the list. That said, this logic only
+ * works if the item is in *this* list. If the item might be in some other
+ * list, then you cannot rely on this check and you must remove it from the
+ * other list before trying to insert it.
+ *
+ * The lru list consists of many sublists internally; the @nid and @memcg
+ * parameters are used to determine which sublist to insert the item into.
+ * It's important to use the right value of @nid and @memcg when deleting the
+ * item, since it might otherwise get deleted from the wrong sublist.
+ *
+ * This also applies when attempting to insert the item multiple times - if
+ * the item is currently in one sublist and you call list_lru_add() again, you
+ * must pass the right @nid and @memcg parameters so that the same sublist is
+ * used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt).
*
* Return: true if the list was updated, false otherwise
*/
@@ -101,7 +124,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
@@ -113,8 +136,19 @@ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
* @memcg: the cgroup of the sublist to delete the item from.
*
* This function works analogously as list_lru_add() in terms of list
- * manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del().
+ * manipulation.
+ *
+ * The comments in list_lru_add() about an element already being in a list are
+ * also valid for list_lru_del(), that is, you can delete an item that has
+ * already been removed or never been added. However, if the item is in a
+ * list, it must be in *this* list, and you must pass the right value of @nid
+ * and @memcg so that the right sublist is used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries
+ * are automatically moved to the parent memcg. This is done in a race-free
+ * way, so during deletion of a memcg both the old and new memcg will resolve
+ * to the same sublist internally.
*
* Return: true if the list was updated, false otherwise
*/
@@ -130,7 +164,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
* memcg of the sublist is determined by @item list_head. This assumption is
* valid for slab objects LRU such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise.
+ * Return: true if the list was updated, false otherwise.
*/
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
@@ -172,7 +206,7 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
- struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+ struct list_lru_one *list, void *cb_arg);
/**
* list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
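
A hedged sketch of the reworked init API and the nid/memcg contract documented above; all foo_* names and the on_lru field are invented:

    /* Sketch only: per-memcg LRU with an explicit lockdep class. */
    static struct lock_class_key foo_lru_key;
    static struct list_lru foo_lru;

    static int foo_lru_setup(struct shrinker *shrinker)
    {
            return list_lru_init_memcg_key(&foo_lru, shrinker, &foo_lru_key);
    }

    /* Caller must pin @memcg (css ref or RCU) across the call. */
    static void foo_cache(struct foo_obj *obj, int nid, struct mem_cgroup *memcg)
    {
            if (list_lru_add(&foo_lru, &obj->lru, nid, memcg))
                    obj->on_lru = true;
    }
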
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index fa6e8471bd22..248db9b77ee2 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -28,6 +28,7 @@ struct hlist_nulls_node {
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
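
The new static initializer complements the runtime INIT_HLIST_NULLS_HEAD(); a hedged sketch using the common per-bucket nulls-value pattern:

    /* Sketch only: a small nulls hash, each bucket terminated by its index. */
    static struct hlist_nulls_head foo_hash[4] = {
            HLIST_NULLS_HEAD_INIT(0), HLIST_NULLS_HEAD_INIT(1),
            HLIST_NULLS_HEAD_INIT(2), HLIST_NULLS_HEAD_INIT(3),
    };
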
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..772919e8096a 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -13,6 +13,7 @@
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>
+#include <linux/livepatch_external.h>
#include <linux/livepatch_sched.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
@@ -77,30 +78,6 @@ struct klp_func {
bool transition;
};
-struct klp_object;
-
-/**
- * struct klp_callbacks - pre/post live-(un)patch callback structure
- * @pre_patch: executed before code patching
- * @post_patch: executed after code patching
- * @pre_unpatch: executed before code unpatching
- * @post_unpatch: executed after code unpatching
- * @post_unpatch_enabled: flag indicating if post-unpatch callback
- * should run
- *
- * All callbacks are optional. Only the pre-patch callback, if provided,
- * will be unconditionally executed. If the parent klp_object fails to
- * patch for any reason, including a non-zero error status returned from
- * the pre-patch callback, no further callbacks will be executed.
- */
-struct klp_callbacks {
- int (*pre_patch)(struct klp_object *obj);
- void (*post_patch)(struct klp_object *obj);
- void (*pre_unpatch)(struct klp_object *obj);
- void (*post_unpatch)(struct klp_object *obj);
- bool post_unpatch_enabled;
-};
-
/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
diff --git a/include/linux/livepatch_external.h b/include/linux/livepatch_external.h
new file mode 100644
index 000000000000..138af19b0f5c
--- /dev/null
+++ b/include/linux/livepatch_external.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * External livepatch interfaces for patch creation tooling
+ */
+
+#ifndef _LINUX_LIVEPATCH_EXTERNAL_H_
+#define _LINUX_LIVEPATCH_EXTERNAL_H_
+
+#include <linux/types.h>
+
+#define KLP_RELOC_SEC_PREFIX ".klp.rela."
+#define KLP_SYM_PREFIX ".klp.sym."
+
+#define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_
+#define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_
+#define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_
+#define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_
+
+#define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX)
+#define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX)
+#define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX)
+#define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX)
+
+struct klp_object;
+
+typedef int (*klp_pre_patch_t)(struct klp_object *obj);
+typedef void (*klp_post_patch_t)(struct klp_object *obj);
+typedef void (*klp_pre_unpatch_t)(struct klp_object *obj);
+typedef void (*klp_post_unpatch_t)(struct klp_object *obj);
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ klp_pre_patch_t pre_patch;
+ klp_post_patch_t post_patch;
+ klp_pre_unpatch_t pre_unpatch;
+ klp_post_unpatch_t post_unpatch;
+ bool post_unpatch_enabled;
+};
+
+/*
+ * 'struct klp_{func,object}_ext' are compact "external" representations of
+ * 'struct klp_{func,object}'. They are used by objtool for livepatch
+ * generation. The structs are then read by the livepatch module and converted
+ * to the real structs before calling klp_enable_patch().
+ *
+ * TODO make these the official API for klp_enable_patch(). That should
+ * simplify livepatch's interface as well as its data structure lifetime
+ * management.
+ */
+struct klp_func_ext {
+ const char *old_name;
+ void *new_func;
+ unsigned long sympos;
+};
+
+struct klp_object_ext {
+ const char *name;
+ struct klp_func_ext *funcs;
+ struct klp_callbacks callbacks;
+ unsigned int nr_funcs;
+};
+
+#endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
diff --git a/include/linux/livepatch_helpers.h b/include/linux/livepatch_helpers.h
new file mode 100644
index 000000000000..99d68d0773fa
--- /dev/null
+++ b/include/linux/livepatch_helpers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIVEPATCH_HELPERS_H
+#define _LINUX_LIVEPATCH_HELPERS_H
+
+/*
+ * Interfaces for use by livepatch patches
+ */
+
+#include <linux/syscalls.h>
+#include <linux/livepatch.h>
+
+#ifdef MODULE
+#define KLP_OBJNAME __KBUILD_MODNAME
+#else
+#define KLP_OBJNAME vmlinux
+#endif
+
+/* Livepatch callback registration */
+
+#define KLP_CALLBACK_PTRS ".discard.klp_callback_ptrs"
+
+#define KLP_PRE_PATCH_CALLBACK(func) \
+ klp_pre_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_PATCH_CALLBACK(func) \
+ klp_post_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_PRE_UNPATCH_CALLBACK(func) \
+ klp_pre_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_UNPATCH_CALLBACK(func) \
+ klp_post_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+/*
+ * Replace static_call() usage with this macro when create-diff-object
+ * recommends it due to the original static call key living in a module.
+ *
+ * This converts the static call to a regular indirect call.
+ */
+#define KLP_STATIC_CALL(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+/* Syscall patching */
+
+#define KLP_SYSCALL_DEFINE1(name, ...) KLP_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE2(name, ...) KLP_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE3(name, ...) KLP_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE4(name, ...) KLP_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE5(name, ...) KLP_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE6(name, ...) KLP_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#define KLP_SYSCALL_DEFINEx(x, sname, ...) \
+ __KLP_SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+
+#ifdef CONFIG_X86_64
+// TODO move this to arch/x86/include/asm/syscall_wrapper.h and share code
+#define __KLP_SYSCALL_DEFINEx(x, name, ...) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ __X64_SYS_STUBx(x, name, __VA_ARGS__) \
+ __IA32_SYS_STUBx(x, name, __VA_ARGS__) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __klp_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#endif
+
+#endif /* _LINUX_LIVEPATCH_HELPERS_H */
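
A hedged sketch of the callback registration macros as used from a patch module; the callback body is invented:

    /* Sketch only: register a pre-patch callback for this object. */
    static int foo_pre_patch(struct klp_object *obj)
    {
            pr_info("livepatch: preparing %s\n", obj->name ?: "vmlinux");
            return 0;       /* non-zero aborts patching of this object */
    }
    KLP_PRE_PATCH_CALLBACK(foo_pre_patch);
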
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
index 013794fb5da0..065c185f2763 100644
--- a/include/linux/livepatch_sched.h
+++ b/include/linux/livepatch_sched.h
@@ -3,27 +3,23 @@
#define _LINUX_LIVEPATCH_SCHED_H_
#include <linux/jump_label.h>
-#include <linux/static_call_types.h>
+#include <linux/sched.h>
#ifdef CONFIG_LIVEPATCH
void __klp_sched_try_switch(void);
-#if !defined(CONFIG_PREEMPT_DYNAMIC) || !defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-
DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
-static __always_inline void klp_sched_try_switch(void)
+static __always_inline void klp_sched_try_switch(struct task_struct *curr)
{
- if (static_branch_unlikely(&klp_sched_try_switch_key))
+ if (static_branch_unlikely(&klp_sched_try_switch_key) &&
+ READ_ONCE(curr->__state) & TASK_FREEZABLE)
__klp_sched_try_switch();
}
-#endif /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
-
#else /* !CONFIG_LIVEPATCH */
-static inline void klp_sched_try_switch(void) {}
-static inline void __klp_sched_try_switch(void) {}
+static inline void klp_sched_try_switch(struct task_struct *curr) {}
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h
new file mode 100644
index 000000000000..a7f6ee5b6771
--- /dev/null
+++ b/include/linux/liveupdate.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef _LINUX_LIVEUPDATE_H
+#define _LINUX_LIVEUPDATE_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/kho/abi/luo.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <uapi/linux/liveupdate.h>
+
+struct liveupdate_file_handler;
+struct file;
+
+/**
+ * struct liveupdate_file_op_args - Arguments for file operation callbacks.
+ * @handler: The file handler being called.
+ * @retrieved: The retrieve status for the 'can_finish / finish'
+ * operation.
+ * @file: The file object. For retrieve: [OUT] The callback sets
+ * this to the new file. For other ops: [IN] The caller sets
+ * this to the file being operated on.
+ * @serialized_data: The opaque u64 handle, preserve/prepare/freeze may update
+ * this field.
+ * @private_data: Private data for the file used to hold runtime state that
+ * is not preserved. Set by the handler's .preserve()
+ * callback, and must be freed in the handler's
+ * .unpreserve() callback.
+ *
+ * This structure bundles all parameters for the file operation callbacks.
+ * The 'serialized_data' and 'file' fields are used for both input and output.
+ */
+struct liveupdate_file_op_args {
+ struct liveupdate_file_handler *handler;
+ bool retrieved;
+ struct file *file;
+ u64 serialized_data;
+ void *private_data;
+};
+
+/**
+ * struct liveupdate_file_ops - Callbacks for live-updatable files.
+ * @can_preserve: Required. Lightweight check to see if this handler is
+ * compatible with the given file.
+ * @preserve: Required. Performs state-saving for the file.
+ * @unpreserve: Required. Cleans up any resources allocated by @preserve.
+ * @freeze: Optional. Final actions just before kernel transition.
+ * @unfreeze: Optional. Undo freeze operations.
+ * @retrieve: Required. Restores the file in the new kernel.
+ * @can_finish: Optional. Check if this FD can finish, i.e. all restoration
+ * prerequisites for this FD are satisfied. Called prior to
+ * finish, in order to do successful finish calls for all
+ * resources in the session.
+ * @finish: Required. Final cleanup in the new kernel.
+ * @owner: Module reference
+ *
+ * All operations (except can_preserve) receive a pointer to a
+ * 'struct liveupdate_file_op_args' containing the necessary context.
+ */
+struct liveupdate_file_ops {
+ bool (*can_preserve)(struct liveupdate_file_handler *handler,
+ struct file *file);
+ int (*preserve)(struct liveupdate_file_op_args *args);
+ void (*unpreserve)(struct liveupdate_file_op_args *args);
+ int (*freeze)(struct liveupdate_file_op_args *args);
+ void (*unfreeze)(struct liveupdate_file_op_args *args);
+ int (*retrieve)(struct liveupdate_file_op_args *args);
+ bool (*can_finish)(struct liveupdate_file_op_args *args);
+ void (*finish)(struct liveupdate_file_op_args *args);
+ struct module *owner;
+};
+
+/**
+ * struct liveupdate_file_handler - Represents a handler for a live-updatable file type.
+ * @ops: Callback functions
+ * @compatible: The compatibility string (e.g., "memfd-v1", "vfiofd-v1")
+ * that uniquely identifies the file type this handler
+ * supports. This is matched against the compatible string
+ * associated with individual &struct file instances.
+ *
+ * Modules that want to support live update for specific file types should
+ * register an instance of this structure. LUO uses this registration to
+ * determine if a given file can be preserved and to find the appropriate
+ * operations to manage its state across the update.
+ */
+struct liveupdate_file_handler {
+ const struct liveupdate_file_ops *ops;
+ const char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+
+ /* private: */
+
+ /*
+ * Used for linking this handler instance into a global list of
+ * registered file handlers.
+ */
+ struct list_head __private list;
+};
+
+#ifdef CONFIG_LIVEUPDATE
+
+/* Return true if live update orchestrator is enabled */
+bool liveupdate_enabled(void);
+
+/* Called during kexec to tell LUO that the system has entered reboot */
+int liveupdate_reboot(void);
+
+int liveupdate_register_file_handler(struct liveupdate_file_handler *fh);
+int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh);
+
+#else /* CONFIG_LIVEUPDATE */
+
+static inline bool liveupdate_enabled(void)
+{
+ return false;
+}
+
+static inline int liveupdate_reboot(void)
+{
+ return 0;
+}
+
+static inline int liveupdate_register_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_LIVEUPDATE */
+#endif /* _LINUX_LIVEUPDATE_H */
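
A hedged registration sketch; the compatible string and the is_foo_file() test are invented, and the required ops (.preserve, .unpreserve, .retrieve, .finish) would need real implementations:

    /* Sketch only: minimal file handler registration. */
    static bool foo_can_preserve(struct liveupdate_file_handler *handler,
                                 struct file *file)
    {
            return is_foo_file(file);       /* hypothetical type check */
    }

    static const struct liveupdate_file_ops foo_lu_ops = {
            .can_preserve   = foo_can_preserve,
            .owner          = THIS_MODULE,
    };

    static struct liveupdate_file_handler foo_lu_handler = {
            .ops            = &foo_lu_ops,
            .compatible     = "foofd-v1",
    };

    static int __init foo_lu_init(void)
    {
            return liveupdate_register_file_handler(&foo_lu_handler);
    }
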
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 2c982ff7475a..607b2360c938 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -83,7 +83,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline void init_llist_node(struct llist_node *node)
{
- node->next = node;
+ WRITE_ONCE(node->next, node);
}
/**
@@ -97,7 +97,7 @@ static inline void init_llist_node(struct llist_node *node)
*/
static inline bool llist_on_list(const struct llist_node *node)
{
- return node->next != node;
+ return READ_ONCE(node->next) != node;
}
/**
@@ -220,12 +220,29 @@ static inline bool llist_empty(const struct llist_head *head)
static inline struct llist_node *llist_next(struct llist_node *node)
{
- return node->next;
+ return READ_ONCE(node->next);
}
-extern bool llist_add_batch(struct llist_node *new_first,
- struct llist_node *new_last,
- struct llist_head *head);
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ *
+ * Return: true if the list was empty before the batch was added.
+ */
+static inline bool llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first = READ_ONCE(head->first);
+
+ do {
+ new_last->next = first;
+ } while (!try_cmpxchg(&head->first, &first, new_first));
+
+ return !first;
+}
static inline bool __llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
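
Now that llist_add_batch() is inline, a hedged reminder of its contract: the batch must be linked first-to-last before it is published. struct foo_q, with its pending llist and consumer task, is invented:

    /* Sketch only: publish a pre-linked two-node batch lock-free. */
    static void foo_queue_pair(struct foo_q *q, struct llist_node *a,
                               struct llist_node *b)
    {
            a->next = b;                    /* link the batch first */
            if (llist_add_batch(a, b, &q->pending))
                    wake_up_process(q->consumer);   /* was empty before */
    }
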
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index e55010fa7329..b0e6ab329b00 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -6,6 +6,7 @@
/**
* local_lock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
*/
#define local_lock_init(lock) __local_lock_init(lock)
@@ -13,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(lock)
+#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(lock)
+#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(lock, flags)
+ __local_lock_irqsave(this_cpu_ptr(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(lock)
+#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(lock)
+#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,6 +50,58 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(lock, flags)
+ __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+
+/**
+ * local_trylock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
+ */
+#define local_trylock_init(lock) __local_trylock_init(lock)
+
+/**
+ * local_trylock - Try to acquire a per CPU local lock
+ * @lock: The lock variable
+ *
+ * The function can be used in any context, including NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
+/**
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context, including NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock_irqsave(lock, flags) \
+ __local_trylock_irqsave(this_cpu_ptr(lock), flags)
+
+DEFINE_GUARD(local_lock, local_lock_t __percpu*,
+ local_lock(_T),
+ local_unlock(_T))
+DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
+ local_lock_irq(_T),
+ local_unlock_irq(_T))
+DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
+ local_lock_irqsave(_T->lock, _T->flags),
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#define local_lock_nested_bh(_lock) \
+ __local_lock_nested_bh(this_cpu_ptr(_lock))
+
+#define local_unlock_nested_bh(_lock) \
+ __local_unlock_nested_bh(this_cpu_ptr(_lock))
+
+DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
+ local_lock_nested_bh(_T),
+ local_unlock_nested_bh(_T))
#endif
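
Caller-visible usage is unchanged by moving this_cpu_ptr() into the wrapper
macros above; a minimal sketch, with a hypothetical per-CPU structure:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_stats {
	local_lock_t lock;
	unsigned long events;
};

static DEFINE_PER_CPU(struct my_stats, my_stats) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void count_event(void)
{
	/* the wrapper resolves the per-CPU address via this_cpu_ptr() */
	local_lock(&my_stats.lock);
	this_cpu_inc(my_stats.events);
	local_unlock(&my_stats.lock);
}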
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 975e33b793a7..8f82b4eb542f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,15 @@ typedef struct {
#endif
} local_lock_t;
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+ u8 acquired;
+} local_trylock_t;
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
@@ -24,6 +33,9 @@ typedef struct {
}, \
.owner = NULL,
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ LOCAL_LOCK_DEBUG_INIT(lockname)
+
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -31,6 +43,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -44,12 +63,15 @@ static inline void local_lock_debug_init(local_lock_t *l)
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
#define __local_lock_init(lock) \
do { \
@@ -62,42 +84,133 @@ do { \
local_lock_debug_init(lock); \
} while (0)
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+
+#define __spinlock_nested_bh_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_NORMAL); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 0); \
+ WRITE_ONCE(__tl->acquired, 1); \
+ }), \
+ local_lock_t *: (void)0); \
+ local_lock_acquire(__l); \
+ } while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ preempt_disable(); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ preempt_enable(); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ local_irq_save(flags); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ local_irq_restore(flags); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ local_lock_release(__l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 1); \
+ WRITE_ONCE(__tl->acquired, 0); \
+ }), \
+ local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire((lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release((lock))
+
#else /* !CONFIG_PREEMPT_RT */
/*
@@ -105,18 +218,22 @@ do { \
* critical section while staying preemptible.
*/
typedef spinlock_t local_lock_t;
+typedef spinlock_t local_trylock_t;
#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define __local_lock_init(l) \
+#define __local_lock_init(__l) \
do { \
- local_spin_lock_init((l)); \
+ local_spin_lock_init((__l)); \
} while (0)
+#define __local_trylock_init(__l) __local_lock_init(__l)
+
#define __local_lock(__lock) \
do { \
migrate_disable(); \
- spin_lock(this_cpu_ptr((__lock))); \
+ spin_lock((__lock)); \
} while (0)
#define __local_lock_irq(lock) __local_lock(lock)
@@ -130,7 +247,7 @@ typedef spinlock_t local_lock_t;
#define __local_unlock(__lock) \
do { \
- spin_unlock(this_cpu_ptr((__lock))); \
+ spin_unlock((__lock)); \
migrate_enable(); \
} while (0)
@@ -138,4 +255,41 @@ typedef spinlock_t local_lock_t;
#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+#define __local_lock_nested_bh(lock) \
+do { \
+ lockdep_assert_in_softirq_func(); \
+ spin_lock((lock)); \
+} while (0)
+
+#define __local_unlock_nested_bh(lock) \
+do { \
+ spin_unlock((lock)); \
+} while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock((lock)); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_trylock(lock); \
+ })
+
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
#endif /* CONFIG_PREEMPT_RT */
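
A hedged sketch of the new trylock type in use (names hypothetical):
local_trylock() pairs with the ordinary local_unlock(), and per the comments
above it always fails from NMI/HARDIRQ context on PREEMPT_RT:

#include <linux/local_lock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(local_trylock_t, fast_lock) =
	INIT_LOCAL_TRYLOCK(fast_lock);

static bool try_fast_path(void)
{
	if (!local_trylock(&fast_lock))
		return false;	/* held on this CPU, or NMI/IRQ on RT */
	/* ... critical section ... */
	local_unlock(&fast_lock);
	return true;
}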
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 1b95fe31051f..330e38776bb2 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -12,6 +12,7 @@
/* XXX: a lot of this should really be under fs/lockd. */
+#include <linux/exportfs.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
@@ -200,7 +201,7 @@ extern const struct svc_procedure nlmsvc_procedures[24];
extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
@@ -278,9 +279,9 @@ __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *);
-__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
- struct nlm_host *, struct nlm_lock *,
- struct nlm_lock *, struct nlm_cookie *);
+__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ struct nlm_host *host, struct nlm_lock *lock,
+ struct nlm_lock *conflock);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
@@ -307,7 +308,7 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
-static inline struct file *nlmsvc_file_file(struct nlm_file *file)
+static inline struct file *nlmsvc_file_file(const struct nlm_file *file)
{
return file->f_file[O_RDONLY] ?
file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
@@ -318,6 +319,12 @@ static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
return file_inode(nlmsvc_file_file(file));
}
+static inline bool
+nlmsvc_file_cannot_lock(const struct nlm_file *file)
+{
+ return exportfs_cannot_lock(nlmsvc_file_file(file)->f_path.dentry->d_sb->s_export_op);
+}
+
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
{
const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 80cca9426761..17d53165d9f2 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -73,8 +73,6 @@ struct nlm_args {
u32 fsm_mode;
};
-typedef struct nlm_args nlm_args;
-
/*
* Generic lockd result
*/
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5e51b0de4c4b..dd634103b014 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -173,14 +173,32 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
(lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
(lock)->dep_map.wait_type_inner, \
(lock)->dep_map.wait_type_outer, \
(lock)->dep_map.lock_type)
+/**
+ * lockdep_set_novalidate_class: disable checking of lock ordering on a given
+ * lock
+ * @lock: Lock to mark
+ *
+ * Lockdep will still record that this lock has been taken, and print held
+ * instances when dumping locks.
+ */
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+/**
+ * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
+ * @lock: Lock to mark
+ *
+ * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
+ * which takes more locks than lockdep is able to track (48).
+ */
+#define lockdep_set_notrack_class(lock) \
+ lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)
+
/*
* Compare locking classes
*/
@@ -297,9 +315,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
.wait_type_inner = _wait_type, \
.lock_type = LD_LOCK_WAIT_OVERRIDE, }
-#define lock_map_assert_held(l) \
- lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD)
-
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -341,6 +356,7 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
#define lockdep_set_subclass(lock, sub) do { } while (0)
#define lockdep_set_novalidate_class(lock) do { } while (0)
+#define lockdep_set_notrack_class(lock) do { } while (0)
/*
* We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
@@ -391,8 +407,6 @@ extern int lockdep_is_held(const void *);
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
struct lockdep_map __maybe_unused _name = {}
-#define lock_map_assert_held(l) do { (void)(l); } while (0)
-
#endif /* !LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
@@ -602,9 +616,11 @@ do { \
#define lockdep_assert_in_softirq() \
do { \
WARN_ON_ONCE(__lockdep_enabled && \
- (!in_softirq() || in_irq() || in_nmi())); \
+ (!in_softirq() || in_hardirq() || in_nmi())); \
} while (0)
+extern void lockdep_assert_in_softirq_func(void);
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -618,6 +634,7 @@ do { \
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
+# define lockdep_assert_in_softirq_func() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
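
A sketch of the new notrack class in use (the lock name is hypothetical; per
the kerneldoc above, bcachefs is the only intended user so far):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(many_held_lock);

static void __init init_untracked_lock(void)
{
	/* this class may exceed lockdep's held-lock limit; stop tracking it */
	lockdep_set_notrack_class(&many_held_lock);
}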
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 70d30d40ea4a..eae115a26488 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -80,6 +80,7 @@ struct lock_class_key {
};
extern struct lock_class_key __lockdep_no_validate__;
+extern struct lock_class_key __lockdep_no_track__;
struct lock_trace;
@@ -174,7 +175,7 @@ struct lock_class_stats {
unsigned long bounces[nr_bounce_types];
};
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
void clear_lock_stats(struct lock_class *class);
#endif
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c3a1f78bc884..815d871fadfc 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -34,14 +34,27 @@ struct lockref {
};
};
-extern void lockref_get(struct lockref *);
-extern int lockref_put_return(struct lockref *);
-extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_put_or_lock(struct lockref *);
-
-extern void lockref_mark_dead(struct lockref *);
-extern int lockref_get_not_dead(struct lockref *);
+/**
+ * lockref_init - Initialize a lockref
+ * @lockref: pointer to lockref structure
+ *
+ * Initializes @lockref->count to 1.
+ */
+static inline void lockref_init(struct lockref *lockref)
+{
+ spin_lock_init(&lockref->lock);
+ lockref->count = 1;
+}
+
+void lockref_get(struct lockref *lockref);
+int lockref_put_return(struct lockref *lockref);
+bool lockref_get_not_zero(struct lockref *lockref);
+bool lockref_put_or_lock(struct lockref *lockref);
+#define lockref_put_or_lock(_lockref) \
+ (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+
+void lockref_mark_dead(struct lockref *lockref);
+bool lockref_get_not_dead(struct lockref *lockref);
/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
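
A minimal sketch of the new lockref_init() helper (the embedding object is
hypothetical); the count starting at 1 matches the usual convention that the
creator holds the first reference:

#include <linux/lockref.h>
#include <linux/slab.h>

struct my_obj {
	struct lockref ref;
	/* ... payload ... */
};

static struct my_obj *my_obj_alloc(gfp_t gfp)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), gfp);

	if (obj)
		lockref_init(&obj->ref);	/* lock ready, count = 1 */
	return obj;
}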
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 9f30d087a128..2eac3fc9303d 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -41,7 +41,7 @@ int __ilog2_u64(u64 n)
* *not* considered a power of two.
* Return: true if @n is a power of 2, otherwise false.
*/
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
@@ -255,4 +255,18 @@ int __bits_per(unsigned long n)
) : \
__bits_per(n) \
)
+
+/**
+ * max_pow_of_two_factor - return highest power-of-2 factor
+ * @n: value to factor
+ *
+ * Find the highest power-of-2 which evenly divides @n.
+ * 0 is returned for n == 0; odd values (including 1) return 1.
+ */
+static inline __attribute__((const))
+unsigned int max_pow_of_two_factor(unsigned int n)
+{
+ return n & -n;
+}
+
#endif /* _LINUX_LOG2_H */
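
Illustrative values (not from the patch): n & -n isolates the lowest set bit
of n, which is exactly the largest power of two dividing n:

/*
 *   max_pow_of_two_factor(24) == 8   (24 = 8 * 3)
 *   max_pow_of_two_factor(40) == 8   (40 = 8 * 5)
 *   max_pow_of_two_factor(7)  == 1   (odd: 2^0 is the only such factor)
 *   max_pow_of_two_factor(0)  == 0
 */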
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index babf4e3c28ba..8f1a9408302f 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -17,7 +17,7 @@ enum {
struct logic_pio_hwaddr {
struct list_head list;
- struct fwnode_handle *fwnode;
+ const struct fwnode_handle *fwnode;
resource_size_t hw_start;
resource_size_t io_start;
resource_size_t size; /* range size populated */
@@ -110,8 +110,8 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
#endif /* CONFIG_INDIRECT_PIO */
#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
-struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
-unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index c9afcdd9324c..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -145,7 +145,7 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h
new file mode 100644
index 000000000000..612cbfacb072
--- /dev/null
+++ b/include/linux/lsm/apparmor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * AppArmor presents single pointer to an aa_label structure.
+ */
+#ifndef __LINUX_LSM_APPARMOR_H
+#define __LINUX_LSM_APPARMOR_H
+
+struct aa_label;
+
+struct lsm_prop_apparmor {
+#ifdef CONFIG_SECURITY_APPARMOR
+ struct aa_label *label;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_APPARMOR_H */
diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h
new file mode 100644
index 000000000000..8106e206fcef
--- /dev/null
+++ b/include/linux/lsm/bpf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * BPF may present a single u32 value.
+ */
+#ifndef __LINUX_LSM_BPF_H
+#define __LINUX_LSM_BPF_H
+#include <linux/types.h>
+
+struct lsm_prop_bpf {
+#ifdef CONFIG_BPF_LSM
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_BPF_H */
diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h
new file mode 100644
index 000000000000..9455a6b5b910
--- /dev/null
+++ b/include/linux/lsm/selinux.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * SELinux presents a single u32 value which is known as a secid.
+ */
+#ifndef __LINUX_LSM_SELINUX_H
+#define __LINUX_LSM_SELINUX_H
+#include <linux/types.h>
+
+struct lsm_prop_selinux {
+#ifdef CONFIG_SECURITY_SELINUX
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SELINUX_H */
diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h
new file mode 100644
index 000000000000..ff730dd7a734
--- /dev/null
+++ b/include/linux/lsm/smack.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * Smack presents a pointer into the global Smack label list.
+ */
+#ifndef __LINUX_LSM_SMACK_H
+#define __LINUX_LSM_SMACK_H
+
+struct smack_known;
+
+struct lsm_prop_smack {
+#ifdef CONFIG_SECURITY_SMACK
+ struct smack_known *skp;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SMACK_H */
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 97a8b21eb033..382c56a97bba 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -5,7 +5,7 @@
*
* Author : Etienne BASSET <etienne.basset@ensta.org>
*
- * All credits to : Stephen Smalley, <sds@tycho.nsa.gov>
+ * All credits to : Stephen Smalley
* All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
*/
#ifndef _LSM_COMMON_LOGGING_
@@ -77,6 +77,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_LOCKDOWN 15
#define LSM_AUDIT_DATA_NOTIFICATION 16
#define LSM_AUDIT_DATA_ANONINODE 17
+#define LSM_AUDIT_DATA_NLMSGTYPE 18
union {
struct path path;
struct dentry *dentry;
@@ -98,6 +99,7 @@ struct common_audit_data {
struct lsm_ibendport_audit *ibendport;
int reason;
const char *anonclass;
+ u16 nlmsg_type;
} u;
/* this union contains LSM specific data */
union {
@@ -116,14 +118,36 @@ struct common_audit_data {
#define v4info fam.v4
#define v6info fam.v6
+#ifdef CONFIG_AUDIT
+
int ipv4_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#if IS_ENABLED(CONFIG_IPV6)
int ipv6_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
void common_lsm_audit(struct common_audit_data *a,
void (*pre_audit)(struct audit_buffer *, void *),
void (*post_audit)(struct audit_buffer *, void *));
+void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a);
+
+#else /* CONFIG_AUDIT */
+
+static inline void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
+{
+}
+
+static inline void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
#endif
diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..16eb49761b25
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_SECURITY
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+/*
+ * Capabilities is enabled when CONFIG_SECURITY is enabled.
+ */
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SAFESETID)
+#define SAFESETID_ENABLED 1,
+#else
+#define SAFESETID_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_IMA)
+#define IMA_ENABLED 1,
+#else
+#define IMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_EVM)
+#define EVM_ENABLED 1,
+#else
+#define EVM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_IPE)
+#define IPE_ENABLED 1,
+#else
+#define IPE_ENABLED
+#endif
+
+/*
+ * There is a trailing comma that we need to be accounted for. This is done by
+ * using a skipped argument in __COUNT_LSMS
+ */
+#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...)
+#define COUNT_LSMS(args...) __COUNT_LSMS(args)
+
+#define MAX_LSM_COUNT \
+ COUNT_LSMS( \
+ CAPABILITIES_ENABLED \
+ SELINUX_ENABLED \
+ SMACK_ENABLED \
+ APPARMOR_ENABLED \
+ TOMOYO_ENABLED \
+ YAMA_ENABLED \
+ LOADPIN_ENABLED \
+ LOCKDOWN_ENABLED \
+ SAFESETID_ENABLED \
+ BPF_LSM_ENABLED \
+ LANDLOCK_ENABLED \
+ IMA_ENABLED \
+ EVM_ENABLED \
+ IPE_ENABLED)
+
+#else
+
+#define MAX_LSM_COUNT 0
+
+#endif /* CONFIG_SECURITY */
+
+#endif /* __LINUX_LSM_COUNT_H */
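
An illustrative expansion, assuming only capabilities, SELinux and BPF are
enabled (every other *_ENABLED macro expands to nothing): the first "1" is
consumed as skipped_arg and the empty argument produced by the trailing comma
is counted in its place, so the result equals the number of enabled LSMs:

/*
 *   MAX_LSM_COUNT
 *     -> COUNT_LSMS(1, 1, 1, )    three "1," fragments plus trailing comma
 *     -> __COUNT_LSMS(1, 1, 1, )  first "1" eaten as skipped_arg
 *     -> COUNT_ARGS(1, 1, )       two "1"s plus one empty trailing argument
 *     -> 3
 */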
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index f804b76cde44..8c42b4bde09c 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
@@ -83,9 +83,9 @@ LSM_HOOK(int, 0, move_mount, const struct path *from_path,
const struct path *to_path)
LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
int mode, const struct qstr *name, const char **xattr_name,
- void **ctx, u32 *ctxlen)
+ struct lsm_context *cp)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
- struct qstr *name, const struct cred *old, struct cred *new)
+ const struct qstr *name, const struct cred *old, struct cred *new)
#ifdef CONFIG_SECURITY_PATH
LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
@@ -114,6 +114,7 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security)
LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
int *xattr_count)
@@ -144,6 +145,7 @@ LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry,
LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap,
struct dentry *dentry, int ia_valid)
LSM_HOOK(int, 0, inode_getattr, const struct path *path)
+LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name)
LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
@@ -155,6 +157,8 @@ LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry,
const char *name)
+LSM_HOOK(int, 0, inode_file_setattr, struct dentry *dentry, struct file_kattr *fa)
+LSM_HOOK(int, 0, inode_file_getattr, struct dentry *dentry, struct file_kattr *fa)
LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap,
struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry,
@@ -174,10 +178,13 @@ LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
size_t buffer_size)
-LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
const char *name)
+LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode,
+ enum lsm_integrity_type type, const void *value, size_t size)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -204,7 +211,7 @@ LSM_HOOK(int, 0, file_open, struct file *file)
LSM_HOOK(int, 0, file_post_open, struct file *file, int mask)
LSM_HOOK(int, 0, file_truncate, struct file *file)
LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
- unsigned long clone_flags)
+ u64 clone_flags)
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
@@ -213,6 +220,8 @@ LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
const struct cred *old)
LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
@@ -231,9 +240,9 @@ LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
-LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid)
-LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
- struct task_struct *p, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop)
+LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj,
+ struct task_struct *p, struct lsm_prop *prop)
LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
@@ -252,8 +261,8 @@ LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
struct inode *inode)
LSM_HOOK(int, 0, userns_create, const struct cred *cred)
LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
-LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
- u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
@@ -288,15 +297,16 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
-LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
- u32 *seclen)
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp)
+LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop,
+ struct lsm_context *cp)
LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
-LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
- u32 *ctxlen)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode,
+ struct lsm_context *cp)
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
@@ -352,8 +362,7 @@ LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
struct flowi_common *flic)
-LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
-LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void *security)
LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
@@ -373,8 +382,7 @@ LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
u8 port_num)
-LSM_HOOK(int, 0, ib_alloc_security, void **sec)
-LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+LSM_HOOK(int, 0, ib_alloc_security, void *sec)
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -402,7 +410,6 @@ LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
#ifdef CONFIG_KEYS
LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
unsigned long flags)
-LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
@@ -413,24 +420,25 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
#ifdef CONFIG_AUDIT
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
-LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
-LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
@@ -439,9 +447,8 @@ LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
#ifdef CONFIG_PERF_EVENTS
-LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
+LSM_HOOK(int, 0, perf_event_open, int type)
LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
-LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
@@ -450,4 +457,12 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+LSM_HOOK(int, 0, uring_allowed, void)
#endif /* CONFIG_IO_URING */
+
+LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void)
+
+LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev)
+LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev)
+LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a2ade0ffe9e7..b92008641242 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -30,19 +30,47 @@
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/xattr.h>
+#include <linux/static_call.h>
+#include <linux/unroll.h>
+#include <linux/jump_label.h>
+#include <linux/lsm_count.h>
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
#include "lsm_hook_defs.h"
#undef LSM_HOOK
+ void *lsm_func_addr;
};
-struct security_hook_heads {
- #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
- #include "lsm_hook_defs.h"
- #undef LSM_HOOK
+/*
+ * @key: static call key as defined by STATIC_CALL_KEY
+ * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP
+ * @hl: The security_hook_list as initialized by the owning LSM.
+ * @active: Enabled when the static call has an LSM hook associated.
+ */
+struct lsm_static_call {
+ struct static_call_key *key;
+ void *trampoline;
+ struct security_hook_list *hl;
+ /* this needs to be true or false based on what the key defaults to */
+ struct static_key_false *active;
} __randomize_layout;
+/*
+ * Table of the static calls for each LSM hook.
+ * Once the LSMs are initialized, their callbacks will be copied to these
+ * tables such that the calls are filled backwards (from last to first).
+ * This way, we can jump directly to the first used static call and execute
+ * all of the following ones. This essentially makes the entry point
+ * dynamic, adapting the number of static calls to the number of callbacks.
+ */
+struct lsm_static_calls_table {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ struct lsm_static_call NAME[MAX_LSM_COUNT];
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+} __packed __randomize_layout;
+
/**
* struct lsm_id - Identify a Linux Security Module.
* @lsm: name of the LSM, must be approved by the LSM maintainers
@@ -51,53 +79,48 @@ struct security_hook_heads {
* Contains the information that identifies the LSM.
*/
struct lsm_id {
- const char *name;
- u64 id;
+ const char *name;
+ u64 id;
};
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
+ *
+ * struct security_hook_list - Contents of a cacheable, mappable object.
+ * @scalls: The beginning of the array of static calls assigned to this hook.
+ * @hook: The callback for the hook.
+ * @lsmid: The identity of the LSM that owns this hook.
*/
struct security_hook_list {
- struct hlist_node list;
- struct hlist_head *head;
- union security_list_options hook;
- const struct lsm_id *lsmid;
+ struct lsm_static_call *scalls;
+ union security_list_options hook;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
* Security blob size or offset data.
*/
struct lsm_blob_sizes {
- int lbs_cred;
- int lbs_file;
- int lbs_inode;
- int lbs_superblock;
- int lbs_ipc;
- int lbs_msg_msg;
- int lbs_task;
- int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
+ unsigned int lbs_cred;
+ unsigned int lbs_file;
+ unsigned int lbs_ib;
+ unsigned int lbs_inode;
+ unsigned int lbs_sock;
+ unsigned int lbs_superblock;
+ unsigned int lbs_ipc;
+ unsigned int lbs_key;
+ unsigned int lbs_msg_msg;
+ unsigned int lbs_perf_event;
+ unsigned int lbs_task;
+ unsigned int lbs_xattr_count; /* num xattr slots in new_xattrs array */
+ unsigned int lbs_tun_dev;
+ unsigned int lbs_bdev;
+ unsigned int lbs_bpf_map;
+ unsigned int lbs_bpf_prog;
+ unsigned int lbs_bpf_token;
};
-/**
- * lsm_get_xattr_slot - Return the next available slot and increment the index
- * @xattrs: array storing LSM-provided xattrs
- * @xattr_count: number of already stored xattrs (updated)
- *
- * Retrieve the first available slot in the @xattrs array to fill with an xattr,
- * and increment @xattr_count.
- *
- * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
- */
-static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
- int *xattr_count)
-{
- if (unlikely(!xattrs))
- return NULL;
- return &xattrs[(*xattr_count)++];
-}
-
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
@@ -110,11 +133,11 @@ static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
* care of the common case and reduces the amount of
* text involved.
*/
-#define LSM_HOOK_INIT(HEAD, HOOK) \
- { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
-
-extern struct security_hook_heads security_hook_heads;
-extern char *lsm_names;
+#define LSM_HOOK_INIT(NAME, HOOK) \
+ { \
+ .scalls = static_calls_table.NAME, \
+ .hook = { .NAME = HOOK } \
+ }
extern void security_add_hooks(struct security_hook_list *hooks, int count,
const struct lsm_id *lsmid);
@@ -128,18 +151,38 @@ enum lsm_order {
LSM_ORDER_LAST = 1, /* This is only for integrity. */
};
+/**
+ * struct lsm_info - Define an individual LSM for the LSM framework.
+ * @id: LSM name/ID info
+ * @order: ordering with respect to other LSMs, optional
+ * @flags: descriptive flags, optional
+ * @blobs: LSM blob sharing, optional
+ * @enabled: controlled by CONFIG_LSM, optional
+ * @init: LSM specific initialization routine
+ * @initcall_pure: LSM callback for initcall_pure() setup, optional
+ * @initcall_early: LSM callback for early_initcall() setup, optional
+ * @initcall_core: LSM callback for core_initcall() setup, optional
+ * @initcall_subsys: LSM callback for subsys_initcall() setup, optional
+ * @initcall_fs: LSM callback for fs_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_late: LSM callback for late_initcall() setup, optional
+ */
struct lsm_info {
- const char *name; /* Required. */
- enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */
- unsigned long flags; /* Optional: flags describing LSM */
- int *enabled; /* Optional: controlled by CONFIG_LSM */
- int (*init)(void); /* Required. */
- struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
+ const struct lsm_id *id;
+ enum lsm_order order;
+ unsigned long flags;
+ struct lsm_blob_sizes *blobs;
+ int *enabled;
+ int (*init)(void);
+ int (*initcall_pure)(void);
+ int (*initcall_early)(void);
+ int (*initcall_core)(void);
+ int (*initcall_subsys)(void);
+ int (*initcall_fs)(void);
+ int (*initcall_device)(void);
+ int (*initcall_late)(void);
};
-extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
-extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
-
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(".lsm_info.init") \
@@ -150,6 +193,26 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
-extern int lsm_inode_alloc(struct inode *inode);
+
+/* DO NOT tamper with these variables outside of the LSM framework */
+extern struct lsm_static_calls_table static_calls_table __ro_after_init;
+
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
#endif /* ! __LINUX_LSM_HOOKS_H */
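
A hedged sketch of an LSM wiring one hook through the new static-call table
(all my_* names are hypothetical, and the lsm_id values are placeholders):

#include <linux/lsm_hooks.h>

static int my_file_open(struct file *file)
{
	return 0;	/* allow everything; demo only */
}

static const struct lsm_id my_lsmid = {
	.name = "mylsm",
	.id = 0,	/* placeholder; real IDs come from uapi/linux/lsm.h */
};

static struct security_hook_list my_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_open, my_file_open),
};

static int __init my_lsm_init(void)
{
	security_add_hooks(my_hooks, ARRAY_SIZE(my_hooks), &my_lsmid);
	return 0;
}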
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index b16e15b9587a..ad6042a718b5 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -645,4 +645,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize, const char *dictStart, int dictSize);
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
#endif
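
A worked example for the new in-place margin (numbers are illustrative):

/*
 *   compressedSize = 16384 bytes
 *   margin         = (16384 >> 8) + 32 = 64 + 32 = 96 bytes
 *
 * In-place decompression places the compressed payload at the end of the
 * destination buffer, leaving at least this margin past the decompressed
 * end (see the upstream LZ4 documentation for the exact buffer sizing).
 */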
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index e95c7d1092b2..4d30e3624acd 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -24,10 +24,18 @@
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len to dst. */
+int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzorle1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len to dst. */
+int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len);
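
A hedged sketch using the new bounded compressor (buffer sizes illustrative):
unlike lzo1x_1_compress(), the _safe variant never writes past the dst_len
capacity passed in:

#include <linux/errno.h>
#include <linux/lzo.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int compress_page(const void *src, void *dst, size_t *out_len)
{
	void *wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);
	int ret;

	if (!wrkmem)
		return -ENOMEM;
	*out_len = PAGE_SIZE;	/* dst capacity; _safe never exceeds it */
	ret = lzo1x_1_compress_safe(src, PAGE_SIZE, dst, out_len, wrkmem);
	vfree(wrkmem);
	return ret == LZO_E_OK ? 0 : -EIO;
}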
diff --git a/include/linux/mailbox/exynos-message.h b/include/linux/mailbox/exynos-message.h
new file mode 100644
index 000000000000..5a9ed5ce2046
--- /dev/null
+++ b/include/linux/mailbox/exynos-message.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Exynos mailbox message.
+ *
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef _LINUX_EXYNOS_MESSAGE_H_
+#define _LINUX_EXYNOS_MESSAGE_H_
+
+#define EXYNOS_MBOX_CHAN_TYPE_DOORBELL 0
+#define EXYNOS_MBOX_CHAN_TYPE_DATA 1
+
+struct exynos_mbox_msg {
+ unsigned int chan_id;
+ unsigned int chan_type;
+};
+
+#endif /* _LINUX_EXYNOS_MESSAGE_H_ */
diff --git a/include/linux/mailbox/mchp-ipc.h b/include/linux/mailbox/mchp-ipc.h
new file mode 100644
index 000000000000..f084ac9e291b
--- /dev/null
+++ b/include/linux/mailbox/mchp-ipc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _LINUX_MCHP_IPC_H_
+#define _LINUX_MCHP_IPC_H_
+
+#include <linux/mailbox_controller.h>
+#include <linux/types.h>
+
+struct mchp_ipc_msg {
+ u32 *buf;
+ u16 size;
+};
+
+struct mchp_ipc_sbi_chan {
+ void *buf_base_tx;
+ void *buf_base_rx;
+ void *msg_buf_tx;
+ void *msg_buf_rx;
+ phys_addr_t buf_base_tx_addr;
+ phys_addr_t buf_base_rx_addr;
+ phys_addr_t msg_buf_tx_addr;
+ phys_addr_t msg_buf_rx_addr;
+ int chan_aggregated_irq;
+ int mp_irq;
+ int mc_irq;
+ u32 id;
+ u32 max_msg_size;
+};
+
+#endif /* _LINUX_MCHP_IPC_H_ */
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index a8f0070c7aa9..e1555e06e7e5 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -75,9 +75,18 @@ struct cmdq_pkt {
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
- void *cl;
};
+/**
+ * cmdq_get_shift_pa() - get the shift bits of physical address
+ * @chan: mailbox channel
+ *
+ * GCE can only fetch the command buffer address from a 32-bit register.
+ * Some SoCs support command buffer addresses wider than 32 bits, which
+ * requires some shift bits to make the address fit into the 32-bit register.
+ *
+ * Return: the shift bits of physical address
+ */
u8 cmdq_get_shift_pa(struct mbox_chan *chan);
#endif /* __MTK_CMDQ_MAILBOX_H__ */
diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h
new file mode 100644
index 000000000000..e135c6564d0c
--- /dev/null
+++ b/include/linux/mailbox/riscv-rpmi-message.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Ventana Micro Systems Inc. */
+
+#ifndef _LINUX_RISCV_RPMI_MESSAGE_H_
+#define _LINUX_RISCV_RPMI_MESSAGE_H_
+
+#include <linux/errno.h>
+#include <linux/mailbox_client.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
+
+/* RPMI version encode/decode macros */
+#define RPMI_VER_MAJOR(__ver) upper_16_bits(__ver)
+#define RPMI_VER_MINOR(__ver) lower_16_bits(__ver)
+#define RPMI_MKVER(__maj, __min) (((u32)(__maj) << 16) | (u16)(__min))
+
+/* RPMI message header */
+struct rpmi_message_header {
+ __le16 servicegroup_id;
+ u8 service_id;
+ u8 flags;
+ __le16 datalen;
+ __le16 token;
+};
+
+/* RPMI message */
+struct rpmi_message {
+ struct rpmi_message_header header;
+ u8 data[];
+};
+
+/* RPMI notification event */
+struct rpmi_notification_event {
+ __le16 event_datalen;
+ u8 event_id;
+ u8 reserved;
+ u8 event_data[];
+};
+
+/* RPMI error codes */
+enum rpmi_error_codes {
+ RPMI_SUCCESS = 0,
+ RPMI_ERR_FAILED = -1,
+ RPMI_ERR_NOTSUPP = -2,
+ RPMI_ERR_INVALID_PARAM = -3,
+ RPMI_ERR_DENIED = -4,
+ RPMI_ERR_INVALID_ADDR = -5,
+ RPMI_ERR_ALREADY = -6,
+ RPMI_ERR_EXTENSION = -7,
+ RPMI_ERR_HW_FAULT = -8,
+ RPMI_ERR_BUSY = -9,
+ RPMI_ERR_INVALID_STATE = -10,
+ RPMI_ERR_BAD_RANGE = -11,
+ RPMI_ERR_TIMEOUT = -12,
+ RPMI_ERR_IO = -13,
+ RPMI_ERR_NO_DATA = -14,
+ RPMI_ERR_RESERVED_START = -15,
+ RPMI_ERR_RESERVED_END = -127,
+ RPMI_ERR_VENDOR_START = -128,
+};
+
+static inline int rpmi_to_linux_error(int rpmi_error)
+{
+ switch (rpmi_error) {
+ case RPMI_SUCCESS:
+ return 0;
+ case RPMI_ERR_INVALID_PARAM:
+ case RPMI_ERR_BAD_RANGE:
+ case RPMI_ERR_INVALID_STATE:
+ return -EINVAL;
+ case RPMI_ERR_DENIED:
+ return -EPERM;
+ case RPMI_ERR_INVALID_ADDR:
+ case RPMI_ERR_HW_FAULT:
+ return -EFAULT;
+ case RPMI_ERR_ALREADY:
+ return -EALREADY;
+ case RPMI_ERR_BUSY:
+ return -EBUSY;
+ case RPMI_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case RPMI_ERR_IO:
+ return -ECOMM;
+ case RPMI_ERR_FAILED:
+ case RPMI_ERR_NOTSUPP:
+ case RPMI_ERR_NO_DATA:
+ case RPMI_ERR_EXTENSION:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* RPMI service group IDs */
+#define RPMI_SRVGRP_SYSTEM_MSI 0x00002
+#define RPMI_SRVGRP_CLOCK 0x00008
+
+/* RPMI clock service IDs */
+enum rpmi_clock_service_id {
+ RPMI_CLK_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_CLK_SRV_GET_NUM_CLOCKS = 0x02,
+ RPMI_CLK_SRV_GET_ATTRIBUTES = 0x03,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES = 0x04,
+ RPMI_CLK_SRV_SET_CONFIG = 0x05,
+ RPMI_CLK_SRV_GET_CONFIG = 0x06,
+ RPMI_CLK_SRV_SET_RATE = 0x07,
+ RPMI_CLK_SRV_GET_RATE = 0x08,
+ RPMI_CLK_SRV_ID_MAX_COUNT
+};
+
+/* RPMI system MSI service IDs */
+enum rpmi_sysmsi_service_id {
+ RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x02,
+ RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x03,
+ RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x04,
+ RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x05,
+ RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x06,
+ RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x07,
+ RPMI_SYSMSI_SRV_ID_MAX_COUNT
+};
+
+/* RPMI Linux mailbox attribute IDs */
+enum rpmi_mbox_attribute_id {
+ RPMI_MBOX_ATTR_SPEC_VERSION,
+ RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE,
+ RPMI_MBOX_ATTR_SERVICEGROUP_ID,
+ RPMI_MBOX_ATTR_SERVICEGROUP_VERSION,
+ RPMI_MBOX_ATTR_IMPL_ID,
+ RPMI_MBOX_ATTR_IMPL_VERSION,
+ RPMI_MBOX_ATTR_MAX_ID
+};
+
+/* RPMI Linux mailbox message types */
+enum rpmi_mbox_message_type {
+ RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT,
+ RPMI_MBOX_MSG_MAX_TYPE
+};
+
+/* RPMI Linux mailbox message instance */
+struct rpmi_mbox_message {
+ enum rpmi_mbox_message_type type;
+ union {
+ struct {
+ enum rpmi_mbox_attribute_id id;
+ u32 value;
+ } attr;
+
+ struct {
+ u32 service_id;
+ void *request;
+ unsigned long request_len;
+ void *response;
+ unsigned long max_response_len;
+ unsigned long out_response_len;
+ } data;
+
+ struct {
+ u16 event_datalen;
+ u8 event_id;
+ u8 *event_data;
+ } notif;
+ };
+ int error;
+};
+
+/* RPMI Linux mailbox message helper routines */
+static inline void rpmi_mbox_init_get_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_set_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id,
+ u32 value)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = value;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_with_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len,
+ void *response,
+ unsigned long max_response_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = response;
+ msg->data.max_response_len = max_response_len;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_without_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = NULL;
+ msg->data.max_response_len = 0;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void *rpmi_mbox_get_msg_response(struct rpmi_mbox_message *msg)
+{
+ return msg ? msg->data.response : NULL;
+}
+
+static inline int rpmi_mbox_send_message(struct mbox_chan *chan,
+ struct rpmi_mbox_message *msg)
+{
+ int ret;
+
+	/* Send the message over the underlying mailbox channel */
+ ret = mbox_send_message(chan, msg);
+ if (ret < 0)
+ return ret;
+
+	/* Explicitly signal txdone on the mailbox channel */
+ ret = msg->error;
+ mbox_client_txdone(chan, ret);
+ return ret;
+}
+
+#endif /* _LINUX_RISCV_RPMI_MESSAGE_H_ */
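
A hedged sketch of a send-with-response round trip using the helpers above;
the request/response layouts are placeholders, not the actual RPMI clock ABI:

#include <linux/mailbox/riscv-rpmi-message.h>

static int my_get_clock_rate(struct mbox_chan *chan, u32 clk_id, u64 *rate)
{
	struct rpmi_mbox_message msg;
	__le32 req = cpu_to_le32(clk_id);
	__le32 rsp[3];	/* [0] status, [1] rate lo, [2] rate hi (placeholder) */
	int ret;

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
					  &req, sizeof(req),
					  rsp, sizeof(rsp));
	ret = rpmi_mbox_send_message(chan, &msg);
	if (ret)
		return ret;

	ret = rpmi_to_linux_error((s32)le32_to_cpu(rsp[0]));
	if (ret)
		return ret;

	*rate = ((u64)le32_to_cpu(rsp[2]) << 32) | le32_to_cpu(rsp[1]);
	return 0;
}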
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 734694912ef7..c6eea9afb943 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -7,8 +7,8 @@
#ifndef __MAILBOX_CLIENT_H
#define __MAILBOX_CLIENT_H
-#include <linux/of.h>
#include <linux/device.h>
+#include <linux/of.h>
struct mbox_chan;
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 6fee33cb52f5..80a427c7ca29 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -3,11 +3,11 @@
#ifndef __MAILBOX_CONTROLLER_H
#define __MAILBOX_CONTROLLER_H
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/hrtimer.h>
-#include <linux/device.h>
-#include <linux/completion.h>
struct mbox_chan;
@@ -66,6 +66,7 @@ struct mbox_chan_ops {
* no interrupt rises. Ignored if 'txdone_irq' is set.
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
+ * @fw_xlate: Controller driver specific mapping of channel via fwnode
* @of_xlate: Controller driver specific mapping of channel via DT
* @poll_hrt: API private. hrtimer used to poll for TXDONE on all
* channels.
@@ -79,6 +80,8 @@ struct mbox_controller {
bool txdone_irq;
bool txdone_poll;
unsigned txpoll_period;
+ struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp);
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
@@ -134,7 +137,4 @@ void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
int devm_mbox_controller_register(struct device *dev,
struct mbox_controller *mbox);
-void devm_mbox_controller_unregister(struct device *dev,
- struct mbox_controller *mbox);
-
#endif /* __MAILBOX_CONTROLLER_H */
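
The new fw_xlate hook mirrors of_xlate but takes fwnode reference args, so
controllers described via ACPI (or any other fwnode backend) can resolve
channels the same way DT-described ones do. A sketch of what an
implementation might look like, assuming (hypothetically) that the controller
treats the first argument as a flat channel index:

    static struct mbox_chan *example_fw_xlate(struct mbox_controller *mbox,
                                              const struct fwnode_reference_args *sp)
    {
            u64 idx;

            if (sp->nargs < 1)
                    return ERR_PTR(-EINVAL);

            idx = sp->args[0];
            if (idx >= mbox->num_chans)
                    return ERR_PTR(-EINVAL);

            return &mbox->chans[idx];
    }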
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9aae44efcfd4..3be4e567473c 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -97,7 +97,7 @@ int maple_add_packet(struct maple_device *mdev, u32 function,
void maple_clear_dev(struct maple_device *mdev);
#define to_maple_dev(n) container_of(n, struct maple_device, dev)
-#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
+#define to_maple_driver(n) container_of_const(n, struct maple_driver, drv)
#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index a53ad4dabd7e..66f98a3da8d8 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -52,22 +52,22 @@
* bit in the node type. This is possible by using bit 1 to indicate if bit 2
* is part of the type or the slot.
*
- * Once the type is decided, the decision of an allocation range type or a range
- * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE
- * flag.
+ * Once the type is decided, choosing between an allocation range type and a
+ * plain range type is done by checking the immutable tree flags for
+ * MT_FLAGS_ALLOC_RANGE.
*
* Node types:
- * 0x??1 = Root
- * 0x?00 = 16 bit nodes
- * 0x010 = 32 bit nodes
- * 0x110 = 64 bit nodes
+ * 0b??1 = Root
+ * 0b?00 = 16 bit nodes
+ * 0b010 = 32 bit nodes
+ * 0b110 = 64 bit nodes
*
* Slot size and location in the parent pointer:
* type : slot location
- * 0x??1 : Root
- * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
- * 0x010 : 32 bit values, type in 0-2, slot in 3-6
- * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-6
*/
/*
@@ -75,8 +75,8 @@
* searching for gaps or any other code that needs to find the end of the data.
*/
struct maple_metadata {
- unsigned char end;
- unsigned char gap;
+ unsigned char end; /* end of data */
+ unsigned char gap; /* offset of largest gap */
};
/*
@@ -148,6 +148,18 @@ enum maple_type {
maple_arange_64,
};
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
/**
* DOC: Maple tree flags
@@ -182,7 +194,6 @@ enum maple_type {
#define MAPLE_RESERVED_RANGE 4096
#ifdef CONFIG_LOCKDEP
-typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt) \
(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
@@ -195,7 +206,6 @@ typedef struct lockdep_map *lockdep_map_p;
#define mt_on_stack(mt) (mt).ma_external_lock = NULL
#else
-typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt) 1
#define mt_write_lock_is_held(mt) 1
#define mt_set_external_lock(mt, lock) do { } while (0)
@@ -212,14 +222,16 @@ typedef struct { /* nothing */ } lockdep_map_p;
* (set at tree creation time) and dynamic information set under the spinlock.
*
* Another use of flags are to indicate global states of the tree. This is the
- * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
* RCU mode. This mode was added to allow the tree to reuse nodes instead of
* re-allocating and RCU freeing nodes when there is a single user.
*/
struct maple_tree {
union {
- spinlock_t ma_lock;
- lockdep_map_p ma_external_lock;
+ spinlock_t ma_lock;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *ma_external_lock;
+#endif
};
unsigned int ma_flags;
void __rcu *ma_root;
@@ -430,12 +442,15 @@ struct ma_state {
struct maple_enode *node; /* The node containing this entry */
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
- struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* A single allocated node for fast path writes */
+ unsigned long node_request; /* The number of nodes to allocate for this operation */
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
};
struct ma_wr_state {
@@ -450,6 +465,8 @@ struct ma_wr_state {
void __rcu **slots; /* mas->node->slots pointer */
void *entry; /* The entry to write */
void *content; /* The existing entry that is being overwritten */
+ unsigned char vacant_height; /* Height of lowest node with free space */
+ unsigned char sufficient_height;/* Height of lowest node with min sufficiency + 1 nodes */
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
@@ -466,6 +483,9 @@ struct ma_wr_state {
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+/*
+ * When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
+ */
#define MA_STATE(name, mt, first, end) \
struct ma_state name = { \
.tree = mt, \
@@ -475,8 +495,11 @@ struct ma_wr_state {
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
+ .sheaf = NULL, \
.alloc = NULL, \
+ .node_request = 0, \
.mas_flags = 0, \
+ .store_type = wr_invalid, \
}
#define MA_WR_STATE(name, ma_state, wr_entry) \
@@ -484,6 +507,8 @@ struct ma_wr_state {
.mas = ma_state, \
.content = NULL, \
.entry = wr_entry, \
+ .vacant_height = 0, \
+ .sufficient_height = 0 \
}
#define MA_TOPIARY(name, tree) \
@@ -578,6 +603,20 @@ static __always_inline void mas_reset(struct ma_state *mas)
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+/**
+ * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__min: minimum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each_rev(__mas, __entry, __min) \
+ while (((__entry) = mas_find_rev((__mas), (__min))) != NULL)
+
#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
mt_dump_dec,
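
mas_for_each_rev() is the mirror image of mas_for_each(): it repeatedly calls
mas_find_rev(), walking from the state's current position down toward
@__min. A usage sketch under the internal lock; the tree and its contents are
hypothetical:

    void *entry;
    MA_STATE(mas, &example_tree, 100, 100);    /* start at index 100 */

    mas_lock(&mas);
    mas_for_each_rev(&mas, entry, 0) {
            /* mas.index and mas.last hold the full range of 'entry'. */
            pr_info("entry %p spans [%lu, %lu]\n", entry, mas.index, mas.last);
    }
    mas_unlock(&mas);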
diff --git a/include/linux/math.h b/include/linux/math.h
index dd4152711de7..6dc1d1d32fbc 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -34,6 +34,18 @@
*/
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+ * DIV_ROUND_UP_POW2 - divide and round up
+ * @n: numerator
+ * @d: denominator (must be a power of 2)
+ *
+ * Divides @n by @d and rounds the quotient up (@d must be a power of 2).
+ * Avoids the integer overflow that may occur with __KERNEL_DIV_ROUND_UP().
+ * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP().
+ */
+#define DIV_ROUND_UP_POW2(n, d) \
+ ((n) / (d) + !!((n) & ((d) - 1)))
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -112,6 +124,8 @@ struct type##_fract { \
__##type numerator; \
__##type denominator; \
};
+__STRUCT_FRACT(s8)
+__STRUCT_FRACT(u8)
__STRUCT_FRACT(s16)
__STRUCT_FRACT(u16)
__STRUCT_FRACT(s32)
@@ -134,11 +148,16 @@ __STRUCT_FRACT(u32)
/**
* abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
+ * @x: the value.
+ *
+ * If it is unsigned type, @x is converted to signed type first.
+ * char is treated as if it was signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
+ *
+ * NOTE, for signed type if @x is the minimum, the returned result is undefined
+ * as there is not enough bits to represent it as a positive number.
*
- * Return: an absolute value of x.
+ * Return: an absolute value of @x.
*/
#define abs(x) __abs_choose_expr(x, long long, \
__abs_choose_expr(x, long, \
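
To see why the power-of-2 form of DIV_ROUND_UP_POW2() above is overflow-safe
where the generic one is not: __KERNEL_DIV_ROUND_UP() computes
(n + d - 1) / d, and that addition can wrap near the top of the type's range,
whereas DIV_ROUND_UP_POW2() never adds anything to @n. Illustrative
compile-time checks, assuming <linux/build_bug.h> for static_assert():

    static_assert(DIV_ROUND_UP_POW2(10, 4) == 3);
    static_assert(DIV_ROUND_UP_POW2(U32_MAX, 4096) == (U32_MAX >> 12) + 1);
    /* By contrast, (U32_MAX + 4095) wraps in 32-bit arithmetic. */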
diff --git a/include/linux/math64.h b/include/linux/math64.h
index d34def7f9a8c..cc305206d89f 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
}
#endif
+#ifndef add_u64_u32
+/*
+ * Many a GCC version also messes this up: zero-extending 'b' and then
+ * spilling everything to the stack.
+ */
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+ return a + b;
+}
+#endif
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
@@ -282,7 +293,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
-u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * add a third value and then divide by a fourth.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+ ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
/**
* DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
@@ -298,6 +355,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
/**
+ * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV_U64_ROUND_UP(ll, d) \
+ ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
+
+/**
* DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -342,4 +412,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
div_s64((__x - (__d / 2)), __d); \
} \
)
+
+/**
+ * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
+ * @x: the value to round up
+ * @y: 32bit multiple to round up to
+ *
+ * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup() and
+ * the faster round_up() for powers of 2.
+ *
+ * Return: rounded up value.
+ */
+static inline u64 roundup_u64(u64 x, u32 y)
+{
+ return DIV_U64_ROUND_UP(x, y) * y;
+}
#endif /* _LINUX_MATH64_H */
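
The roundup variant simply feeds @d - 1 through the new addend parameter, so
a scaling computation that needs the full 128-bit intermediate product stays
a one-liner. A sketch; the names and units are hypothetical:

    /* Ticks needed to cover 'bytes' at 'rate' bytes per second, with a
     * timebase of 'hz' ticks per second; bytes * hz can exceed 64 bits.
     */
    static u64 example_bytes_to_ticks(u64 bytes, u64 hz, u64 rate)
    {
            return mul_u64_u64_div_u64_roundup(bytes, hz, rate);
    }

    /* roundup_u64() composes DIV_U64_ROUND_UP() with a multiply:
     * roundup_u64(1000, 512) == 1024.
     */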
diff --git a/include/linux/mc33xs2410.h b/include/linux/mc33xs2410.h
new file mode 100644
index 000000000000..31c0edf10dd7
--- /dev/null
+++ b/include/linux/mc33xs2410.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ */
+#ifndef _MC33XS2410_H
+#define _MC33XS2410_H
+
+#include <linux/spi/spi.h>
+
+MODULE_IMPORT_NS("PWM_MC33XS2410");
+
+int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_read_reg_diag(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val);
+
+#endif /* _MC33XS2410_H */
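
A module that consumes these helpers must import the PWM_MC33XS2410
namespace, as this header already does via MODULE_IMPORT_NS(). A sketch of a
consumer; the register address and bit handling are hypothetical:

    static int example_read_diag(struct spi_device *spi)
    {
            u16 val;
            int ret;

            ret = mc33xs2410_read_reg_diag(spi, 0x02 /* hypothetical reg */, &val);
            if (ret)
                    return ret;

            /* Clear a (hypothetical) fault bit via read-modify-write. */
            return mc33xs2410_modify_reg(spi, 0x02, BIT(0), 0);
    }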
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 0b971b24a804..4ab2691f51a6 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -94,10 +94,7 @@ struct mcb_driver {
void (*shutdown)(struct mcb_device *mdev);
};
-static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
-{
- return container_of(drv, struct mcb_driver, driver);
-}
+#define to_mcb_driver(__drv) container_of_const(__drv, struct mcb_driver, driver)
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 68f8d2e970d4..42d6d47e445b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -31,7 +31,7 @@ struct mdio_device {
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
- int (*bus_match)(struct device *dev, struct device_driver *drv);
+ int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
void (*device_remove)(struct mdio_device *mdiodev);
@@ -45,10 +45,7 @@ struct mdio_device {
unsigned int reset_deassert_delay;
};
-static inline struct mdio_device *to_mdio_device(const struct device *dev)
-{
- return container_of(dev, struct mdio_device, dev);
-}
+#define to_mdio_device(__dev) container_of_const(__dev, struct mdio_device, dev)
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -57,11 +54,8 @@ struct mdio_driver_common {
};
#define MDIO_DEVICE_FLAG_PHY 1
-static inline struct mdio_driver_common *
-to_mdio_common_driver(const struct device_driver *driver)
-{
- return container_of(driver, struct mdio_driver_common, driver);
-}
+#define to_mdio_common_driver(__drv_c) container_of_const(__drv_c, struct mdio_driver_common, \
+ driver)
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -80,12 +74,8 @@ struct mdio_driver {
void (*shutdown)(struct mdio_device *mdiodev);
};
-static inline struct mdio_driver *
-to_mdio_driver(const struct device_driver *driver)
-{
- return container_of(to_mdio_common_driver(driver), struct mdio_driver,
- mdiodrv);
-}
+#define to_mdio_driver(__drv_m) container_of_const(to_mdio_common_driver(__drv_m), \
+ struct mdio_driver, mdiodrv)
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
@@ -105,7 +95,6 @@ void mdio_device_remove(struct mdio_device *mdiodev);
void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
static inline void mdio_device_get(struct mdio_device *mdiodev)
{
@@ -172,31 +161,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio,
bool sense);
extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
-extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa);
extern void
mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
struct ethtool_link_ksettings *cmd,
u32 npage_adv, u32 npage_lpa);
/**
- * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * Since the CSRs for auto-negotiation using next pages are not fully
- * standardised, this function does not attempt to decode them. Use
- * mdio45_ethtool_gset_npage() to specify advertisement bits from next
- * pages.
- */
-static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd)
-{
- mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
-}
-
-/**
* mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
* @mdio: MDIO interface
* @cmd: Ethtool request structure
@@ -719,16 +689,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
* init/exit. Each module may only use this macro once, and calling it
* replaces module_init() and module_exit().
*/
-#define mdio_module_driver(_mdio_driver) \
-static int __init mdio_module_init(void) \
-{ \
- return mdio_driver_register(&_mdio_driver); \
-} \
-module_init(mdio_module_init); \
-static void __exit mdio_module_exit(void) \
-{ \
- mdio_driver_unregister(&_mdio_driver); \
-} \
-module_exit(mdio_module_exit)
+#define mdio_module_driver(_mdio_driver) \
+ module_driver(_mdio_driver, mdio_driver_register, mdio_driver_unregister)
#endif /* __LINUX_MDIO_H__ */
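
With the boilerplate folded into module_driver(), a single-driver MDIO module
reduces to the following sketch; the driver name and callback bodies are
hypothetical:

    static int example_mdio_probe(struct mdio_device *mdiodev)
    {
            return 0;       /* hypothetical device setup */
    }

    static void example_mdio_remove(struct mdio_device *mdiodev)
    {
            /* hypothetical teardown */
    }

    static struct mdio_driver example_mdio_driver = {
            .mdiodrv.driver = {
                    .name = "example-mdio",         /* hypothetical */
            },
            .probe = example_mdio_probe,
            .remove = example_mdio_remove,
    };
    mdio_module_driver(example_mdio_driver);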
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index b38a56a13f39..a82755e1fc40 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -97,8 +97,6 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length);
ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
unsigned long timeout);
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
@@ -107,8 +105,6 @@ ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
-ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
- size_t length, u8 *vtag);
ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag, unsigned long timeout);
@@ -116,8 +112,8 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb);
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev);
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index ae4526389261..07584c5e36fb 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -26,11 +26,34 @@
*/
#define __sme_set(x) ((x) | sme_me_mask)
#define __sme_clr(x) ((x) & ~sme_me_mask)
+
+#define dma_addr_encrypted(x) __sme_set(x)
+#define dma_addr_canonical(x) __sme_clr(x)
+
#else
#define __sme_set(x) (x)
#define __sme_clr(x) (x)
#endif
+/*
+ * dma_addr_encrypted() and dma_addr_unencrypted() convert a given DMA
+ * address to the encrypted or unencrypted view, respectively.
+ *
+ * dma_addr_canonical() reverses either conversion, returning the
+ * canonical address.
+ */
+#ifndef dma_addr_encrypted
+#define dma_addr_encrypted(x) (x)
+#endif
+
+#ifndef dma_addr_unencrypted
+#define dma_addr_unencrypted(x) (x)
+#endif
+
+#ifndef dma_addr_canonical
+#define dma_addr_canonical(x) (x)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __MEM_ENCRYPT_H__ */
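
The fallback definitions are identity macros, so shared code can tag and
untag DMA addresses unconditionally; only SME-style implementations fold a
mask in and out. A round-trip sketch:

    static void example_dma_round_trip(dma_addr_t dma)
    {
            /* Applies sme_me_mask where one exists, else a no-op. */
            dma_addr_t enc = dma_addr_encrypted(dma);

            WARN_ON(dma_addr_canonical(enc) != dma);
    }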
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e2082240586d..221118b5a16e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -40,8 +40,17 @@ extern unsigned long long max_possible_pfn;
* via a driver, and never indicated in the firmware-provided memory map as
* system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
* kernel resource tree.
- * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
- * not initialized (only for reserved regions).
+ * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
+ * fully initialized. Users of this flag are responsible for properly
+ * initializing the struct pages of this region.
+ * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
+ * either explicitly with memblock_reserve_kern() or via memblock
+ * allocation APIs. All memblock allocations set this flag.
+ * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
+ * kernel in handover mode. During early boot, we do not know about all
+ * memory reservations yet, so we get scratch memory from the previous
+ * kernel that we know is good to use. It is the only memory that
+ * allocations may be served from during this phase.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
@@ -50,6 +59,8 @@ enum memblock_flags {
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
+ MEMBLOCK_RSRV_KERN = 0x20, /* memory reserved for kernel use */
+ MEMBLOCK_KHO_SCRATCH = 0x40, /* scratch memory for kexec handover */
};
/**
@@ -116,7 +127,19 @@ int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
-int memblock_reserve(phys_addr_t base, phys_addr_t size);
+int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
+
+static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
+}
+
+static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
@@ -132,8 +155,9 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
+int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
+int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);
-void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);
@@ -276,6 +300,11 @@ static inline bool memblock_is_driver_managed(struct memblock_region *m)
return m->flags & MEMBLOCK_DRIVER_MANAGED;
}
+static inline bool memblock_is_kho_scratch(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_KHO_SCRATCH;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -295,49 +324,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
- unsigned long *out_spfn,
- unsigned long *out_epfn);
-/**
- * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
- * memblock areas
- * @i: u64 used as loop variable
- * @zone: zone in which all of the memory blocks reside
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over free (memory && !reserved) areas of memblock in a specific
- * zone. Available once memblock and an empty zone is initialized. The main
- * assumption is that the zone start, end, and pgdat have been associated.
- * This way we can use the zone to determine NUMA node, and if a given part
- * of the memblock is valid for the zone.
- */
-#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
- for (i = 0, \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \
- i != U64_MAX; \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
-
-/**
- * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
- * free memblock areas from a given point
- * @i: u64 used as loop variable
- * @zone: zone in which all of the memory blocks reside
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over free (memory && !reserved) areas of memblock in a specific
- * zone, continuing from current position. Available as soon as memblock is
- * initialized.
- */
-#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
- for (; i != U64_MAX; \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
-
-int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
-
-#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
* for_each_free_mem_range - iterate through free memblock areas
@@ -399,6 +385,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE.
+ */
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
@@ -438,6 +428,12 @@ static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
+void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
+ const char *func);
+
+#define memblock_alloc_or_panic(size, align) \
+ __memblock_alloc_or_panic(size, align, __func__)
+
static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
@@ -446,7 +442,7 @@ static inline void *memblock_alloc_raw(phys_addr_t size,
NUMA_NO_NODE);
}
-static inline void *memblock_alloc_from(phys_addr_t size,
+static __always_inline void *memblock_alloc_from(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
@@ -488,6 +484,8 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
@@ -565,7 +563,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
}
/**
- * for_each_mem_region - itereate over memory regions
+ * for_each_mem_region - iterate over memory regions
* @region: loop variable
*/
#define for_each_mem_region(region) \
@@ -613,5 +611,14 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
+#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
+void memblock_set_kho_scratch_only(void);
+void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
+#else
+static inline void memblock_set_kho_scratch_only(void) { }
+static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
+#endif
#endif /* _LINUX_MEMBLOCK_H */
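
memblock_alloc_or_panic() replaces the open-coded "if (!ptr) panic(...)"
pattern in early-boot callers, reporting __func__ in the panic message, while
memblock_reserve_kern() tags a range with MEMBLOCK_RSRV_KERN so that
memblock_reserved_kern_size() can account for it. A sketch of an early-boot
caller; the sizes and the reserved range are hypothetical:

    static void __init example_early_setup(void)
    {
            /* Never returns NULL; panics with this function's name. */
            void *table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);

            /* Reserve a (hypothetical) firmware range as kernel-owned. */
            memblock_reserve_kern(0x80000000ULL, SZ_1M);

            memset(table, 0, PAGE_SIZE);
    }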
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 030d34e9d117..0651865a4564 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -52,12 +52,13 @@ enum memcg_memory_event {
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
+ MEMCG_SOCK_THROTTLED,
MEMCG_NR_MEMORY_EVENTS,
};
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
@@ -69,19 +70,8 @@ struct mem_cgroup_id {
refcount_t ref;
};
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremented by the number of pages. This counter is used
- * to trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_NTARGETS,
-};
-
struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;
@@ -89,30 +79,47 @@ struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
+ atomic_t generation;
};
/*
* per-node information in memory controller.
*/
struct mem_cgroup_per_node {
- struct lruvec lruvec;
+ /* Keep the read-only fields at the start */
+	struct mem_cgroup *memcg;	/* Back pointer; we cannot use container_of */
struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
struct lruvec_stats *lruvec_stats;
-
- unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
-
- struct mem_cgroup_reclaim_iter iter;
-
struct shrinker_info __rcu *shrinker_info;
+#ifdef CONFIG_MEMCG_V1
+ /*
+	 * Memcg-v1-only fields sit in the middle as a buffer between the
+	 * read-mostly fields above and the often-updated fields below, to
+	 * avoid false sharing. If the v1 fields are not present, explicit
+	 * padding is needed instead.
+ */
+
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
- struct mem_cgroup *memcg; /* Back pointer, we cannot */
- /* use container_of */
+#else
+ CACHELINE_PADDING(_pad1_);
+#endif
+
+ /* Fields which get updated often at the end. */
+ struct lruvec lruvec;
+ CACHELINE_PADDING(_pad2_);
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
+ struct mem_cgroup_reclaim_iter iter;
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* slab stats for nmi context */
+ atomic_t slab_reclaimable;
+ atomic_t slab_unreclaimable;
+#endif
};
struct mem_cgroup_threshold {
@@ -194,14 +201,15 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
- /* Legacy consumer-oriented counters */
- struct page_counter kmem; /* v1 only */
- struct page_counter tcpmem; /* v1 only */
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
/* Range enforcement for interrupt charges */
struct work_struct high_work;
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+#ifdef CONFIG_ZSWAP
unsigned long zswap_max;
/*
@@ -211,8 +219,6 @@ struct mem_cgroup {
bool zswap_writeback;
#endif
- unsigned long soft_limit;
-
/* vmpressure notifications */
struct vmpressure vmpressure;
@@ -221,13 +227,7 @@ struct mem_cgroup {
*/
bool oom_group;
- /* protected by memcg_oom_lock */
- bool oom_lock;
- int under_oom;
-
- int swappiness;
- /* OOM-Killer disable */
- int oom_kill_disable;
+ int swappiness;
/* memory.events and memory.events.local */
struct cgroup_file events_file;
@@ -236,29 +236,6 @@ struct mem_cgroup {
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
- /* protect arrays of thresholds */
- struct mutex thresholds_lock;
-
- /* thresholds for memory usage. RCU-protected */
- struct mem_cgroup_thresholds thresholds;
-
- /* thresholds for mem+swap usage. RCU-protected */
- struct mem_cgroup_thresholds memsw_thresholds;
-
- /* For oom notifier event fd */
- struct list_head oom_notify;
-
- /*
- * Should we move charges of a task when a task is moved into this
- * mem_cgroup ? And what type of charges should we move ?
- */
- unsigned long move_charge_at_immigrate;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
- unsigned long move_lock_flags;
-
- CACHELINE_PADDING(_pad1_);
-
/* memory.stat */
struct memcg_vmstats *vmstats;
@@ -266,18 +243,19 @@ struct mem_cgroup {
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* MEMCG_KMEM for nmi context */
+ atomic_t kmem_stat;
+#endif
/*
	 * Hint of reclaim pressure for socket memory management. Note
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
- unsigned long socket_pressure;
-
- /* Legacy tcp memory accounting */
- bool tcpmem_active;
- int tcpmem_pressure;
-
-#ifdef CONFIG_MEMCG_KMEM
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
int kmemcg_id;
/*
	 * memcg->objcg is wiped out as a part of the objcg reparenting
@@ -288,15 +266,6 @@ struct mem_cgroup {
struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
-#endif
-
- CACHELINE_PADDING(_pad2_);
-
- /*
- * set > 0 if pages under this cgroup are moving to other cgroup.
- */
- atomic_t moving_account;
- struct task_struct *move_lock_task;
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
@@ -306,10 +275,6 @@ struct mem_cgroup {
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
- /* List of events which userspace want to receive */
- struct list_head event_list;
- spinlock_t event_list_lock;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue;
#endif
@@ -319,6 +284,43 @@ struct mem_cgroup {
struct lru_gen_mm_list mm_list;
#endif
+#ifdef CONFIG_MEMCG_V1
+ /* Legacy consumer-oriented counters */
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
+
+ struct memcg1_events_percpu __percpu *events_percpu;
+
+ unsigned long soft_limit;
+
+ /* protected by memcg_oom_lock */
+ bool oom_lock;
+ int under_oom;
+
+ /* OOM-Killer disable */
+ int oom_kill_disable;
+
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /* Legacy tcp memory accounting */
+ bool tcpmem_active;
+ int tcpmem_pressure;
+
+ /* List of events which userspace want to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+#endif /* CONFIG_MEMCG_V1 */
+
struct mem_cgroup_per_node *nodeinfo[];
};
@@ -340,17 +342,25 @@ enum page_memcg_data_flags {
__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
#else /* CONFIG_MEMCG */
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
#define __FIRST_OBJEXT_FLAG (1UL << 0)
#endif /* CONFIG_MEMCG */
enum objext_flags {
- /* slabobj_ext vector failed to allocate */
- OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
+ /*
+ * Use bit 0 with zero other bits to signal that slabobj_ext vector
+ * failed to allocate. The same bit 0 with valid upper bits means
+ * MEMCG_DATA_OBJEXTS.
+ */
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
/* the next bit after the last actual flag */
__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};
@@ -365,11 +375,11 @@ static inline bool folio_memcg_kmem(struct folio *folio);
* After the initialization objcg->memcg is always pointing at
* a valid memcg, but can be atomically swapped to the parent memcg.
*
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
+ * The caller must ensure that the returned memcg won't be released.
*/
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
return READ_ONCE(objcg->memcg);
}
@@ -429,9 +439,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
*
* - the folio lock
* - LRU isolation
- * - folio_memcg_lock()
* - exclusive reference
- * - mem_cgroup_trylock_pages()
*
* For a kmem folio a caller should hold an rcu read lock to protect memcg
* associated with a kmem folio from being released.
@@ -443,37 +451,15 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio)
return __folio_memcg(folio);
}
-static inline struct mem_cgroup *page_memcg(struct page *page)
-{
- return folio_memcg(page_folio(page));
-}
-
-/**
- * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+/*
+ * folio_memcg_charged - Tell whether a folio is charged to a memory cgroup.
* @folio: Pointer to the folio.
*
- * This function assumes that the folio is known to have a
- * proper memory cgroup pointer. It's not safe to call this function
- * against some type of folios, e.g. slab folios or ex-slab folios.
- *
- * Return: A pointer to the memory cgroup associated with the folio,
- * or NULL.
+ * Returns true if the folio is charged to a memory cgroup, false otherwise.
*/
-static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+static inline bool folio_memcg_charged(struct folio *folio)
{
- unsigned long memcg_data = READ_ONCE(folio->memcg_data);
-
- VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (memcg_data & MEMCG_DATA_KMEM) {
- struct obj_cgroup *objcg;
-
- objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
- return obj_cgroup_memcg(objcg);
- }
-
- return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
+ return folio->memcg_data != 0;
}
/*
@@ -491,9 +477,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
*
* - the folio lock
* - LRU isolation
- * - lock_folio_memcg()
* - exclusive reference
- * - mem_cgroup_trylock_pages()
*
* For a kmem folio a caller should hold an rcu read lock to protect memcg
* associated with a kmem folio from being released.
@@ -540,7 +524,6 @@ retry:
return memcg;
}
-#ifdef CONFIG_MEMCG_KMEM
/*
* folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
* @folio: Pointer to the folio.
@@ -556,15 +539,6 @@ static inline bool folio_memcg_kmem(struct folio *folio)
return folio->memcg_data & MEMCG_DATA_KMEM;
}
-
-#else
-static inline bool folio_memcg_kmem(struct folio *folio)
-{
- return false;
-}
-
-#endif
-
static inline bool PageMemcgKmem(struct page *page)
{
return folio_memcg_kmem(page_folio(page));
@@ -665,8 +639,6 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
page_counter_read(&memcg->memory);
}
-void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
-
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
/**
@@ -691,12 +663,10 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
-int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
- long nr_pages);
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
void __mem_cgroup_uncharge(struct folio *folio);
@@ -721,7 +691,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
__mem_cgroup_uncharge_folios(folios);
}
-void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);
@@ -781,6 +750,8 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
struct mem_cgroup *get_mem_cgroup_from_current(void);
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -938,7 +909,13 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(gfp_t gfp_mask);
+void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
+
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+{
+ if (unlikely(current->memcg_nr_pages_over_high))
+ __mem_cgroup_handle_over_high(gfp_mask);
+}
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -949,61 +926,13 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
-static inline void mem_cgroup_enter_user_fault(void)
-{
- WARN_ON(current->in_user_fault);
- current->in_user_fault = 1;
-}
-
-static inline void mem_cgroup_exit_user_fault(void)
-{
- WARN_ON(!current->in_user_fault);
- current->in_user_fault = 0;
-}
-
-static inline bool task_in_memcg_oom(struct task_struct *p)
-{
- return p->memcg_in_oom;
-}
-
-bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-void folio_memcg_lock(struct folio *folio);
-void folio_memcg_unlock(struct folio *folio);
-
-void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
- int val);
-
-/* try to stablize folio_memcg() for all the pages in a memcg */
-static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
-{
- rcu_read_lock();
-
- if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
- return true;
-
- rcu_read_unlock();
- return false;
-}
-
-static inline void mem_cgroup_unlock_pages(void)
-{
- rcu_read_unlock();
-}
-
/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_memcg_state(memcg, idx, val);
- local_irq_restore(flags);
-}
+void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val);
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
@@ -1014,7 +943,7 @@ static inline void mod_memcg_page_state(struct page *page,
return;
rcu_read_lock();
- memcg = page_memcg(page);
+ memcg = folio_memcg(page_folio(page));
if (memcg)
mod_memcg_state(memcg, idx, val);
rcu_read_unlock();
@@ -1028,31 +957,10 @@ unsigned long lruvec_page_state_local(struct lruvec *lruvec,
void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_kmem_state(p, idx, val);
- local_irq_restore(flags);
-}
-
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
- unsigned long count);
-
-static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __count_memcg_events(memcg, idx, count);
- local_irq_restore(flags);
-}
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
@@ -1063,8 +971,8 @@ static inline void count_memcg_folio_events(struct folio *folio,
count_memcg_events(memcg, idx, nr);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1074,33 +982,23 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg))
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
-static inline void memcg_memory_event(struct mem_cgroup *memcg,
- enum memcg_memory_event event)
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
{
- bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
- event == MEMCG_SWAP_FAIL;
+ count_memcg_events_mm(mm, idx, 1);
+}
- atomic_long_inc(&memcg->memory_events_local[event]);
- if (!swap_event)
- cgroup_file_notify(&memcg->events_local_file);
+void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event, bool allow_spinning);
- do {
- atomic_long_inc(&memcg->memory_events[event]);
- if (swap_event)
- cgroup_file_notify(&memcg->swap_events_file);
- else
- cgroup_file_notify(&memcg->events_file);
-
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- break;
- if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
- break;
- } while ((memcg = parent_mem_cgroup(memcg)) &&
- !mem_cgroup_is_root(memcg));
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ __memcg_memory_event(memcg, event, true);
}
static inline void memcg_memory_event_mm(struct mm_struct *mm,
@@ -1118,30 +1016,42 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-void split_page_memcg(struct page *head, int old_order, int new_order);
+void split_page_memcg(struct page *first, unsigned order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order);
+
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ u64 id;
+
+ if (mem_cgroup_disabled())
+ return 0;
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ id = cgroup_id(memcg->css.cgroup);
+ rcu_read_unlock();
+ return id;
+}
+extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
-static inline struct mem_cgroup *folio_memcg(struct folio *folio)
-{
- return NULL;
-}
+#define root_mem_cgroup (NULL)
-static inline struct mem_cgroup *page_memcg(struct page *page)
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
return NULL;
}
-static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
+static inline bool folio_memcg_charged(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return NULL;
+ return false;
}
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
@@ -1219,21 +1129,15 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
return false;
}
-static inline void mem_cgroup_commit_charge(struct folio *folio,
- struct mem_cgroup *memcg)
-{
-}
-
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
-static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
- gfp_t gfp, long nr_pages)
+static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
- return 0;
+ return 0;
}
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
@@ -1242,10 +1146,6 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
-{
-}
-
static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
@@ -1254,11 +1154,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}
-static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
- unsigned int nr_pages)
-{
-}
-
static inline void mem_cgroup_replace_folio(struct folio *old,
struct folio *new)
{
@@ -1306,12 +1201,21 @@ static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
return NULL;
}
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+}
+
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}
@@ -1439,48 +1343,10 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
-static inline void folio_memcg_lock(struct folio *folio)
-{
-}
-
-static inline void folio_memcg_unlock(struct folio *folio)
-{
-}
-
-static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
-{
- /* to match folio_memcg_rcu() */
- rcu_read_lock();
- return true;
-}
-
-static inline void mem_cgroup_unlock_pages(void)
-{
- rcu_read_unlock();
-}
-
static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
-static inline void mem_cgroup_enter_user_fault(void)
-{
-}
-
-static inline void mem_cgroup_exit_user_fault(void)
-{
-}
-
-static inline bool task_in_memcg_oom(struct task_struct *p)
-{
- return false;
-}
-
-static inline bool mem_cgroup_oom_synchronize(bool wait)
-{
- return false;
-}
-
static inline struct mem_cgroup *mem_cgroup_get_oom_group(
struct task_struct *victim, struct mem_cgroup *oom_domain)
{
@@ -1491,12 +1357,6 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
- int nr)
-{
-}
-
static inline void mod_memcg_state(struct mem_cgroup *memcg,
enum memcg_stat_item idx,
int nr)
@@ -1533,14 +1393,6 @@ static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
-static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- struct page *page = virt_to_head_page(p);
-
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
int val)
{
@@ -1550,12 +1402,6 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
-{
-}
-
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
enum vm_event_item idx,
unsigned long count)
{
@@ -1566,22 +1412,31 @@ static inline void count_memcg_folio_events(struct folio *folio,
{
}
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
+{
+}
+
static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-static inline void split_page_memcg(struct page *head, int old_order, int new_order)
+static inline void split_page_memcg(struct page *first, unsigned order)
{
}
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+static inline void folio_split_memcg_refs(struct folio *folio,
+ unsigned old_order, unsigned new_order)
+{
+}
+
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
return 0;
}
+
+static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */
/*
@@ -1589,7 +1444,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
* if MEMCG_DATA_OBJEXTS is set.
*/
struct slabobj_ext {
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -1597,16 +1452,6 @@ struct slabobj_ext {
#endif
} __aligned(8);
-static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_kmem_state(p, idx, 1);
-}
-
-static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_kmem_state(p, idx, -1);
-}
-
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
struct mem_cgroup *memcg;
@@ -1636,7 +1481,7 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
-/* Test requires a stable page->memcg binding, see page_memcg() */
+/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
struct lruvec *lruvec)
{
@@ -1724,45 +1569,100 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
- gfp_t gfp_mask);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return !!memcg->tcpmem_pressure;
+ unsigned int seq;
+ u64 val;
+
do {
- if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
}
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
+
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return shrinker->id;
+}
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
+
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return -1;
+}
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
@@ -1827,11 +1727,11 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1840,10 +1740,19 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_lock();
memcg = obj_cgroup_memcg(objcg);
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
+bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return memcg ? css_is_dying(&memcg->css) : false;
+}
+
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1890,24 +1799,38 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
return -1;
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
return NULL;
}
-static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
{
- return NULL;
}
-static inline void count_objcg_event(struct obj_cgroup *objcg,
- enum vm_event_item idx)
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+ return 0;
+}
+
+static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
{
+ return true;
}
-#endif /* CONFIG_MEMCG_KMEM */
+static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+}
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return false;
+}
+#endif /* CONFIG_MEMCG */
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
@@ -1932,4 +1855,71 @@ static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
}
#endif
+
+/* Cgroup v1-related declarations */
+
+#ifdef CONFIG_MEMCG_V1
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_in_oom;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+ WARN_ON(current->in_user_fault);
+ current->in_user_fault = 1;
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
+{
+ WARN_ON(!current->in_user_fault);
+ current->in_user_fault = 0;
+}
+
+void memcg1_swapout(struct folio *folio, swp_entry_t entry);
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+
+#else /* CONFIG_MEMCG_V1 */
+static inline
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ return 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+ return false;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
+{
+}
+
+static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
+{
+}
+
+static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
+{
+}
+
+#endif /* CONFIG_MEMCG_V1 */
+
#endif /* _LINUX_MEMCONTROL_H */
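
The 32-bit branch earlier in this memcontrol.h hunk exists because socket_pressure is a 64-bit jiffies value that a 32-bit CPU cannot load or store in one instruction; the seqlock makes readers retry instead of observing a torn half-update, while 64-bit builds get by with READ_ONCE()/WRITE_ONCE(). A deliberately simplified user-space analogue of that retry protocol, assuming a single writer and C11 atomics (real kernel code should use seqlock_t, which also gets the memory-ordering subtleties right):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned int seq;  /* even: stable, odd: write in progress */
static uint64_t stamp;            /* 64-bit datum that may tear on 32-bit */

static void write_stamp(uint64_t v)   /* single writer assumed */
{
	atomic_fetch_add(&seq, 1);    /* odd: tell readers to retry */
	stamp = v;
	atomic_fetch_add(&seq, 1);    /* even: value is stable again */
}

static uint64_t read_stamp(void)
{
	unsigned int s;
	uint64_t v;

	do {
		s = atomic_load(&seq);
		v = stamp;
	} while ((s & 1) || s != atomic_load(&seq));

	return v;
}
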
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index e7abf6fa4c52..cc74de3dbcfe 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -4,13 +4,33 @@
#include <linux/file.h>
+#define MEMFD_ANON_NAME "[memfd]"
+
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+/*
+ * Check for any existing seals on mmap, return an error if access is denied due
+ * to sealing, or 0 otherwise.
+ *
+ * We also update VMA flags if appropriate by manipulating the VMA flags pointed
+ * to by vm_flags_ptr.
+ */
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
return -EINVAL;
}
+static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int memfd_check_seals_mmap(struct file *file,
+ vm_flags_t *vm_flags_ptr)
+{
+ return 0;
+}
#endif
#endif /* __LINUX_MEMFD_H */
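
What memfd_check_seals_mmap() enforces is visible from user space: once F_SEAL_WRITE is applied, a shared writable mapping must be refused. A small self-contained demonstration using the standard memfd_create()/fcntl() API (nothing here is new in this patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_ALLOW_SEALING);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) < 0)
		return 1;

	/* The kernel's mmap path must now reject shared writable mappings. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	printf("MAP_SHARED rw after F_SEAL_WRITE: %s\n",
	       p == MAP_FAILED ? "refused, as expected" : "unexpectedly allowed");
	return 0;
}
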
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
new file mode 100644
index 000000000000..bc326503d2d2
--- /dev/null
+++ b/include/linux/memory-failure.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_FAILURE_H
+#define _LINUX_MEMORY_FAILURE_H
+
+#include <linux/interval_tree.h>
+
+struct pfn_address_space;
+
+struct pfn_address_space {
+ struct interval_tree_node node;
+ struct address_space *mapping;
+};
+
+int register_pfn_address_space(struct pfn_address_space *pfn_space);
+void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+
+#endif /* _LINUX_MEMORY_FAILURE_H */
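
register_pfn_address_space() lets a driver that hands raw PFN ranges to user space associate those PFNs with an address_space, so the memory-failure code can unmap them on poison. A hypothetical registration sketch (the interval_tree_node start/last fields are the standard interval-tree API; everything named "mydrv" is invented for illustration):

#include <linux/memory-failure.h>

struct mydrv {                       /* hypothetical driver state */
	struct pfn_address_space pfn_space;
	struct address_space *mapping;
};

static int mydrv_publish_pfns(struct mydrv *drv, unsigned long start_pfn,
			      unsigned long nr_pfns)
{
	drv->pfn_space.node.start = start_pfn;
	drv->pfn_space.node.last = start_pfn + nr_pfns - 1; /* inclusive */
	drv->pfn_space.mapping = drv->mapping;

	return register_pfn_address_space(&drv->pfn_space);
}

static void mydrv_unpublish_pfns(struct mydrv *drv)
{
	unregister_pfn_address_space(&drv->pfn_space);
}
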
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 0d70788558f4..7a805796fcfd 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -18,7 +18,7 @@
* adistance value (slightly faster) than default DRAM adistance to be part of
* the same memory tier.
*/
-#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
struct memory_tier;
struct memory_dev_type {
@@ -38,6 +38,7 @@ struct access_coordinate;
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
extern struct memory_dev_type *default_dram_type;
+extern nodemask_t default_dram_nodes;
struct memory_dev_type *alloc_memory_type(int adistance);
void put_memory_type(struct memory_dev_type *memtype);
void init_node_memory_type(int node, struct memory_dev_type *default_type);
@@ -76,6 +77,7 @@ static inline bool node_is_toptier(int node)
#define numa_demotion_enabled false
#define default_dram_type NULL
+#define default_dram_nodes NODE_MASK_NONE
/*
* CONFIG_NUMA implementation returns non NULL error.
*/
diff --git a/include/linux/memory.h b/include/linux/memory.h
index c0afee5d126e..faeaa921e55b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -25,7 +25,7 @@
/**
* struct memory_group - a logical group of memory blocks
* @nid: The node id for all memory blocks inside the memory group.
- * @blocks: List of all memory blocks belonging to this memory group.
+ * @memory_blocks: List of all memory blocks belonging to this memory group.
* @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
* memory group.
* @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
@@ -64,9 +64,19 @@ struct memory_group {
};
};
+enum memory_block_state {
+ /* These states are exposed to userspace as text strings in sysfs */
+ MEM_ONLINE, /* exposed to userspace */
+ MEM_GOING_OFFLINE, /* exposed to userspace */
+ MEM_OFFLINE, /* exposed to userspace */
+ MEM_GOING_ONLINE,
+ MEM_CANCEL_ONLINE,
+ MEM_CANCEL_OFFLINE,
+};
+
struct memory_block {
unsigned long start_section_nr;
- unsigned long state; /* serialized by the dev->lock */
+ enum memory_block_state state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
int nid; /* NID for this memory block */
/*
@@ -89,41 +99,22 @@ int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
int set_memory_block_size_order(unsigned int order);
-/* These states are exposed to userspace as text strings in sysfs */
-#define MEM_ONLINE (1<<0) /* exposed to userspace */
-#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
-#define MEM_OFFLINE (1<<2) /* exposed to userspace */
-#define MEM_GOING_ONLINE (1<<3)
-#define MEM_CANCEL_ONLINE (1<<4)
-#define MEM_CANCEL_OFFLINE (1<<5)
-#define MEM_PREPARE_ONLINE (1<<6)
-#define MEM_FINISH_OFFLINE (1<<7)
-
struct memory_notify {
- /*
- * The altmap_start_pfn and altmap_nr_pages fields are designated for
- * specifying the altmap range and are exclusively intended for use in
- * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
- */
- unsigned long altmap_start_pfn;
- unsigned long altmap_nr_pages;
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid;
};
struct notifier_block;
struct mem_section;
/*
- * Priorities for the hotplug memory callback routines (stored in decreasing
- * order in the callback chain)
+ * Priorities for the hotplug memory callback routines. Callbacks are
+ * invoked from highest to lowest priority (larger number = higher).
*/
#define DEFAULT_CALLBACK_PRI 0
#define SLAB_CALLBACK_PRI 1
-#define HMAT_CALLBACK_PRI 2
#define CXL_CALLBACK_PRI 5
+#define HMAT_CALLBACK_PRI 6
#define MM_COMPUTE_BATCH_PRI 10
#define CPUSET_CALLBACK_PRI 10
#define MEMTIER_HOTPLUG_PRI 100
@@ -141,7 +132,7 @@ static inline int register_memory_notifier(struct notifier_block *nb)
static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
-static inline int memory_notify(unsigned long val, void *v)
+static inline int memory_notify(enum memory_block_state state, void *v)
{
return 0;
}
@@ -149,15 +140,23 @@ static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
+static inline int memory_block_advise_max_size(unsigned long size)
+{
+ return -ENODEV;
+}
+static inline unsigned long memory_block_advised_max_size(void)
+{
+ return 0;
+}
#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
-extern int memory_notify(unsigned long val, void *v);
+extern int memory_notify(enum memory_block_state state, void *v);
extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
@@ -171,16 +170,35 @@ struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
+struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
+extern int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
#ifdef CONFIG_NUMA
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context);
+void memory_block_add_nid_early(struct memory_block *mem, int nid);
#endif /* CONFIG_NUMA */
+int memory_block_advise_max_size(unsigned long size);
+unsigned long memory_block_advised_max_size(void);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
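
With the MEM_* bit defines folded into enum memory_block_state, notifier callbacks switch over states instead of testing bits, and struct memory_notify is down to the pfn range. A minimal callback wired up through the hotplug_memory_notifier() helper shown above (the callback body is illustrative):

#include <linux/memory.h>
#include <linux/notifier.h>

static int my_mem_callback(struct notifier_block *nb, unsigned long action,
			   void *arg)
{
	struct memory_notify *mn = arg;

	switch ((enum memory_block_state)action) {
	case MEM_GOING_ONLINE:
		/* Last chance to veto: return NOTIFY_BAD to block onlining. */
		break;
	case MEM_ONLINE:
		pr_info("onlined pfns [%#lx, %#lx)\n", mn->start_pfn,
			mn->start_pfn + mn->nr_pages);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static int __init my_init(void)
{
	return hotplug_memory_notifier(my_mem_callback, DEFAULT_CALLBACK_PRI);
}
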
diff --git a/include/linux/memory/ti-aemif.h b/include/linux/memory/ti-aemif.h
new file mode 100644
index 000000000000..da94a9d985e7
--- /dev/null
+++ b/include/linux/memory/ti-aemif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEMORY_TI_AEMIF_H
+#define __MEMORY_TI_AEMIF_H
+
+/**
+ * struct aemif_cs_timings: structure to hold CS timing configuration
+ * values are expressed in number of clock cycles - 1
+ * @ta: minimum turn around time
+ * @rhold: read hold width
+ * @rstrobe: read strobe width
+ * @rsetup: read setup width
+ * @whold: write hold width
+ * @wstrobe: write strobe width
+ * @wsetup: write setup width
+ */
+struct aemif_cs_timings {
+ u32 ta;
+ u32 rhold;
+ u32 rstrobe;
+ u32 rsetup;
+ u32 whold;
+ u32 wstrobe;
+ u32 wsetup;
+};
+
+struct aemif_device;
+
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs, struct aemif_cs_timings *timings);
+int aemif_check_cs_timings(struct aemif_cs_timings *timings);
+
+#endif // __MEMORY_TI_AEMIF_H
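
Because each aemif_cs_timings field holds "clock cycles - 1", a consumer converting a nanosecond constraint has to round the cycle count up and then subtract one. A hedged sketch of that conversion (aemif_check_cs_timings()/aemif_set_cs_timings() come from the header; the helper name, clock-rate argument and example nanosecond values are made up):

#include <linux/math.h>
#include <linux/time64.h>
#include <linux/memory/ti-aemif.h>

static u32 aemif_ns_to_cfg(u32 ns, unsigned long clk_rate_hz)
{
	/* Round cycles up so the timing is never shorter than requested. */
	u32 cycles = DIV_ROUND_UP_ULL((u64)ns * clk_rate_hz, NSEC_PER_SEC);

	return cycles ? cycles - 1 : 0;  /* register encoding is cycles - 1 */
}

static int my_configure_cs(struct aemif_device *aemif, u8 cs,
			   unsigned long clk_rate_hz)
{
	struct aemif_cs_timings t = {
		.ta      = aemif_ns_to_cfg(30, clk_rate_hz),
		.rsetup  = aemif_ns_to_cfg(10, clk_rate_hz),
		.rstrobe = aemif_ns_to_cfg(40, clk_rate_hz),
		.rhold   = aemif_ns_to_cfg(10, clk_rate_hz),
		.wsetup  = aemif_ns_to_cfg(10, clk_rate_hz),
		.wstrobe = aemif_ns_to_cfg(40, clk_rate_hz),
		.whold   = aemif_ns_to_cfg(10, clk_rate_hz),
	};
	int ret = aemif_check_cs_timings(&t);

	return ret ? ret : aemif_set_cs_timings(aemif, cs, &t);
}
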
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 7a9ff464608d..f2f16cdd73ee 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,54 +16,6 @@ struct resource;
struct vmem_altmap;
struct dev_pagemap;
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-
-#ifdef CONFIG_NUMA
-/*
- * XXX: node aware allocation can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
-})
-
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
- node_data[nid] = pgdat;
-}
-
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
@@ -106,22 +58,6 @@ typedef int __bitwise mhp_t;
* implies the node id (nid).
*/
#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
-/*
- * The hotplugged memory is completely inaccessible while the memory is
- * offline. The memory provider will handle MEM_PREPARE_ONLINE /
- * MEM_FINISH_OFFLINE notifications and make the memory accessible.
- *
- * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY,
- * because the altmap cannot be written (e.g., poisoned) when adding
- * memory -- before it is set online.
- *
- * This allows for adding memory with an altmap that is not currently
- * made available by a hypervisor. When onlining that memory, the
- * hypervisor can be instructed to make that memory available, and
- * the onlining phase will not require any memory allocations, which is
- * helpful in low-memory situations.
- */
-#define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3))
/*
* Extended parameters for memory hotplug:
@@ -171,12 +107,12 @@ extern void adjust_present_page_count(struct page *page,
long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
- struct zone *zone, bool mhp_off_inaccessible);
+ struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group);
-extern void __offline_isolated_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
@@ -192,8 +128,6 @@ extern u64 max_mem_size;
extern int mhp_online_type_from_str(const char *str);
-/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -351,6 +285,9 @@ static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#ifdef CONFIG_MEMORY_HOTPLUG
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(int online_type);
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
@@ -361,7 +298,8 @@ extern int add_memory_driver_managed(int nid, u64 start, u64 size,
mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
- struct vmem_altmap *altmap, int migratetype);
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 1add16f21612..0fe96f3ab3ef 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
+#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>
@@ -47,7 +48,7 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- nodemask_t nodes; /* interleave/bind/perfer */
+ nodemask_t nodes; /* interleave/bind/preferred/etc */
int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
union {
@@ -178,6 +179,9 @@ static inline bool mpol_is_preferred_many(struct mempolicy *pol)
extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+extern int mempolicy_set_node_perf(unsigned int node,
+ struct access_coordinate *coords);
+
#else
struct mempolicy {};
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 7b151441341b..e8e440e04a06 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -15,7 +15,7 @@ struct kmem_cache;
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
-typedef struct mempool_s {
+typedef struct mempool {
spinlock_t lock;
int min_nr; /* nr of elements at *elements */
int curr_nr; /* Current nr of elements at *elements */
@@ -27,32 +27,31 @@ typedef struct mempool_s {
wait_queue_head_t wait;
} mempool_t;
-static inline bool mempool_initialized(mempool_t *pool)
+static inline bool mempool_initialized(struct mempool *pool)
{
return pool->elements != NULL;
}
-static inline bool mempool_is_saturated(mempool_t *pool)
+static inline bool mempool_is_saturated(struct mempool *pool)
{
return READ_ONCE(pool->curr_nr) >= pool->min_nr;
}
-void mempool_exit(mempool_t *pool);
-int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int node_id);
-
-int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data);
#define mempool_init(...) \
alloc_hooks(mempool_init_noprof(__VA_ARGS__))
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
-
-extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int nid);
+struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+struct mempool *mempool_create_node_noprof(int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int nid);
#define mempool_create_node(...) \
alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
@@ -60,15 +59,21 @@ extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_
mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
GFP_KERNEL, NUMA_NO_NODE)
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
-extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
#define mempool_alloc(...) \
alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+ unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...) \
+ alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
-extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+ unsigned int count);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
@@ -97,19 +102,6 @@ void mempool_kfree(void *element, void *pool_data);
mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \
(void *)(unsigned long)(_size))
-void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
-void mempool_kvfree(void *element, void *pool_data);
-
-static inline int mempool_init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
- return mempool_init(pool, min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
-static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size)
-{
- return mempool_create(min_nr, mempool_kvmalloc, mempool_kvfree, (void *) size);
-}
-
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
* allocates pages of the order specified by pool_data
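
The typedef name mempool_t survives the struct mempool_s -> struct mempool rename, so existing users keep compiling; new code can spell out the struct. Basic usage is unchanged, e.g. with the kmalloc-backed constructor kept above (a sketch; error handling trimmed):

#include <linux/mempool.h>

static struct mempool *my_pool;

static int my_init(void)
{
	/* Keep at least 16 preallocated 256-byte elements in reserve. */
	my_pool = mempool_create_kmalloc_pool(16, 256);

	return my_pool ? 0 : -ENOMEM;
}

static void my_use(void)
{
	/* With GFP_KERNEL this sleeps rather than fail once the pool is dry. */
	void *buf = mempool_alloc(my_pool, GFP_KERNEL);

	/* ... use buf ... */
	mempool_free(buf, my_pool);
}

static void my_exit(void)
{
	mempool_destroy(my_pool);
}
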
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index c01321467789..a55f62cc5266 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -26,8 +26,10 @@ static inline void memregion_free(int id)
/**
* cpu_cache_invalidate_memregion - drop any CPU cached data for
- * memregions described by @res_desc
- * @res_desc: one of the IORES_DESC_* types
+ * memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. Pass -1 to cover all
+ * memory regions.
*
* Perform cache maintenance after a memory event / operation that
* changes the contents of physical memory in a cache-incoherent manner.
@@ -46,7 +48,7 @@ static inline void memregion_free(int id)
* the cache maintenance.
*/
#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
-int cpu_cache_invalidate_memregion(int res_desc);
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
bool cpu_cache_has_invalidate_memregion(void);
#else
static inline bool cpu_cache_has_invalidate_memregion(void)
@@ -54,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void)
return false;
}
-static inline int cpu_cache_invalidate_memregion(int res_desc)
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
{
WARN_ON_ONCE("CPU cache invalidation required");
return -ENXIO;
}
#endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+ return cpu_cache_invalidate_memregion(0, -1);
+}
+
#endif /* _MEMREGION_H_ */
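
The signature change means callers now describe the physical range they dirtied instead of naming an IORES_DESC_* class, and cpu_cache_invalidate_all() is just the (0, -1) wildcard. A hedged call-site sketch (the wrapper name is invented):

#include <linux/memregion.h>

/*
 * After changing memory contents in a cache-incoherent way (e.g. a
 * device-side secure erase), drop any stale CPU cachelines for the range.
 */
static int my_flush_range(phys_addr_t start, size_t len)
{
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	return cpu_cache_invalidate_memregion(start, len);
}
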
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 3f7143ade32c..713ec0435b48 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -25,7 +25,6 @@ struct vmem_altmap {
unsigned long free;
unsigned long align;
unsigned long alloc;
- bool inaccessible;
};
/*
@@ -77,11 +76,11 @@ enum memory_type {
struct dev_pagemap_ops {
/*
- * Called once the page refcount reaches 0. The reference count will be
+ * Called once the folio refcount reaches 0. The reference count will be
* reset to one by the core code after the method is called to prepare
- * for handing out the page again.
+ * for handing out the folio again.
*/
- void (*page_free)(struct page *page);
+ void (*folio_free)(struct folio *folio);
/*
* Used for private (un-addressable) device memory only. Must migrate
@@ -100,6 +99,13 @@ struct dev_pagemap_ops {
*/
int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
unsigned long nr_pages, int mf_flags);
+
+ /*
+ * Used for private (un-addressable) device memory only.
+ * This callback is used when a folio is split into
+ * smaller folios.
+ */
+ void (*folio_split)(struct folio *head, struct folio *tail);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -157,47 +163,101 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
return 1 << pgmap->vmemmap_shift;
}
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
static inline bool is_device_private_page(const struct page *page)
{
return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+ folio_is_device_private(page_folio(page));
}
-static inline bool folio_is_device_private(const struct folio *folio)
+static inline bool folio_is_pci_p2pdma(const struct folio *folio)
{
- return is_device_private_page(&folio->page);
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline void *folio_zone_device_data(const struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ return folio->page.zone_device_data;
+}
+
+static inline void folio_set_zone_device_data(struct folio *folio, void *data)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ folio->page.zone_device_data = data;
}
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+ folio_is_pci_p2pdma(page_folio(page));
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_COHERENT;
}
static inline bool is_device_coherent_page(const struct page *page)
{
- return is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_COHERENT;
+ return folio_is_device_coherent(page_folio(page));
}
-static inline bool folio_is_device_coherent(const struct folio *folio)
+static inline bool folio_is_fsdax(const struct folio *folio)
{
- return is_device_coherent_page(&folio->page);
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
+}
+
+static inline bool is_fsdax_page(const struct page *page)
+{
+ return folio_is_fsdax(page_folio(page));
}
#ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
+void zone_device_page_init(struct page *page, unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
+
+static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+{
+ zone_device_page_init(&folio->page, order);
+ if (order)
+ folio_set_large_rmappable(folio);
+}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+ if (folio_is_device_private(original_folio)) {
+ if (!original_folio->pgmap->ops->folio_split) {
+ if (new_folio) {
+ new_folio->pgmap = original_folio->pgmap;
+ new_folio->page.mapping =
+ original_folio->page.mapping;
+ }
+ } else {
+ original_folio->pgmap->ops->folio_split(original_folio,
+ new_folio);
+ }
+ }
+}
+
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -216,8 +276,7 @@ static inline void devm_memunmap_pages(struct device *dev,
{
}
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
return NULL;
}
@@ -232,6 +291,11 @@ static inline unsigned long memremap_compat_align(void)
{
return PAGE_SIZE;
}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
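
Under the renamed ops, a MEMORY_DEVICE_PRIVATE owner supplies folio-granular callbacks; folio_split must carry whatever per-folio state the driver keeps over to the new tail. A sketch of wiring them up (callback bodies are placeholders; migrate_to_ram, which such drivers also need, is elided):

#include <linux/memremap.h>
#include <linux/slab.h>

static void my_folio_free(struct folio *folio)
{
	/* Core resets the refcount to 1 after this returns. */
	kfree(folio_zone_device_data(folio));
	folio_set_zone_device_data(folio, NULL);
}

static void my_folio_split(struct folio *head, struct folio *tail)
{
	/* Propagate per-folio driver state to the newly split tail. */
	folio_set_zone_device_data(tail, folio_zone_device_data(head));
}

static const struct dev_pagemap_ops my_pgmap_ops = {
	.folio_free  = my_folio_free,
	.folio_split = my_folio_split,
};
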
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index ebf73d4ee969..107bdcbedf79 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -293,7 +293,7 @@ struct memstick_host {
};
struct memstick_driver {
- struct memstick_device_id *id_table;
+ const struct memstick_device_id *id_table;
int (*probe)(struct memstick_dev *card);
void (*remove)(struct memstick_dev *card);
int (*suspend)(struct memstick_dev *card,
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index def5df6e74bf..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -294,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h
new file mode 100644
index 000000000000..38892ba7b8a4
--- /dev/null
+++ b/include/linux/mfd/88pm886.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MFD_88PM886_H
+#define __MFD_88PM886_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define PM886_A1_CHIP_ID 0xa1
+
+#define PM886_IRQ_ONKEY 0
+
+#define PM886_PAGE_OFFSET_REGULATORS 1
+#define PM886_PAGE_OFFSET_GPADC 2
+
+#define PM886_REG_ID 0x00
+
+#define PM886_REG_STATUS1 0x01
+#define PM886_ONKEY_STS1 BIT(0)
+
+#define PM886_REG_INT_STATUS1 0x05
+
+#define PM886_REG_INT_ENA_1 0x0a
+#define PM886_INT_ENA1_ONKEY BIT(0)
+
+#define PM886_REG_MISC_CONFIG1 0x14
+#define PM886_SW_PDOWN BIT(5)
+
+#define PM886_REG_MISC_CONFIG2 0x15
+#define PM886_INT_INV BIT(0)
+#define PM886_INT_CLEAR BIT(1)
+#define PM886_INT_RC 0x00
+#define PM886_INT_WC BIT(1)
+#define PM886_INT_MASK_MODE BIT(2)
+
+#define PM886_REG_RTC_CNT1 0xd1
+#define PM886_REG_RTC_CNT2 0xd2
+#define PM886_REG_RTC_CNT3 0xd3
+#define PM886_REG_RTC_CNT4 0xd4
+#define PM886_REG_RTC_SPARE1 0xea
+#define PM886_REG_RTC_SPARE2 0xeb
+#define PM886_REG_RTC_SPARE3 0xec
+#define PM886_REG_RTC_SPARE4 0xed
+#define PM886_REG_RTC_SPARE5 0xee
+#define PM886_REG_RTC_SPARE6 0xef
+
+#define PM886_REG_BUCK_EN 0x08
+#define PM886_REG_LDO_EN1 0x09
+#define PM886_REG_LDO_EN2 0x0a
+#define PM886_REG_LDO1_VOUT 0x20
+#define PM886_REG_LDO2_VOUT 0x26
+#define PM886_REG_LDO3_VOUT 0x2c
+#define PM886_REG_LDO4_VOUT 0x32
+#define PM886_REG_LDO5_VOUT 0x38
+#define PM886_REG_LDO6_VOUT 0x3e
+#define PM886_REG_LDO7_VOUT 0x44
+#define PM886_REG_LDO8_VOUT 0x4a
+#define PM886_REG_LDO9_VOUT 0x50
+#define PM886_REG_LDO10_VOUT 0x56
+#define PM886_REG_LDO11_VOUT 0x5c
+#define PM886_REG_LDO12_VOUT 0x62
+#define PM886_REG_LDO13_VOUT 0x68
+#define PM886_REG_LDO14_VOUT 0x6e
+#define PM886_REG_LDO15_VOUT 0x74
+#define PM886_REG_LDO16_VOUT 0x7a
+#define PM886_REG_BUCK1_VOUT 0xa5
+#define PM886_REG_BUCK2_VOUT 0xb3
+#define PM886_REG_BUCK3_VOUT 0xc1
+#define PM886_REG_BUCK4_VOUT 0xcf
+#define PM886_REG_BUCK5_VOUT 0xdd
+
+#define PM886_LDO_VSEL_MASK 0x0f
+#define PM886_BUCK_VSEL_MASK 0x7f
+
+/* GPADC enable/disable registers */
+#define PM886_REG_GPADC_CONFIG(n) (n)
+
+#define PM886_GPADC_VSC_EN BIT(0)
+#define PM886_GPADC_VBAT_EN BIT(1)
+#define PM886_GPADC_GNDDET1_EN BIT(3)
+#define PM886_GPADC_VBUS_EN BIT(4)
+#define PM886_GPADC_VCHG_PWR_EN BIT(5)
+#define PM886_GPADC_VCF_OUT_EN BIT(6)
+#define PM886_GPADC_CONFIG1_EN_ALL \
+ (PM886_GPADC_VSC_EN | \
+ PM886_GPADC_VBAT_EN | \
+ PM886_GPADC_GNDDET1_EN | \
+ PM886_GPADC_VBUS_EN | \
+ PM886_GPADC_VCHG_PWR_EN | \
+ PM886_GPADC_VCF_OUT_EN)
+
+#define PM886_GPADC_TINT_EN BIT(0)
+#define PM886_GPADC_PMODE_EN BIT(1)
+#define PM886_GPADC_GPADC0_EN BIT(2)
+#define PM886_GPADC_GPADC1_EN BIT(3)
+#define PM886_GPADC_GPADC2_EN BIT(4)
+#define PM886_GPADC_GPADC3_EN BIT(5)
+#define PM886_GPADC_MIC_DET_EN BIT(6)
+#define PM886_GPADC_CONFIG2_EN_ALL \
+ (PM886_GPADC_TINT_EN | \
+ PM886_GPADC_GPADC0_EN | \
+ PM886_GPADC_GPADC1_EN | \
+ PM886_GPADC_GPADC2_EN | \
+ PM886_GPADC_GPADC3_EN | \
+ PM886_GPADC_MIC_DET_EN)
+
+/* No CONFIG3_EN_ALL because this is the only bit there. */
+#define PM886_GPADC_GND_DET2_EN BIT(0)
+
+/* GPADC channel registers */
+#define PM886_REG_GPADC_VSC 0x40
+#define PM886_REG_GPADC_VCHG_PWR 0x4c
+#define PM886_REG_GPADC_VCF_OUT 0x4e
+#define PM886_REG_GPADC_TINT 0x50
+#define PM886_REG_GPADC_GPADC0 0x54
+#define PM886_REG_GPADC_GPADC1 0x56
+#define PM886_REG_GPADC_GPADC2 0x58
+#define PM886_REG_GPADC_VBAT 0xa0
+#define PM886_REG_GPADC_GND_DET1 0xa4
+#define PM886_REG_GPADC_GND_DET2 0xa6
+#define PM886_REG_GPADC_VBUS 0xa8
+#define PM886_REG_GPADC_GPADC3 0xaa
+#define PM886_REG_GPADC_MIC_DET 0xac
+#define PM886_REG_GPADC_VBAT_SLP 0xb0
+
+/* VBAT_SLP is the last register and is 2 bytes wide like other channels. */
+#define PM886_GPADC_MAX_REGISTER (PM886_REG_GPADC_VBAT_SLP + 1)
+
+#define PM886_GPADC_BIAS_LEVELS 16
+#define PM886_GPADC_INDEX_TO_BIAS_uA(i) (1 + (i) * 5)
+
+struct pm886_chip {
+ struct i2c_client *client;
+ unsigned int chip_id;
+ struct regmap *regmap;
+};
+#endif /* __MFD_88PM886_H */
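
PM886_GPADC_INDEX_TO_BIAS_uA() encodes the 16 GPADC bias levels as 1 + 5i microamps, so index 0 is 1 uA and index 15 is 76 uA; going the other way is a divide. A tiny sketch (the reverse helper is not part of the header):

#include <linux/mfd/88pm886.h>

static unsigned int pm886_bias_to_index(unsigned int bias_ua)
{
	/* Inverse of PM886_GPADC_INDEX_TO_BIAS_uA(i) = 1 + 5 * i. */
	return (bias_ua - 1) / 5;
}

/* PM886_GPADC_INDEX_TO_BIAS_uA(3) == 16, pm886_bias_to_index(16) == 3 */
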
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
index 2445842d482d..c7a3c53eba68 100644
--- a/include/linux/mfd/aat2870.h
+++ b/include/linux/mfd/aat2870.h
@@ -133,9 +133,6 @@ struct aat2870_data {
int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val);
int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val);
int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val);
-
- /* for debugfs */
- struct dentry *dentry_root;
};
struct aat2870_subdev_info {
diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h
new file mode 100644
index 000000000000..5237da6b4a9f
--- /dev/null
+++ b/include/linux/mfd/adp5585.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#ifndef __MFD_ADP5585_H_
+#define __MFD_ADP5585_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+
+#define ADP5585_ID 0x00
+#define ADP5585_MAN_ID_VALUE 0x20
+#define ADP5585_MAN_ID_MASK GENMASK(7, 4)
+#define ADP5585_REV_ID_MASK GENMASK(3, 0)
+#define ADP5585_INT_STATUS 0x01
+#define ADP5585_OVRFLOW_INT BIT(2)
+#define ADP5585_EVENT_INT BIT(0)
+#define ADP5585_STATUS 0x02
+#define ADP5585_EC_MASK GENMASK(4, 0)
+#define ADP5585_FIFO_1 0x03
+#define ADP5585_KEV_EV_PRESS_MASK BIT(7)
+#define ADP5585_KEY_EVENT_MASK GENMASK(6, 0)
+#define ADP5585_FIFO_2 0x04
+#define ADP5585_FIFO_3 0x05
+#define ADP5585_FIFO_4 0x06
+#define ADP5585_FIFO_5 0x07
+#define ADP5585_FIFO_6 0x08
+#define ADP5585_FIFO_7 0x09
+#define ADP5585_FIFO_8 0x0a
+#define ADP5585_FIFO_9 0x0b
+#define ADP5585_FIFO_10 0x0c
+#define ADP5585_FIFO_11 0x0d
+#define ADP5585_FIFO_12 0x0e
+#define ADP5585_FIFO_13 0x0f
+#define ADP5585_FIFO_14 0x10
+#define ADP5585_FIFO_15 0x11
+#define ADP5585_FIFO_16 0x12
+#define ADP5585_EV_MAX (ADP5585_FIFO_16 - ADP5585_FIFO_1 + 1)
+#define ADP5585_GPI_INT_STAT_A 0x13
+#define ADP5585_GPI_INT_STAT_B 0x14
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1a
+#define ADP5585_Rx_PULL_CFG_PU_300K 0
+#define ADP5585_Rx_PULL_CFG_PD_300K 1
+#define ADP5585_Rx_PULL_CFG_PU_100K 2
+#define ADP5585_Rx_PULL_CFG_DISABLE 3
+#define ADP5585_Rx_PULL_CFG_MASK 3
+#define ADP5585_GPI_INT_LEVEL_A 0x1b
+#define ADP5585_GPI_INT_LEVEL_B 0x1c
+#define ADP5585_GPI_EVENT_EN_A 0x1d
+#define ADP5585_GPI_EVENT_EN_B 0x1e
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1f
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET_EV_PRESS BIT(7)
+#define ADP5585_RESET1_EVENT_B 0x2a
+#define ADP5585_RESET1_EVENT_C 0x2b
+#define ADP5585_RESET2_EVENT_A 0x2c
+#define ADP5585_RESET2_EVENT_B 0x2d
+#define ADP5585_RESET_CFG 0x2e
+#define ADP5585_PWM_OFFT_LOW 0x2f
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_PWM_IN_AND BIT(2)
+#define ADP5585_PWM_MODE BIT(1)
+#define ADP5585_PWM_EN BIT(0)
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_C 0x3a
+#define ADP5585_PULL_SELECT BIT(7)
+#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6)
+#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6)
+#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6)
+#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5)
+#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5)
+#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5)
+#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2)
+#define ADP5585_R3_EXTEND_CFG_LC (1U << 2)
+#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2)
+#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2)
+#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0)
+#define ADP5585_R0_EXTEND_CFG_LY (1U << 0)
+#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0)
+#define ADP5585_GENERAL_CFG 0x3b
+#define ADP5585_OSC_EN BIT(7)
+#define ADP5585_OSC_FREQ_50KHZ (0U << 5)
+#define ADP5585_OSC_FREQ_100KHZ (1U << 5)
+#define ADP5585_OSC_FREQ_200KHZ (2U << 5)
+#define ADP5585_OSC_FREQ_500KHZ (3U << 5)
+#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5)
+#define ADP5585_INT_CFG BIT(1)
+#define ADP5585_RST_CFG BIT(0)
+#define ADP5585_INT_EN 0x3c
+#define ADP5585_OVRFLOW_IEN BIT(2)
+#define ADP5585_EVENT_IEN BIT(0)
+
+#define ADP5585_MAX_REG ADP5585_INT_EN
+
+#define ADP5585_PIN_MAX 11
+#define ADP5585_MAX_UNLOCK_TIME_SEC 7
+#define ADP5585_KEY_EVENT_START 1
+#define ADP5585_KEY_EVENT_END 25
+#define ADP5585_GPI_EVENT_START 37
+#define ADP5585_GPI_EVENT_END 47
+#define ADP5585_ROW5_KEY_EVENT_START 1
+#define ADP5585_ROW5_KEY_EVENT_END 30
+#define ADP5585_PWM_OUT 3
+#define ADP5585_RESET1_OUT 4
+#define ADP5585_RESET2_OUT 9
+#define ADP5585_ROW5 5
+
+/* ADP5589 */
+#define ADP5589_MAN_ID_VALUE 0x10
+#define ADP5589_GPI_STATUS_A 0x16
+#define ADP5589_GPI_STATUS_C 0x18
+#define ADP5589_RPULL_CONFIG_A 0x19
+#define ADP5589_GPI_INT_LEVEL_A 0x1e
+#define ADP5589_GPI_EVENT_EN_A 0x21
+#define ADP5589_DEBOUNCE_DIS_A 0x27
+#define ADP5589_GPO_DATA_OUT_A 0x2a
+#define ADP5589_GPO_OUT_MODE_A 0x2d
+#define ADP5589_GPIO_DIRECTION_A 0x30
+#define ADP5589_UNLOCK1 0x33
+#define ADP5589_UNLOCK_EV_PRESS BIT(7)
+#define ADP5589_UNLOCK_TIMERS 0x36
+#define ADP5589_UNLOCK_TIMER GENMASK(2, 0)
+#define ADP5589_LOCK_CFG 0x37
+#define ADP5589_LOCK_EN BIT(0)
+#define ADP5589_RESET1_EVENT_A 0x38
+#define ADP5589_RESET2_EVENT_A 0x3B
+#define ADP5589_RESET_CFG 0x3D
+#define ADP5585_RESET2_POL BIT(7)
+#define ADP5585_RESET1_POL BIT(6)
+#define ADP5585_RST_PASSTHRU_EN BIT(5)
+#define ADP5585_RESET_TRIG_TIME GENMASK(4, 2)
+#define ADP5585_PULSE_WIDTH GENMASK(1, 0)
+#define ADP5589_PWM_OFFT_LOW 0x3e
+#define ADP5589_PWM_ONT_LOW 0x40
+#define ADP5589_PWM_CFG 0x42
+#define ADP5589_POLL_PTIME_CFG 0x48
+#define ADP5589_PIN_CONFIG_A 0x49
+#define ADP5589_PIN_CONFIG_D 0x4C
+#define ADP5589_GENERAL_CFG 0x4d
+#define ADP5589_INT_EN 0x4e
+#define ADP5589_MAX_REG ADP5589_INT_EN
+
+#define ADP5589_PIN_MAX 19
+#define ADP5589_KEY_EVENT_START 1
+#define ADP5589_KEY_EVENT_END 88
+#define ADP5589_GPI_EVENT_START 97
+#define ADP5589_GPI_EVENT_END 115
+#define ADP5589_UNLOCK_WILDCARD 127
+#define ADP5589_RESET2_OUT 12
+
+struct regmap;
+
+enum adp5585_variant {
+ ADP5585_00 = 1,
+ ADP5585_01,
+ ADP5585_02,
+ ADP5585_03,
+ ADP5585_04,
+ ADP5589_00,
+ ADP5589_01,
+ ADP5589_02,
+ ADP5585_MAX
+};
+
+struct adp5585_regs {
+ unsigned int gen_cfg;
+ unsigned int ext_cfg;
+ unsigned int int_en;
+ unsigned int poll_ptime_cfg;
+ unsigned int reset_cfg;
+ unsigned int reset1_event_a;
+ unsigned int reset2_event_a;
+ unsigned int pin_cfg_a;
+};
+
+struct adp5585_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct adp5585_regs *regs;
+ struct blocking_notifier_head event_notifier;
+ unsigned long *pin_usage;
+ unsigned int n_pins;
+ unsigned int reset2_out;
+ enum adp5585_variant variant;
+ unsigned int id;
+ bool has_unlock;
+ bool has_pin6;
+ int irq;
+ unsigned int ev_poll_time;
+ unsigned int unlock_time;
+ unsigned int unlock_keys[2];
+ unsigned int nkeys_unlock;
+ unsigned int reset1_keys[3];
+ unsigned int nkeys_reset1;
+ unsigned int reset2_keys[2];
+ unsigned int nkeys_reset2;
+ u8 reset_cfg;
+};
+
+#endif
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 2d13bbea4f3a..f72e6d4b14a7 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -117,8 +117,10 @@ struct arizona_pdata {
/** Check for line output with HPDET method */
bool hpdet_acc_id_line;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+#endif
/** Channel to use for headphone detection */
unsigned int hpdet_channel;
@@ -129,8 +131,10 @@ struct arizona_pdata {
/** Extra debounce timeout used during initial mic detection (ms) */
unsigned int micd_detect_debounce;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for mic detection polarity */
int micd_pol_gpio;
+#endif
/** Mic detect ramp rate */
unsigned int micd_bias_start_time;
@@ -184,8 +188,10 @@ struct arizona_pdata {
/** Haptic actuator type */
unsigned int hap_act;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for primary IRQ (used for edge triggered emulation) */
int irq_gpio;
+#endif
/** General purpose switch control */
unsigned int gpsw;
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
index a186119a49b5..80d675a03b39 100644
--- a/include/linux/mfd/atmel-hlcdc.h
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -22,6 +22,8 @@
#define ATMEL_HLCDC_DITHER BIT(6)
#define ATMEL_HLCDC_DISPDLY BIT(7)
#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
+#define ATMEL_XLCDC_MODE_MASK GENMASK(10, 8)
+#define ATMEL_XLCDC_DPI BIT(11)
#define ATMEL_HLCDC_PP BIT(10)
#define ATMEL_HLCDC_VSPSU BIT(12)
#define ATMEL_HLCDC_VSPHO BIT(13)
@@ -34,6 +36,12 @@
#define ATMEL_HLCDC_IDR 0x30
#define ATMEL_HLCDC_IMR 0x34
#define ATMEL_HLCDC_ISR 0x38
+#define ATMEL_XLCDC_ATTRE 0x3c
+
+#define ATMEL_XLCDC_BASE_UPDATE BIT(0)
+#define ATMEL_XLCDC_OVR1_UPDATE BIT(1)
+#define ATMEL_XLCDC_OVR3_UPDATE BIT(2)
+#define ATMEL_XLCDC_HEO_UPDATE BIT(3)
#define ATMEL_HLCDC_CLKPOL BIT(0)
#define ATMEL_HLCDC_CLKSEL BIT(2)
@@ -48,6 +56,8 @@
#define ATMEL_HLCDC_DISP BIT(2)
#define ATMEL_HLCDC_PWM BIT(3)
#define ATMEL_HLCDC_SIP BIT(4)
+#define ATMEL_XLCDC_SD BIT(5)
+#define ATMEL_XLCDC_CM BIT(6)
#define ATMEL_HLCDC_SOF BIT(0)
#define ATMEL_HLCDC_SYNCDIS BIT(1)
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 8c0a33a2e9ce..3c5aecf1d4b5 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -19,6 +19,7 @@ enum axp20x_variants {
AXP223_ID,
AXP288_ID,
AXP313A_ID,
+ AXP323_ID,
AXP717_ID,
AXP803_ID,
AXP806_ID,
@@ -113,8 +114,19 @@ enum axp20x_variants {
#define AXP313A_SHUTDOWN_CTRL 0x1a
#define AXP313A_IRQ_EN 0x20
#define AXP313A_IRQ_STATE 0x21
+#define AXP323_DCDC_MODE_CTRL2 0x22
#define AXP717_ON_INDICATE 0x00
+#define AXP717_PMU_STATUS_2 0x01
+#define AXP717_BC_DETECT 0x05
+#define AXP717_PMU_FAULT 0x08
+#define AXP717_MODULE_EN_CONTROL_1 0x0b
+#define AXP717_MIN_SYS_V_CONTROL 0x15
+#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16
+#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17
+#define AXP717_MODULE_EN_CONTROL_2 0x19
+#define AXP717_BOOST_CONTROL 0x1e
+#define AXP717_VSYS_V_POWEROFF 0x24
#define AXP717_IRQ0_EN 0x40
#define AXP717_IRQ1_EN 0x41
#define AXP717_IRQ2_EN 0x42
@@ -125,6 +137,10 @@ enum axp20x_variants {
#define AXP717_IRQ2_STATE 0x4a
#define AXP717_IRQ3_STATE 0x4b
#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_TS_PIN_CFG 0x50
+#define AXP717_ICC_CHG_SET 0x62
+#define AXP717_ITERM_CHG_SET 0x63
+#define AXP717_CV_CHG_SET 0x64
#define AXP717_DCDC_OUTPUT_CONTROL 0x80
#define AXP717_DCDC1_CONTROL 0x83
#define AXP717_DCDC2_CONTROL 0x84
@@ -145,6 +161,19 @@ enum axp20x_variants {
#define AXP717_CLDO3_CONTROL 0x9d
#define AXP717_CLDO4_CONTROL 0x9e
#define AXP717_CPUSLDO_CONTROL 0x9f
+#define AXP717_BATT_PERCENT_DATA 0xa4
+#define AXP717_ADC_CH_EN_CONTROL 0xc0
+#define AXP717_BATT_V_H 0xc4
+#define AXP717_BATT_V_L 0xc5
+#define AXP717_VBUS_V_H 0xc6
+#define AXP717_VBUS_V_L 0xc7
+#define AXP717_VSYS_V_H 0xc8
+#define AXP717_VSYS_V_L 0xc9
+#define AXP717_BATT_CHRG_I_H 0xca
+#define AXP717_BATT_CHRG_I_L 0xcb
+#define AXP717_ADC_DATA_SEL 0xcd
+#define AXP717_ADC_DATA_H 0xce
+#define AXP717_ADC_DATA_L 0xcf
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
@@ -484,6 +513,7 @@ enum {
AXP717_CLDO3,
AXP717_CLDO4,
AXP717_CPUSLDO,
+ AXP717_BOOST,
AXP717_REG_ID_MAX,
};
@@ -932,7 +962,7 @@ struct axp20x_dev {
unsigned long irq_flags;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- long variant;
+ enum axp20x_variants variant;
int nr_cells;
const struct mfd_cell *cells;
const struct regmap_config *regmap_cfg;
diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h
index 6b8791da6119..5a5783abd47b 100644
--- a/include/linux/mfd/bcm590xx.h
+++ b/include/linux/mfd/bcm590xx.h
@@ -13,6 +13,26 @@
#include <linux/i2c.h>
#include <linux/regmap.h>
+/* PMU ID register values; also used as device type */
+#define BCM590XX_PMUID_BCM59054 0x54
+#define BCM590XX_PMUID_BCM59056 0x56
+
+/* Known chip revision IDs */
+#define BCM59054_REV_DIGITAL_A1 1
+#define BCM59054_REV_ANALOG_A1 2
+
+#define BCM59056_REV_DIGITAL_A0 1
+#define BCM59056_REV_ANALOG_A0 1
+
+#define BCM59056_REV_DIGITAL_B0 2
+#define BCM59056_REV_ANALOG_B0 2
+
+/* regmap types */
+enum bcm590xx_regmap_type {
+ BCM590XX_REGMAP_PRI,
+ BCM590XX_REGMAP_SEC,
+};
+
/* max register address */
#define BCM590XX_MAX_REGISTER_PRI 0xe7
#define BCM590XX_MAX_REGISTER_SEC 0xf0
@@ -23,7 +43,13 @@ struct bcm590xx {
struct i2c_client *i2c_sec;
struct regmap *regmap_pri;
struct regmap *regmap_sec;
- unsigned int id;
+
+ /* PMU ID value; also used as device type */
+ u8 pmu_id;
+
+ /* Chip revision, read from PMUREV reg */
+ u8 rev_digital;
+ u8 rev_analog;
};
#endif /* __LINUX_MFD_BCM590XX_H */
diff --git a/include/linux/mfd/bq257xx.h b/include/linux/mfd/bq257xx.h
new file mode 100644
index 000000000000..1d6ddc7fb09f
--- /dev/null
+++ b/include/linux/mfd/bq257xx.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register definitions for TI BQ257XX
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __MFD_BQ257XX_H
+#define __MFD_BQ257XX_H
+
+#define BQ25703_CHARGE_OPTION_0 0x00
+#define BQ25703_CHARGE_CURRENT 0x02
+#define BQ25703_MAX_CHARGE_VOLT 0x04
+#define BQ25703_OTG_VOLT 0x06
+#define BQ25703_OTG_CURRENT 0x08
+#define BQ25703_INPUT_VOLTAGE 0x0a
+#define BQ25703_MIN_VSYS 0x0c
+#define BQ25703_IIN_HOST 0x0e
+#define BQ25703_CHARGER_STATUS 0x20
+#define BQ25703_PROCHOT_STATUS 0x22
+#define BQ25703_IIN_DPM 0x24
+#define BQ25703_ADCIBAT_CHG 0x28
+#define BQ25703_ADCIINCMPIN 0x2a
+#define BQ25703_ADCVSYSVBAT 0x2c
+#define BQ25703_MANUFACT_DEV_ID 0x2e
+#define BQ25703_CHARGE_OPTION_1 0x30
+#define BQ25703_CHARGE_OPTION_2 0x32
+#define BQ25703_CHARGE_OPTION_3 0x34
+#define BQ25703_ADC_OPTION 0x3a
+
+#define BQ25703_EN_LWPWR BIT(15)
+#define BQ25703_WDTMR_ADJ_MASK GENMASK(14, 13)
+#define BQ25703_WDTMR_DISABLE 0
+#define BQ25703_WDTMR_5_SEC 1
+#define BQ25703_WDTMR_88_SEC 2
+#define BQ25703_WDTMR_175_SEC 3
+
+#define BQ25703_ICHG_MASK GENMASK(12, 6)
+#define BQ25703_ICHG_STEP_UA 64000
+#define BQ25703_ICHG_MIN_UA 64000
+#define BQ25703_ICHG_MAX_UA 8128000
+
+#define BQ25703_MAX_CHARGE_VOLT_MASK GENMASK(15, 4)
+#define BQ25703_VBATREG_STEP_UV 16000
+#define BQ25703_VBATREG_MIN_UV 1024000
+#define BQ25703_VBATREG_MAX_UV 19200000
+
+#define BQ25703_OTG_VOLT_MASK GENMASK(13, 6)
+#define BQ25703_OTG_VOLT_STEP_UV 64000
+#define BQ25703_OTG_VOLT_MIN_UV 4480000
+#define BQ25703_OTG_VOLT_MAX_UV 20800000
+#define BQ25703_OTG_VOLT_NUM_VOLT 256
+
+#define BQ25703_OTG_CUR_MASK GENMASK(14, 8)
+#define BQ25703_OTG_CUR_STEP_UA 50000
+#define BQ25703_OTG_CUR_MAX_UA 6350000
+
+#define BQ25703_MINVSYS_MASK GENMASK(13, 8)
+#define BQ25703_MINVSYS_STEP_UV 256000
+#define BQ25703_MINVSYS_MIN_UV 1024000
+#define BQ25703_MINVSYS_MAX_UV 16128000
+
+#define BQ25703_STS_AC_STAT BIT(15)
+#define BQ25703_STS_IN_FCHRG BIT(10)
+#define BQ25703_STS_IN_PCHRG BIT(9)
+#define BQ25703_STS_FAULT_ACOV BIT(7)
+#define BQ25703_STS_FAULT_BATOC BIT(6)
+#define BQ25703_STS_FAULT_ACOC BIT(5)
+
+#define BQ25703_IINDPM_MASK GENMASK(14, 8)
+#define BQ25703_IINDPM_STEP_UA 50000
+#define BQ25703_IINDPM_MIN_UA 50000
+#define BQ25703_IINDPM_MAX_UA 6400000
+#define BQ25703_IINDPM_DEFAULT_UA 3300000
+#define BQ25703_IINDPM_OFFSET_UA 50000
+
+#define BQ25703_ADCIBAT_DISCHG_MASK GENMASK(6, 0)
+#define BQ25703_ADCIBAT_CHG_MASK GENMASK(14, 8)
+#define BQ25703_ADCIBAT_CHG_STEP_UA 64000
+#define BQ25703_ADCIBAT_DIS_STEP_UA 256000
+
+#define BQ25703_ADCIIN GENMASK(15, 8)
+#define BQ25703_ADCIINCMPIN_STEP 50000
+
+#define BQ25703_ADCVSYS_MASK GENMASK(15, 8)
+#define BQ25703_ADCVBAT_MASK GENMASK(7, 0)
+#define BQ25703_ADCVSYSVBAT_OFFSET_UV 2880000
+#define BQ25703_ADCVSYSVBAT_STEP 64000
+
+#define BQ25703_ADC_CH_MASK GENMASK(7, 0)
+#define BQ25703_ADC_CONV_EN BIT(15)
+#define BQ25703_ADC_START BIT(14)
+#define BQ25703_ADC_FULL_SCALE BIT(13)
+#define BQ25703_ADC_CMPIN_EN BIT(7)
+#define BQ25703_ADC_VBUS_EN BIT(6)
+#define BQ25703_ADC_PSYS_EN BIT(5)
+#define BQ25703_ADC_IIN_EN BIT(4)
+#define BQ25703_ADC_IDCHG_EN BIT(3)
+#define BQ25703_ADC_ICHG_EN BIT(2)
+#define BQ25703_ADC_VSYS_EN BIT(1)
+#define BQ25703_ADC_VBAT_EN BIT(0)
+
+#define BQ25703_EN_OTG_MASK BIT(12)
+
+struct bq257xx_device {
+ struct i2c_client *client;
+ struct regmap *regmap;
+};
+
+#endif /* __MFD_BQ257XX_H */
diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h
new file mode 100644
index 000000000000..badbec4c7033
--- /dev/null
+++ b/include/linux/mfd/cgbc.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Congatec Board Controller driver definitions
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#ifndef _LINUX_MFD_CGBC_H_
+#define _LINUX_MFD_CGBC_H_
+
+/**
+ * struct cgbc_version - Board Controller device version structure
+ * @feature: Board Controller feature number
+ * @major: Board Controller major revision
+ * @minor: Board Controller minor revision
+ */
+struct cgbc_version {
+ unsigned char feature;
+ unsigned char major;
+ unsigned char minor;
+};
+
+/**
+ * struct cgbc_device_data - Internal representation of the Board Controller device
+ * @io_session: Pointer to the session IO memory
+ * @io_cmd: Pointer to the command IO memory
+ * @session: Session id returned by the Board Controller
+ * @dev: Pointer to kernel device structure
+ * @version: Board Controller version structure
+ * @lock: Board Controller mutex
+ */
+struct cgbc_device_data {
+ void __iomem *io_session;
+ void __iomem *io_cmd;
+ u8 session;
+ struct device *dev;
+ struct cgbc_version version;
+ struct mutex lock;
+};
+
+int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size,
+ void *data, unsigned int data_size, u8 *status);
+
+#endif /*_LINUX_MFD_CGBC_H_*/
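
cgbc_command() is the single entry point sub-drivers (GPIO, watchdog, HWMON) use to talk to the board controller session. A hypothetical call shape (the 0x11 opcode and the helper are invented; only the signature comes from this header):

#include <linux/mfd/cgbc.h>

static int my_read_byte(struct cgbc_device_data *cgbc, u8 *out)
{
	u8 cmd = 0x11;  /* hypothetical one-byte command opcode */
	u8 status;
	int ret;

	ret = cgbc_command(cgbc, &cmd, sizeof(cmd), out, sizeof(*out), &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;  /* interpretation of status is a guess */
}
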
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index e8bcad641d8c..faeea7abd688 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -72,7 +72,7 @@ struct mfd_cell {
int (*resume)(struct platform_device *dev);
/* platform data passed to the sub devices drivers */
- void *platform_data;
+ const void *platform_data;
size_t pdata_size;
/* Matches ACPI */
diff --git a/include/linux/mfd/cs40l50.h b/include/linux/mfd/cs40l50.h
new file mode 100644
index 000000000000..e5dc49860944
--- /dev/null
+++ b/include/linux/mfd/cs40l50.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#ifndef __MFD_CS40L50_H__
+#define __MFD_CS40L50_H__
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* Power Supply Configuration */
+#define CS40L50_BLOCK_ENABLES2 0x201C
+#define CS40L50_ERR_RLS 0x2034
+#define CS40L50_BST_LPMODE_SEL 0x3810
+#define CS40L50_DCM_LOW_POWER 0x1
+#define CS40L50_OVERTEMP_WARN 0x4000010
+
+/* Interrupts */
+#define CS40L50_IRQ1_INT_1 0xE010
+#define CS40L50_IRQ1_BASE CS40L50_IRQ1_INT_1
+#define CS40L50_IRQ1_INT_2 0xE014
+#define CS40L50_IRQ1_INT_8 0xE02C
+#define CS40L50_IRQ1_INT_9 0xE030
+#define CS40L50_IRQ1_INT_10 0xE034
+#define CS40L50_IRQ1_INT_18 0xE054
+#define CS40L50_IRQ1_MASK_1 0xE090
+#define CS40L50_IRQ1_MASK_2 0xE094
+#define CS40L50_IRQ1_MASK_20 0xE0DC
+#define CS40L50_IRQ1_INT_1_OFFSET (CS40L50_IRQ1_INT_1 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_2_OFFSET (CS40L50_IRQ1_INT_2 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_8_OFFSET (CS40L50_IRQ1_INT_8 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_9_OFFSET (CS40L50_IRQ1_INT_9 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_10_OFFSET (CS40L50_IRQ1_INT_10 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_18_OFFSET (CS40L50_IRQ1_INT_18 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ_MASK_2_OVERRIDE 0xFFDF7FFF
+#define CS40L50_IRQ_MASK_20_OVERRIDE 0x15C01000
+#define CS40L50_AMP_SHORT_MASK BIT(31)
+#define CS40L50_DSP_QUEUE_MASK BIT(21)
+#define CS40L50_TEMP_ERR_MASK BIT(31)
+#define CS40L50_BST_UVP_MASK BIT(6)
+#define CS40L50_BST_SHORT_MASK BIT(7)
+#define CS40L50_BST_ILIMIT_MASK BIT(18)
+#define CS40L50_UVLO_VDDBATT_MASK BIT(16)
+#define CS40L50_GLOBAL_ERROR_MASK BIT(15)
+
+enum cs40l50_irq_list {
+ CS40L50_DSP_QUEUE_IRQ,
+ CS40L50_GLOBAL_ERROR_IRQ,
+ CS40L50_UVLO_VDDBATT_IRQ,
+ CS40L50_BST_ILIMIT_IRQ,
+ CS40L50_BST_SHORT_IRQ,
+ CS40L50_BST_UVP_IRQ,
+ CS40L50_TEMP_ERR_IRQ,
+ CS40L50_AMP_SHORT_IRQ,
+};
+
+/* DSP */
+#define CS40L50_XMEM_PACKED_0 0x2000000
+#define CS40L50_XMEM_UNPACKED24_0 0x2800000
+#define CS40L50_SYS_INFO_ID 0x25E0000
+#define CS40L50_DSP_QUEUE_WT 0x28042C8
+#define CS40L50_DSP_QUEUE_RD 0x28042CC
+#define CS40L50_NUM_WAVES 0x2805C18
+#define CS40L50_CORE_BASE 0x2B80000
+#define CS40L50_YMEM_PACKED_0 0x2C00000
+#define CS40L50_YMEM_UNPACKED24_0 0x3400000
+#define CS40L50_PMEM_0 0x3800000
+#define CS40L50_DSP_POLL_US 1000
+#define CS40L50_DSP_TIMEOUT_COUNT 100
+#define CS40L50_RESET_PULSE_US 2200
+#define CS40L50_CP_READY_US 3100
+#define CS40L50_AUTOSUSPEND_MS 2000
+#define CS40L50_PM_ALGO 0x9F206
+#define CS40L50_GLOBAL_ERR_RLS_SET BIT(11)
+#define CS40L50_GLOBAL_ERR_RLS_CLEAR 0
+
+enum cs40l50_wseqs {
+ CS40L50_PWR_ON,
+ CS40L50_STANDBY,
+ CS40L50_ACTIVE,
+ CS40L50_NUM_WSEQS,
+};
+
+/* DSP Queue */
+#define CS40L50_DSP_QUEUE_BASE 0x11004
+#define CS40L50_DSP_QUEUE_END 0x1101C
+#define CS40L50_DSP_QUEUE 0x11020
+#define CS40L50_PREVENT_HIBER 0x2000003
+#define CS40L50_ALLOW_HIBER 0x2000004
+#define CS40L50_SHUTDOWN 0x2000005
+#define CS40L50_SYSTEM_RESET 0x2000007
+#define CS40L50_START_I2S 0x3000002
+#define CS40L50_OWT_PUSH 0x3000008
+#define CS40L50_STOP_PLAYBACK 0x5000000
+#define CS40L50_OWT_DELETE 0xD000000
+
+/* Firmware files */
+#define CS40L50_FW "cs40l50.wmfw"
+#define CS40L50_WT "cs40l50.bin"
+
+/* Device */
+#define CS40L50_DEVID 0x0
+#define CS40L50_REVID 0x4
+#define CS40L50_DEVID_A 0x40A50
+#define CS40L50_REVID_B0 0xB0
+
+struct cs40l50 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct cs_dsp dsp;
+ struct gpio_desc *reset_gpio;
+ struct regmap_irq_chip_data *irq_data;
+ const struct firmware *fw;
+ const struct firmware *bin;
+ struct cs_dsp_wseq wseqs[CS40L50_NUM_WSEQS];
+ int irq;
+ u32 devid;
+ u32 revid;
+};
+
+int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val);
+int cs40l50_probe(struct cs40l50 *cs40l50);
+int cs40l50_remove(struct cs40l50 *cs40l50);
+
+extern const struct regmap_config cs40l50_regmap;
+extern const struct dev_pm_ops cs40l50_pm_ops;
+
+#endif /* __MFD_CS40L50_H__ */
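The DSP queue command IDs above (CS40L50_STOP_PLAYBACK and friends) pair with the exported cs40l50_dsp_write() prototype. A hedged sketch, assuming that helper posts the value to the DSP queue (the register names suggest this, but the header does not spell it out):

/* Sketch: assumes cs40l50_dsp_write() posts val to the DSP queue. */
static int my_stop_playback(struct cs40l50 *cs40l50)
{
	return cs40l50_dsp_write(cs40l50->dev, cs40l50->regmap,
				 CS40L50_STOP_PLAYBACK);
}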
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 76feb3a7066d..9cb2fc2938ce 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -93,6 +93,8 @@ struct da9052 {
int chip_irq;
+ int fault_log;
+
/* SOC I/O transfer related fixes for DA9052/53 */
int (*fix_io) (struct da9052 *da9052, unsigned char reg);
};
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 8db52324f416..eae82f421414 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -78,6 +78,7 @@ struct da9063 {
enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
+ bool use_sw_pm;
/* Control interface */
struct regmap *regmap;
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 556375b91316..9acd703dd5ca 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -10,11 +10,13 @@
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/bits.h>
#include <linux/mfd/core.h>
-#include <linux/platform_data/edma.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct platform_device;
struct regmap;
/*
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index dd0fc891b228..828362b7860c 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -213,9 +213,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(void)
+static inline void __init prcmu_early_init(void)
{
- return db8500_prcmu_early_init();
+ db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -302,7 +302,7 @@ static inline int prcmu_request_ape_opp_100_voltage(bool enable)
static inline void prcmu_system_reset(u16 reset_code)
{
- return db8500_prcmu_system_reset(reset_code);
+ db8500_prcmu_system_reset(reset_code);
}
static inline u16 prcmu_get_reset_code(void)
@@ -314,7 +314,7 @@ int prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
static inline void prcmu_modem_reset(void)
{
- return db8500_prcmu_modem_reset();
+ db8500_prcmu_modem_reset();
}
static inline bool prcmu_is_ac_wake_requested(void)
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 43dfca1c9702..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of his sub devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices recover
- * time respects the 1-wire spec since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say 1 here for big endian Hardware
- * (only relevant with bus-shift > 0
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get register address offsett.
- * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index ffde195e12b7..ea51b1cdca5a 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -31,7 +31,6 @@ int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32);
int pcap_to_irq(struct pcap_chip *, int);
int irq_to_pcap(struct pcap_chip *, int);
int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
-int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_SECOND_PORT 1
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
index 0c706085c205..53a222605526 100644
--- a/include/linux/mfd/idt8a340_reg.h
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -61,7 +61,7 @@
#define HW_Q8_CTRL_SPARE (0xa7d4)
#define HW_Q11_CTRL_SPARE (0xa7ec)
-/**
+/*
* Select FOD5 as sync_trigger for Q8 divider.
* Transition from logic zero to one
* sets trigger to sync Q8 divider.
@@ -70,7 +70,7 @@
*/
#define Q9_TO_Q8_SYNC_TRIG BIT(1)
-/**
+/*
* Enable FOD5 as driver for clock and sync for Q8 divider.
* Enable fanout buffer for FOD5.
*
@@ -78,7 +78,7 @@
*/
#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-/**
+/*
* Select FOD6 as sync_trigger for Q11 divider.
* Transition from logic zero to one
* sets trigger to sync Q11 divider.
@@ -87,7 +87,7 @@
*/
#define Q10_TO_Q11_SYNC_TRIG BIT(1)
-/**
+/*
* Enable FOD6 as driver for clock and sync for Q11 divider.
* Enable fanout buffer for FOD6.
*
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
index 77092f6363ad..69059a7a2ce5 100644
--- a/include/linux/mfd/lm3533.h
+++ b/include/linux/mfd/lm3533.h
@@ -16,6 +16,7 @@
DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
struct device;
+struct gpio_desc;
struct regmap;
struct lm3533 {
@@ -23,7 +24,7 @@ struct lm3533 {
struct regmap *regmap;
- int gpio_hwen;
+ struct gpio_desc *hwen;
int irq;
unsigned have_als:1;
@@ -69,8 +70,6 @@ enum lm3533_boost_ovp {
};
struct lm3533_platform_data {
- int gpio_hwen;
-
enum lm3533_boost_ovp boost_ovp;
enum lm3533_boost_freq boost_freq;
diff --git a/include/linux/mfd/loongson-se.h b/include/linux/mfd/loongson-se.h
new file mode 100644
index 000000000000..07afa0c2524d
--- /dev/null
+++ b/include/linux/mfd/loongson-se.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2025 Loongson Technology Corporation Limited */
+
+#ifndef __MFD_LOONGSON_SE_H__
+#define __MFD_LOONGSON_SE_H__
+
+#define LOONGSON_ENGINE_CMD_TIMEOUT_US 10000
+#define SE_SEND_CMD_REG 0x0
+#define SE_SEND_CMD_REG_LEN 0x8
+/* Controller command ID */
+#define SE_CMD_START 0x0
+#define SE_CMD_SET_DMA 0x3
+#define SE_CMD_SET_ENGINE_CMDBUF 0x4
+
+#define SE_S2LINT_STAT 0x88
+#define SE_S2LINT_EN 0x8c
+#define SE_S2LINT_CL 0x94
+#define SE_L2SINT_STAT 0x98
+#define SE_L2SINT_SET 0xa0
+
+#define SE_INT_ALL 0xffffffff
+#define SE_INT_CONTROLLER BIT(0)
+
+#define SE_ENGINE_MAX 16
+#define SE_ENGINE_RNG 1
+#define SE_CMD_RNG 0x100
+
+#define SE_ENGINE_TPM 5
+#define SE_CMD_TPM 0x500
+
+#define SE_ENGINE_CMD_SIZE 32
+
+struct loongson_se_engine {
+ struct loongson_se *se;
+ int id;
+
+ /* Command buffer */
+ void *command;
+ void *command_ret;
+
+ void *data_buffer;
+ uint buffer_size;
+ /* Data buffer offset to DMA base */
+ uint buffer_off;
+
+	struct completion completion;
+};
+
+struct loongson_se_engine *loongson_se_init_engine(struct device *dev, int id);
+int loongson_se_send_engine_cmd(struct loongson_se_engine *engine);
+
+#endif
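A hedged sketch of a client driver using the two exported calls; the struct fields and SE_* constants come from this header, while the command-word layout is an assumption of the sketch:

#include <linux/errno.h>
#include <linux/string.h>

/* Sketch: the command-word layout is assumed, not defined here. */
static int my_rng_read(struct device *dev, void *out, size_t len)
{
	struct loongson_se_engine *eng;
	u32 *cmd;
	int ret;

	eng = loongson_se_init_engine(dev, SE_ENGINE_RNG);
	if (!eng)
		return -ENODEV;
	if (len > eng->buffer_size)
		return -EINVAL;

	cmd = eng->command;
	cmd[0] = SE_CMD_RNG;	/* assumed: word 0 carries the command ID */
	cmd[1] = len;		/* assumed: word 1 carries the byte count */

	ret = loongson_se_send_engine_cmd(eng);
	if (ret)
		return ret;

	memcpy(out, eng->data_buffer, len);
	return 0;
}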
diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h
index 020a339f96e8..402f01078fcc 100644
--- a/include/linux/mfd/lp3943.h
+++ b/include/linux/mfd/lp3943.h
@@ -11,7 +11,6 @@
#define __MFD_LP3943_H__
#include <linux/gpio.h>
-#include <linux/pwm.h>
#include <linux/regmap.h>
/* Registers */
diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h
new file mode 100644
index 000000000000..cc09ecce0df7
--- /dev/null
+++ b/include/linux/mfd/macsmc.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC (System Management Controller) core definitions
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_MFD_MACSMC_H
+#define _LINUX_MFD_MACSMC_H
+
+#include <linux/soc/apple/rtkit.h>
+
+/**
+ * typedef smc_key - Alias for u32 to be used for SMC keys
+ *
+ * SMC keys are 32bit integers containing packed ASCII characters in natural
+ * integer order, i.e. 0xAABBCCDD, which represent the FourCC ABCD.
+ * The SMC driver is designed with this assumption and ensures the right
+ * endianness is used when these are stored to memory and sent to or received
+ * from the actual SMC firmware (which can be done in either shared memory or
+ * as 64bit mailbox message on Apple Silicon).
+ * Internally, SMC stores these keys in a table sorted lexicographically and
+ * allows resolving an index into this table to the corresponding SMC key.
+ * Thus, storing keys as u32 is very convenient as it allows using, e.g.,
+ * normal comparison operators which directly map to the natural order used
+ * by SMC firmware.
+ *
+ * This simple type alias is introduced to allow easy recognition of SMC key
+ * variables and arguments.
+ */
+typedef u32 smc_key;
+
+/**
+ * SMC_KEY - Convert FourCC SMC keys in source code to smc_key
+ *
+ * This macro can be used to easily define FourCC SMC keys in source code
+ * and convert these to u32 / smc_key, e.g. SMC_KEY(NTAP) will expand to
+ * 0x4e544150.
+ *
+ * @s: FourCC SMC key to be converted
+ */
+#define SMC_KEY(s) (smc_key)(_SMC_KEY(#s))
+#define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3])
+#define __SMC_KEY(a, b, c, d) (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_SMC_READABLE BIT(7)
+#define APPLE_SMC_WRITABLE BIT(6)
+#define APPLE_SMC_FUNCTION BIT(4)
+
+/**
+ * struct apple_smc_key_info - Information for an SMC key as returned by SMC
+ * @type_code: FourCC code indicating the type for this key.
+ * Known types:
+ * ch8*: ASCII string
+ * flag: Boolean, 1 or 0
+ * flt: 32-bit single-precision IEEE 754 float
+ * hex: Binary data
+ *             ioft: 64bit Unsigned fixed-point integer (48.16)
+ * {si,ui}{8,16,32,64}: Signed/Unsigned 8-/16-/32-/64-bit integer
+ * @size: Size of the buffer associated with this key
+ * @flags: Bitfield encoding flags (APPLE_SMC_{READABLE,WRITABLE,FUNCTION})
+ */
+struct apple_smc_key_info {
+ u32 type_code;
+ u8 size;
+ u8 flags;
+};
+
+/**
+ * enum apple_smc_boot_stage - SMC boot stage
+ * @APPLE_SMC_BOOTING: SMC is booting
+ * @APPLE_SMC_INITIALIZED: SMC is initialized and ready to use
+ * @APPLE_SMC_ERROR_NO_SHMEM: Shared memory could not be initialized during boot
+ * @APPLE_SMC_ERROR_CRASHED: SMC has crashed
+ */
+enum apple_smc_boot_stage {
+ APPLE_SMC_BOOTING,
+ APPLE_SMC_INITIALIZED,
+ APPLE_SMC_ERROR_NO_SHMEM,
+ APPLE_SMC_ERROR_CRASHED
+};
+
+/**
+ * struct apple_smc
+ * @dev: Underlying device struct for the physical backend device
+ * @key_count: Number of available SMC keys
+ * @first_key: First valid SMC key
+ * @last_key: Last valid SMC key
+ * @event_handlers: Notifier call chain for events received from SMC
+ * @rtk: Pointer to Apple RTKit instance
+ * @init_done: Completion for initialization
+ * @boot_stage: Current boot stage of SMC
+ * @sram: Pointer to SRAM resource
+ * @sram_base: SRAM base address
+ * @shmem: RTKit shared memory structure for SRAM
+ * @msg_id: Current message id for commands, will be incremented for each command
+ * @atomic_mode: Flag set when atomic mode is entered
+ * @atomic_pending: Flag indicating pending atomic command
+ * @cmd_done: Completion for command execution in non-atomic mode
+ * @cmd_ret: Return value from SMC for last command
+ * @mutex: Mutex for non-atomic mode
+ * @lock: Spinlock for atomic mode
+ */
+struct apple_smc {
+ struct device *dev;
+
+ u32 key_count;
+ smc_key first_key;
+ smc_key last_key;
+
+ struct blocking_notifier_head event_handlers;
+
+ struct apple_rtkit *rtk;
+
+ struct completion init_done;
+ enum apple_smc_boot_stage boot_stage;
+
+ struct resource *sram;
+ void __iomem *sram_base;
+ struct apple_rtkit_shmem shmem;
+
+ unsigned int msg_id;
+
+ bool atomic_mode;
+ bool atomic_pending;
+ struct completion cmd_done;
+ u64 cmd_ret;
+
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * apple_smc_read - Read size bytes from given SMC key into buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be read
+ * @buf: Buffer into which size bytes of data will be read from SMC
+ * @size: Number of bytes to be read into buf
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+
+/**
+ * apple_smc_write - Write size bytes into given SMC key from buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_enter_atomic - Enter atomic mode to be able to use apple_smc_write_atomic
+ * @smc: Pointer to apple_smc struct
+ *
+ * This function switches the SMC backend to atomic mode which allows the
+ * use of apple_smc_write_atomic while disabling *all* other functions.
+ * This is only used for shutdown/reboot which requires writing to an SMC
+ * key from atomic context.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_enter_atomic(struct apple_smc *smc);
+
+/**
+ * apple_smc_write_atomic - Write size bytes into given SMC key from buf without sleeping
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Note that this function will fail if apple_smc_enter_atomic hasn't been
+ * called before.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_rw - Write and then read using the given SMC key
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @wbuf: Buffer from which size bytes of data will be written to SMC
+ * @wsize: Number of bytes to be written
+ * @rbuf: Buffer to which size bytes of data will be read from SMC
+ * @rsize: Number of bytes to be read
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_rw(struct apple_smc *smc, smc_key key, const void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize);
+
+/**
+ * apple_smc_get_key_by_index - Given an index, return the corresponding SMC key
+ * @smc: Pointer to apple_smc struct
+ * @index: Index to be resolved
+ * @key: Buffer for SMC key to be returned
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key);
+
+/**
+ * apple_smc_get_key_info - Get key information from SMC
+ * @smc: Pointer to apple_smc struct
+ * @key: Key to acquire information for
+ * @info: Pointer to struct apple_smc_key_info which will be filled
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info);
+
+/**
+ * apple_smc_key_exists - Check if the given SMC key exists
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be checked
+ *
+ * Return: True if the key exists, false otherwise
+ */
+static inline bool apple_smc_key_exists(struct apple_smc *smc, smc_key key)
+{
+ return apple_smc_get_key_info(smc, key, NULL) >= 0;
+}
+
+#define APPLE_SMC_TYPE_OPS(type) \
+ static inline int apple_smc_read_##type(struct apple_smc *smc, smc_key key, type *p) \
+ { \
+ int ret = apple_smc_read(smc, key, p, sizeof(*p)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*p)) ? -EINVAL : 0); \
+ } \
+ static inline int apple_smc_write_##type(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_write_##type##_atomic(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write_atomic(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_rw_##type(struct apple_smc *smc, smc_key key, \
+ type w, type *r) \
+ { \
+ int ret = apple_smc_rw(smc, key, &w, sizeof(w), r, sizeof(*r)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*r)) ? -EINVAL : 0); \
+ }
+
+APPLE_SMC_TYPE_OPS(u64)
+APPLE_SMC_TYPE_OPS(u32)
+APPLE_SMC_TYPE_OPS(u16)
+APPLE_SMC_TYPE_OPS(u8)
+APPLE_SMC_TYPE_OPS(s64)
+APPLE_SMC_TYPE_OPS(s32)
+APPLE_SMC_TYPE_OPS(s16)
+APPLE_SMC_TYPE_OPS(s8)
+
+static inline int apple_smc_read_flag(struct apple_smc *smc, smc_key key, bool *flag)
+{
+ u8 val;
+ int ret = apple_smc_read_u8(smc, key, &val);
+
+ if (ret < 0)
+ return ret;
+
+ *flag = val ? true : false;
+ return ret;
+}
+
+static inline int apple_smc_write_flag(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8(smc, key, state ? 1 : 0);
+}
+
+static inline int apple_smc_write_flag_atomic(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8_atomic(smc, key, state ? 1 : 0);
+}
+
+#endif
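Tying the pieces above together, a hedged sketch of reading one key with the helpers generated by APPLE_SMC_TYPE_OPS() (SMC_KEY(), apple_smc_key_exists() and apple_smc_read_u32() are from this header; the key name is hypothetical):

/* Sketch: the key "F0Ac" is hypothetical. */
static int my_read_fan_rpm(struct apple_smc *smc, u32 *rpm)
{
	smc_key key = SMC_KEY(F0Ac);

	if (!apple_smc_key_exists(smc, key))
		return -ENODEV;

	return apple_smc_read_u32(smc, key, rpm);
}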
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 32e3470708ed..7e84738cbb20 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -8,10 +8,11 @@
#ifndef MADERA_PDATA_H
#define MADERA_PDATA_H
-#include <linux/kernel.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/regulator/arizona-micsupp.h>
#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
#include <sound/madera-pdata.h>
#define MADERA_MAX_MICBIAS 4
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index a21374f8ad26..dd51a37fa37f 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -2,7 +2,7 @@
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*/
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index 8b3ef891ba42..0fda5c2e745a 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -2,7 +2,7 @@
/*
* max14577.h - Driver for the Maxim 14577/77836
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h
index 762a7d40c843..fc50e89edfaa 100644
--- a/include/linux/mfd/max5970.h
+++ b/include/linux/mfd/max5970.h
@@ -16,18 +16,6 @@
#define MAX5978_NUM_SWITCHES 1
#define MAX5970_NUM_LEDS 4
-struct max5970_data {
- int num_switches;
- u32 irng[MAX5970_NUM_SWITCHES];
- u32 mon_rng[MAX5970_NUM_SWITCHES];
- u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
-};
-
-enum max5970_chip_type {
- TYPE_MAX5978 = 1,
- TYPE_MAX5970,
-};
-
#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
diff --git a/include/linux/mfd/max7360.h b/include/linux/mfd/max7360.h
new file mode 100644
index 000000000000..44cf2bf651a2
--- /dev/null
+++ b/include/linux/mfd/max7360.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_MFD_MAX7360_H
+#define __LINUX_MFD_MAX7360_H
+
+#include <linux/bits.h>
+
+#define MAX7360_MAX_KEY_ROWS 8
+#define MAX7360_MAX_KEY_COLS 8
+#define MAX7360_MAX_KEY_NUM (MAX7360_MAX_KEY_ROWS * MAX7360_MAX_KEY_COLS)
+#define MAX7360_ROW_SHIFT 3
+
+#define MAX7360_MAX_GPIO 8
+#define MAX7360_MAX_GPO 6
+#define MAX7360_PORT_PWM_COUNT 8
+#define MAX7360_PORT_RTR_PIN (MAX7360_PORT_PWM_COUNT - 1)
+
+/*
+ * MAX7360 registers
+ */
+#define MAX7360_REG_KEYFIFO 0x00
+#define MAX7360_REG_CONFIG 0x01
+#define MAX7360_REG_DEBOUNCE 0x02
+#define MAX7360_REG_INTERRUPT 0x03
+#define MAX7360_REG_PORTS 0x04
+#define MAX7360_REG_KEYREP 0x05
+#define MAX7360_REG_SLEEP 0x06
+
+/*
+ * MAX7360 GPIO registers
+ *
+ * All these registers are reset together when writing bit 3 of
+ * MAX7360_REG_GPIOCFG.
+ */
+#define MAX7360_REG_GPIOCFG 0x40
+#define MAX7360_REG_GPIOCTRL 0x41
+#define MAX7360_REG_GPIODEB 0x42
+#define MAX7360_REG_GPIOCURR 0x43
+#define MAX7360_REG_GPIOOUTM 0x44
+#define MAX7360_REG_PWMCOM 0x45
+#define MAX7360_REG_RTRCFG 0x46
+#define MAX7360_REG_I2C_TIMEOUT 0x48
+#define MAX7360_REG_GPIOIN 0x49
+#define MAX7360_REG_RTR_CNT 0x4A
+#define MAX7360_REG_PWMBASE 0x50
+#define MAX7360_REG_PWMCFGBASE 0x58
+
+#define MAX7360_REG_GPIO_LAST 0x5F
+
+#define MAX7360_REG_PWM(x) (MAX7360_REG_PWMBASE + (x))
+#define MAX7360_REG_PWMCFG(x) (MAX7360_REG_PWMCFGBASE + (x))
+
+/*
+ * Configuration register bits
+ */
+#define MAX7360_FIFO_EMPTY 0x3F
+#define MAX7360_FIFO_OVERFLOW 0x7F
+#define MAX7360_FIFO_RELEASE BIT(6)
+#define MAX7360_FIFO_COL GENMASK(5, 3)
+#define MAX7360_FIFO_ROW GENMASK(2, 0)
+
+#define MAX7360_CFG_SLEEP BIT(7)
+#define MAX7360_CFG_INTERRUPT BIT(5)
+#define MAX7360_CFG_KEY_RELEASE BIT(3)
+#define MAX7360_CFG_WAKEUP BIT(1)
+#define MAX7360_CFG_TIMEOUT BIT(0)
+
+#define MAX7360_DEBOUNCE GENMASK(4, 0)
+#define MAX7360_DEBOUNCE_MIN 9
+#define MAX7360_DEBOUNCE_MAX 40
+#define MAX7360_PORTS GENMASK(8, 5)
+
+#define MAX7360_INTERRUPT_TIME_MASK GENMASK(4, 0)
+#define MAX7360_INTERRUPT_FIFO_MASK GENMASK(7, 5)
+
+#define MAX7360_PORT_CFG_INTERRUPT_MASK BIT(7)
+#define MAX7360_PORT_CFG_INTERRUPT_EDGES BIT(6)
+#define MAX7360_PORT_CFG_COMMON_PWM BIT(5)
+
+/*
+ * Autosleep register values
+ */
+#define MAX7360_AUTOSLEEP_8192MS 0x01
+#define MAX7360_AUTOSLEEP_4096MS 0x02
+#define MAX7360_AUTOSLEEP_2048MS 0x03
+#define MAX7360_AUTOSLEEP_1024MS 0x04
+#define MAX7360_AUTOSLEEP_512MS 0x05
+#define MAX7360_AUTOSLEEP_256MS 0x06
+
+#define MAX7360_GPIO_CFG_RTR_EN BIT(7)
+#define MAX7360_GPIO_CFG_GPIO_EN BIT(4)
+#define MAX7360_GPIO_CFG_GPIO_RST BIT(3)
+
+#define MAX7360_ROT_DEBOUNCE GENMASK(3, 0)
+#define MAX7360_ROT_DEBOUNCE_MIN 0
+#define MAX7360_ROT_DEBOUNCE_MAX 15
+#define MAX7360_ROT_INTCNT GENMASK(6, 4)
+#define MAX7360_ROT_INTCNT_DLY BIT(7)
+
+#define MAX7360_INT_INTI 0
+#define MAX7360_INT_INTK 1
+
+#define MAX7360_INT_GPIO 0
+#define MAX7360_INT_KEYPAD 1
+#define MAX7360_INT_ROTARY 2
+
+#define MAX7360_NR_INTERNAL_IRQS 3
+
+#endif
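The GENMASK()/BIT() fields above compose naturally with FIELD_PREP(). A hedged sketch of building the debounce register value (the ms-minus-offset encoding and the plain regmap_write() path are assumptions of this sketch, not statements from the header):

#include <linux/bitfield.h>
#include <linux/minmax.h>
#include <linux/regmap.h>

/* Sketch: assumes the field encodes (ms - MAX7360_DEBOUNCE_MIN). */
static int my_set_debounce(struct regmap *regmap, unsigned int ms)
{
	unsigned int val;

	ms = clamp_t(unsigned int, ms, MAX7360_DEBOUNCE_MIN,
		     MAX7360_DEBOUNCE_MAX);
	val = FIELD_PREP(MAX7360_DEBOUNCE, ms - MAX7360_DEBOUNCE_MIN);

	return regmap_write(regmap, MAX7360_REG_DEBOUNCE, val);
}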
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index ea635d12a741..e6b8b4014dc0 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -2,7 +2,7 @@
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*/
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d0fb510875e6..7c4624acd1db 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -2,7 +2,7 @@
/*
* max77686.h - Driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
* This driver is based on max8997.h
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index a5bce099f1ed..ec2e1b2dceb8 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Common data shared between Maxim 77693 and 77843 drivers
+ * Common data shared between Maxim 77693, 77705 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
*/
@@ -11,6 +11,7 @@
enum max77693_types {
TYPE_MAX77693_UNKNOWN,
TYPE_MAX77693,
+ TYPE_MAX77705,
TYPE_MAX77843,
TYPE_MAX77693_NUM,
@@ -32,6 +33,7 @@ struct max77693_dev {
struct regmap *regmap_muic;
struct regmap *regmap_haptic; /* Only MAX77693 */
struct regmap *regmap_chg; /* Only MAX77843 */
+ struct regmap *regmap_leds; /* Only MAX77705 */
struct regmap_irq_chip_data *irq_data_led;
struct regmap_irq_chip_data *irq_data_topsys;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 54444ff2a5de..8e7c35b5ea1c 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -2,7 +2,7 @@
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
@@ -217,6 +217,10 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+/* MAX77693_CHG_REG_CHG_CNFG_02 register */
+#define CHG_CNFG_02_CC_SHIFT 0
+#define CHG_CNFG_02_CC_MASK 0x3F
+
/* MAX77693_CHG_REG_CHG_CNFG_03 register */
#define CHG_CNFG_03_TOITH_SHIFT 0
#define CHG_CNFG_03_TOTIME_SHIFT 3
@@ -244,6 +248,7 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
/* MAX77693 CHG_CTRL Register */
@@ -414,17 +419,6 @@ enum max77693_haptic_reg {
#define MAX77693_CONFIG2_MEN 6
#define MAX77693_CONFIG2_HTYP 5
-enum max77693_irq_source {
- LED_INT = 0,
- TOPSYS_INT,
- CHG_INT,
- MUIC_INT1,
- MUIC_INT2,
- MUIC_INT3,
-
- MAX77693_IRQ_GROUP_NR,
-};
-
#define SRC_IRQ_CHARGER BIT(0)
#define SRC_IRQ_TOP BIT(1)
#define SRC_IRQ_FLASH BIT(2)
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index c67c16ba8649..8e77ebeb7cf1 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -2,7 +2,7 @@
/*
* max77693.h - Driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
diff --git a/include/linux/mfd/max77705-private.h b/include/linux/mfd/max77705-private.h
new file mode 100644
index 000000000000..214de7feeb8c
--- /dev/null
+++ b/include/linux/mfd/max77705-private.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Maxim MAX77705 definitions.
+ *
+ * Copyright (C) 2015 Samsung Electronics, Inc.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77705_PRIV_H
+#define __LINUX_MFD_MAX77705_PRIV_H
+
+#define MAX77705_SRC_IRQ_CHG BIT(0)
+#define MAX77705_SRC_IRQ_TOP BIT(1)
+#define MAX77705_SRC_IRQ_FG BIT(2)
+#define MAX77705_SRC_IRQ_USBC BIT(3)
+#define MAX77705_SRC_IRQ_ALL (MAX77705_SRC_IRQ_CHG | MAX77705_SRC_IRQ_TOP | \
+ MAX77705_SRC_IRQ_FG | MAX77705_SRC_IRQ_USBC)
+
+/* MAX77705_PMIC_REG_PMICREV register */
+#define MAX77705_VERSION_SHIFT 3
+#define MAX77705_REVISION_MASK GENMASK(2, 0)
+#define MAX77705_VERSION_MASK GENMASK(7, MAX77705_VERSION_SHIFT)
+/* MAX77705_PMIC_REG_MAINCTRL1 register */
+#define MAX77705_MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77705_MAINCTRL1_BIASEN_MASK BIT(MAX77705_MAINCTRL1_BIASEN_SHIFT)
+/* MAX77705_PMIC_REG_MCONFIG2 (haptics) register */
+#define MAX77705_CONFIG2_MEN_SHIFT 6
+#define MAX77705_CONFIG2_MODE_SHIFT 7
+#define MAX77705_CONFIG2_HTYP_SHIFT 5
+/* MAX77705_PMIC_REG_SYSTEM_INT_MASK register */
+#define MAX77705_SYSTEM_IRQ_BSTEN_INT BIT(3)
+#define MAX77705_SYSTEM_IRQ_SYSUVLO_INT BIT(4)
+#define MAX77705_SYSTEM_IRQ_SYSOVLO_INT BIT(5)
+#define MAX77705_SYSTEM_IRQ_TSHDN_INT BIT(6)
+#define MAX77705_SYSTEM_IRQ_TM_INT BIT(7)
+/* MAX77705_RGBLED_REG_LEDEN register */
+#define MAX77705_RGBLED_EN_WIDTH 2
+/* MAX77705_RGBLED_REG_LEDBLNK register */
+#define MAX77705_RGB_DELAY_100_STEP		100
+#define MAX77705_RGB_DELAY_100_STEP_COUNT	4
+#define MAX77705_RGB_DELAY_100_STEP_LIM		500
+#define MAX77705_RGB_DELAY_250_STEP		250
+#define MAX77705_RGB_DELAY_250_STEP_LIM		3250
+#define MAX77705_RGB_DELAY_500_STEP		500
+#define MAX77705_RGB_DELAY_500_STEP_COUNT	10
+#define MAX77705_RGB_DELAY_500_STEP_LIM		5000
+#define MAX77705_RGB_DELAY_1000_STEP		1000
+#define MAX77705_RGB_DELAY_1000_STEP_COUNT	13
+#define MAX77705_RGB_DELAY_1000_STEP_LIM	8000
+#define MAX77705_RGB_DELAY_2000_STEP		2000
+#define MAX77705_RGB_DELAY_2000_STEP_COUNT	13
+#define MAX77705_RGB_DELAY_2000_STEP_LIM	12000
+
+enum max77705_hw_rev {
+ MAX77705_PASS1 = 1,
+ MAX77705_PASS2,
+ MAX77705_PASS3
+};
+
+enum max77705_reg {
+ MAX77705_PMIC_REG_PMICID1 = 0x00,
+ MAX77705_PMIC_REG_PMICREV = 0x01,
+ MAX77705_PMIC_REG_MAINCTRL1 = 0x02,
+ MAX77705_PMIC_REG_BSTOUT_MASK = 0x03,
+ MAX77705_PMIC_REG_FORCE_EN_MASK = 0x08,
+ MAX77705_PMIC_REG_MCONFIG = 0x10,
+ MAX77705_PMIC_REG_MCONFIG2 = 0x11,
+ MAX77705_PMIC_REG_INTSRC = 0x22,
+ MAX77705_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77705_PMIC_REG_SYSTEM_INT = 0x24,
+ MAX77705_PMIC_REG_RESERVED_25 = 0x25,
+ MAX77705_PMIC_REG_SYSTEM_INT_MASK = 0x26,
+ MAX77705_PMIC_REG_RESERVED_27 = 0x27,
+ MAX77705_PMIC_REG_RESERVED_28 = 0x28,
+ MAX77705_PMIC_REG_RESERVED_29 = 0x29,
+ MAX77705_PMIC_REG_BOOSTCONTROL1 = 0x4C,
+ MAX77705_PMIC_REG_BOOSTCONTROL2 = 0x4F,
+ MAX77705_PMIC_REG_SW_RESET = 0x50,
+ MAX77705_PMIC_REG_USBC_RESET = 0x51,
+
+ MAX77705_PMIC_REG_END
+};
+
+enum max77705_chg_reg {
+ MAX77705_CHG_REG_BASE = 0xB0,
+ MAX77705_CHG_REG_INT = 0,
+ MAX77705_CHG_REG_INT_MASK,
+ MAX77705_CHG_REG_INT_OK,
+ MAX77705_CHG_REG_DETAILS_00,
+ MAX77705_CHG_REG_DETAILS_01,
+ MAX77705_CHG_REG_DETAILS_02,
+ MAX77705_CHG_REG_DTLS_03,
+ MAX77705_CHG_REG_CNFG_00,
+ MAX77705_CHG_REG_CNFG_01,
+ MAX77705_CHG_REG_CNFG_02,
+ MAX77705_CHG_REG_CNFG_03,
+ MAX77705_CHG_REG_CNFG_04,
+ MAX77705_CHG_REG_CNFG_05,
+ MAX77705_CHG_REG_CNFG_06,
+ MAX77705_CHG_REG_CNFG_07,
+ MAX77705_CHG_REG_CNFG_08,
+ MAX77705_CHG_REG_CNFG_09,
+ MAX77705_CHG_REG_CNFG_10,
+	MAX77705_CHG_REG_CNFG_11,
+	MAX77705_CHG_REG_CNFG_12,
+ MAX77705_CHG_REG_CNFG_13,
+ MAX77705_CHG_REG_CNFG_14,
+ MAX77705_CHG_REG_SAFEOUT_CTRL
+};
+
+enum max77705_fuelgauge_reg {
+ STATUS_REG = 0x00,
+ VALRT_THRESHOLD_REG = 0x01,
+ TALRT_THRESHOLD_REG = 0x02,
+ SALRT_THRESHOLD_REG = 0x03,
+ REMCAP_REP_REG = 0x05,
+ SOCREP_REG = 0x06,
+ TEMPERATURE_REG = 0x08,
+ VCELL_REG = 0x09,
+ TIME_TO_EMPTY_REG = 0x11,
+ FULLSOCTHR_REG = 0x13,
+ CURRENT_REG = 0x0A,
+ AVG_CURRENT_REG = 0x0B,
+ SOCMIX_REG = 0x0D,
+ SOCAV_REG = 0x0E,
+ REMCAP_MIX_REG = 0x0F,
+ FULLCAP_REG = 0x10,
+ RFAST_REG = 0x15,
+ AVR_TEMPERATURE_REG = 0x16,
+ CYCLES_REG = 0x17,
+ DESIGNCAP_REG = 0x18,
+ AVR_VCELL_REG = 0x19,
+ TIME_TO_FULL_REG = 0x20,
+ CONFIG_REG = 0x1D,
+ ICHGTERM_REG = 0x1E,
+ REMCAP_AV_REG = 0x1F,
+ FULLCAP_NOM_REG = 0x23,
+ LEARN_CFG_REG = 0x28,
+ FILTER_CFG_REG = 0x29,
+ MISCCFG_REG = 0x2B,
+ QRTABLE20_REG = 0x32,
+ FULLCAP_REP_REG = 0x35,
+ RCOMP_REG = 0x38,
+ VEMPTY_REG = 0x3A,
+ FSTAT_REG = 0x3D,
+ DISCHARGE_THRESHOLD_REG = 0x40,
+ QRTABLE30_REG = 0x42,
+ ISYS_REG = 0x43,
+ DQACC_REG = 0x45,
+ DPACC_REG = 0x46,
+ AVGISYS_REG = 0x4B,
+ QH_REG = 0x4D,
+ VSYS_REG = 0xB1,
+ TALRTTH2_REG = 0xB2,
+ VBYP_REG = 0xB3,
+ CONFIG2_REG = 0xBB,
+ IIN_REG = 0xD0,
+ OCV_REG = 0xEE,
+ VFOCV_REG = 0xFB,
+ VFSOC_REG = 0xFF,
+
+ MAX77705_FG_END
+};
+
+enum max77705_led_reg {
+ MAX77705_RGBLED_REG_BASE = 0x30,
+ MAX77705_RGBLED_REG_LEDEN = 0,
+ MAX77705_RGBLED_REG_LED0BRT,
+ MAX77705_RGBLED_REG_LED1BRT,
+ MAX77705_RGBLED_REG_LED2BRT,
+ MAX77705_RGBLED_REG_LED3BRT,
+ MAX77705_RGBLED_REG_LEDRMP,
+ MAX77705_RGBLED_REG_LEDBLNK,
+ MAX77705_LED_REG_END
+};
+
+enum max77705_charger_battery_state {
+ MAX77705_BATTERY_NOBAT,
+ MAX77705_BATTERY_PREQUALIFICATION,
+ MAX77705_BATTERY_DEAD,
+ MAX77705_BATTERY_GOOD,
+ MAX77705_BATTERY_LOWVOLTAGE,
+ MAX77705_BATTERY_OVERVOLTAGE,
+ MAX77705_BATTERY_RESERVED
+};
+
+enum max77705_charger_charge_type {
+ MAX77705_CHARGER_CONSTANT_CURRENT = 1,
+ MAX77705_CHARGER_CONSTANT_VOLTAGE,
+ MAX77705_CHARGER_END_OF_CHARGE,
+ MAX77705_CHARGER_DONE
+};
+
+#endif /* __LINUX_MFD_MAX77705_PRIV_H */
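The charger and LED register enums above are offsets from their *_REG_BASE entries; a hedged sketch of how a sub-driver would address one (base-plus-offset addressing is inferred from the enum layout, not stated by the header):

/* Sketch: base + offset addressing is inferred from the enum layout. */
static int my_read_chg_int_ok(struct regmap *regmap, unsigned int *val)
{
	return regmap_read(regmap,
			   MAX77705_CHG_REG_BASE + MAX77705_CHG_REG_INT_OK,
			   val);
}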
diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h
new file mode 100644
index 000000000000..c6face34e385
--- /dev/null
+++ b/include/linux/mfd/max77759.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Google Inc.
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Maxim MAX77759 core driver
+ */
+
+#ifndef __LINUX_MFD_MAX77759_H
+#define __LINUX_MFD_MAX77759_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#define MAX77759_PMIC_REG_PMIC_ID 0x00
+#define MAX77759_PMIC_REG_PMIC_REVISION 0x01
+#define MAX77759_PMIC_REG_OTP_REVISION 0x02
+#define MAX77759_PMIC_REG_INTSRC 0x22
+#define MAX77759_PMIC_REG_INTSRCMASK 0x23
+#define MAX77759_PMIC_REG_INTSRC_MAXQ BIT(3)
+#define MAX77759_PMIC_REG_INTSRC_TOPSYS BIT(1)
+#define MAX77759_PMIC_REG_INTSRC_CHGR BIT(0)
+#define MAX77759_PMIC_REG_TOPSYS_INT 0x24
+#define MAX77759_PMIC_REG_TOPSYS_INT_MASK 0x26
+#define MAX77759_PMIC_REG_TOPSYS_INT_TSHDN BIT(6)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO BIT(5)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO BIT(4)
+#define MAX77759_PMIC_REG_TOPSYS_INT_FSHIP BIT(0)
+#define MAX77759_PMIC_REG_I2C_CNFG 0x40
+#define MAX77759_PMIC_REG_SWRESET 0x50
+#define MAX77759_PMIC_REG_CONTROL_FG 0x51
+
+#define MAX77759_MAXQ_REG_UIC_INT1 0x64
+#define MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI BIT(7)
+#define MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI BIT(6)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO6I BIT(1)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO5I BIT(0)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, en) (((en) & 1) << (offs))
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(offs) \
+ MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, ~0)
+#define MAX77759_MAXQ_REG_UIC_INT2 0x65
+#define MAX77759_MAXQ_REG_UIC_INT3 0x66
+#define MAX77759_MAXQ_REG_UIC_INT4 0x67
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS1 0x68
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS2 0x69
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS3 0x6a
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS4 0x6b
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS5 0x6c
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS6 0x6d
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS7	0x6e
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS8 0x6f
+#define MAX77759_MAXQ_REG_UIC_INT1_M 0x70
+#define MAX77759_MAXQ_REG_UIC_INT2_M 0x71
+#define MAX77759_MAXQ_REG_UIC_INT3_M 0x72
+#define MAX77759_MAXQ_REG_UIC_INT4_M 0x73
+#define MAX77759_MAXQ_REG_AP_DATAOUT0 0x81
+#define MAX77759_MAXQ_REG_AP_DATAOUT32 0xa1
+#define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1
+#define MAX77759_MAXQ_REG_UIC_SWRST 0xe0
+
+#define MAX77759_CHGR_REG_CHG_INT 0xb0
+#define MAX77759_CHGR_REG_CHG_INT2 0xb1
+#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2
+#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3
+#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4
+#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5
+#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6
+#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7
+#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8
+#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9
+#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba
+#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb
+#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc
+#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd
+#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe
+#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf
+#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0
+#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1
+#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2
+#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3
+#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4
+#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5
+#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6
+#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7
+#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8
+#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9
+#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca
+#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb
+#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc
+
+/* MaxQ opcodes for max77759_maxq_command() */
+#define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \
+ MAX77759_MAXQ_REG_AP_DATAOUT0 + \
+ 1)
+
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ 0x21
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE 0x22
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ 0x23
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE 0x24
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_READ 0x81
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82
+
+/**
+ * struct max77759 - core max77759 internal data structure
+ *
+ * @regmap_top: Regmap for accessing TOP registers
+ * @maxq_lock: Lock for serializing access to MaxQ
+ * @regmap_maxq: Regmap for accessing MaxQ registers
+ * @cmd_done: Used to signal completion of a MaxQ command
+ * @regmap_charger: Regmap for accessing charger registers
+ *
+ * The MAX77759 comprises several sub-blocks, namely TOP, MaxQ, Charger,
+ * Fuel Gauge, and TCPCI.
+ */
+struct max77759 {
+ struct regmap *regmap_top;
+
+ /* This protects MaxQ commands - only one can be active */
+ struct mutex maxq_lock;
+ struct regmap *regmap_maxq;
+ struct completion cmd_done;
+
+ struct regmap *regmap_charger;
+};
+
+/**
+ * struct max77759_maxq_command - structure containing the MaxQ command to
+ * send
+ *
+ * @length: The number of bytes to send.
+ * @cmd: The data to send.
+ */
+struct max77759_maxq_command {
+ u8 length;
+ u8 cmd[] __counted_by(length);
+};
+
+/**
+ * struct max77759_maxq_response - structure containing the MaxQ response
+ *
+ * @length: The number of bytes to receive.
+ * @rsp: The data received. Must have at least @length bytes space.
+ */
+struct max77759_maxq_response {
+ u8 length;
+ u8 rsp[] __counted_by(length);
+};
+
+/**
+ * max77759_maxq_command() - issue a MaxQ command and wait for the response
+ * and associated data
+ *
+ * @max77759: The core max77759 device handle.
+ * @cmd: The command to be sent.
+ * @rsp: Any response data associated with the command will be copied here;
+ * can be %NULL if the command has no response (other than ACK).
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp);
+
+#endif /* __LINUX_MFD_MAX77759_H */
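A hedged sketch of issuing one of the MaxQ opcodes above through max77759_maxq_command(), using the flexible-array structs it takes; DEFINE_FLEX() is the stock on-stack helper from <linux/overflow.h>, while the response payload layout is an assumption:

#include <linux/overflow.h>

/* Sketch: response layout (byte 0 echoing the opcode) is assumed. */
static int my_gpio_control_read(struct max77759 *max77759, u8 *ctrl)
{
	DEFINE_FLEX(struct max77759_maxq_command, cmd, cmd, length, 1);
	DEFINE_FLEX(struct max77759_maxq_response, rsp, rsp, length, 2);
	int ret;

	cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;

	ret = max77759_maxq_command(max77759, cmd, rsp);
	if (ret)
		return ret;

	*ctrl = rsp->rsp[1];
	return 0;
}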
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index a10cd6945232..261c0aae7d00 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -2,7 +2,7 @@
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
@@ -397,7 +397,6 @@ enum max8997_types {
};
extern int max8997_irq_init(struct max8997_dev *max8997);
-extern void max8997_irq_exit(struct max8997_dev *max8997);
extern int max8997_irq_resume(struct max8997_dev *max8997);
extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 5c2cc1103437..fb36e1386069 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -2,7 +2,7 @@
/*
* max8997.h - Driver for the Maxim 8997/8966
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This driver is based on max8998.h
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 6deb5f577602..d77dc18db6eb 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -2,7 +2,7 @@
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*/
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index a054e55c8646..5473f1983e31 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -2,7 +2,7 @@
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*/
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index f372926d5894..dd46fe424a80 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -31,12 +31,6 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
unsigned int mode, unsigned int channel,
u8 ato, bool atox, unsigned int *sample);
-/* Deprecated calls */
-static inline int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
-{
- return 0;
-}
-
static inline int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
irq_handler_t handler,
const char *name, void *dev)
diff --git a/include/linux/mfd/mt6328/core.h b/include/linux/mfd/mt6328/core.h
new file mode 100644
index 000000000000..9a08aed72b9f
--- /dev/null
+++ b/include/linux/mfd/mt6328/core.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_CORE_H__
+#define __MFD_MT6328_CORE_H__
+
+enum mt6328_irq_status_numbers {
+ MT6328_IRQ_STATUS_PWRKEY = 0,
+ MT6328_IRQ_STATUS_HOMEKEY,
+ MT6328_IRQ_STATUS_PWRKEY_R,
+ MT6328_IRQ_STATUS_HOMEKEY_R,
+ MT6328_IRQ_STATUS_THR_H,
+ MT6328_IRQ_STATUS_THR_L,
+ MT6328_IRQ_STATUS_BAT_H,
+ MT6328_IRQ_STATUS_BAT_L,
+ MT6328_IRQ_STATUS_RTC,
+ MT6328_IRQ_STATUS_AUDIO,
+ MT6328_IRQ_STATUS_ACCDET,
+ MT6328_IRQ_STATUS_ACCDET_EINT,
+ MT6328_IRQ_STATUS_ACCDET_NEGV,
+ MT6328_IRQ_STATUS_NI_LBAT_INT,
+ MT6328_IRQ_STATUS_VPROC_OC = 16,
+ MT6328_IRQ_STATUS_VSYS_OC,
+ MT6328_IRQ_STATUS_VLTE_OC,
+ MT6328_IRQ_STATUS_VCORE_OC,
+ MT6328_IRQ_STATUS_VPA_OC,
+ MT6328_IRQ_STATUS_LDO_OC,
+ MT6328_IRQ_STATUS_BAT2_H,
+ MT6328_IRQ_STATUS_BAT2_L,
+ MT6328_IRQ_STATUS_VISMPS0_H,
+ MT6328_IRQ_STATUS_VISMPS0_L,
+ MT6328_IRQ_STATUS_AUXADC_IMP,
+ MT6328_IRQ_STATUS_OV = 32,
+ MT6328_IRQ_STATUS_BVALID_DET,
+ MT6328_IRQ_STATUS_VBATON_HV,
+ MT6328_IRQ_STATUS_VBATON_UNDET,
+ MT6328_IRQ_STATUS_WATCHDOG,
+ MT6328_IRQ_STATUS_PCHR_CM_VDEC,
+ MT6328_IRQ_STATUS_CHRDET,
+ MT6328_IRQ_STATUS_PCHR_CM_VINC,
+ MT6328_IRQ_STATUS_FG_BAT_H,
+ MT6328_IRQ_STATUS_FG_BAT_L,
+ MT6328_IRQ_STATUS_FG_CUR_H,
+ MT6328_IRQ_STATUS_FG_CUR_L,
+ MT6328_IRQ_STATUS_FG_ZCV,
+ MT6328_IRQ_STATUS_SPKL_D,
+ MT6328_IRQ_STATUS_SPKL_AB,
+};
+
+#endif /* __MFD_MT6323_CORE_H__ */
diff --git a/include/linux/mfd/mt6328/registers.h b/include/linux/mfd/mt6328/registers.h
new file mode 100644
index 000000000000..8199aaea27b9
--- /dev/null
+++ b/include/linux/mfd/mt6328/registers.h
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_REGISTERS_H__
+#define __MFD_MT6328_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6328_STRUP_CON0 0x0000
+#define MT6328_STRUP_CON2 0x0002
+#define MT6328_STRUP_CON3 0x0004
+#define MT6328_STRUP_CON4 0x0006
+#define MT6328_STRUP_CON5 0x0008
+#define MT6328_STRUP_CON6 0x000a
+#define MT6328_STRUP_CON7 0x000c
+#define MT6328_STRUP_CON8 0x000e
+#define MT6328_STRUP_CON9 0x0010
+#define MT6328_STRUP_CON10 0x0012
+#define MT6328_STRUP_CON11 0x0014
+#define MT6328_STRUP_CON12 0x0016
+#define MT6328_STRUP_CON13 0x0018
+#define MT6328_STRUP_CON14 0x001a
+#define MT6328_STRUP_CON15 0x001c
+#define MT6328_STRUP_CON16 0x001e
+#define MT6328_STRUP_CON17 0x0020
+#define MT6328_STRUP_CON18 0x0022
+#define MT6328_STRUP_CON19 0x0024
+#define MT6328_STRUP_CON20 0x0026
+#define MT6328_STRUP_CON21 0x0028
+#define MT6328_STRUP_CON22 0x002a
+#define MT6328_STRUP_CON23 0x002c
+#define MT6328_STRUP_CON24 0x002e
+#define MT6328_STRUP_CON25 0x0030
+#define MT6328_STRUP_CON26 0x0032
+#define MT6328_STRUP_CON27 0x0034
+#define MT6328_STRUP_CON28 0x0036
+#define MT6328_STRUP_CON29 0x0038
+#define MT6328_STRUP_CON30 0x003a
+#define MT6328_STRUP_CON31 0x003c
+#define MT6328_STRUP_CON32 0x003e
+#define MT6328_STRUP_ANA_CON0 0x0040
+#define MT6328_HWCID 0x0200
+#define MT6328_SWCID 0x0202
+#define MT6328_TOP_CON 0x0204
+#define MT6328_TEST_OUT 0x0206
+#define MT6328_TEST_CON0 0x0208
+#define MT6328_TEST_CON1 0x020a
+#define MT6328_TESTMODE_SW 0x020c
+#define MT6328_EN_STATUS0 0x020e
+#define MT6328_EN_STATUS1 0x0210
+#define MT6328_EN_STATUS2 0x0212
+#define MT6328_OCSTATUS0 0x0214
+#define MT6328_OCSTATUS1 0x0216
+#define MT6328_OCSTATUS2 0x0218
+#define MT6328_PGDEBSTATUS 0x021a
+#define MT6328_PGSTATUS 0x021c
+#define MT6328_THERMALSTATUS 0x021e
+#define MT6328_TOPSTATUS 0x0220
+#define MT6328_TDSEL_CON 0x0222
+#define MT6328_RDSEL_CON 0x0224
+#define MT6328_SMT_CON0 0x0226
+#define MT6328_SMT_CON1 0x0228
+#define MT6328_SMT_CON2 0x022a
+#define MT6328_DRV_CON0 0x022c
+#define MT6328_DRV_CON1 0x022e
+#define MT6328_DRV_CON2 0x0230
+#define MT6328_DRV_CON3 0x0232
+#define MT6328_TOP_STATUS 0x0234
+#define MT6328_TOP_STATUS_SET 0x0236
+#define MT6328_TOP_STATUS_CLR 0x0238
+#define MT6328_RGS_ANA_MON 0x023a
+#define MT6328_TOP_CKPDN_CON0 0x023c
+#define MT6328_TOP_CKPDN_CON0_SET 0x023e
+#define MT6328_TOP_CKPDN_CON0_CLR 0x0240
+#define MT6328_TOP_CKPDN_CON1 0x0242
+#define MT6328_TOP_CKPDN_CON1_SET 0x0244
+#define MT6328_TOP_CKPDN_CON1_CLR 0x0246
+#define MT6328_TOP_CKPDN_CON2 0x0248
+#define MT6328_TOP_CKPDN_CON2_SET 0x024a
+#define MT6328_TOP_CKPDN_CON2_CLR 0x024c
+#define MT6328_TOP_CKPDN_CON3 0x024e
+#define MT6328_TOP_CKPDN_CON3_SET 0x0250
+#define MT6328_TOP_CKPDN_CON3_CLR 0x0252
+#define MT6328_TOP_CKPDN_CON4 0x0254
+#define MT6328_TOP_CKPDN_CON4_SET 0x0256
+#define MT6328_TOP_CKPDN_CON4_CLR 0x0258
+#define MT6328_TOP_CKSEL_CON0 0x025a
+#define MT6328_TOP_CKSEL_CON0_SET 0x025c
+#define MT6328_TOP_CKSEL_CON0_CLR 0x025e
+#define MT6328_TOP_CKSEL_CON1 0x0260
+#define MT6328_TOP_CKSEL_CON1_SET 0x0262
+#define MT6328_TOP_CKSEL_CON1_CLR 0x0264
+#define MT6328_TOP_CKSEL_CON2 0x0266
+#define MT6328_TOP_CKSEL_CON2_SET 0x0268
+#define MT6328_TOP_CKSEL_CON2_CLR 0x026a
+#define MT6328_TOP_CKDIVSEL_CON0 0x026c
+#define MT6328_TOP_CKDIVSEL_CON0_SET 0x026e
+#define MT6328_TOP_CKDIVSEL_CON0_CLR 0x0270
+#define MT6328_TOP_CKDIVSEL_CON1 0x0272
+#define MT6328_TOP_CKDIVSEL_CON1_SET 0x0274
+#define MT6328_TOP_CKDIVSEL_CON1_CLR 0x0276
+#define MT6328_TOP_CKHWEN_CON0 0x0278
+#define MT6328_TOP_CKHWEN_CON0_SET 0x027a
+#define MT6328_TOP_CKHWEN_CON0_CLR 0x027c
+#define MT6328_TOP_CKHWEN_CON1 0x027e
+#define MT6328_TOP_CKHWEN_CON1_SET 0x0280
+#define MT6328_TOP_CKHWEN_CON1_CLR 0x0282
+#define MT6328_TOP_CKTST_CON0 0x0284
+#define MT6328_TOP_CKTST_CON1 0x0286
+#define MT6328_TOP_CKTST_CON2 0x0288
+#define MT6328_TOP_CLKSQ 0x028a
+#define MT6328_TOP_CLKSQ_SET 0x028c
+#define MT6328_TOP_CLKSQ_CLR 0x028e
+#define MT6328_TOP_CLKSQ_RTC 0x0290
+#define MT6328_TOP_CLKSQ_RTC_SET 0x0292
+#define MT6328_TOP_CLKSQ_RTC_CLR 0x0294
+#define MT6328_TOP_CLK_TRIM 0x0296
+#define MT6328_TOP_RST_CON0 0x0298
+#define MT6328_TOP_RST_CON0_SET 0x029a
+#define MT6328_TOP_RST_CON0_CLR 0x029c
+#define MT6328_TOP_RST_CON1 0x029e
+#define MT6328_TOP_RST_MISC 0x02a0
+#define MT6328_TOP_RST_MISC_SET 0x02a2
+#define MT6328_TOP_RST_MISC_CLR 0x02a4
+#define MT6328_TOP_RST_STATUS 0x02a6
+#define MT6328_TOP_RST_STATUS_SET 0x02a8
+#define MT6328_TOP_RST_STATUS_CLR 0x02aa
+#define MT6328_INT_CON0 0x02ac
+#define MT6328_INT_CON0_SET 0x02ae
+#define MT6328_INT_CON0_CLR 0x02b0
+#define MT6328_INT_CON1 0x02b2
+#define MT6328_INT_CON1_SET 0x02b4
+#define MT6328_INT_CON1_CLR 0x02b6
+#define MT6328_INT_CON2 0x02b8
+#define MT6328_INT_CON2_SET 0x02ba
+#define MT6328_INT_CON2_CLR 0x02bc
+#define MT6328_INT_MISC_CON 0x02be
+#define MT6328_INT_MISC_CON_SET 0x02c0
+#define MT6328_INT_MISC_CON_CLR 0x02c2
+#define MT6328_INT_STATUS0 0x02c4
+#define MT6328_INT_STATUS1 0x02c6
+#define MT6328_INT_STATUS2 0x02c8
+#define MT6328_OC_GEAR_0 0x02ca
+#define MT6328_FQMTR_CON0 0x02cc
+#define MT6328_FQMTR_CON1 0x02ce
+#define MT6328_FQMTR_CON2 0x02d0
+#define MT6328_RG_SPI_CON 0x02d2
+#define MT6328_DEW_DIO_EN 0x02d4
+#define MT6328_DEW_READ_TEST 0x02d6
+#define MT6328_DEW_WRITE_TEST 0x02d8
+#define MT6328_DEW_CRC_SWRST 0x02da
+#define MT6328_DEW_CRC_EN 0x02dc
+#define MT6328_DEW_CRC_VAL 0x02de
+#define MT6328_DEW_DBG_MON_SEL 0x02e0
+#define MT6328_DEW_CIPHER_KEY_SEL 0x02e2
+#define MT6328_DEW_CIPHER_IV_SEL 0x02e4
+#define MT6328_DEW_CIPHER_EN 0x02e6
+#define MT6328_DEW_CIPHER_RDY 0x02e8
+#define MT6328_DEW_CIPHER_MODE 0x02ea
+#define MT6328_DEW_CIPHER_SWRST 0x02ec
+#define MT6328_DEW_RDDMY_NO 0x02ee
+#define MT6328_INT_TYPE_CON0 0x02f0
+#define MT6328_INT_TYPE_CON0_SET 0x02f2
+#define MT6328_INT_TYPE_CON0_CLR 0x02f4
+#define MT6328_INT_TYPE_CON1 0x02f6
+#define MT6328_INT_TYPE_CON1_SET 0x02f8
+#define MT6328_INT_TYPE_CON1_CLR 0x02fa
+#define MT6328_INT_TYPE_CON2 0x02fc
+#define MT6328_INT_TYPE_CON2_SET 0x02fe
+#define MT6328_INT_TYPE_CON2_CLR 0x0300
+#define MT6328_INT_STA 0x0302
+#define MT6328_BUCK_ALL_CON0 0x0400
+#define MT6328_BUCK_ALL_CON1 0x0402
+#define MT6328_BUCK_ALL_CON2 0x0404
+#define MT6328_BUCK_ALL_CON3 0x0406
+#define MT6328_BUCK_ALL_CON4 0x0408
+#define MT6328_BUCK_ALL_CON5 0x040a
+#define MT6328_BUCK_ALL_CON6 0x040c
+#define MT6328_BUCK_ALL_CON9 0x040e
+#define MT6328_BUCK_ALL_CON12 0x0410
+#define MT6328_BUCK_ALL_CON13 0x0412
+#define MT6328_BUCK_ALL_CON14 0x0414
+#define MT6328_BUCK_ALL_CON16 0x0416
+#define MT6328_BUCK_ALL_CON18 0x0418
+#define MT6328_BUCK_ALL_CON19 0x041a
+#define MT6328_BUCK_ALL_CON20 0x041c
+#define MT6328_BUCK_ALL_CON21 0x041e
+#define MT6328_BUCK_ALL_CON22 0x0420
+#define MT6328_BUCK_ALL_CON23 0x0422
+#define MT6328_BUCK_ALL_CON24 0x0424
+#define MT6328_BUCK_ALL_CON25 0x0426
+#define MT6328_BUCK_ALL_CON26 0x0428
+#define MT6328_BUCK_ALL_CON27 0x042a
+#define MT6328_BUCK_ALL_CON28 0x042c
+#define MT6328_SMPS_TOP_ANA_CON0 0x042e
+#define MT6328_SMPS_TOP_ANA_CON1 0x0430
+#define MT6328_SMPS_TOP_ANA_CON2 0x0432
+#define MT6328_SMPS_TOP_ANA_CON3 0x0434
+#define MT6328_SMPS_TOP_ANA_CON4 0x0436
+#define MT6328_SMPS_TOP_ANA_CON5 0x0438
+#define MT6328_SMPS_TOP_ANA_CON6 0x043a
+#define MT6328_SMPS_TOP_ANA_CON7 0x043c
+#define MT6328_SMPS_TOP_ANA_CON8 0x043e
+#define MT6328_VCORE_ANA_CON0 0x0440
+#define MT6328_VCORE_ANA_CON1 0x0442
+#define MT6328_VCORE_ANA_CON2 0x0444
+#define MT6328_VCORE_ANA_CON3 0x0446
+#define MT6328_VCORE_ANA_CON4 0x0448
+#define MT6328_VSYS22_ANA_CON0 0x044a
+#define MT6328_VSYS22_ANA_CON1 0x044c
+#define MT6328_VSYS22_ANA_CON2 0x044e
+#define MT6328_VSYS22_ANA_CON3 0x0450
+#define MT6328_VSYS22_ANA_CON4 0x0452
+#define MT6328_VPROC_ANA_CON0 0x0454
+#define MT6328_VPROC_ANA_CON1 0x0456
+#define MT6328_VPROC_ANA_CON2 0x0458
+#define MT6328_VPROC_ANA_CON3 0x045a
+#define MT6328_VPROC_ANA_CON4 0x045c
+#define MT6328_OSC32_ANA_CON0 0x045e
+#define MT6328_OSC32_ANA_CON1 0x0460
+#define MT6328_VPA_ANA_CON0 0x0462
+#define MT6328_VPA_ANA_CON1 0x0464
+#define MT6328_VPA_ANA_CON2 0x0466
+#define MT6328_VPA_ANA_CON3 0x0468
+#define MT6328_VLTE_ANA_CON0 0x046a
+#define MT6328_VLTE_ANA_CON1 0x046c
+#define MT6328_VLTE_ANA_CON2 0x046e
+#define MT6328_VLTE_ANA_CON3 0x0470
+#define MT6328_VLTE_ANA_CON4 0x0472
+#define MT6328_VPROC_CON0 0x0474
+#define MT6328_VPROC_CON1 0x0476
+#define MT6328_VPROC_CON2 0x0478
+#define MT6328_VPROC_CON3 0x047a
+#define MT6328_VPROC_CON4 0x047c
+#define MT6328_VPROC_CON5 0x047e
+#define MT6328_VPROC_CON6 0x0480
+#define MT6328_VPROC_CON7 0x0482
+#define MT6328_VPROC_CON8 0x0484
+#define MT6328_VPROC_CON9 0x0486
+#define MT6328_VPROC_CON10 0x0488
+#define MT6328_VPROC_CON11 0x048a
+#define MT6328_VPROC_CON12 0x048c
+#define MT6328_VPROC_CON13 0x048e
+#define MT6328_VPROC_CON14 0x0490
+#define MT6328_VPROC_CON15 0x0492
+#define MT6328_VPROC_CON16 0x0494
+#define MT6328_VPROC_CON17 0x0496
+#define MT6328_VPROC_CON18 0x0498
+#define MT6328_VPROC_CON19 0x049a
+#define MT6328_VSRAM_CON0 0x049c
+#define MT6328_VSRAM_CON1 0x049e
+#define MT6328_VSRAM_CON2 0x04a0
+#define MT6328_VSRAM_CON3 0x04a2
+#define MT6328_VSRAM_CON4 0x04a4
+#define MT6328_VSRAM_CON5 0x04a6
+#define MT6328_VSRAM_CON6 0x04a8
+#define MT6328_VSRAM_CON7 0x04aa
+#define MT6328_VSRAM_CON8 0x04ac
+#define MT6328_VSRAM_CON9 0x04ae
+#define MT6328_VSRAM_CON10 0x04b0
+#define MT6328_VSRAM_CON11 0x04b2
+#define MT6328_VSRAM_CON12 0x04b4
+#define MT6328_VSRAM_CON13 0x04b6
+#define MT6328_VSRAM_CON14 0x04b8
+#define MT6328_VSRAM_CON15 0x04ba
+#define MT6328_VSRAM_CON16 0x04bc
+#define MT6328_VSRAM_CON17 0x04be
+#define MT6328_VSRAM_CON18 0x04c0
+#define MT6328_VSRAM_CON19 0x04c2
+#define MT6328_VLTE_CON0 0x04c4
+#define MT6328_VLTE_CON1 0x04c6
+#define MT6328_VLTE_CON2 0x04c8
+#define MT6328_VLTE_CON3 0x04ca
+#define MT6328_VLTE_CON4 0x04cc
+#define MT6328_VLTE_CON5 0x04ce
+#define MT6328_VLTE_CON6 0x04d0
+#define MT6328_VLTE_CON7 0x04d2
+#define MT6328_VLTE_CON8 0x04d4
+#define MT6328_VLTE_CON9 0x04d6
+#define MT6328_VLTE_CON10 0x04d8
+#define MT6328_VLTE_CON11 0x04da
+#define MT6328_VLTE_CON12 0x04dc
+#define MT6328_VLTE_CON13 0x04de
+#define MT6328_VLTE_CON14 0x04e0
+#define MT6328_VLTE_CON15 0x04e2
+#define MT6328_VLTE_CON16 0x04e4
+#define MT6328_VLTE_CON17 0x04e6
+#define MT6328_VLTE_CON18 0x04e8
+#define MT6328_VLTE_CON19 0x04ea
+#define MT6328_VCORE1_CON0 0x0600
+#define MT6328_VCORE1_CON1 0x0602
+#define MT6328_VCORE1_CON2 0x0604
+#define MT6328_VCORE1_CON3 0x0606
+#define MT6328_VCORE1_CON4 0x0608
+#define MT6328_VCORE1_CON5 0x060a
+#define MT6328_VCORE1_CON6 0x060c
+#define MT6328_VCORE1_CON7 0x060e
+#define MT6328_VCORE1_CON8 0x0610
+#define MT6328_VCORE1_CON9 0x0612
+#define MT6328_VCORE1_CON10 0x0614
+#define MT6328_VCORE1_CON11 0x0616
+#define MT6328_VCORE1_CON12 0x0618
+#define MT6328_VCORE1_CON13 0x061a
+#define MT6328_VCORE1_CON14 0x061c
+#define MT6328_VCORE1_CON15 0x061e
+#define MT6328_VCORE1_CON16 0x0620
+#define MT6328_VCORE1_CON17 0x0622
+#define MT6328_VCORE1_CON18 0x0624
+#define MT6328_VCORE1_CON19 0x0626
+#define MT6328_VSYS22_CON0 0x0628
+#define MT6328_VSYS22_CON1 0x062a
+#define MT6328_VSYS22_CON2 0x062c
+#define MT6328_VSYS22_CON3 0x062e
+#define MT6328_VSYS22_CON4 0x0630
+#define MT6328_VSYS22_CON5 0x0632
+#define MT6328_VSYS22_CON6 0x0634
+#define MT6328_VSYS22_CON7 0x0636
+#define MT6328_VSYS22_CON8 0x0638
+#define MT6328_VSYS22_CON9 0x063a
+#define MT6328_VSYS22_CON10 0x063c
+#define MT6328_VSYS22_CON11 0x063e
+#define MT6328_VSYS22_CON12 0x0640
+#define MT6328_VSYS22_CON13 0x0642
+#define MT6328_VSYS22_CON14 0x0644
+#define MT6328_VSYS22_CON15 0x0646
+#define MT6328_VSYS22_CON16 0x0648
+#define MT6328_VSYS22_CON17 0x064a
+#define MT6328_VSYS22_CON18 0x064c
+#define MT6328_VSYS22_CON19 0x064e
+#define MT6328_VPA_CON0 0x0650
+#define MT6328_VPA_CON1 0x0652
+#define MT6328_VPA_CON2 0x0654
+#define MT6328_VPA_CON3 0x0656
+#define MT6328_VPA_CON4 0x0658
+#define MT6328_VPA_CON5 0x065a
+#define MT6328_VPA_CON6 0x065c
+#define MT6328_VPA_CON7 0x065e
+#define MT6328_VPA_CON8 0x0660
+#define MT6328_VPA_CON9 0x0662
+#define MT6328_VPA_CON10 0x0664
+#define MT6328_VPA_CON11 0x0666
+#define MT6328_VPA_CON12 0x0668
+#define MT6328_VPA_CON13 0x066a
+#define MT6328_VPA_CON14 0x066c
+#define MT6328_VPA_CON15 0x066e
+#define MT6328_VPA_CON16 0x0670
+#define MT6328_VPA_CON17 0x0672
+#define MT6328_VPA_CON18 0x0674
+#define MT6328_VPA_CON19 0x0676
+#define MT6328_VPA_CON20 0x0678
+#define MT6328_VPA_CON21 0x067a
+#define MT6328_VPA_CON22 0x067c
+#define MT6328_VPA_CON23 0x067e
+#define MT6328_VPA_CON24 0x0680
+#define MT6328_BUCK_K_CON0 0x0682
+#define MT6328_BUCK_K_CON1 0x0684
+#define MT6328_BUCK_K_CON2 0x0686
+#define MT6328_BUCK_K_CON3 0x0688
+#define MT6328_ZCD_CON0 0x0800
+#define MT6328_ZCD_CON1 0x0802
+#define MT6328_ZCD_CON2 0x0804
+#define MT6328_ZCD_CON3 0x0806
+#define MT6328_ZCD_CON4 0x0808
+#define MT6328_ZCD_CON5 0x080a
+#define MT6328_ISINK0_CON0 0x080c
+#define MT6328_ISINK0_CON1 0x080e
+#define MT6328_ISINK0_CON2 0x0810
+#define MT6328_ISINK0_CON3 0x0812
+#define MT6328_ISINK1_CON0 0x0814
+#define MT6328_ISINK1_CON1 0x0816
+#define MT6328_ISINK1_CON2 0x0818
+#define MT6328_ISINK1_CON3 0x081a
+#define MT6328_ISINK2_CON1 0x081c
+#define MT6328_ISINK3_CON1 0x081e
+#define MT6328_ISINK_ANA0 0x0820
+#define MT6328_ISINK_ANA1 0x0822
+#define MT6328_ISINK_PHASE_DLY 0x0824
+#define MT6328_ISINK_SFSTR 0x0826
+#define MT6328_ISINK_EN_CTRL 0x0828
+#define MT6328_ISINK_MODE_CTRL 0x082a
+#define MT6328_VTCXO_0_CON0 0x0a00
+#define MT6328_VTCXO_1_CON0 0x0a02
+#define MT6328_VAUD28_CON0 0x0a04
+#define MT6328_VAUX18_CON0 0x0a06
+#define MT6328_VRF18_0_CON0 0x0a08
+#define MT6328_VRF18_0_CON1 0x0a0a
+#define MT6328_VCAMA_CON0 0x0a0c
+#define MT6328_VCN28_CON0 0x0a0e
+#define MT6328_VCN33_CON0 0x0a10
+#define MT6328_VCN33_CON1 0x0a12
+#define MT6328_VCN33_CON2 0x0a14
+#define MT6328_VRF18_1_CON0 0x0a16
+#define MT6328_VRF18_1_CON1 0x0a18
+#define MT6328_VUSB33_CON0 0x0a1a
+#define MT6328_VMCH_CON0 0x0a1c
+#define MT6328_VMCH_CON1 0x0a1e
+#define MT6328_VMC_CON0 0x0a20
+#define MT6328_VMC_CON1 0x0a22
+#define MT6328_VEMC_3V3_CON0 0x0a24
+#define MT6328_VEMC_3V3_CON1 0x0a26
+#define MT6328_VIO28_CON0 0x0a28
+#define MT6328_VCAMAF_CON0 0x0a2a
+#define MT6328_VGP1_CON0 0x0a2c
+#define MT6328_VGP1_CON1 0x0a2e
+#define MT6328_VEFUSE_CON0 0x0a30
+#define MT6328_VSIM1_CON0 0x0a32
+#define MT6328_VSIM2_CON0 0x0a34
+#define MT6328_VIO18_CON0 0x0a36
+#define MT6328_VIBR_CON0 0x0a38
+#define MT6328_VCN18_CON0 0x0a3a
+#define MT6328_VCAM_CON0 0x0a3c
+#define MT6328_VCAMIO_CON0 0x0a3e
+#define MT6328_LDO_VSRAM_CON0 0x0a40
+#define MT6328_LDO_VSRAM_CON1 0x0a42
+#define MT6328_VTREF_CON0 0x0a44
+#define MT6328_VM_CON0 0x0a46
+#define MT6328_VM_CON1 0x0a48
+#define MT6328_VRTC_CON0 0x0a4a
+#define MT6328_LDO_OCFB0 0x0a4c
+#define MT6328_ALDO_ANA_CON0 0x0a4e
+#define MT6328_ADLDO_ANA_CON1 0x0a50
+#define MT6328_ADLDO_ANA_CON2 0x0a52
+#define MT6328_ADLDO_ANA_CON3 0x0a54
+#define MT6328_ADLDO_ANA_CON4 0x0a56
+#define MT6328_ADLDO_ANA_CON5 0x0a58
+#define MT6328_ADLDO_ANA_CON6 0x0a5a
+#define MT6328_ADLDO_ANA_CON7 0x0a5c
+#define MT6328_ADLDO_ANA_CON8 0x0a5e
+#define MT6328_ADLDO_ANA_CON9 0x0a60
+#define MT6328_ADLDO_ANA_CON10 0x0a62
+#define MT6328_ADLDO_ANA_CON11 0x0a64
+#define MT6328_ADLDO_ANA_CON12 0x0a66
+#define MT6328_ADLDO_ANA_CON13 0x0a68
+#define MT6328_DLDO_ANA_CON0 0x0a6a
+#define MT6328_DLDO_ANA_CON1 0x0a6c
+#define MT6328_DLDO_ANA_CON2 0x0a6e
+#define MT6328_DLDO_ANA_CON3 0x0a70
+#define MT6328_DLDO_ANA_CON4 0x0a72
+#define MT6328_DLDO_ANA_CON5 0x0a74
+#define MT6328_SLDO_ANA_CON0 0x0a76
+#define MT6328_SLDO_ANA_CON1 0x0a78
+#define MT6328_SLDO_ANA_CON2 0x0a7a
+#define MT6328_SLDO_ANA_CON3 0x0a7c
+#define MT6328_SLDO_ANA_CON4 0x0a7e
+#define MT6328_SLDO_ANA_CON5 0x0a80
+#define MT6328_SLDO_ANA_CON6 0x0a82
+#define MT6328_SLDO_ANA_CON7 0x0a84
+#define MT6328_SLDO_ANA_CON8 0x0a86
+#define MT6328_SLDO_ANA_CON9 0x0a88
+#define MT6328_SLDO_ANA_CON10 0x0a8a
+#define MT6328_LDO_RSV_CON0 0x0a8c
+#define MT6328_LDO_RSV_CON1 0x0a8e
+#define MT6328_SPK_CON0 0x0a90
+#define MT6328_SPK_CON1 0x0a92
+#define MT6328_SPK_CON2 0x0a94
+#define MT6328_SPK_CON3 0x0a96
+#define MT6328_SPK_CON4 0x0a98
+#define MT6328_SPK_CON5 0x0a9a
+#define MT6328_SPK_CON6 0x0a9c
+#define MT6328_SPK_CON7 0x0a9e
+#define MT6328_SPK_CON8 0x0aa0
+#define MT6328_SPK_CON9 0x0aa2
+#define MT6328_SPK_CON10 0x0aa4
+#define MT6328_SPK_CON11 0x0aa6
+#define MT6328_SPK_CON12 0x0aa8
+#define MT6328_SPK_CON13 0x0aaa
+#define MT6328_SPK_CON14 0x0aac
+#define MT6328_SPK_CON15 0x0aae
+#define MT6328_SPK_CON16 0x0ab0
+#define MT6328_SPK_ANA_CON0 0x0ab2
+#define MT6328_SPK_ANA_CON1 0x0ab4
+#define MT6328_SPK_ANA_CON3 0x0ab6
+#define MT6328_OTP_CON0 0x0c00
+#define MT6328_OTP_CON1 0x0c02
+#define MT6328_OTP_CON2 0x0c04
+#define MT6328_OTP_CON3 0x0c06
+#define MT6328_OTP_CON4 0x0c08
+#define MT6328_OTP_CON5 0x0c0a
+#define MT6328_OTP_CON6 0x0c0c
+#define MT6328_OTP_CON7 0x0c0e
+#define MT6328_OTP_CON8 0x0c10
+#define MT6328_OTP_CON9 0x0c12
+#define MT6328_OTP_CON10 0x0c14
+#define MT6328_OTP_CON11 0x0c16
+#define MT6328_OTP_CON12 0x0c18
+#define MT6328_OTP_CON13 0x0c1a
+#define MT6328_OTP_CON14 0x0c1c
+#define MT6328_OTP_DOUT_0_15 0x0c1e
+#define MT6328_OTP_DOUT_16_31 0x0c20
+#define MT6328_OTP_DOUT_32_47 0x0c22
+#define MT6328_OTP_DOUT_48_63 0x0c24
+#define MT6328_OTP_DOUT_64_79 0x0c26
+#define MT6328_OTP_DOUT_80_95 0x0c28
+#define MT6328_OTP_DOUT_96_111 0x0c2a
+#define MT6328_OTP_DOUT_112_127 0x0c2c
+#define MT6328_OTP_DOUT_128_143 0x0c2e
+#define MT6328_OTP_DOUT_144_159 0x0c30
+#define MT6328_OTP_DOUT_160_175 0x0c32
+#define MT6328_OTP_DOUT_176_191 0x0c34
+#define MT6328_OTP_DOUT_192_207 0x0c36
+#define MT6328_OTP_DOUT_208_223 0x0c38
+#define MT6328_OTP_DOUT_224_239 0x0c3a
+#define MT6328_OTP_DOUT_240_255 0x0c3c
+#define MT6328_OTP_DOUT_256_271 0x0c3e
+#define MT6328_OTP_DOUT_272_287 0x0c40
+#define MT6328_OTP_DOUT_288_303 0x0c42
+#define MT6328_OTP_DOUT_304_319 0x0c44
+#define MT6328_OTP_DOUT_320_335 0x0c46
+#define MT6328_OTP_DOUT_336_351 0x0c48
+#define MT6328_OTP_DOUT_352_367 0x0c4a
+#define MT6328_OTP_DOUT_368_383 0x0c4c
+#define MT6328_OTP_DOUT_384_399 0x0c4e
+#define MT6328_OTP_DOUT_400_415 0x0c50
+#define MT6328_OTP_DOUT_416_431 0x0c52
+#define MT6328_OTP_DOUT_432_447 0x0c54
+#define MT6328_OTP_DOUT_448_463 0x0c56
+#define MT6328_OTP_DOUT_464_479 0x0c58
+#define MT6328_OTP_DOUT_480_495 0x0c5a
+#define MT6328_OTP_DOUT_496_511 0x0c5c
+#define MT6328_OTP_VAL_0_15 0x0c5e
+#define MT6328_OTP_VAL_16_31 0x0c60
+#define MT6328_OTP_VAL_32_47 0x0c62
+#define MT6328_OTP_VAL_48_63 0x0c64
+#define MT6328_OTP_VAL_64_79 0x0c66
+#define MT6328_OTP_VAL_80_95 0x0c68
+#define MT6328_OTP_VAL_96_111 0x0c6a
+#define MT6328_OTP_VAL_112_127 0x0c6c
+#define MT6328_OTP_VAL_128_143 0x0c6e
+#define MT6328_OTP_VAL_144_159 0x0c70
+#define MT6328_OTP_VAL_160_175 0x0c72
+#define MT6328_OTP_VAL_176_191 0x0c74
+#define MT6328_OTP_VAL_192_207 0x0c76
+#define MT6328_OTP_VAL_208_223 0x0c78
+#define MT6328_OTP_VAL_224_239 0x0c7a
+#define MT6328_OTP_VAL_240_255 0x0c7c
+#define MT6328_OTP_VAL_256_271 0x0c7e
+#define MT6328_OTP_VAL_272_287 0x0c80
+#define MT6328_OTP_VAL_288_303 0x0c82
+#define MT6328_OTP_VAL_304_319 0x0c84
+#define MT6328_OTP_VAL_320_335 0x0c86
+#define MT6328_OTP_VAL_336_351 0x0c88
+#define MT6328_OTP_VAL_352_367 0x0c8a
+#define MT6328_OTP_VAL_368_383 0x0c8c
+#define MT6328_OTP_VAL_384_399 0x0c8e
+#define MT6328_OTP_VAL_400_415 0x0c90
+#define MT6328_OTP_VAL_416_431 0x0c92
+#define MT6328_OTP_VAL_432_447 0x0c94
+#define MT6328_OTP_VAL_448_463 0x0c96
+#define MT6328_OTP_VAL_464_479 0x0c98
+#define MT6328_OTP_VAL_480_495 0x0c9a
+#define MT6328_OTP_VAL_496_511 0x0c9c
+#define MT6328_RTC_MIX_CON0 0x0c9e
+#define MT6328_RTC_MIX_CON1 0x0ca0
+#define MT6328_RTC_MIX_CON2 0x0ca2
+#define MT6328_FGADC_CON0 0x0ca4
+#define MT6328_FGADC_CON1 0x0ca6
+#define MT6328_FGADC_CON2 0x0ca8
+#define MT6328_FGADC_CON3 0x0caa
+#define MT6328_FGADC_CON4 0x0cac
+#define MT6328_FGADC_CON5 0x0cae
+#define MT6328_FGADC_CON6 0x0cb0
+#define MT6328_FGADC_CON7 0x0cb2
+#define MT6328_FGADC_CON8 0x0cb4
+#define MT6328_FGADC_CON9 0x0cb6
+#define MT6328_FGADC_CON10 0x0cb8
+#define MT6328_FGADC_CON11 0x0cba
+#define MT6328_FGADC_CON12 0x0cbc
+#define MT6328_FGADC_CON13 0x0cbe
+#define MT6328_FGADC_CON14 0x0cc0
+#define MT6328_FGADC_CON15 0x0cc2
+#define MT6328_FGADC_CON16 0x0cc4
+#define MT6328_FGADC_CON17 0x0cc6
+#define MT6328_FGADC_CON18 0x0cc8
+#define MT6328_FGADC_CON19 0x0cca
+#define MT6328_FGADC_CON20 0x0ccc
+#define MT6328_FGADC_CON21 0x0cce
+#define MT6328_FGADC_CON22 0x0cd0
+#define MT6328_FGADC_CON23 0x0cd2
+#define MT6328_FGADC_CON24 0x0cd4
+#define MT6328_FGADC_CON25 0x0cd6
+#define MT6328_FGADC_CON26 0x0cd8
+#define MT6328_FGADC_CON27 0x0cda
+#define MT6328_AUDDEC_ANA_CON0 0x0cdc
+#define MT6328_AUDDEC_ANA_CON1 0x0cde
+#define MT6328_AUDDEC_ANA_CON2 0x0ce0
+#define MT6328_AUDDEC_ANA_CON3 0x0ce2
+#define MT6328_AUDDEC_ANA_CON4 0x0ce4
+#define MT6328_AUDDEC_ANA_CON5 0x0ce6
+#define MT6328_AUDDEC_ANA_CON6 0x0ce8
+#define MT6328_AUDDEC_ANA_CON7 0x0cea
+#define MT6328_AUDDEC_ANA_CON8 0x0cec
+#define MT6328_AUDENC_ANA_CON0 0x0cee
+#define MT6328_AUDENC_ANA_CON1 0x0cf0
+#define MT6328_AUDENC_ANA_CON2 0x0cf2
+#define MT6328_AUDENC_ANA_CON3 0x0cf4
+#define MT6328_AUDENC_ANA_CON4 0x0cf6
+#define MT6328_AUDENC_ANA_CON5 0x0cf8
+#define MT6328_AUDENC_ANA_CON6 0x0cfa
+#define MT6328_AUDENC_ANA_CON7 0x0cfc
+#define MT6328_AUDENC_ANA_CON8 0x0cfe
+#define MT6328_AUDENC_ANA_CON9 0x0d00
+#define MT6328_AUDENC_ANA_CON10 0x0d02
+#define MT6328_AUDNCP_CLKDIV_CON0 0x0d04
+#define MT6328_AUDNCP_CLKDIV_CON1 0x0d06
+#define MT6328_AUDNCP_CLKDIV_CON2 0x0d08
+#define MT6328_AUDNCP_CLKDIV_CON3 0x0d0a
+#define MT6328_AUDNCP_CLKDIV_CON4 0x0d0c
+#define MT6328_AUXADC_ADC0 0x0e00
+#define MT6328_AUXADC_ADC1 0x0e02
+#define MT6328_AUXADC_ADC2 0x0e04
+#define MT6328_AUXADC_ADC3 0x0e06
+#define MT6328_AUXADC_ADC4 0x0e08
+#define MT6328_AUXADC_ADC5 0x0e0a
+#define MT6328_AUXADC_ADC6 0x0e0c
+#define MT6328_AUXADC_ADC7 0x0e0e
+#define MT6328_AUXADC_ADC8 0x0e10
+#define MT6328_AUXADC_ADC9 0x0e12
+#define MT6328_AUXADC_ADC10 0x0e14
+#define MT6328_AUXADC_ADC11 0x0e16
+#define MT6328_AUXADC_ADC12 0x0e18
+#define MT6328_AUXADC_ADC13 0x0e1a
+#define MT6328_AUXADC_ADC14 0x0e1c
+#define MT6328_AUXADC_ADC15 0x0e1e
+#define MT6328_AUXADC_ADC16 0x0e20
+#define MT6328_AUXADC_ADC17 0x0e22
+#define MT6328_AUXADC_ADC18 0x0e24
+#define MT6328_AUXADC_ADC19 0x0e26
+#define MT6328_AUXADC_ADC20 0x0e28
+#define MT6328_AUXADC_ADC21 0x0e2a
+#define MT6328_AUXADC_ADC22 0x0e2c
+#define MT6328_AUXADC_ADC23 0x0e2e
+#define MT6328_AUXADC_ADC24 0x0e30
+#define MT6328_AUXADC_ADC25 0x0e32
+#define MT6328_AUXADC_ADC26 0x0e34
+#define MT6328_AUXADC_ADC27 0x0e36
+#define MT6328_AUXADC_ADC28 0x0e38
+#define MT6328_AUXADC_ADC29 0x0e3a
+#define MT6328_AUXADC_ADC30 0x0e3c
+#define MT6328_AUXADC_ADC31 0x0e3e
+#define MT6328_AUXADC_ADC32 0x0e40
+#define MT6328_AUXADC_ADC33 0x0e42
+#define MT6328_AUXADC_BUF0 0x0e44
+#define MT6328_AUXADC_BUF1 0x0e46
+#define MT6328_AUXADC_BUF2 0x0e48
+#define MT6328_AUXADC_BUF3 0x0e4a
+#define MT6328_AUXADC_BUF4 0x0e4c
+#define MT6328_AUXADC_BUF5 0x0e4e
+#define MT6328_AUXADC_BUF6 0x0e50
+#define MT6328_AUXADC_BUF7 0x0e52
+#define MT6328_AUXADC_BUF8 0x0e54
+#define MT6328_AUXADC_BUF9 0x0e56
+#define MT6328_AUXADC_BUF10 0x0e58
+#define MT6328_AUXADC_BUF11 0x0e5a
+#define MT6328_AUXADC_BUF12 0x0e5c
+#define MT6328_AUXADC_BUF13 0x0e5e
+#define MT6328_AUXADC_BUF14 0x0e60
+#define MT6328_AUXADC_BUF15 0x0e62
+#define MT6328_AUXADC_BUF16 0x0e64
+#define MT6328_AUXADC_BUF17 0x0e66
+#define MT6328_AUXADC_BUF18 0x0e68
+#define MT6328_AUXADC_BUF19 0x0e6a
+#define MT6328_AUXADC_BUF20 0x0e6c
+#define MT6328_AUXADC_BUF21 0x0e6e
+#define MT6328_AUXADC_BUF22 0x0e70
+#define MT6328_AUXADC_BUF23 0x0e72
+#define MT6328_AUXADC_BUF24 0x0e74
+#define MT6328_AUXADC_BUF25 0x0e76
+#define MT6328_AUXADC_BUF26 0x0e78
+#define MT6328_AUXADC_BUF27 0x0e7a
+#define MT6328_AUXADC_BUF28 0x0e7c
+#define MT6328_AUXADC_BUF29 0x0e7e
+#define MT6328_AUXADC_BUF30 0x0e80
+#define MT6328_AUXADC_BUF31 0x0e82
+#define MT6328_AUXADC_STA0 0x0e84
+#define MT6328_AUXADC_STA1 0x0e86
+#define MT6328_AUXADC_RQST0 0x0e88
+#define MT6328_AUXADC_RQST0_SET 0x0e8a
+#define MT6328_AUXADC_RQST0_CLR 0x0e8c
+#define MT6328_AUXADC_RQST1 0x0e8e
+#define MT6328_AUXADC_RQST1_SET 0x0e90
+#define MT6328_AUXADC_RQST1_CLR 0x0e92
+#define MT6328_AUXADC_CON0 0x0e94
+#define MT6328_AUXADC_CON0_SET 0x0e96
+#define MT6328_AUXADC_CON0_CLR 0x0e98
+#define MT6328_AUXADC_CON1 0x0e9a
+#define MT6328_AUXADC_CON2 0x0e9c
+#define MT6328_AUXADC_CON3 0x0e9e
+#define MT6328_AUXADC_CON4 0x0ea0
+#define MT6328_AUXADC_CON5 0x0ea2
+#define MT6328_AUXADC_CON6 0x0ea4
+#define MT6328_AUXADC_CON7 0x0ea6
+#define MT6328_AUXADC_CON8 0x0ea8
+#define MT6328_AUXADC_CON9 0x0eaa
+#define MT6328_AUXADC_CON10 0x0eac
+#define MT6328_AUXADC_CON11 0x0eae
+#define MT6328_AUXADC_CON12 0x0eb0
+#define MT6328_AUXADC_CON13 0x0eb2
+#define MT6328_AUXADC_CON14 0x0eb4
+#define MT6328_AUXADC_CON15 0x0eb6
+#define MT6328_AUXADC_CON16 0x0eb8
+#define MT6328_AUXADC_AUTORPT0 0x0eba
+#define MT6328_AUXADC_LBAT0 0x0ebc
+#define MT6328_AUXADC_LBAT1 0x0ebe
+#define MT6328_AUXADC_LBAT2 0x0ec0
+#define MT6328_AUXADC_LBAT3 0x0ec2
+#define MT6328_AUXADC_LBAT4 0x0ec4
+#define MT6328_AUXADC_LBAT5 0x0ec6
+#define MT6328_AUXADC_LBAT6 0x0ec8
+#define MT6328_AUXADC_ACCDET 0x0eca
+#define MT6328_AUXADC_THR0 0x0ecc
+#define MT6328_AUXADC_THR1 0x0ece
+#define MT6328_AUXADC_THR2 0x0ed0
+#define MT6328_AUXADC_THR3 0x0ed2
+#define MT6328_AUXADC_THR4 0x0ed4
+#define MT6328_AUXADC_THR5 0x0ed6
+#define MT6328_AUXADC_THR6 0x0ed8
+#define MT6328_AUXADC_EFUSE0 0x0eda
+#define MT6328_AUXADC_EFUSE1 0x0edc
+#define MT6328_AUXADC_EFUSE2 0x0ede
+#define MT6328_AUXADC_EFUSE3 0x0ee0
+#define MT6328_AUXADC_EFUSE4 0x0ee2
+#define MT6328_AUXADC_EFUSE5 0x0ee4
+#define MT6328_AUXADC_DBG0 0x0ee6
+#define MT6328_AUXADC_IMP0 0x0ee8
+#define MT6328_AUXADC_IMP1 0x0eea
+#define MT6328_AUXADC_VISMPS0_1 0x0eec
+#define MT6328_AUXADC_VISMPS0_2 0x0eee
+#define MT6328_AUXADC_VISMPS0_3 0x0ef0
+#define MT6328_AUXADC_VISMPS0_4 0x0ef2
+#define MT6328_AUXADC_VISMPS0_5 0x0ef4
+#define MT6328_AUXADC_VISMPS0_6 0x0ef6
+#define MT6328_AUXADC_VISMPS0_7 0x0ef8
+#define MT6328_AUXADC_LBAT2_1 0x0efa
+#define MT6328_AUXADC_LBAT2_2 0x0efc
+#define MT6328_AUXADC_LBAT2_3 0x0efe
+#define MT6328_AUXADC_LBAT2_4 0x0f00
+#define MT6328_AUXADC_LBAT2_5 0x0f02
+#define MT6328_AUXADC_LBAT2_6 0x0f04
+#define MT6328_AUXADC_LBAT2_7 0x0f06
+#define MT6328_AUXADC_MDBG_0 0x0f08
+#define MT6328_AUXADC_MDBG_1 0x0f0a
+#define MT6328_AUXADC_MDBG_2 0x0f0c
+#define MT6328_AUXADC_MDRT_0 0x0f0e
+#define MT6328_AUXADC_MDRT_1 0x0f10
+#define MT6328_AUXADC_MDRT_2 0x0f12
+#define MT6328_ACCDET_CON0 0x0f14
+#define MT6328_ACCDET_CON1 0x0f16
+#define MT6328_ACCDET_CON2 0x0f18
+#define MT6328_ACCDET_CON3 0x0f1a
+#define MT6328_ACCDET_CON4 0x0f1c
+#define MT6328_ACCDET_CON5 0x0f1e
+#define MT6328_ACCDET_CON6 0x0f20
+#define MT6328_ACCDET_CON7 0x0f22
+#define MT6328_ACCDET_CON8 0x0f24
+#define MT6328_ACCDET_CON9 0x0f26
+#define MT6328_ACCDET_CON10 0x0f28
+#define MT6328_ACCDET_CON11 0x0f2a
+#define MT6328_ACCDET_CON12 0x0f2c
+#define MT6328_ACCDET_CON13 0x0f2e
+#define MT6328_ACCDET_CON14 0x0f30
+#define MT6328_ACCDET_CON15 0x0f32
+#define MT6328_ACCDET_CON16 0x0f34
+#define MT6328_ACCDET_CON17 0x0f36
+#define MT6328_ACCDET_CON18 0x0f38
+#define MT6328_ACCDET_CON19 0x0f3a
+#define MT6328_ACCDET_CON20 0x0f3c
+#define MT6328_ACCDET_CON21 0x0f3e
+#define MT6328_ACCDET_CON22 0x0f40
+#define MT6328_ACCDET_CON23 0x0f42
+#define MT6328_ACCDET_CON24 0x0f44
+#define MT6328_ACCDET_CON25 0x0f46
+#define MT6328_CHR_CON0 0x0f48
+#define MT6328_CHR_CON1 0x0f4a
+#define MT6328_CHR_CON2 0x0f4c
+#define MT6328_CHR_CON3 0x0f4e
+#define MT6328_CHR_CON4 0x0f50
+#define MT6328_CHR_CON5 0x0f52
+#define MT6328_CHR_CON6 0x0f54
+#define MT6328_CHR_CON7 0x0f56
+#define MT6328_CHR_CON8 0x0f58
+#define MT6328_CHR_CON9 0x0f5a
+#define MT6328_CHR_CON10 0x0f5c
+#define MT6328_CHR_CON11 0x0f5e
+#define MT6328_CHR_CON12 0x0f60
+#define MT6328_CHR_CON13 0x0f62
+#define MT6328_CHR_CON14 0x0f64
+#define MT6328_CHR_CON15 0x0f66
+#define MT6328_CHR_CON16 0x0f68
+#define MT6328_CHR_CON17 0x0f6a
+#define MT6328_CHR_CON18 0x0f6c
+#define MT6328_CHR_CON19 0x0f6e
+#define MT6328_CHR_CON20 0x0f70
+#define MT6328_CHR_CON21 0x0f72
+#define MT6328_CHR_CON22 0x0f74
+#define MT6328_CHR_CON23 0x0f76
+#define MT6328_CHR_CON24 0x0f78
+#define MT6328_CHR_CON25 0x0f7a
+#define MT6328_CHR_CON26 0x0f7c
+#define MT6328_CHR_CON27 0x0f7e
+#define MT6328_CHR_CON28 0x0f80
+#define MT6328_CHR_CON29 0x0f82
+#define MT6328_CHR_CON30 0x0f84
+#define MT6328_CHR_CON31 0x0f86
+#define MT6328_CHR_CON32 0x0f88
+#define MT6328_CHR_CON33 0x0f8a
+#define MT6328_CHR_CON34 0x0f8c
+#define MT6328_CHR_CON35 0x0f8e
+#define MT6328_CHR_CON36 0x0f90
+#define MT6328_CHR_CON37 0x0f92
+#define MT6328_CHR_CON38 0x0f94
+#define MT6328_CHR_CON39 0x0f96
+#define MT6328_CHR_CON40 0x0f98
+#define MT6328_CHR_CON41 0x0f9a
+#define MT6328_CHR_CON42 0x0f9c
+#define MT6328_BATON_CON0 0x0f9e
+#define MT6328_CHR_CON43 0x0fa0
+#define MT6328_EOSC_CALI_CON0 0x0faa
+#define MT6328_EOSC_CALI_CON1 0x0fac
+#define MT6328_VRTC_PWM_CON0 0x0fae
+
+#endif /* __MFD_MT6328_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 627487e26287..b774c3a4bb62 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -12,6 +12,7 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6328_CHIP_ID = 0x30,
MT6331_CHIP_ID = 0x20,
MT6332_CHIP_ID = 0x20,
MT6357_CHIP_ID = 0x57,
@@ -65,11 +66,11 @@ struct mt6397_chip {
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
- u16 wake_mask[2];
- u16 irq_masks_cur[2];
- u16 irq_masks_cache[2];
- u16 int_con[2];
- u16 int_status[2];
+ u16 wake_mask[3];
+ u16 irq_masks_cur[3];
+ u16 irq_masks_cache[3];
+ u16 int_con[3];
+ u16 int_status[3];
u16 chip_id;
void *irq_data;
};
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 068ae1c0f0e8..27883af44f87 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -60,11 +60,6 @@
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
diff --git a/include/linux/mfd/nct6694.h b/include/linux/mfd/nct6694.h
new file mode 100644
index 000000000000..6eb9be2cd4a0
--- /dev/null
+++ b/include/linux/mfd/nct6694.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ *
+ * Nuvoton NCT6694 USB transaction and data structure.
+ */
+
+#ifndef __MFD_NCT6694_H
+#define __MFD_NCT6694_H
+
+#define NCT6694_VENDOR_ID 0x0416
+#define NCT6694_PRODUCT_ID 0x200B
+#define NCT6694_INT_IN_EP 0x81
+#define NCT6694_BULK_IN_EP 0x02
+#define NCT6694_BULK_OUT_EP 0x03
+
+#define NCT6694_HCTRL_SET 0x40
+#define NCT6694_HCTRL_GET 0x80
+
+#define NCT6694_URB_TIMEOUT 1000
+
+enum nct6694_irq_id {
+ NCT6694_IRQ_GPIO0 = 0,
+ NCT6694_IRQ_GPIO1,
+ NCT6694_IRQ_GPIO2,
+ NCT6694_IRQ_GPIO3,
+ NCT6694_IRQ_GPIO4,
+ NCT6694_IRQ_GPIO5,
+ NCT6694_IRQ_GPIO6,
+ NCT6694_IRQ_GPIO7,
+ NCT6694_IRQ_GPIO8,
+ NCT6694_IRQ_GPIO9,
+ NCT6694_IRQ_GPIOA,
+ NCT6694_IRQ_GPIOB,
+ NCT6694_IRQ_GPIOC,
+ NCT6694_IRQ_GPIOD,
+ NCT6694_IRQ_GPIOE,
+ NCT6694_IRQ_GPIOF,
+ NCT6694_IRQ_CAN0,
+ NCT6694_IRQ_CAN1,
+ NCT6694_IRQ_RTC,
+ NCT6694_NR_IRQS,
+};
+
+enum nct6694_response_err_status {
+ NCT6694_NO_ERROR = 0,
+ NCT6694_FORMAT_ERROR,
+ NCT6694_RESERVED1,
+ NCT6694_RESERVED2,
+ NCT6694_NOT_SUPPORT_ERROR,
+ NCT6694_NO_RESPONSE_ERROR,
+ NCT6694_TIMEOUT_ERROR,
+ NCT6694_PENDING,
+};
+
+struct __packed nct6694_cmd_header {
+ u8 rsv1;
+ u8 mod;
+ union __packed {
+ __le16 offset;
+ struct __packed {
+ u8 cmd;
+ u8 sel;
+ };
+ };
+ u8 hctrl;
+ u8 rsv2;
+ __le16 len;
+};
+
+struct __packed nct6694_response_header {
+ u8 sequence_id;
+ u8 sts;
+ u8 reserved[4];
+ __le16 len;
+};
+
+union __packed nct6694_usb_msg {
+ struct nct6694_cmd_header cmd_header;
+ struct nct6694_response_header response_header;
+};
+
+struct nct6694 {
+ struct device *dev;
+ struct ida gpio_ida;
+ struct ida i2c_ida;
+ struct ida canfd_ida;
+ struct ida wdt_ida;
+ struct irq_domain *domain;
+ struct mutex access_lock;
+ spinlock_t irq_lock;
+ struct urb *int_in_urb;
+ struct usb_device *udev;
+ union nct6694_usb_msg *usb_msg;
+ __le32 *int_buffer;
+ unsigned int irq_enable;
+};
+
+int nct6694_read_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+int nct6694_write_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+
+#endif
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index eda1ffd99c1a..dabcc0dea802 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -98,8 +98,8 @@ struct palmas_sleep_requestor_info {
};
struct palmas_regs_info {
- char *name;
- char *sname;
+ const char *name;
+ const char *sname;
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
deleted file mode 100644
index 6a81896d4889..000000000000
--- a/include/linux/mfd/pcf50633/adc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * adc.h -- Driver for NXP PCF50633 ADC
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_ADC_H
-#define __LINUX_MFD_PCF50633_ADC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-/* ADC Registers */
-#define PCF50633_REG_ADCC3 0x52
-#define PCF50633_REG_ADCC2 0x53
-#define PCF50633_REG_ADCC1 0x54
-#define PCF50633_REG_ADCS1 0x55
-#define PCF50633_REG_ADCS2 0x56
-#define PCF50633_REG_ADCS3 0x57
-
-#define PCF50633_ADCC1_ADCSTART 0x01
-#define PCF50633_ADCC1_RES_8BIT 0x02
-#define PCF50633_ADCC1_RES_10BIT 0x00
-#define PCF50633_ADCC1_AVERAGE_NO 0x00
-#define PCF50633_ADCC1_AVERAGE_4 0x04
-#define PCF50633_ADCC1_AVERAGE_8 0x08
-#define PCF50633_ADCC1_AVERAGE_16 0x0c
-#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00
-#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10
-#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20
-#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30
-#define PCF50633_ADCC1_MUX_BATTEMP 0x60
-#define PCF50633_ADCC1_MUX_ADCIN1 0x70
-#define PCF50633_ADCC1_AVERAGE_MASK 0x0c
-#define PCF50633_ADCC1_ADCMUX_MASK 0xf0
-
-#define PCF50633_ADCC2_RATIO_NONE 0x00
-#define PCF50633_ADCC2_RATIO_BATTEMP 0x01
-#define PCF50633_ADCC2_RATIO_ADCIN1 0x02
-#define PCF50633_ADCC2_RATIO_BOTH 0x03
-#define PCF50633_ADCC2_RATIOSETTL_100US 0x04
-
-#define PCF50633_ADCC3_ACCSW_EN 0x01
-#define PCF50633_ADCC3_NTCSW_EN 0x04
-#define PCF50633_ADCC3_RES_DIV_TWO 0x10
-#define PCF50633_ADCC3_RES_DIV_THREE 0x00
-
-#define PCF50633_ADCS3_REF_NTCSW 0x00
-#define PCF50633_ADCS3_REF_ACCSW 0x10
-#define PCF50633_ADCS3_REF_2V0 0x20
-#define PCF50633_ADCS3_REF_VISA 0x30
-#define PCF50633_ADCS3_REF_2V0_2 0x70
-#define PCF50633_ADCS3_ADCRDY 0x80
-
-#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03
-#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c
-#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2
-#define PCF50633_ASCS3_REF_MASK 0x70
-
-extern int
-pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
- void (*callback)(struct pcf50633 *, void *, int),
- void *callback_param);
-extern int
-pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
-
-#endif /* __LINUX_PCF50633_ADC_H */
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
deleted file mode 100644
index fd4a4f8d6c13..000000000000
--- a/include/linux/mfd/pcf50633/backlight.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * PCF50633 backlight device driver
- */
-
-#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
-#define __LINUX_MFD_PCF50633_BACKLIGHT
-
-/*
-* @default_brightness: Backlight brightness is initialized to this value
-*
-* Brightness to be used after the driver has been probed.
-* Valid range 0-63.
-*
-* @default_brightness_limit: The actual brightness is limited by this value
-*
-* Brightness limit to be used after the driver has been probed. This is useful
-* when it is not known how much power is available for the backlight during
-* probe.
-* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit.
-*
-* @ramp_time: Display ramp time when changing brightness
-*
-* When changing the backlights brightness the change is not instant, instead
-* it fades smooth from one state to another. This value specifies how long
-* the fade should take. The lower the value the higher the fade time.
-* Valid range 0-255
-*/
-struct pcf50633_bl_platform_data {
- unsigned int default_brightness;
- unsigned int default_brightness_limit;
- uint8_t ramp_time;
-};
-
-
-struct pcf50633;
-
-int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
deleted file mode 100644
index 539f27f8bd89..000000000000
--- a/include/linux/mfd/pcf50633/core.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * core.h -- Core driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_CORE_H
-#define __LINUX_MFD_PCF50633_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/pm.h>
-#include <linux/power_supply.h>
-#include <linux/mfd/pcf50633/backlight.h>
-
-struct pcf50633;
-struct regmap;
-
-#define PCF50633_NUM_REGULATORS 11
-
-struct pcf50633_platform_data {
- struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
-
- char **batteries;
- int num_batteries;
-
- /*
- * Should be set accordingly to the reference resistor used, see
- * I_{ch(ref)} charger reference current in the pcf50633 User
- * Manual.
- */
- int charger_reference_current_ma;
-
- /* Callbacks */
- void (*probe_done)(struct pcf50633 *);
- void (*mbc_event_callback)(struct pcf50633 *, int);
- void (*regulator_registered)(struct pcf50633 *, int);
- void (*force_shutdown)(struct pcf50633 *);
-
- u8 resumers[5];
-
- struct pcf50633_bl_platform_data *backlight_data;
-};
-
-struct pcf50633_irq {
- void (*handler) (int, void *);
- void *data;
-};
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data);
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
-
-int pcf50633_read_block(struct pcf50633 *, u8 reg,
- int nr_regs, u8 *data);
-int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data);
-u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
-
-/* Interrupt registers */
-
-#define PCF50633_REG_INT1 0x02
-#define PCF50633_REG_INT2 0x03
-#define PCF50633_REG_INT3 0x04
-#define PCF50633_REG_INT4 0x05
-#define PCF50633_REG_INT5 0x06
-
-#define PCF50633_REG_INT1M 0x07
-#define PCF50633_REG_INT2M 0x08
-#define PCF50633_REG_INT3M 0x09
-#define PCF50633_REG_INT4M 0x0a
-#define PCF50633_REG_INT5M 0x0b
-
-enum {
- /* Chip IRQs */
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_RESERVED1,
- PCF50633_IRQ_RESERVED2,
- PCF50633_IRQ_ALARM,
- PCF50633_IRQ_SECOND,
- PCF50633_IRQ_ONKEYR,
- PCF50633_IRQ_ONKEYF,
- PCF50633_IRQ_EXTON1R,
- PCF50633_IRQ_EXTON1F,
- PCF50633_IRQ_EXTON2R,
- PCF50633_IRQ_EXTON2F,
- PCF50633_IRQ_EXTON3R,
- PCF50633_IRQ_EXTON3F,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_ADCRDY,
- PCF50633_IRQ_ONKEY1S,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
- PCF50633_IRQ_HIGHTMP,
- PCF50633_IRQ_AUTOPWRFAIL,
- PCF50633_IRQ_DWN1PWRFAIL,
- PCF50633_IRQ_DWN2PWRFAIL,
- PCF50633_IRQ_LEDPWRFAIL,
- PCF50633_IRQ_LEDOVP,
- PCF50633_IRQ_LDO1PWRFAIL,
- PCF50633_IRQ_LDO2PWRFAIL,
- PCF50633_IRQ_LDO3PWRFAIL,
- PCF50633_IRQ_LDO4PWRFAIL,
- PCF50633_IRQ_LDO5PWRFAIL,
- PCF50633_IRQ_LDO6PWRFAIL,
- PCF50633_IRQ_HCLDOPWRFAIL,
- PCF50633_IRQ_HCLDOOVL,
-
- /* Always last */
- PCF50633_NUM_IRQ,
-};
-
-struct pcf50633 {
- struct device *dev;
- struct regmap *regmap;
-
- struct pcf50633_platform_data *pdata;
- int irq;
- struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
- struct work_struct irq_work;
- struct workqueue_struct *work_queue;
- struct mutex lock;
-
- u8 mask_regs[5];
-
- u8 suspend_irq_masks[5];
- u8 resume_reason[5];
- int is_suspended;
-
- int onkey1s_held;
-
- struct platform_device *rtc_pdev;
- struct platform_device *mbc_pdev;
- struct platform_device *adc_pdev;
- struct platform_device *input_pdev;
- struct platform_device *bl_pdev;
- struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
-};
-
-enum pcf50633_reg_int1 {
- PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
- PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
- PCF50633_INT1_USBINS = 0x04, /* USB inserted */
- PCF50633_INT1_USBREM = 0x08, /* USB removed */
- /* reserved */
- PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
- PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
-};
-
-enum pcf50633_reg_int2 {
- PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
- PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
- PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
- PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
- PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
- PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
- PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
- PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
-};
-
-enum pcf50633_reg_int3 {
- PCF50633_INT3_BATFULL = 0x01, /* Battery full */
- PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
- PCF50633_INT3_THLIMON = 0x04,
- PCF50633_INT3_THLIMOFF = 0x08,
- PCF50633_INT3_USBLIMON = 0x10,
- PCF50633_INT3_USBLIMOFF = 0x20,
- PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
- PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
-};
-
-enum pcf50633_reg_int4 {
- PCF50633_INT4_LOWSYS = 0x01,
- PCF50633_INT4_LOWBAT = 0x02,
- PCF50633_INT4_HIGHTMP = 0x04,
- PCF50633_INT4_AUTOPWRFAIL = 0x08,
- PCF50633_INT4_DWN1PWRFAIL = 0x10,
- PCF50633_INT4_DWN2PWRFAIL = 0x20,
- PCF50633_INT4_LEDPWRFAIL = 0x40,
- PCF50633_INT4_LEDOVP = 0x80,
-};
-
-enum pcf50633_reg_int5 {
- PCF50633_INT5_LDO1PWRFAIL = 0x01,
- PCF50633_INT5_LDO2PWRFAIL = 0x02,
- PCF50633_INT5_LDO3PWRFAIL = 0x04,
- PCF50633_INT5_LDO4PWRFAIL = 0x08,
- PCF50633_INT5_LDO5PWRFAIL = 0x10,
- PCF50633_INT5_LDO6PWRFAIL = 0x20,
- PCF50633_INT5_HCLDOPWRFAIL = 0x40,
- PCF50633_INT5_HCLDOOVL = 0x80,
-};
-
-/* misc. registers */
-#define PCF50633_REG_OOCSHDWN 0x0c
-
-/* LED registers */
-#define PCF50633_REG_LEDOUT 0x28
-#define PCF50633_REG_LEDENA 0x29
-#define PCF50633_REG_LEDCTL 0x2a
-#define PCF50633_REG_LEDDIM 0x2b
-
-static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
-{
- return dev_get_drvdata(dev);
-}
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-extern const struct dev_pm_ops pcf50633_pm;
-
-#endif
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
deleted file mode 100644
index f589e35795f1..000000000000
--- a/include/linux/mfd/pcf50633/gpio.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * gpio.h -- GPIO driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_GPIO_H
-#define __LINUX_MFD_PCF50633_GPIO_H
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_GPIO1 1
-#define PCF50633_GPIO2 2
-#define PCF50633_GPIO3 3
-#define PCF50633_GPO 4
-
-#define PCF50633_REG_GPIO1CFG 0x14
-#define PCF50633_REG_GPIO2CFG 0x15
-#define PCF50633_REG_GPIO3CFG 0x16
-#define PCF50633_REG_GPOCFG 0x17
-
-#define PCF50633_GPOCFG_GPOSEL_MASK 0x07
-
-enum pcf50633_reg_gpocfg {
- PCF50633_GPOCFG_GPOSEL_0 = 0x00,
- PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01,
- PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02,
- PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03,
- PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04,
- PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05,
- PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06,
- PCF50633_GPOCFG_GPOSEL_1 = 0x07,
- PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08,
-};
-
-int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val);
-u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert);
-int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_power_supply_set(struct pcf50633 *,
- int gpio, int regulator, int on);
-#endif /* __LINUX_MFD_PCF50633_GPIO_H */
-
-
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
deleted file mode 100644
index fa5cb9256d99..000000000000
--- a/include/linux/mfd/pcf50633/mbc.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mbc.h -- Driver for NXP PCF50633 Main Battery Charger
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_MBC_H
-#define __LINUX_MFD_PCF50633_MBC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_MBCC1 0x43
-#define PCF50633_REG_MBCC2 0x44
-#define PCF50633_REG_MBCC3 0x45
-#define PCF50633_REG_MBCC4 0x46
-#define PCF50633_REG_MBCC5 0x47
-#define PCF50633_REG_MBCC6 0x48
-#define PCF50633_REG_MBCC7 0x49
-#define PCF50633_REG_MBCC8 0x4a
-#define PCF50633_REG_MBCS1 0x4b
-#define PCF50633_REG_MBCS2 0x4c
-#define PCF50633_REG_MBCS3 0x4d
-
-enum pcf50633_reg_mbcc1 {
- PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */
- PCF50633_MBCC1_AUTOSTOP = 0x02,
- PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */
- PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */
- PCF50633_MBCC1_RESTART = 0x10, /* restart charging */
- PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. precharging time */
- PCF50633_MBCC1_WDTIME_1H = 0x00,
- PCF50633_MBCC1_WDTIME_2H = 0x40,
- PCF50633_MBCC1_WDTIME_4H = 0x80,
- PCF50633_MBCC1_WDTIME_6H = 0xc0,
-};
-#define PCF50633_MBCC1_WDTIME_MASK 0xc0
-
-enum pcf50633_reg_mbcc2 {
- PCF50633_MBCC2_VBATCOND_2V7 = 0x00,
- PCF50633_MBCC2_VBATCOND_2V85 = 0x01,
- PCF50633_MBCC2_VBATCOND_3V0 = 0x02,
- PCF50633_MBCC2_VBATCOND_3V15 = 0x03,
- PCF50633_MBCC2_VMAX_4V = 0x00,
- PCF50633_MBCC2_VMAX_4V20 = 0x28,
- PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */
-};
-
-enum pcf50633_reg_mbcc7 {
- PCF50633_MBCC7_USB_100mA = 0x00,
- PCF50633_MBCC7_USB_500mA = 0x01,
- PCF50633_MBCC7_USB_1000mA = 0x02,
- PCF50633_MBCC7_USB_SUSPEND = 0x03,
- PCF50633_MBCC7_BATTEMP_EN = 0x04,
- PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00,
- PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40,
- PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80,
- PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0,
-};
-#define PCF50633_MBCC7_USB_MASK 0x03
-
-enum pcf50633_reg_mbcc8 {
- PCF50633_MBCC8_USBENASUS = 0x10,
-};
-
-enum pcf50633_reg_mbcs1 {
- PCF50633_MBCS1_USBPRES = 0x01,
- PCF50633_MBCS1_USBOK = 0x02,
- PCF50633_MBCS1_ADAPTPRES = 0x04,
- PCF50633_MBCS1_ADAPTOK = 0x08,
- PCF50633_MBCS1_TBAT_OK = 0x00,
- PCF50633_MBCS1_TBAT_ABOVE = 0x10,
- PCF50633_MBCS1_TBAT_BELOW = 0x20,
- PCF50633_MBCS1_TBAT_UNDEF = 0x30,
- PCF50633_MBCS1_PREWDTEXP = 0x40,
- PCF50633_MBCS1_WDTEXP = 0x80,
-};
-
-enum pcf50633_reg_mbcs2_mbcmod {
- PCF50633_MBCS2_MBC_PLAY = 0x00,
- PCF50633_MBCS2_MBC_USB_PRE = 0x01,
- PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02,
- PCF50633_MBCS2_MBC_USB_FAST = 0x03,
- PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04,
- PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05,
- PCF50633_MBCS2_MBC_ADP_PRE = 0x06,
- PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07,
- PCF50633_MBCS2_MBC_ADP_FAST = 0x08,
- PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09,
- PCF50633_MBCS2_MBC_BAT_FULL = 0x0a,
- PCF50633_MBCS2_MBC_HALT = 0x0b,
-};
-#define PCF50633_MBCS2_MBC_MASK 0x0f
-enum pcf50633_reg_mbcs2_chgstat {
- PCF50633_MBCS2_CHGS_NONE = 0x00,
- PCF50633_MBCS2_CHGS_ADAPTER = 0x10,
- PCF50633_MBCS2_CHGS_USB = 0x20,
- PCF50633_MBCS2_CHGS_BOTH = 0x30,
-};
-#define PCF50633_MBCS2_RESSTAT_AUTO 0x40
-
-enum pcf50633_reg_mbcs3 {
- PCF50633_MBCS3_USBLIM_PLAY = 0x01,
- PCF50633_MBCS3_USBLIM_CGH = 0x02,
- PCF50633_MBCS3_TLIM_PLAY = 0x04,
- PCF50633_MBCS3_TLIM_CHG = 0x08,
- PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */
- PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */
- PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */
- PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */
-};
-
-#define PCF50633_MBCC2_VBATCOND_MASK 0x03
-#define PCF50633_MBCC2_VMAX_MASK 0x3c
-
-/* Charger status */
-#define PCF50633_MBC_USB_ONLINE 0x01
-#define PCF50633_MBC_USB_ACTIVE 0x02
-#define PCF50633_MBC_ADAPTER_ONLINE 0x04
-#define PCF50633_MBC_ADAPTER_ACTIVE 0x08
-
-int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
-
-int pcf50633_mbc_get_status(struct pcf50633 *);
-int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
deleted file mode 100644
index eac0c3d8e984..000000000000
--- a/include/linux/mfd/pcf50633/pmic.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_MFD_PCF50633_PMIC_H
-#define __LINUX_MFD_PCF50633_PMIC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_AUTOOUT 0x1a
-#define PCF50633_REG_AUTOENA 0x1b
-#define PCF50633_REG_AUTOCTL 0x1c
-#define PCF50633_REG_AUTOMXC 0x1d
-#define PCF50633_REG_DOWN1OUT 0x1e
-#define PCF50633_REG_DOWN1ENA 0x1f
-#define PCF50633_REG_DOWN1CTL 0x20
-#define PCF50633_REG_DOWN1MXC 0x21
-#define PCF50633_REG_DOWN2OUT 0x22
-#define PCF50633_REG_DOWN2ENA 0x23
-#define PCF50633_REG_DOWN2CTL 0x24
-#define PCF50633_REG_DOWN2MXC 0x25
-#define PCF50633_REG_MEMLDOOUT 0x26
-#define PCF50633_REG_MEMLDOENA 0x27
-#define PCF50633_REG_LDO1OUT 0x2d
-#define PCF50633_REG_LDO1ENA 0x2e
-#define PCF50633_REG_LDO2OUT 0x2f
-#define PCF50633_REG_LDO2ENA 0x30
-#define PCF50633_REG_LDO3OUT 0x31
-#define PCF50633_REG_LDO3ENA 0x32
-#define PCF50633_REG_LDO4OUT 0x33
-#define PCF50633_REG_LDO4ENA 0x34
-#define PCF50633_REG_LDO5OUT 0x35
-#define PCF50633_REG_LDO5ENA 0x36
-#define PCF50633_REG_LDO6OUT 0x37
-#define PCF50633_REG_LDO6ENA 0x38
-#define PCF50633_REG_HCLDOOUT 0x39
-#define PCF50633_REG_HCLDOENA 0x3a
-#define PCF50633_REG_HCLDOOVL 0x40
-
-enum pcf50633_regulator_enable {
- PCF50633_REGULATOR_ON = 0x01,
- PCF50633_REGULATOR_ON_GPIO1 = 0x02,
- PCF50633_REGULATOR_ON_GPIO2 = 0x04,
- PCF50633_REGULATOR_ON_GPIO3 = 0x08,
-};
-#define PCF50633_REGULATOR_ON_MASK 0x0f
-
-enum pcf50633_regulator_phase {
- PCF50633_REGULATOR_ACTPH1 = 0x00,
- PCF50633_REGULATOR_ACTPH2 = 0x10,
- PCF50633_REGULATOR_ACTPH3 = 0x20,
- PCF50633_REGULATOR_ACTPH4 = 0x30,
-};
-#define PCF50633_REGULATOR_ACTPH_MASK 0x30
-
-enum pcf50633_regulator_id {
- PCF50633_REGULATOR_AUTO,
- PCF50633_REGULATOR_DOWN1,
- PCF50633_REGULATOR_DOWN2,
- PCF50633_REGULATOR_LDO1,
- PCF50633_REGULATOR_LDO2,
- PCF50633_REGULATOR_LDO3,
- PCF50633_REGULATOR_LDO4,
- PCF50633_REGULATOR_LDO5,
- PCF50633_REGULATOR_LDO6,
- PCF50633_REGULATOR_HCLDO,
- PCF50633_REGULATOR_MEMLDO,
-};
-#endif
-
diff --git a/include/linux/mfd/pf1550.h b/include/linux/mfd/pf1550.h
new file mode 100644
index 000000000000..7cb2340ff2bd
--- /dev/null
+++ b/include/linux/mfd/pf1550.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Declarations for the PF1550 PMIC
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Robin Gong <yibin.gong@freescale.com>
+ *
+ * Portions Copyright (c) 2025 Savoir-faire Linux Inc.
+ * Samuel Kayode <samuel.kayode@savoirfairelinux.com>
+ */
+
+#ifndef __LINUX_MFD_PF1550_H
+#define __LINUX_MFD_PF1550_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+enum pf1550_pmic_reg {
+ /* PMIC regulator part */
+ PF1550_PMIC_REG_DEVICE_ID = 0x00,
+ PF1550_PMIC_REG_OTP_FLAVOR = 0x01,
+ PF1550_PMIC_REG_SILICON_REV = 0x02,
+
+ PF1550_PMIC_REG_INT_CATEGORY = 0x06,
+ PF1550_PMIC_REG_SW_INT_STAT0 = 0x08,
+ PF1550_PMIC_REG_SW_INT_MASK0 = 0x09,
+ PF1550_PMIC_REG_SW_INT_SENSE0 = 0x0a,
+ PF1550_PMIC_REG_SW_INT_STAT1 = 0x0b,
+ PF1550_PMIC_REG_SW_INT_MASK1 = 0x0c,
+ PF1550_PMIC_REG_SW_INT_SENSE1 = 0x0d,
+ PF1550_PMIC_REG_SW_INT_STAT2 = 0x0e,
+ PF1550_PMIC_REG_SW_INT_MASK2 = 0x0f,
+ PF1550_PMIC_REG_SW_INT_SENSE2 = 0x10,
+ PF1550_PMIC_REG_LDO_INT_STAT0 = 0x18,
+ PF1550_PMIC_REG_LDO_INT_MASK0 = 0x19,
+ PF1550_PMIC_REG_LDO_INT_SENSE0 = 0x1a,
+ PF1550_PMIC_REG_TEMP_INT_STAT0 = 0x20,
+ PF1550_PMIC_REG_TEMP_INT_MASK0 = 0x21,
+ PF1550_PMIC_REG_TEMP_INT_SENSE0 = 0x22,
+ PF1550_PMIC_REG_ONKEY_INT_STAT0 = 0x24,
+ PF1550_PMIC_REG_ONKEY_INT_MASK0 = 0x25,
+ PF1550_PMIC_REG_ONKEY_INT_SENSE0 = 0x26,
+ PF1550_PMIC_REG_MISC_INT_STAT0 = 0x28,
+ PF1550_PMIC_REG_MISC_INT_MASK0 = 0x29,
+ PF1550_PMIC_REG_MISC_INT_SENSE0 = 0x2a,
+
+ PF1550_PMIC_REG_COINCELL_CONTROL = 0x30,
+
+ PF1550_PMIC_REG_SW1_VOLT = 0x32,
+ PF1550_PMIC_REG_SW1_STBY_VOLT = 0x33,
+ PF1550_PMIC_REG_SW1_SLP_VOLT = 0x34,
+ PF1550_PMIC_REG_SW1_CTRL = 0x35,
+ PF1550_PMIC_REG_SW1_CTRL1 = 0x36,
+ PF1550_PMIC_REG_SW2_VOLT = 0x38,
+ PF1550_PMIC_REG_SW2_STBY_VOLT = 0x39,
+ PF1550_PMIC_REG_SW2_SLP_VOLT = 0x3a,
+ PF1550_PMIC_REG_SW2_CTRL = 0x3b,
+ PF1550_PMIC_REG_SW2_CTRL1 = 0x3c,
+ PF1550_PMIC_REG_SW3_VOLT = 0x3e,
+ PF1550_PMIC_REG_SW3_STBY_VOLT = 0x3f,
+ PF1550_PMIC_REG_SW3_SLP_VOLT = 0x40,
+ PF1550_PMIC_REG_SW3_CTRL = 0x41,
+ PF1550_PMIC_REG_SW3_CTRL1 = 0x42,
+ PF1550_PMIC_REG_VSNVS_CTRL = 0x48,
+ PF1550_PMIC_REG_VREFDDR_CTRL = 0x4a,
+ PF1550_PMIC_REG_LDO1_VOLT = 0x4c,
+ PF1550_PMIC_REG_LDO1_CTRL = 0x4d,
+ PF1550_PMIC_REG_LDO2_VOLT = 0x4f,
+ PF1550_PMIC_REG_LDO2_CTRL = 0x50,
+ PF1550_PMIC_REG_LDO3_VOLT = 0x52,
+ PF1550_PMIC_REG_LDO3_CTRL = 0x53,
+ PF1550_PMIC_REG_PWRCTRL0 = 0x58,
+ PF1550_PMIC_REG_PWRCTRL1 = 0x59,
+ PF1550_PMIC_REG_PWRCTRL2 = 0x5a,
+ PF1550_PMIC_REG_PWRCTRL3 = 0x5b,
+ PF1550_PMIC_REG_SW1_PWRDN_SEQ = 0x5f,
+ PF1550_PMIC_REG_SW2_PWRDN_SEQ = 0x60,
+ PF1550_PMIC_REG_SW3_PWRDN_SEQ = 0x61,
+ PF1550_PMIC_REG_LDO1_PWRDN_SEQ = 0x62,
+ PF1550_PMIC_REG_LDO2_PWRDN_SEQ = 0x63,
+ PF1550_PMIC_REG_LDO3_PWRDN_SEQ = 0x64,
+ PF1550_PMIC_REG_VREFDDR_PWRDN_SEQ = 0x65,
+
+ PF1550_PMIC_REG_STATE_INFO = 0x67,
+ PF1550_PMIC_REG_I2C_ADDR = 0x68,
+ PF1550_PMIC_REG_IO_DRV0 = 0x69,
+ PF1550_PMIC_REG_IO_DRV1 = 0x6a,
+ PF1550_PMIC_REG_RC_16MHZ = 0x6b,
+ PF1550_PMIC_REG_KEY = 0x6f,
+
+ /* Charger part */
+ PF1550_CHARG_REG_CHG_INT = 0x80,
+ PF1550_CHARG_REG_CHG_INT_MASK = 0x82,
+ PF1550_CHARG_REG_CHG_INT_OK = 0x84,
+ PF1550_CHARG_REG_VBUS_SNS = 0x86,
+ PF1550_CHARG_REG_CHG_SNS = 0x87,
+ PF1550_CHARG_REG_BATT_SNS = 0x88,
+ PF1550_CHARG_REG_CHG_OPER = 0x89,
+ PF1550_CHARG_REG_CHG_TMR = 0x8a,
+ PF1550_CHARG_REG_CHG_EOC_CNFG = 0x8d,
+ PF1550_CHARG_REG_CHG_CURR_CNFG = 0x8e,
+ PF1550_CHARG_REG_BATT_REG = 0x8f,
+ PF1550_CHARG_REG_BATFET_CNFG = 0x91,
+ PF1550_CHARG_REG_THM_REG_CNFG = 0x92,
+ PF1550_CHARG_REG_VBUS_INLIM_CNFG = 0x94,
+ PF1550_CHARG_REG_VBUS_LIN_DPM = 0x95,
+ PF1550_CHARG_REG_USB_PHY_LDO_CNFG = 0x96,
+ PF1550_CHARG_REG_DBNC_DELAY_TIME = 0x98,
+ PF1550_CHARG_REG_CHG_INT_CNFG = 0x99,
+ PF1550_CHARG_REG_THM_ADJ_SETTING = 0x9a,
+ PF1550_CHARG_REG_VBUS2SYS_CNFG = 0x9b,
+ PF1550_CHARG_REG_LED_PWM = 0x9c,
+ PF1550_CHARG_REG_FAULT_BATFET_CNFG = 0x9d,
+ PF1550_CHARG_REG_LED_CNFG = 0x9e,
+ PF1550_CHARG_REG_CHGR_KEY2 = 0x9f,
+
+ PF1550_TEST_REG_FMRADDR = 0xc4,
+ PF1550_TEST_REG_FMRDATA = 0xc5,
+ PF1550_TEST_REG_KEY3 = 0xdf,
+
+ PF1550_PMIC_REG_END = 0xff,
+};
+
+/* One-Time Programmable (OTP) memory */
+enum pf1550_otp_reg {
+ PF1550_OTP_SW1_SW2 = 0x1e,
+ PF1550_OTP_SW2_SW3 = 0x1f,
+};
+
+#define PF1550_DEVICE_ID 0x7c
+
+/* Keys for reading OTP */
+#define PF1550_OTP_PMIC_KEY 0x15
+#define PF1550_OTP_CHGR_KEY 0x50
+#define PF1550_OTP_TEST_KEY 0xab
+
+/* Supported charger modes */
+#define PF1550_CHG_BAT_OFF 1
+#define PF1550_CHG_BAT_ON 2
+
+#define PF1550_CHG_PRECHARGE 0
+#define PF1550_CHG_CONSTANT_CURRENT 1
+#define PF1550_CHG_CONSTANT_VOL 2
+#define PF1550_CHG_EOC 3
+#define PF1550_CHG_DONE 4
+#define PF1550_CHG_TIMER_FAULT 6
+#define PF1550_CHG_SUSPEND 7
+#define PF1550_CHG_OFF_INV 8
+#define PF1550_CHG_BAT_OVER 9
+#define PF1550_CHG_OFF_TEMP 10
+#define PF1550_CHG_LINEAR_ONLY 12
+#define PF1550_CHG_SNS_MASK 0xf
+#define PF1550_CHG_INT_MASK 0x51
+
+#define PF1550_BAT_NO_VBUS 0
+#define PF1550_BAT_LOW_THAN_PRECHARG 1
+#define PF1550_BAT_CHARG_FAIL 2
+#define PF1550_BAT_HIGH_THAN_PRECHARG 4
+#define PF1550_BAT_OVER_VOL 5
+#define PF1550_BAT_NO_DETECT 6
+#define PF1550_BAT_SNS_MASK 0x7
+
+#define PF1550_VBUS_UVLO BIT(2)
+#define PF1550_VBUS_IN2SYS BIT(3)
+#define PF1550_VBUS_OVLO BIT(4)
+#define PF1550_VBUS_VALID BIT(5)
+
+#define PF1550_CHARG_REG_BATT_REG_CHGCV_MASK 0x3f
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_SHIFT 6
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_MASK GENMASK(7, 6)
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_SHIFT 2
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_MASK GENMASK(3, 2)
+
+#define PF1550_ONKEY_RST_EN BIT(7)
+
+/* DVS enable masks */
+#define OTP_SW1_DVS_ENB BIT(1)
+#define OTP_SW2_DVS_ENB BIT(3)
+
+/* Top level interrupt masks */
+#define IRQ_REGULATOR (BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6))
+#define IRQ_ONKEY BIT(5)
+#define IRQ_CHG BIT(0)
+
+/* Regulator interrupt masks */
+#define PMIC_IRQ_SW1_LS BIT(0)
+#define PMIC_IRQ_SW2_LS BIT(1)
+#define PMIC_IRQ_SW3_LS BIT(2)
+#define PMIC_IRQ_SW1_HS BIT(0)
+#define PMIC_IRQ_SW2_HS BIT(1)
+#define PMIC_IRQ_SW3_HS BIT(2)
+#define PMIC_IRQ_LDO1_FAULT BIT(0)
+#define PMIC_IRQ_LDO2_FAULT BIT(1)
+#define PMIC_IRQ_LDO3_FAULT BIT(2)
+#define PMIC_IRQ_TEMP_110 BIT(0)
+#define PMIC_IRQ_TEMP_125 BIT(1)
+
+/* Onkey interrupt masks */
+#define ONKEY_IRQ_PUSHI BIT(0)
+#define ONKEY_IRQ_1SI BIT(1)
+#define ONKEY_IRQ_2SI BIT(2)
+#define ONKEY_IRQ_3SI BIT(3)
+#define ONKEY_IRQ_4SI BIT(4)
+#define ONKEY_IRQ_8SI BIT(5)
+
+/* Charger interrupt masks */
+#define CHARG_IRQ_BAT2SOCI BIT(1)
+#define CHARG_IRQ_BATI BIT(2)
+#define CHARG_IRQ_CHGI BIT(3)
+#define CHARG_IRQ_VBUSI BIT(5)
+#define CHARG_IRQ_DPMI BIT(6)
+#define CHARG_IRQ_THMI BIT(7)
+
+enum pf1550_irq {
+ PF1550_IRQ_CHG,
+ PF1550_IRQ_REGULATOR,
+ PF1550_IRQ_ONKEY,
+};
+
+enum pf1550_pmic_irq {
+ PF1550_PMIC_IRQ_SW1_LS,
+ PF1550_PMIC_IRQ_SW2_LS,
+ PF1550_PMIC_IRQ_SW3_LS,
+ PF1550_PMIC_IRQ_SW1_HS,
+ PF1550_PMIC_IRQ_SW2_HS,
+ PF1550_PMIC_IRQ_SW3_HS,
+ PF1550_PMIC_IRQ_LDO1_FAULT,
+ PF1550_PMIC_IRQ_LDO2_FAULT,
+ PF1550_PMIC_IRQ_LDO3_FAULT,
+ PF1550_PMIC_IRQ_TEMP_110,
+ PF1550_PMIC_IRQ_TEMP_125,
+};
+
+enum pf1550_onkey_irq {
+ PF1550_ONKEY_IRQ_PUSHI,
+ PF1550_ONKEY_IRQ_1SI,
+ PF1550_ONKEY_IRQ_2SI,
+ PF1550_ONKEY_IRQ_3SI,
+ PF1550_ONKEY_IRQ_4SI,
+ PF1550_ONKEY_IRQ_8SI,
+};
+
+enum pf1550_charg_irq {
+ PF1550_CHARG_IRQ_BAT2SOCI,
+ PF1550_CHARG_IRQ_BATI,
+ PF1550_CHARG_IRQ_CHGI,
+ PF1550_CHARG_IRQ_VBUSI,
+ PF1550_CHARG_IRQ_THMI,
+};
+
+enum pf1550_regulators {
+ PF1550_SW1,
+ PF1550_SW2,
+ PF1550_SW3,
+ PF1550_VREFDDR,
+ PF1550_LDO1,
+ PF1550_LDO2,
+ PF1550_LDO3,
+};
+
+struct pf1550_ddata {
+ struct regmap_irq_chip_data *irq_data_regulator;
+ struct regmap_irq_chip_data *irq_data_charger;
+ struct regmap_irq_chip_data *irq_data_onkey;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct device *dev;
+ bool dvs1_enable;
+ bool dvs2_enable;
+ int irq;
+};
+
+#endif /* __LINUX_MFD_PF1550_H */
diff --git a/include/linux/mfd/qnap-mcu.h b/include/linux/mfd/qnap-mcu.h
new file mode 100644
index 000000000000..42bf523f9a5b
--- /dev/null
+++ b/include/linux/mfd/qnap-mcu.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core definitions for QNAP MCU MFD driver.
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#ifndef _LINUX_QNAP_MCU_H_
+#define _LINUX_QNAP_MCU_H_
+
+#include <linux/types.h>
+
+struct qnap_mcu;
+
+struct qnap_mcu_variant {
+ u32 baud_rate;
+ int num_drives;
+ int fan_pwm_min;
+ int fan_pwm_max;
+ bool usb_led;
+};
+
+int qnap_mcu_exec(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size,
+ u8 *reply_data, size_t reply_data_size);
+int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size);
+
+#endif /* _LINUX_QNAP_MCU_H_ */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 69cbea78b430..28170ee08898 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -812,6 +812,8 @@ enum rk806_pin_dr_sel {
#define RK806_INT_POL_H BIT(1)
#define RK806_INT_POL_L 0
+/* SYS_CFG3 */
+#define RK806_RST_FUN_MSK GENMASK(7, 6)
#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
#define RK806_SLAVE_RESTART_FUN_OFF 0
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index ce786c96404a..73a71ef69152 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -189,6 +189,69 @@ enum {
/* Charger/Battery */
#define BD71828_REG_CHG_STATE 0x65
#define BD71828_REG_CHG_FULL 0xd2
+#define BD71828_REG_CHG_EN 0x6F
+#define BD71828_REG_DCIN_STAT 0x68
+#define BD71828_MASK_DCIN_DET 0x01
+#define BD71828_REG_VDCIN_U 0x9c
+#define BD71828_MASK_CHG_EN 0x01
+#define BD71828_CHG_MASK_DCIN_U 0x0f
+#define BD71828_REG_BAT_STAT 0x67
+#define BD71828_REG_BAT_TEMP 0x6c
+#define BD71828_MASK_BAT_TEMP 0x07
+#define BD71828_BAT_TEMP_OPEN 0x07
+#define BD71828_MASK_BAT_DET 0x20
+#define BD71828_MASK_BAT_DET_DONE 0x10
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_VBAT_U 0x8c
+#define BD71828_MASK_VBAT_U 0x0f
+#define BD71828_REG_VBAT_REX_AVG_U 0x92
+
+#define BD71828_REG_OCV_PWRON_U 0x8A
+
+#define BD71828_REG_VBAT_MIN_AVG_U 0x8e
+#define BD71828_REG_VBAT_MIN_AVG_L 0x8f
+
+#define BD71828_REG_CC_CNT3 0xb5
+#define BD71828_REG_CC_CNT2 0xb6
+#define BD71828_REG_CC_CNT1 0xb7
+#define BD71828_REG_CC_CNT0 0xb8
+#define BD71828_REG_CC_CURCD_AVG_U 0xb2
+#define BD71828_MASK_CC_CURCD_AVG_U 0x3f
+#define BD71828_MASK_CC_CUR_DIR 0x80
+#define BD71828_REG_VM_BTMP_U 0xa1
+#define BD71828_REG_VM_BTMP_L 0xa2
+#define BD71828_MASK_VM_BTMP_U 0x0f
+#define BD71828_REG_COULOMB_CTRL 0xc4
+#define BD71828_REG_COULOMB_CTRL2 0xd2
+#define BD71828_MASK_REX_CC_CLR 0x01
+#define BD71828_MASK_FULL_CC_CLR 0x10
+#define BD71828_REG_CC_CNT_FULL3 0xbd
+#define BD71828_REG_CC_CNT_CHG3 0xc1
+
+#define BD71828_REG_VBAT_INITIAL1_U 0x86
+#define BD71828_REG_VBAT_INITIAL1_L 0x87
+
+#define BD71828_REG_VBAT_INITIAL2_U 0x88
+#define BD71828_REG_VBAT_INITIAL2_L 0x89
+
+#define BD71828_REG_IBAT_U 0xb0
+#define BD71828_REG_IBAT_L 0xb1
+
+#define BD71828_REG_IBAT_AVG_U 0xb2
+#define BD71828_REG_IBAT_AVG_L 0xb3
+
+#define BD71828_REG_VSYS_AVG_U 0x96
+#define BD71828_REG_VSYS_AVG_L 0x97
+#define BD71828_REG_VSYS_MIN_AVG_U 0x98
+#define BD71828_REG_VSYS_MIN_AVG_L 0x99
+#define BD71828_REG_CHG_SET1 0x75
+#define BD71828_REG_ALM_VBAT_LIMIT_U 0xaa
+#define BD71828_REG_BATCAP_MON_LIMIT_U 0xcc
+#define BD71828_REG_CONF 0x64
+
+#define BD71828_REG_DCIN_CLPS 0x71
+
+#define BD71828_REG_MEAS_CLEAR 0xaf
/* LEDs */
#define BD71828_REG_LED_CTRL 0x4A
diff --git a/include/linux/mfd/rohm-bd96801.h b/include/linux/mfd/rohm-bd96801.h
new file mode 100644
index 000000000000..68c8ac8ad409
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96801.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2024 ROHM Semiconductors */
+
+#ifndef __MFD_BD96801_H__
+#define __MFD_BD96801_H__
+
+#define BD96801_REG_SSCG_CTRL 0x09
+#define BD96801_REG_SHD_INTB 0x20
+#define BD96801_LDO5_VOL_LVL_REG 0x2c
+#define BD96801_LDO6_VOL_LVL_REG 0x2d
+#define BD96801_LDO7_VOL_LVL_REG 0x2e
+#define BD96801_REG_BUCK_OVP 0x30
+#define BD96801_REG_BUCK_OVD 0x35
+#define BD96801_REG_LDO_OVP 0x31
+#define BD96801_REG_LDO_OVD 0x36
+#define BD96801_REG_BOOT_OVERTIME 0x3a
+#define BD96801_REG_WD_TMO 0x40
+#define BD96801_REG_WD_CONF 0x41
+#define BD96801_REG_WD_FEED 0x42
+#define BD96801_REG_WD_FAILCOUNT 0x43
+#define BD96801_REG_WD_ASK 0x46
+#define BD96801_REG_WD_STATUS 0x4a
+#define BD96801_REG_PMIC_STATE 0x4f
+#define BD96801_REG_EXT_STATE 0x50
+
+#define BD96801_STATE_STBY 0x09
+
+#define BD96801_LOCK_REG 0x04
+#define BD96801_UNLOCK 0x9d
+#define BD96801_LOCK 0x00
+
+/* IRQ register area */
+#define BD96801_REG_INT_MAIN 0x51
+
+/*
+ * The BD96801 has two physical IRQ lines, INTB and ERRB.
+ *
+ * The 'main status register' is located at 0x51.
+ * The ERRB status registers are located at 0x52 ... 0x5B
+ * The INTB status registers are in the range 0x5c ... 0x63
+ */
+#define BD96801_REG_INT_SYS_ERRB1 0x52
+#define BD96801_REG_INT_BUCK2_ERRB 0x56
+#define BD96801_REG_INT_SYS_INTB 0x5c
+#define BD96801_REG_INT_BUCK2_INTB 0x5e
+#define BD96801_REG_INT_LDO7_INTB 0x63
+
+/* MASK registers */
+#define BD96801_REG_MASK_SYS_INTB 0x73
+#define BD96801_REG_MASK_SYS_ERRB 0x69
+
+#define BD96801_MAX_REGISTER 0x7a
+
+#define BD96801_OTP_ERR_MASK BIT(0)
+#define BD96801_DBIST_ERR_MASK BIT(1)
+#define BD96801_EEP_ERR_MASK BIT(2)
+#define BD96801_ABIST_ERR_MASK BIT(3)
+#define BD96801_PRSTB_ERR_MASK BIT(4)
+#define BD96801_DRMOS1_ERR_MASK BIT(5)
+#define BD96801_DRMOS2_ERR_MASK BIT(6)
+#define BD96801_SLAVE_ERR_MASK BIT(7)
+#define BD96801_VREF_ERR_MASK BIT(0)
+#define BD96801_TSD_ERR_MASK BIT(1)
+#define BD96801_UVLO_ERR_MASK BIT(2)
+#define BD96801_OVLO_ERR_MASK BIT(3)
+#define BD96801_OSC_ERR_MASK BIT(4)
+#define BD96801_PON_ERR_MASK BIT(5)
+#define BD96801_POFF_ERR_MASK BIT(6)
+#define BD96801_CMD_SHDN_ERR_MASK BIT(7)
+#define BD96801_INT_PRSTB_WDT_ERR_MASK BIT(0)
+#define BD96801_INT_CHIP_IF_ERR_MASK BIT(3)
+#define BD96801_INT_SHDN_ERR_MASK BIT(7)
+#define BD96801_OUT_PVIN_ERR_MASK BIT(0)
+#define BD96801_OUT_OVP_ERR_MASK BIT(1)
+#define BD96801_OUT_UVP_ERR_MASK BIT(2)
+#define BD96801_OUT_SHDN_ERR_MASK BIT(7)
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96801_OTP_ERR_STAT,
+ BD96801_DBIST_ERR_STAT,
+ BD96801_EEP_ERR_STAT,
+ BD96801_ABIST_ERR_STAT,
+ BD96801_PRSTB_ERR_STAT,
+ BD96801_DRMOS1_ERR_STAT,
+ BD96801_DRMOS2_ERR_STAT,
+ BD96801_SLAVE_ERR_STAT,
+ BD96801_VREF_ERR_STAT,
+ BD96801_TSD_ERR_STAT,
+ BD96801_UVLO_ERR_STAT,
+ BD96801_OVLO_ERR_STAT,
+ BD96801_OSC_ERR_STAT,
+ BD96801_PON_ERR_STAT,
+ BD96801_POFF_ERR_STAT,
+ BD96801_CMD_SHDN_ERR_STAT,
+ BD96801_INT_PRSTB_WDT_ERR,
+ BD96801_INT_CHIP_IF_ERR,
+ BD96801_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96801_BUCK1_PVIN_ERR_STAT,
+ BD96801_BUCK1_OVP_ERR_STAT,
+ BD96801_BUCK1_UVP_ERR_STAT,
+ BD96801_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96801_BUCK2_PVIN_ERR_STAT,
+ BD96801_BUCK2_OVP_ERR_STAT,
+ BD96801_BUCK2_UVP_ERR_STAT,
+ BD96801_BUCK2_SHDN_ERR_STAT,
+
+ /* Reg 0x57 BUCK3 ERR IRQs */
+ BD96801_BUCK3_PVIN_ERR_STAT,
+ BD96801_BUCK3_OVP_ERR_STAT,
+ BD96801_BUCK3_UVP_ERR_STAT,
+ BD96801_BUCK3_SHDN_ERR_STAT,
+
+ /* Reg 0x58 BUCK4 ERR IRQs */
+ BD96801_BUCK4_PVIN_ERR_STAT,
+ BD96801_BUCK4_OVP_ERR_STAT,
+ BD96801_BUCK4_UVP_ERR_STAT,
+ BD96801_BUCK4_SHDN_ERR_STAT,
+
+ /* Reg 0x59 LDO5 ERR IRQs */
+ BD96801_LDO5_PVIN_ERR_STAT,
+ BD96801_LDO5_OVP_ERR_STAT,
+ BD96801_LDO5_UVP_ERR_STAT,
+ BD96801_LDO5_SHDN_ERR_STAT,
+
+ /* Reg 0x5a LDO6 ERR IRQs */
+ BD96801_LDO6_PVIN_ERR_STAT,
+ BD96801_LDO6_OVP_ERR_STAT,
+ BD96801_LDO6_UVP_ERR_STAT,
+ BD96801_LDO6_SHDN_ERR_STAT,
+
+ /* Reg 0x5b LDO7 ERR IRQs */
+ BD96801_LDO7_PVIN_ERR_STAT,
+ BD96801_LDO7_OVP_ERR_STAT,
+ BD96801_LDO7_UVP_ERR_STAT,
+ BD96801_LDO7_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96801_TW_STAT,
+ BD96801_WDT_ERR_STAT,
+ BD96801_I2C_ERR_STAT,
+ BD96801_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96801_BUCK1_OCPH_STAT,
+ BD96801_BUCK1_OCPL_STAT,
+ BD96801_BUCK1_OCPN_STAT,
+ BD96801_BUCK1_OVD_STAT,
+ BD96801_BUCK1_UVD_STAT,
+ BD96801_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96801_BUCK2_OCPH_STAT,
+ BD96801_BUCK2_OCPL_STAT,
+ BD96801_BUCK2_OCPN_STAT,
+ BD96801_BUCK2_OVD_STAT,
+ BD96801_BUCK2_UVD_STAT,
+ BD96801_BUCK2_TW_CH_STAT,
+
+ /* Reg 0x5f (BUCK3 INTB) */
+ BD96801_BUCK3_OCPH_STAT,
+ BD96801_BUCK3_OCPL_STAT,
+ BD96801_BUCK3_OCPN_STAT,
+ BD96801_BUCK3_OVD_STAT,
+ BD96801_BUCK3_UVD_STAT,
+ BD96801_BUCK3_TW_CH_STAT,
+
+ /* Reg 0x60 (BUCK4 INTB) */
+ BD96801_BUCK4_OCPH_STAT,
+ BD96801_BUCK4_OCPL_STAT,
+ BD96801_BUCK4_OCPN_STAT,
+ BD96801_BUCK4_OVD_STAT,
+ BD96801_BUCK4_UVD_STAT,
+ BD96801_BUCK4_TW_CH_STAT,
+
+ /* Reg 0x61 (LDO5 INTB) */
+ BD96801_LDO5_OCPH_STAT, /* bit [0] */
+ BD96801_LDO5_OVD_STAT, /* bit [3] */
+ BD96801_LDO5_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x62 (LDO6 INTB) */
+ BD96801_LDO6_OCPH_STAT, /* bit [0] */
+ BD96801_LDO6_OVD_STAT, /* bit [3] */
+ BD96801_LDO6_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x63 (LDO7 INTB) */
+ BD96801_LDO7_OCPH_STAT, /* bit [0] */
+ BD96801_LDO7_OVD_STAT, /* bit [3] */
+ BD96801_LDO7_UVD_STAT, /* bit [4] */
+};
+
+/* IRQ MASKs */
+#define BD96801_TW_STAT_MASK BIT(0)
+#define BD96801_WDT_ERR_STAT_MASK BIT(1)
+#define BD96801_I2C_ERR_STAT_MASK BIT(2)
+#define BD96801_CHIP_IF_ERR_STAT_MASK BIT(3)
+
+#define BD96801_BUCK_OCPH_STAT_MASK BIT(0)
+#define BD96801_BUCK_OCPL_STAT_MASK BIT(1)
+#define BD96801_BUCK_OCPN_STAT_MASK BIT(2)
+#define BD96801_BUCK_OVD_STAT_MASK BIT(3)
+#define BD96801_BUCK_UVD_STAT_MASK BIT(4)
+#define BD96801_BUCK_TW_CH_STAT_MASK BIT(5)
+
+#define BD96801_LDO_OCPH_STAT_MASK BIT(0)
+#define BD96801_LDO_OVD_STAT_MASK BIT(3)
+#define BD96801_LDO_UVD_STAT_MASK BIT(4)
+
+#endif
diff --git a/include/linux/mfd/rohm-bd96802.h b/include/linux/mfd/rohm-bd96802.h
new file mode 100644
index 000000000000..bf4b77944edf
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96802.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 ROHM Semiconductors
+ *
+ * The digital interface of the BD96802 PMIC is a reduced version of the
+ * BD96801. Hence the BD96801 definitions are used for registers and masks
+ * while this header only holds the IRQ definitions - mainly to avoid gaps in
+ * IRQ numbers caused by the lack of some BUCKs / LDOs and their respective
+ * IRQs.
+ */
+
+#ifndef __LINUX_MFD_BD96802_H__
+#define __LINUX_MFD_BD96802_H__
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96802_OTP_ERR_STAT,
+ BD96802_DBIST_ERR_STAT,
+ BD96802_EEP_ERR_STAT,
+ BD96802_ABIST_ERR_STAT,
+ BD96802_PRSTB_ERR_STAT,
+ BD96802_DRMOS1_ERR_STAT,
+ BD96802_DRMOS2_ERR_STAT,
+ BD96802_SLAVE_ERR_STAT,
+ BD96802_VREF_ERR_STAT,
+ BD96802_TSD_ERR_STAT,
+ BD96802_UVLO_ERR_STAT,
+ BD96802_OVLO_ERR_STAT,
+ BD96802_OSC_ERR_STAT,
+ BD96802_PON_ERR_STAT,
+ BD96802_POFF_ERR_STAT,
+ BD96802_CMD_SHDN_ERR_STAT,
+ BD96802_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96802_BUCK1_PVIN_ERR_STAT,
+ BD96802_BUCK1_OVP_ERR_STAT,
+ BD96802_BUCK1_UVP_ERR_STAT,
+ BD96802_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96802_BUCK2_PVIN_ERR_STAT,
+ BD96802_BUCK2_OVP_ERR_STAT,
+ BD96802_BUCK2_UVP_ERR_STAT,
+ BD96802_BUCK2_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96802_TW_STAT,
+ BD96802_WDT_ERR_STAT,
+ BD96802_I2C_ERR_STAT,
+ BD96802_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96802_BUCK1_OCPH_STAT,
+ BD96802_BUCK1_OCPL_STAT,
+ BD96802_BUCK1_OCPN_STAT,
+ BD96802_BUCK1_OVD_STAT,
+ BD96802_BUCK1_UVD_STAT,
+ BD96802_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96802_BUCK2_OCPH_STAT,
+ BD96802_BUCK2_OCPL_STAT,
+ BD96802_BUCK2_OCPN_STAT,
+ BD96802_BUCK2_OVD_STAT,
+ BD96802_BUCK2_UVD_STAT,
+ BD96802_BUCK2_TW_CH_STAT,
+};
+
+#endif
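As the header comment explains, the BD96802 enums exist only to keep IRQ numbering dense; registers and masks come from the BD96801 header. A hedged sketch of how the two headers would combine in an IRQ table (bank offsets again illustrative):

#include <linux/regmap.h>
#include <linux/mfd/rohm-bd96801.h>
#include <linux/mfd/rohm-bd96802.h>

/*
 * Sketch: BD96802 IRQ numbers paired with BD96801 mask definitions,
 * since the register layout is shared between the two chips.
 */
static const struct regmap_irq bd96802_intb_irqs[] = {
	REGMAP_IRQ_REG(BD96802_TW_STAT, 0, BD96801_TW_STAT_MASK),
	REGMAP_IRQ_REG(BD96802_BUCK1_OCPH_STAT, 1, BD96801_BUCK_OCPH_STAT_MASK),
	REGMAP_IRQ_REG(BD96802_BUCK2_OCPH_STAT, 2, BD96801_BUCK_OCPH_STAT_MASK),
};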
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4eeb22876bad..579e8dcfcca4 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -16,6 +16,10 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_BD71837,
ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD96801,
+ ROHM_CHIP_TYPE_BD96802,
+ ROHM_CHIP_TYPE_BD96805,
+ ROHM_CHIP_TYPE_BD96806,
ROHM_CHIP_TYPE_AMOUNT
};
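With the four new entries, code shared across ROHM PMICs can branch on the variant. A minimal sketch; the helper name is hypothetical:

#include <linux/mfd/rohm-generic.h>

/* Hypothetical helper: map a detected chip type to a name string. */
static const char *rohm_bd9680x_name(enum rohm_chip_type type)
{
	switch (type) {
	case ROHM_CHIP_TYPE_BD96801:
		return "bd96801";
	case ROHM_CHIP_TYPE_BD96802:
		return "bd96802";
	case ROHM_CHIP_TYPE_BD96805:
		return "bd96805";
	case ROHM_CHIP_TYPE_BD96806:
		return "bd96806";
	default:
		return "unknown";
	}
}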
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index a212b9f72bc9..d785e101fe79 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -37,12 +37,15 @@ struct gpio_desc;
enum sec_device_type {
S5M8767X,
+ S2DOS05,
S2MPA01,
+ S2MPG10,
S2MPS11X,
S2MPS13X,
S2MPS14X,
S2MPS15X,
S2MPU02,
+ S2MPU05,
};
/**
@@ -64,15 +67,11 @@ struct sec_pmic_dev {
struct regmap *regmap_pmic;
struct i2c_client *i2c;
- unsigned long device_type;
+ int device_type;
int irq;
struct regmap_irq_chip_data *irq_data;
};
-int sec_irq_init(struct sec_pmic_dev *sec_pmic);
-void sec_irq_exit(struct sec_pmic_dev *sec_pmic);
-int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
-
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 3fd2775eb9bb..8402a5f8e18a 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -57,6 +57,115 @@ enum s2mpa01_irq {
#define S2MPA01_IRQ_B24_TSD_MASK (1 << 4)
#define S2MPA01_IRQ_B35_TSD_MASK (1 << 5)
+enum s2mpg10_common_irq {
+ /* Top-level (common) block */
+ S2MPG10_COMMON_IRQ_PMIC,
+ S2MPG10_COMMON_IRQ_UNUSED,
+};
+
+enum s2mpg10_irq {
+ /* PMIC */
+ S2MPG10_IRQ_PWRONF,
+ S2MPG10_IRQ_PWRONR,
+ S2MPG10_IRQ_JIGONBF,
+ S2MPG10_IRQ_JIGONBR,
+ S2MPG10_IRQ_ACOKBF,
+ S2MPG10_IRQ_ACOKBR,
+ S2MPG10_IRQ_PWRON1S,
+ S2MPG10_IRQ_MRB,
+#define S2MPG10_IRQ_PWRONF_MASK BIT(0)
+#define S2MPG10_IRQ_PWRONR_MASK BIT(1)
+#define S2MPG10_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPG10_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPG10_IRQ_ACOKBF_MASK BIT(4)
+#define S2MPG10_IRQ_ACOKBR_MASK BIT(5)
+#define S2MPG10_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPG10_IRQ_MRB_MASK BIT(7)
+
+ S2MPG10_IRQ_RTC60S,
+ S2MPG10_IRQ_RTCA1,
+ S2MPG10_IRQ_RTCA0,
+ S2MPG10_IRQ_RTC1S,
+ S2MPG10_IRQ_WTSR_COLDRST,
+ S2MPG10_IRQ_WTSR,
+ S2MPG10_IRQ_WRST,
+ S2MPG10_IRQ_SMPL,
+#define S2MPG10_IRQ_RTC60S_MASK BIT(0)
+#define S2MPG10_IRQ_RTCA1_MASK BIT(1)
+#define S2MPG10_IRQ_RTCA0_MASK BIT(2)
+#define S2MPG10_IRQ_RTC1S_MASK BIT(3)
+#define S2MPG10_IRQ_WTSR_COLDRST_MASK BIT(4)
+#define S2MPG10_IRQ_WTSR_MASK BIT(5)
+#define S2MPG10_IRQ_WRST_MASK BIT(6)
+#define S2MPG10_IRQ_SMPL_MASK BIT(7)
+
+ S2MPG10_IRQ_120C,
+ S2MPG10_IRQ_140C,
+ S2MPG10_IRQ_TSD,
+ S2MPG10_IRQ_PIF_TIMEOUT1,
+ S2MPG10_IRQ_PIF_TIMEOUT2,
+ S2MPG10_IRQ_SPD_PARITY_ERR,
+ S2MPG10_IRQ_SPD_ABNORMAL_STOP,
+ S2MPG10_IRQ_PMETER_OVERF,
+#define S2MPG10_IRQ_INT120C_MASK BIT(0)
+#define S2MPG10_IRQ_INT140C_MASK BIT(1)
+#define S2MPG10_IRQ_TSD_MASK BIT(2)
+#define S2MPG10_IRQ_PIF_TIMEOUT1_MASK BIT(3)
+#define S2MPG10_IRQ_PIF_TIMEOUT2_MASK BIT(4)
+#define S2MPG10_IRQ_SPD_PARITY_ERR_MASK BIT(5)
+#define S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK BIT(6)
+#define S2MPG10_IRQ_PMETER_OVERF_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B1M,
+ S2MPG10_IRQ_OCP_B2M,
+ S2MPG10_IRQ_OCP_B3M,
+ S2MPG10_IRQ_OCP_B4M,
+ S2MPG10_IRQ_OCP_B5M,
+ S2MPG10_IRQ_OCP_B6M,
+ S2MPG10_IRQ_OCP_B7M,
+ S2MPG10_IRQ_OCP_B8M,
+#define S2MPG10_IRQ_OCP_B1M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B2M_MASK BIT(1)
+#define S2MPG10_IRQ_OCP_B3M_MASK BIT(2)
+#define S2MPG10_IRQ_OCP_B4M_MASK BIT(3)
+#define S2MPG10_IRQ_OCP_B5M_MASK BIT(4)
+#define S2MPG10_IRQ_OCP_B6M_MASK BIT(5)
+#define S2MPG10_IRQ_OCP_B7M_MASK BIT(6)
+#define S2MPG10_IRQ_OCP_B8M_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B9M,
+ S2MPG10_IRQ_OCP_B10M,
+ S2MPG10_IRQ_WLWP_ACC,
+ S2MPG10_IRQ_SMPL_TIMEOUT,
+ S2MPG10_IRQ_WTSR_TIMEOUT,
+ S2MPG10_IRQ_SPD_SRP_PKT_RST,
+#define S2MPG10_IRQ_OCP_B9M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B10M_MASK BIT(1)
+#define S2MPG10_IRQ_WLWP_ACC_MASK BIT(2)
+#define S2MPG10_IRQ_SMPL_TIMEOUT_MASK BIT(5)
+#define S2MPG10_IRQ_WTSR_TIMEOUT_MASK BIT(6)
+#define S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK BIT(7)
+
+ S2MPG10_IRQ_PWR_WARN_CH0,
+ S2MPG10_IRQ_PWR_WARN_CH1,
+ S2MPG10_IRQ_PWR_WARN_CH2,
+ S2MPG10_IRQ_PWR_WARN_CH3,
+ S2MPG10_IRQ_PWR_WARN_CH4,
+ S2MPG10_IRQ_PWR_WARN_CH5,
+ S2MPG10_IRQ_PWR_WARN_CH6,
+ S2MPG10_IRQ_PWR_WARN_CH7,
+#define S2MPG10_IRQ_PWR_WARN_CH0_MASK BIT(0)
+#define S2MPG10_IRQ_PWR_WARN_CH1_MASK BIT(1)
+#define S2MPG10_IRQ_PWR_WARN_CH2_MASK BIT(2)
+#define S2MPG10_IRQ_PWR_WARN_CH3_MASK BIT(3)
+#define S2MPG10_IRQ_PWR_WARN_CH4_MASK BIT(4)
+#define S2MPG10_IRQ_PWR_WARN_CH5_MASK BIT(5)
+#define S2MPG10_IRQ_PWR_WARN_CH6_MASK BIT(6)
+#define S2MPG10_IRQ_PWR_WARN_CH7_MASK BIT(7)
+
+ S2MPG10_IRQ_NR,
+};
+
enum s2mps11_irq {
S2MPS11_IRQ_PWRONF,
S2MPS11_IRQ_PWRONR,
@@ -150,6 +259,50 @@ enum s2mpu02_irq {
/* Masks for interrupts are the same as in s2mps11 */
#define S2MPS14_IRQ_TSD_MASK (1 << 2)
+enum s2mpu05_irq {
+ S2MPU05_IRQ_PWRONF,
+ S2MPU05_IRQ_PWRONR,
+ S2MPU05_IRQ_JIGONBF,
+ S2MPU05_IRQ_JIGONBR,
+ S2MPU05_IRQ_ACOKF,
+ S2MPU05_IRQ_ACOKR,
+ S2MPU05_IRQ_PWRON1S,
+ S2MPU05_IRQ_MRB,
+
+ S2MPU05_IRQ_RTC60S,
+ S2MPU05_IRQ_RTCA1,
+ S2MPU05_IRQ_RTCA0,
+ S2MPU05_IRQ_SMPL,
+ S2MPU05_IRQ_RTC1S,
+ S2MPU05_IRQ_WTSR,
+
+ S2MPU05_IRQ_INT120C,
+ S2MPU05_IRQ_INT140C,
+ S2MPU05_IRQ_TSD,
+
+ S2MPU05_IRQ_NR,
+};
+
+#define S2MPU05_IRQ_PWRONF_MASK BIT(0)
+#define S2MPU05_IRQ_PWRONR_MASK BIT(1)
+#define S2MPU05_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPU05_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPU05_IRQ_ACOKF_MASK BIT(4)
+#define S2MPU05_IRQ_ACOKR_MASK BIT(5)
+#define S2MPU05_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPU05_IRQ_MRB_MASK BIT(7)
+
+#define S2MPU05_IRQ_RTC60S_MASK BIT(0)
+#define S2MPU05_IRQ_RTCA1_MASK BIT(1)
+#define S2MPU05_IRQ_RTCA0_MASK BIT(2)
+#define S2MPU05_IRQ_SMPL_MASK BIT(3)
+#define S2MPU05_IRQ_RTC1S_MASK BIT(4)
+#define S2MPU05_IRQ_WTSR_MASK BIT(5)
+
+#define S2MPU05_IRQ_INT120C_MASK BIT(0)
+#define S2MPU05_IRQ_INT140C_MASK BIT(1)
+#define S2MPU05_IRQ_TSD_MASK BIT(2)
+
enum s5m8767_irq {
S5M8767_IRQ_PWRR,
S5M8767_IRQ_PWRF,
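Note that the S2MPG10 banks are not always dense - S2MPG10_IRQ_SMPL_TIMEOUT is the fourth source listed for its register yet sits at bit 5 - which is why each IRQ carries an explicit *_MASK instead of deriving the bit from the enum position. A hedged regmap-irq pairing (the register offset, here 4 for the fifth INT register, is an assumption):

#include <linux/regmap.h>
#include <linux/mfd/samsung/irq.h>

/* Sketch: explicit masks cope with sparse banks such as this one. */
static const struct regmap_irq s2mpg10_pmic_irqs[] = {
	REGMAP_IRQ_REG(S2MPG10_IRQ_OCP_B9M, 4, S2MPG10_IRQ_OCP_B9M_MASK),
	REGMAP_IRQ_REG(S2MPG10_IRQ_SMPL_TIMEOUT, 4, S2MPG10_IRQ_SMPL_TIMEOUT_MASK),
	REGMAP_IRQ_REG(S2MPG10_IRQ_WTSR_TIMEOUT, 4, S2MPG10_IRQ_WTSR_TIMEOUT_MASK),
};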
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 0204decfc9aa..51c4239a1fa6 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -72,6 +72,37 @@ enum s2mps_rtc_reg {
S2MPS_RTC_REG_MAX,
};
+enum s2mpg10_rtc_reg {
+ S2MPG10_RTC_CTRL,
+ S2MPG10_RTC_UPDATE,
+ S2MPG10_RTC_SMPL,
+ S2MPG10_RTC_WTSR,
+ S2MPG10_RTC_CAP_SEL,
+ S2MPG10_RTC_MSEC,
+ S2MPG10_RTC_SEC,
+ S2MPG10_RTC_MIN,
+ S2MPG10_RTC_HOUR,
+ S2MPG10_RTC_WEEK,
+ S2MPG10_RTC_DAY,
+ S2MPG10_RTC_MON,
+ S2MPG10_RTC_YEAR,
+ S2MPG10_RTC_A0SEC,
+ S2MPG10_RTC_A0MIN,
+ S2MPG10_RTC_A0HOUR,
+ S2MPG10_RTC_A0WEEK,
+ S2MPG10_RTC_A0DAY,
+ S2MPG10_RTC_A0MON,
+ S2MPG10_RTC_A0YEAR,
+ S2MPG10_RTC_A1SEC,
+ S2MPG10_RTC_A1MIN,
+ S2MPG10_RTC_A1HOUR,
+ S2MPG10_RTC_A1WEEK,
+ S2MPG10_RTC_A1DAY,
+ S2MPG10_RTC_A1MON,
+ S2MPG10_RTC_A1YEAR,
+ S2MPG10_RTC_OSC_CTRL,
+};
+
#define RTC_I2C_ADDR (0x0C >> 1)
#define HOUR_12 (1 << 7)
@@ -124,10 +155,16 @@ enum s2mps_rtc_reg {
#define ALARM_ENABLE_SHIFT 7
#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+/* WTSR & SMPL registers */
#define SMPL_ENABLE_SHIFT 7
#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
#define WTSR_ENABLE_SHIFT 6
#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
+#define S2MPG10_WTSR_COLDTIMER GENMASK(6, 5)
+#define S2MPG10_WTSR_COLDRST BIT(4)
+#define S2MPG10_WTSR_WTSRT GENMASK(3, 1)
+#define S2MPG10_WTSR_WTSR_EN BIT(0)
+
#endif /* __LINUX_MFD_SEC_RTC_H */
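The new S2MPG10 WTSR bitfields are GENMASK/BIT based, so they compose naturally with FIELD_PREP. A minimal sketch, assuming the RTC regmap is available and the S2MPG10_RTC_WTSR enum value doubles as the register address:

#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/samsung/rtc.h>

/* Sketch: arm WTSR with a given timer-select value. */
static int s2mpg10_wtsr_arm(struct regmap *rtc, unsigned int wtsrt)
{
	unsigned int val = FIELD_PREP(S2MPG10_WTSR_WTSRT, wtsrt) |
			   S2MPG10_WTSR_WTSR_EN;

	return regmap_update_bits(rtc, S2MPG10_RTC_WTSR,
				  S2MPG10_WTSR_WTSRT | S2MPG10_WTSR_WTSR_EN,
				  val);
}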
diff --git a/include/linux/mfd/samsung/s2mpg10.h b/include/linux/mfd/samsung/s2mpg10.h
new file mode 100644
index 000000000000..9f5919b89a3c
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpg10.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015 Samsung Electronics
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_S2MPG10_H
+#define __LINUX_MFD_S2MPG10_H
+
+/* Common registers (type 0x000) */
+enum s2mpg10_common_reg {
+ S2MPG10_COMMON_CHIPID,
+ S2MPG10_COMMON_INT,
+ S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_SPD_CTRL1 = 0x0a,
+ S2MPG10_COMMON_SPD_CTRL2,
+ S2MPG10_COMMON_SPD_CTRL3,
+ S2MPG10_COMMON_MON1SEL = 0x1a,
+ S2MPG10_COMMON_MON2SEL,
+ S2MPG10_COMMON_MONR,
+ S2MPG10_COMMON_DEBUG_CTRL1,
+ S2MPG10_COMMON_DEBUG_CTRL2,
+ S2MPG10_COMMON_DEBUG_CTRL3,
+ S2MPG10_COMMON_DEBUG_CTRL4,
+ S2MPG10_COMMON_DEBUG_CTRL5,
+ S2MPG10_COMMON_DEBUG_CTRL6,
+ S2MPG10_COMMON_DEBUG_CTRL7,
+ S2MPG10_COMMON_DEBUG_CTRL8,
+ S2MPG10_COMMON_TEST_MODE1,
+ S2MPG10_COMMON_TEST_MODE2,
+ S2MPG10_COMMON_SPD_DEBUG1,
+ S2MPG10_COMMON_SPD_DEBUG2,
+ S2MPG10_COMMON_SPD_DEBUG3,
+ S2MPG10_COMMON_SPD_DEBUG4,
+};
+
+/* For S2MPG10_COMMON_INT and S2MPG10_COMMON_INT_MASK */
+#define S2MPG10_COMMON_INT_SRC GENMASK(7, 0)
+#define S2MPG10_COMMON_INT_SRC_PMIC BIT(0)
+
+/* PMIC registers (type 0x100) */
+enum s2mpg10_pmic_reg {
+ S2MPG10_PMIC_INT1,
+ S2MPG10_PMIC_INT2,
+ S2MPG10_PMIC_INT3,
+ S2MPG10_PMIC_INT4,
+ S2MPG10_PMIC_INT5,
+ S2MPG10_PMIC_INT6,
+ S2MPG10_PMIC_INT1M,
+ S2MPG10_PMIC_INT2M,
+ S2MPG10_PMIC_INT3M,
+ S2MPG10_PMIC_INT4M,
+ S2MPG10_PMIC_INT5M,
+ S2MPG10_PMIC_INT6M,
+ S2MPG10_PMIC_STATUS1,
+ S2MPG10_PMIC_STATUS2,
+ S2MPG10_PMIC_PWRONSRC,
+ S2MPG10_PMIC_OFFSRC,
+ S2MPG10_PMIC_BU_CHG,
+ S2MPG10_PMIC_RTCBUF,
+ S2MPG10_PMIC_COMMON_CTRL1,
+ S2MPG10_PMIC_COMMON_CTRL2,
+ S2MPG10_PMIC_COMMON_CTRL3,
+ S2MPG10_PMIC_COMMON_CTRL4,
+ S2MPG10_PMIC_SMPL_WARN_CTRL,
+ S2MPG10_PMIC_MIMICKING_CTRL,
+ S2MPG10_PMIC_B1M_CTRL,
+ S2MPG10_PMIC_B1M_OUT1,
+ S2MPG10_PMIC_B1M_OUT2,
+ S2MPG10_PMIC_B2M_CTRL,
+ S2MPG10_PMIC_B2M_OUT1,
+ S2MPG10_PMIC_B2M_OUT2,
+ S2MPG10_PMIC_B3M_CTRL,
+ S2MPG10_PMIC_B3M_OUT1,
+ S2MPG10_PMIC_B3M_OUT2,
+ S2MPG10_PMIC_B4M_CTRL,
+ S2MPG10_PMIC_B4M_OUT1,
+ S2MPG10_PMIC_B4M_OUT2,
+ S2MPG10_PMIC_B5M_CTRL,
+ S2MPG10_PMIC_B5M_OUT1,
+ S2MPG10_PMIC_B5M_OUT2,
+ S2MPG10_PMIC_B6M_CTRL,
+ S2MPG10_PMIC_B6M_OUT1,
+ S2MPG10_PMIC_B6M_OUT2,
+ S2MPG10_PMIC_B7M_CTRL,
+ S2MPG10_PMIC_B7M_OUT1,
+ S2MPG10_PMIC_B7M_OUT2,
+ S2MPG10_PMIC_B8M_CTRL,
+ S2MPG10_PMIC_B8M_OUT1,
+ S2MPG10_PMIC_B8M_OUT2,
+ S2MPG10_PMIC_B9M_CTRL,
+ S2MPG10_PMIC_B9M_OUT1,
+ S2MPG10_PMIC_B9M_OUT2,
+ S2MPG10_PMIC_B10M_CTRL,
+ S2MPG10_PMIC_B10M_OUT1,
+ S2MPG10_PMIC_B10M_OUT2,
+ S2MPG10_PMIC_BUCK1M_USONIC,
+ S2MPG10_PMIC_BUCK2M_USONIC,
+ S2MPG10_PMIC_BUCK3M_USONIC,
+ S2MPG10_PMIC_BUCK4M_USONIC,
+ S2MPG10_PMIC_BUCK5M_USONIC,
+ S2MPG10_PMIC_BUCK6M_USONIC,
+ S2MPG10_PMIC_BUCK7M_USONIC,
+ S2MPG10_PMIC_BUCK8M_USONIC,
+ S2MPG10_PMIC_BUCK9M_USONIC,
+ S2MPG10_PMIC_BUCK10M_USONIC,
+ S2MPG10_PMIC_L1M_CTRL,
+ S2MPG10_PMIC_L2M_CTRL,
+ S2MPG10_PMIC_L3M_CTRL,
+ S2MPG10_PMIC_L4M_CTRL,
+ S2MPG10_PMIC_L5M_CTRL,
+ S2MPG10_PMIC_L6M_CTRL,
+ S2MPG10_PMIC_L7M_CTRL,
+ S2MPG10_PMIC_L8M_CTRL,
+ S2MPG10_PMIC_L9M_CTRL,
+ S2MPG10_PMIC_L10M_CTRL,
+ S2MPG10_PMIC_L11M_CTRL1,
+ S2MPG10_PMIC_L11M_CTRL2,
+ S2MPG10_PMIC_L12M_CTRL1,
+ S2MPG10_PMIC_L12M_CTRL2,
+ S2MPG10_PMIC_L13M_CTRL1,
+ S2MPG10_PMIC_L13M_CTRL2,
+ S2MPG10_PMIC_L14M_CTRL,
+ S2MPG10_PMIC_L15M_CTRL1,
+ S2MPG10_PMIC_L15M_CTRL2,
+ S2MPG10_PMIC_L16M_CTRL,
+ S2MPG10_PMIC_L17M_CTRL,
+ S2MPG10_PMIC_L18M_CTRL,
+ S2MPG10_PMIC_L19M_CTRL,
+ S2MPG10_PMIC_L20M_CTRL,
+ S2MPG10_PMIC_L21M_CTRL,
+ S2MPG10_PMIC_L22M_CTRL,
+ S2MPG10_PMIC_L23M_CTRL,
+ S2MPG10_PMIC_L24M_CTRL,
+ S2MPG10_PMIC_L25M_CTRL,
+ S2MPG10_PMIC_L26M_CTRL,
+ S2MPG10_PMIC_L27M_CTRL,
+ S2MPG10_PMIC_L28M_CTRL,
+ S2MPG10_PMIC_L29M_CTRL,
+ S2MPG10_PMIC_L30M_CTRL,
+ S2MPG10_PMIC_L31M_CTRL,
+ S2MPG10_PMIC_LDO_CTRL1,
+ S2MPG10_PMIC_LDO_CTRL2,
+ S2MPG10_PMIC_LDO_DSCH1,
+ S2MPG10_PMIC_LDO_DSCH2,
+ S2MPG10_PMIC_LDO_DSCH3,
+ S2MPG10_PMIC_LDO_DSCH4,
+ S2MPG10_PMIC_LDO_BUCK7M_HLIMIT,
+ S2MPG10_PMIC_LDO_BUCK7M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_HLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO11M_HLIMIT,
+ S2MPG10_PMIC_DVS_RAMP1,
+ S2MPG10_PMIC_DVS_RAMP2,
+ S2MPG10_PMIC_DVS_RAMP3,
+ S2MPG10_PMIC_DVS_RAMP4,
+ S2MPG10_PMIC_DVS_RAMP5,
+ S2MPG10_PMIC_DVS_RAMP6,
+ S2MPG10_PMIC_DVS_SYNC_CTRL1,
+ S2MPG10_PMIC_DVS_SYNC_CTRL2,
+ S2MPG10_PMIC_DVS_SYNC_CTRL3,
+ S2MPG10_PMIC_DVS_SYNC_CTRL4,
+ S2MPG10_PMIC_DVS_SYNC_CTRL5,
+ S2MPG10_PMIC_DVS_SYNC_CTRL6,
+ S2MPG10_PMIC_OFF_CTRL1,
+ S2MPG10_PMIC_OFF_CTRL2,
+ S2MPG10_PMIC_OFF_CTRL3,
+ S2MPG10_PMIC_OFF_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL1,
+ S2MPG10_PMIC_SEQ_CTRL2,
+ S2MPG10_PMIC_SEQ_CTRL3,
+ S2MPG10_PMIC_SEQ_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL5,
+ S2MPG10_PMIC_SEQ_CTRL6,
+ S2MPG10_PMIC_SEQ_CTRL7,
+ S2MPG10_PMIC_SEQ_CTRL8,
+ S2MPG10_PMIC_SEQ_CTRL9,
+ S2MPG10_PMIC_SEQ_CTRL10,
+ S2MPG10_PMIC_SEQ_CTRL11,
+ S2MPG10_PMIC_SEQ_CTRL12,
+ S2MPG10_PMIC_SEQ_CTRL13,
+ S2MPG10_PMIC_SEQ_CTRL14,
+ S2MPG10_PMIC_SEQ_CTRL15,
+ S2MPG10_PMIC_SEQ_CTRL16,
+ S2MPG10_PMIC_SEQ_CTRL17,
+ S2MPG10_PMIC_SEQ_CTRL18,
+ S2MPG10_PMIC_SEQ_CTRL19,
+ S2MPG10_PMIC_SEQ_CTRL20,
+ S2MPG10_PMIC_SEQ_CTRL21,
+ S2MPG10_PMIC_SEQ_CTRL22,
+ S2MPG10_PMIC_SEQ_CTRL23,
+ S2MPG10_PMIC_SEQ_CTRL24,
+ S2MPG10_PMIC_SEQ_CTRL25,
+ S2MPG10_PMIC_SEQ_CTRL26,
+ S2MPG10_PMIC_SEQ_CTRL27,
+ S2MPG10_PMIC_SEQ_CTRL28,
+ S2MPG10_PMIC_SEQ_CTRL29,
+ S2MPG10_PMIC_SEQ_CTRL30,
+ S2MPG10_PMIC_SEQ_CTRL31,
+ S2MPG10_PMIC_SEQ_CTRL32,
+ S2MPG10_PMIC_SEQ_CTRL33,
+ S2MPG10_PMIC_SEQ_CTRL34,
+ S2MPG10_PMIC_SEQ_CTRL35,
+ S2MPG10_PMIC_OFF_SEQ_CTRL1,
+ S2MPG10_PMIC_OFF_SEQ_CTRL2,
+ S2MPG10_PMIC_OFF_SEQ_CTRL3,
+ S2MPG10_PMIC_OFF_SEQ_CTRL4,
+ S2MPG10_PMIC_OFF_SEQ_CTRL5,
+ S2MPG10_PMIC_OFF_SEQ_CTRL6,
+ S2MPG10_PMIC_OFF_SEQ_CTRL7,
+ S2MPG10_PMIC_OFF_SEQ_CTRL8,
+ S2MPG10_PMIC_OFF_SEQ_CTRL9,
+ S2MPG10_PMIC_OFF_SEQ_CTRL10,
+ S2MPG10_PMIC_OFF_SEQ_CTRL11,
+ S2MPG10_PMIC_OFF_SEQ_CTRL12,
+ S2MPG10_PMIC_OFF_SEQ_CTRL13,
+ S2MPG10_PMIC_OFF_SEQ_CTRL14,
+ S2MPG10_PMIC_OFF_SEQ_CTRL15,
+ S2MPG10_PMIC_OFF_SEQ_CTRL16,
+ S2MPG10_PMIC_OFF_SEQ_CTRL17,
+ S2MPG10_PMIC_OFF_SEQ_CTRL18,
+ S2MPG10_PMIC_PCTRLSEL1,
+ S2MPG10_PMIC_PCTRLSEL2,
+ S2MPG10_PMIC_PCTRLSEL3,
+ S2MPG10_PMIC_PCTRLSEL4,
+ S2MPG10_PMIC_PCTRLSEL5,
+ S2MPG10_PMIC_PCTRLSEL6,
+ S2MPG10_PMIC_PCTRLSEL7,
+ S2MPG10_PMIC_PCTRLSEL8,
+ S2MPG10_PMIC_PCTRLSEL9,
+ S2MPG10_PMIC_PCTRLSEL10,
+ S2MPG10_PMIC_PCTRLSEL11,
+ S2MPG10_PMIC_PCTRLSEL12,
+ S2MPG10_PMIC_PCTRLSEL13,
+ S2MPG10_PMIC_DCTRLSEL1,
+ S2MPG10_PMIC_DCTRLSEL2,
+ S2MPG10_PMIC_DCTRLSEL3,
+ S2MPG10_PMIC_DCTRLSEL4,
+ S2MPG10_PMIC_DCTRLSEL5,
+ S2MPG10_PMIC_DCTRLSEL6,
+ S2MPG10_PMIC_DCTRLSEL7,
+ S2MPG10_PMIC_GPIO_CTRL1,
+ S2MPG10_PMIC_GPIO_CTRL2,
+ S2MPG10_PMIC_GPIO_CTRL3,
+ S2MPG10_PMIC_GPIO_CTRL4,
+ S2MPG10_PMIC_GPIO_CTRL5,
+ S2MPG10_PMIC_GPIO_CTRL6,
+ S2MPG10_PMIC_GPIO_CTRL7,
+ S2MPG10_PMIC_B2M_OCP_WARN,
+ S2MPG10_PMIC_B2M_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_OCP_WARN,
+ S2MPG10_PMIC_B3M_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_OCP_WARN,
+ S2MPG10_PMIC_B10M_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_OCP_WARN_Z,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_BUCK_OCP_EN1,
+ S2MPG10_PMIC_BUCK_OCP_EN2,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN1,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL1,
+ S2MPG10_PMIC_BUCK_OCP_CTRL2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL3,
+ S2MPG10_PMIC_BUCK_OCP_CTRL4,
+ S2MPG10_PMIC_BUCK_OCP_CTRL5,
+ S2MPG10_PMIC_PIF_CTRL,
+ S2MPG10_PMIC_BUCK_HR_MODE1,
+ S2MPG10_PMIC_BUCK_HR_MODE2,
+ S2MPG10_PMIC_FAULTOUT_CTRL,
+ S2MPG10_PMIC_LDO_SENSE1,
+ S2MPG10_PMIC_LDO_SENSE2,
+ S2MPG10_PMIC_LDO_SENSE3,
+ S2MPG10_PMIC_LDO_SENSE4,
+};
+
+/* Meter registers (type 0xa00) */
+enum s2mpg10_meter_reg {
+ S2MPG10_METER_CTRL1,
+ S2MPG10_METER_CTRL2,
+ S2MPG10_METER_CTRL3,
+ S2MPG10_METER_CTRL4,
+ S2MPG10_METER_BUCKEN1,
+ S2MPG10_METER_BUCKEN2,
+ S2MPG10_METER_MUXSEL0,
+ S2MPG10_METER_MUXSEL1,
+ S2MPG10_METER_MUXSEL2,
+ S2MPG10_METER_MUXSEL3,
+ S2MPG10_METER_MUXSEL4,
+ S2MPG10_METER_MUXSEL5,
+ S2MPG10_METER_MUXSEL6,
+ S2MPG10_METER_MUXSEL7,
+ S2MPG10_METER_LPF_C0_0,
+ S2MPG10_METER_LPF_C0_1,
+ S2MPG10_METER_LPF_C0_2,
+ S2MPG10_METER_LPF_C0_3,
+ S2MPG10_METER_LPF_C0_4,
+ S2MPG10_METER_LPF_C0_5,
+ S2MPG10_METER_LPF_C0_6,
+ S2MPG10_METER_LPF_C0_7,
+ S2MPG10_METER_PWR_WARN0,
+ S2MPG10_METER_PWR_WARN1,
+ S2MPG10_METER_PWR_WARN2,
+ S2MPG10_METER_PWR_WARN3,
+ S2MPG10_METER_PWR_WARN4,
+ S2MPG10_METER_PWR_WARN5,
+ S2MPG10_METER_PWR_WARN6,
+ S2MPG10_METER_PWR_WARN7,
+ S2MPG10_METER_PWR_HYS1,
+ S2MPG10_METER_PWR_HYS2,
+ S2MPG10_METER_PWR_HYS3,
+ S2MPG10_METER_PWR_HYS4,
+ S2MPG10_METER_ACC_DATA_CH0_1 = 0x40,
+ S2MPG10_METER_ACC_DATA_CH0_2,
+ S2MPG10_METER_ACC_DATA_CH0_3,
+ S2MPG10_METER_ACC_DATA_CH0_4,
+ S2MPG10_METER_ACC_DATA_CH0_5,
+ S2MPG10_METER_ACC_DATA_CH0_6,
+ S2MPG10_METER_ACC_DATA_CH1_1,
+ S2MPG10_METER_ACC_DATA_CH1_2,
+ S2MPG10_METER_ACC_DATA_CH1_3,
+ S2MPG10_METER_ACC_DATA_CH1_4,
+ S2MPG10_METER_ACC_DATA_CH1_5,
+ S2MPG10_METER_ACC_DATA_CH1_6,
+ S2MPG10_METER_ACC_DATA_CH2_1,
+ S2MPG10_METER_ACC_DATA_CH2_2,
+ S2MPG10_METER_ACC_DATA_CH2_3,
+ S2MPG10_METER_ACC_DATA_CH2_4,
+ S2MPG10_METER_ACC_DATA_CH2_5,
+ S2MPG10_METER_ACC_DATA_CH2_6,
+ S2MPG10_METER_ACC_DATA_CH3_1,
+ S2MPG10_METER_ACC_DATA_CH3_2,
+ S2MPG10_METER_ACC_DATA_CH3_3,
+ S2MPG10_METER_ACC_DATA_CH3_4,
+ S2MPG10_METER_ACC_DATA_CH3_5,
+ S2MPG10_METER_ACC_DATA_CH3_6,
+ S2MPG10_METER_ACC_DATA_CH4_1,
+ S2MPG10_METER_ACC_DATA_CH4_2,
+ S2MPG10_METER_ACC_DATA_CH4_3,
+ S2MPG10_METER_ACC_DATA_CH4_4,
+ S2MPG10_METER_ACC_DATA_CH4_5,
+ S2MPG10_METER_ACC_DATA_CH4_6,
+ S2MPG10_METER_ACC_DATA_CH5_1,
+ S2MPG10_METER_ACC_DATA_CH5_2,
+ S2MPG10_METER_ACC_DATA_CH5_3,
+ S2MPG10_METER_ACC_DATA_CH5_4,
+ S2MPG10_METER_ACC_DATA_CH5_5,
+ S2MPG10_METER_ACC_DATA_CH5_6,
+ S2MPG10_METER_ACC_DATA_CH6_1,
+ S2MPG10_METER_ACC_DATA_CH6_2,
+ S2MPG10_METER_ACC_DATA_CH6_3,
+ S2MPG10_METER_ACC_DATA_CH6_4,
+ S2MPG10_METER_ACC_DATA_CH6_5,
+ S2MPG10_METER_ACC_DATA_CH6_6,
+ S2MPG10_METER_ACC_DATA_CH7_1,
+ S2MPG10_METER_ACC_DATA_CH7_2,
+ S2MPG10_METER_ACC_DATA_CH7_3,
+ S2MPG10_METER_ACC_DATA_CH7_4,
+ S2MPG10_METER_ACC_DATA_CH7_5,
+ S2MPG10_METER_ACC_DATA_CH7_6,
+ S2MPG10_METER_ACC_COUNT_1,
+ S2MPG10_METER_ACC_COUNT_2,
+ S2MPG10_METER_ACC_COUNT_3,
+ S2MPG10_METER_LPF_DATA_CH0_1,
+ S2MPG10_METER_LPF_DATA_CH0_2,
+ S2MPG10_METER_LPF_DATA_CH0_3,
+ S2MPG10_METER_LPF_DATA_CH1_1,
+ S2MPG10_METER_LPF_DATA_CH1_2,
+ S2MPG10_METER_LPF_DATA_CH1_3,
+ S2MPG10_METER_LPF_DATA_CH2_1,
+ S2MPG10_METER_LPF_DATA_CH2_2,
+ S2MPG10_METER_LPF_DATA_CH2_3,
+ S2MPG10_METER_LPF_DATA_CH3_1,
+ S2MPG10_METER_LPF_DATA_CH3_2,
+ S2MPG10_METER_LPF_DATA_CH3_3,
+ S2MPG10_METER_LPF_DATA_CH4_1,
+ S2MPG10_METER_LPF_DATA_CH4_2,
+ S2MPG10_METER_LPF_DATA_CH4_3,
+ S2MPG10_METER_LPF_DATA_CH5_1,
+ S2MPG10_METER_LPF_DATA_CH5_2,
+ S2MPG10_METER_LPF_DATA_CH5_3,
+ S2MPG10_METER_LPF_DATA_CH6_1,
+ S2MPG10_METER_LPF_DATA_CH6_2,
+ S2MPG10_METER_LPF_DATA_CH6_3,
+ S2MPG10_METER_LPF_DATA_CH7_1,
+ S2MPG10_METER_LPF_DATA_CH7_2,
+ S2MPG10_METER_LPF_DATA_CH7_3,
+ S2MPG10_METER_DSM_TRIM_OFFSET = 0xee,
+ S2MPG10_METER_BUCK_METER_TRIM3 = 0xf1,
+};
+
+/* S2MPG10 regulator IDs */
+enum s2mpg10_regulators {
+ S2MPG10_LDO1,
+ S2MPG10_LDO2,
+ S2MPG10_LDO3,
+ S2MPG10_LDO4,
+ S2MPG10_LDO5,
+ S2MPG10_LDO6,
+ S2MPG10_LDO7,
+ S2MPG10_LDO8,
+ S2MPG10_LDO9,
+ S2MPG10_LDO10,
+ S2MPG10_LDO11,
+ S2MPG10_LDO12,
+ S2MPG10_LDO13,
+ S2MPG10_LDO14,
+ S2MPG10_LDO15,
+ S2MPG10_LDO16,
+ S2MPG10_LDO17,
+ S2MPG10_LDO18,
+ S2MPG10_LDO19,
+ S2MPG10_LDO20,
+ S2MPG10_LDO21,
+ S2MPG10_LDO22,
+ S2MPG10_LDO23,
+ S2MPG10_LDO24,
+ S2MPG10_LDO25,
+ S2MPG10_LDO26,
+ S2MPG10_LDO27,
+ S2MPG10_LDO28,
+ S2MPG10_LDO29,
+ S2MPG10_LDO30,
+ S2MPG10_LDO31,
+ S2MPG10_BUCK1,
+ S2MPG10_BUCK2,
+ S2MPG10_BUCK3,
+ S2MPG10_BUCK4,
+ S2MPG10_BUCK5,
+ S2MPG10_BUCK6,
+ S2MPG10_BUCK7,
+ S2MPG10_BUCK8,
+ S2MPG10_BUCK9,
+ S2MPG10_BUCK10,
+ S2MPG10_REGULATOR_MAX,
+};
+
+#endif /* __LINUX_MFD_S2MPG10_H */
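Each meter channel exposes its accumulator as six consecutive CHn_1..CHn_6 registers. A sketch of reading one channel back; the LSB-first byte order is an assumption for illustration, not datasheet fact:

#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/mfd/samsung/s2mpg10.h>

/* Sketch: bulk-read a 48-bit accumulator (byte order assumed). */
static int s2mpg10_meter_read_acc(struct regmap *meter, unsigned int ch,
				  u64 *out)
{
	u8 buf[6];
	unsigned int i;
	int ret;

	ret = regmap_bulk_read(meter, S2MPG10_METER_ACC_DATA_CH0_1 + ch * 6,
			       buf, sizeof(buf));
	if (ret)
		return ret;

	*out = 0;
	for (i = 0; i < 6; i++)
		*out |= (u64)buf[i] << (8 * i);

	return 0;
}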
diff --git a/include/linux/mfd/samsung/s2mpu05.h b/include/linux/mfd/samsung/s2mpu05.h
new file mode 100644
index 000000000000..fcdb6c8adb03
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpu05.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#ifndef __LINUX_MFD_S2MPU05_H
+#define __LINUX_MFD_S2MPU05_H
+
+/* S2MPU05 registers */
+enum S2MPU05_reg {
+ S2MPU05_REG_ID,
+ S2MPU05_REG_INT1,
+ S2MPU05_REG_INT2,
+ S2MPU05_REG_INT3,
+ S2MPU05_REG_INT1M,
+ S2MPU05_REG_INT2M,
+ S2MPU05_REG_INT3M,
+ S2MPU05_REG_ST1,
+ S2MPU05_REG_ST2,
+ S2MPU05_REG_PWRONSRC,
+ S2MPU05_REG_OFFSRC,
+ S2MPU05_REG_BU_CHG,
+ S2MPU05_REG_RTC_BUF,
+ S2MPU05_REG_CTRL1,
+ S2MPU05_REG_CTRL2,
+ S2MPU05_REG_ETC_TEST,
+ S2MPU05_REG_OTP_ADRL,
+ S2MPU05_REG_OTP_ADRH,
+ S2MPU05_REG_OTP_DATA,
+ S2MPU05_REG_MON1SEL,
+ S2MPU05_REG_MON2SEL,
+ S2MPU05_REG_CTRL3,
+ S2MPU05_REG_ETC_OTP,
+ S2MPU05_REG_UVLO,
+ S2MPU05_REG_TIME_CTRL1,
+ S2MPU05_REG_TIME_CTRL2,
+ S2MPU05_REG_B1CTRL1,
+ S2MPU05_REG_B1CTRL2,
+ S2MPU05_REG_B2CTRL1,
+ S2MPU05_REG_B2CTRL2,
+ S2MPU05_REG_B2CTRL3,
+ S2MPU05_REG_B2CTRL4,
+ S2MPU05_REG_B3CTRL1,
+ S2MPU05_REG_B3CTRL2,
+ S2MPU05_REG_B3CTRL3,
+ S2MPU05_REG_B4CTRL1,
+ S2MPU05_REG_B4CTRL2,
+ S2MPU05_REG_B5CTRL1,
+ S2MPU05_REG_B5CTRL2,
+ S2MPU05_REG_BUCK_RAMP,
+ S2MPU05_REG_LDO_DVS1,
+ S2MPU05_REG_LDO_DVS9,
+ S2MPU05_REG_LDO_DVS10,
+ S2MPU05_REG_L1CTRL,
+ S2MPU05_REG_L2CTRL,
+ S2MPU05_REG_L3CTRL,
+ S2MPU05_REG_L4CTRL,
+ S2MPU05_REG_L5CTRL,
+ S2MPU05_REG_L6CTRL,
+ S2MPU05_REG_L7CTRL,
+ S2MPU05_REG_L8CTRL,
+ S2MPU05_REG_L9CTRL1,
+ S2MPU05_REG_L9CTRL2,
+ S2MPU05_REG_L10CTRL,
+ S2MPU05_REG_L11CTRL1,
+ S2MPU05_REG_L11CTRL2,
+ S2MPU05_REG_L12CTRL,
+ S2MPU05_REG_L13CTRL,
+ S2MPU05_REG_L14CTRL,
+ S2MPU05_REG_L15CTRL,
+ S2MPU05_REG_L16CTRL,
+ S2MPU05_REG_L17CTRL1,
+ S2MPU05_REG_L17CTRL2,
+ S2MPU05_REG_L18CTRL1,
+ S2MPU05_REG_L18CTRL2,
+ S2MPU05_REG_L19CTRL,
+ S2MPU05_REG_L20CTRL,
+ S2MPU05_REG_L21CTRL,
+ S2MPU05_REG_L22CTRL,
+ S2MPU05_REG_L23CTRL,
+ S2MPU05_REG_L24CTRL,
+ S2MPU05_REG_L25CTRL,
+ S2MPU05_REG_L26CTRL,
+ S2MPU05_REG_L27CTRL,
+ S2MPU05_REG_L28CTRL,
+ S2MPU05_REG_L29CTRL,
+ S2MPU05_REG_L30CTRL,
+ S2MPU05_REG_L31CTRL,
+ S2MPU05_REG_L32CTRL,
+ S2MPU05_REG_L33CTRL,
+ S2MPU05_REG_L34CTRL,
+ S2MPU05_REG_L35CTRL,
+ S2MPU05_REG_LDO_DSCH1,
+ S2MPU05_REG_LDO_DSCH2,
+ S2MPU05_REG_LDO_DSCH3,
+ S2MPU05_REG_LDO_DSCH4,
+ S2MPU05_REG_LDO_DSCH5,
+ S2MPU05_REG_LDO_CTRL1,
+ S2MPU05_REG_LDO_CTRL2,
+ S2MPU05_REG_TCXO_CTRL,
+ S2MPU05_REG_SELMIF,
+};
+
+/* S2MPU05 regulator ids */
+enum S2MPU05_regulators {
+ S2MPU05_LDO1,
+ S2MPU05_LDO2,
+ S2MPU05_LDO3,
+ S2MPU05_LDO4,
+ S2MPU05_LDO5,
+ S2MPU05_LDO6,
+ S2MPU05_LDO7,
+ S2MPU05_LDO8,
+ S2MPU05_LDO9,
+ S2MPU05_LDO10,
+ S2MPU05_LDO11,
+ S2MPU05_LDO12,
+ S2MPU05_LDO13,
+ S2MPU05_LDO14,
+ S2MPU05_LDO15,
+ S2MPU05_LDO16,
+ S2MPU05_LDO17,
+ S2MPU05_LDO18,
+ S2MPU05_LDO19,
+ S2MPU05_LDO20,
+ S2MPU05_LDO21,
+ S2MPU05_LDO22,
+ S2MPU05_LDO23,
+ S2MPU05_LDO24,
+ S2MPU05_LDO25,
+ S2MPU05_LDO26,
+ S2MPU05_LDO27,
+ S2MPU05_LDO28,
+ S2MPU05_LDO29,
+ S2MPU05_LDO30,
+ S2MPU05_LDO31,
+ S2MPU05_LDO32,
+ S2MPU05_LDO33,
+ S2MPU05_LDO34,
+ S2MPU05_LDO35,
+ S2MPU05_BUCK1,
+ S2MPU05_BUCK2,
+ S2MPU05_BUCK3,
+ S2MPU05_BUCK4,
+ S2MPU05_BUCK5,
+
+ S2MPU05_REGULATOR_MAX,
+};
+
+#define S2MPU05_SW_ENABLE_MASK 0x03
+
+#define S2MPU05_ENABLE_TIME_LDO 128
+#define S2MPU05_ENABLE_TIME_BUCK1 110
+#define S2MPU05_ENABLE_TIME_BUCK2 110
+#define S2MPU05_ENABLE_TIME_BUCK3 110
+#define S2MPU05_ENABLE_TIME_BUCK4 150
+#define S2MPU05_ENABLE_TIME_BUCK5 150
+
+#define S2MPU05_LDO_MIN1 800000
+#define S2MPU05_LDO_MIN2 1800000
+#define S2MPU05_LDO_MIN3 400000
+#define S2MPU05_LDO_STEP1 12500
+#define S2MPU05_LDO_STEP2 25000
+
+#define S2MPU05_BUCK_MIN1 400000
+#define S2MPU05_BUCK_MIN2 600000
+#define S2MPU05_BUCK_STEP1 6250
+#define S2MPU05_BUCK_STEP2 12500
+
+#define S2MPU05_RAMP_DELAY 12000 /* uV/us */
+
+#define S2MPU05_ENABLE_SHIFT 6
+#define S2MPU05_ENABLE_MASK (0x03 << S2MPU05_ENABLE_SHIFT)
+
+#define S2MPU05_LDO_VSEL_MASK 0x3F
+#define S2MPU05_BUCK_VSEL_MASK 0xFF
+#define S2MPU05_LDO_N_VOLTAGES (S2MPU05_LDO_VSEL_MASK + 1)
+#define S2MPU05_BUCK_N_VOLTAGES (S2MPU05_BUCK_VSEL_MASK + 1)
+
+#define S2MPU05_PMIC_EN_SHIFT 6
+
+#endif /* __LINUX_MFD_S2MPU05_H */
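The MIN/STEP constants encode the usual linear selector mapping; which pair applies to which regulator is driver knowledge. Assuming BUCK1 uses the 400 mV / 6.25 mV range, the selector-to-voltage conversion would be:

/* Sketch, assuming BUCK1 uses the MIN1/STEP1 linear range. */
static inline int s2mpu05_buck1_sel_to_uV(unsigned int sel)
{
	return S2MPU05_BUCK_MIN1 + sel * S2MPU05_BUCK_STEP1;	/* in uV */
}

so selector 0x20 would give 400000 + 32 * 6250 = 600000 uV, i.e. 0.6 V.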
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
deleted file mode 100644
index 2001ca5c44a9..000000000000
--- a/include/linux/mfd/sta2x11-mfd.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
- *
- * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
- * functions in one PCI endpoint functions. This driver simply
- * registers the platform devices in this iomemregion and exports a few
- * functions to access common registers
- */
-
-#ifndef __STA2X11_MFD_H
-#define __STA2X11_MFD_H
-#include <linux/types.h>
-#include <linux/pci.h>
-
-enum sta2x11_mfd_plat_dev {
- sta2x11_sctl = 0,
- sta2x11_gpio,
- sta2x11_scr,
- sta2x11_time,
- sta2x11_apbreg,
- sta2x11_apb_soc_regs,
- sta2x11_vic,
- sta2x11_n_mfd_plat_devs,
-};
-
-#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl"
-#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio"
-#define STA2X11_MFD_SCR_NAME "sta2x11-scr"
-#define STA2X11_MFD_TIME_NAME "sta2x11-time"
-#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg"
-#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs"
-#define STA2X11_MFD_VIC_NAME "sta2x11-vic"
-
-extern u32
-__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
-
-/*
- * The MFD PCI block includes the GPIO peripherals and other register blocks.
- * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
- */
-#define GSTA_GPIO_PER_BLOCK 32
-#define GSTA_NR_BLOCKS 4
-#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
-
-/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
-struct sta2x11_gpio_pdata {
- unsigned pinconfig[GSTA_NR_GPIO];
-};
-
-/* Macros below lifted from sh_pfc.h, with minor differences */
-#define PINMUX_TYPE_NONE 0
-#define PINMUX_TYPE_FUNCTION 1
-#define PINMUX_TYPE_OUTPUT_LOW 2
-#define PINMUX_TYPE_OUTPUT_HIGH 3
-#define PINMUX_TYPE_INPUT 4
-#define PINMUX_TYPE_INPUT_PULLUP 5
-#define PINMUX_TYPE_INPUT_PULLDOWN 6
-
-/* Give names to GPIO pins, like PXA does, taken from the manual */
-#define STA2X11_GPIO0 0
-#define STA2X11_GPIO1 1
-#define STA2X11_GPIO2 2
-#define STA2X11_GPIO3 3
-#define STA2X11_GPIO4 4
-#define STA2X11_GPIO5 5
-#define STA2X11_GPIO6 6
-#define STA2X11_GPIO7 7
-#define STA2X11_GPIO8_RGBOUT_RED7 8
-#define STA2X11_GPIO9_RGBOUT_RED6 9
-#define STA2X11_GPIO10_RGBOUT_RED5 10
-#define STA2X11_GPIO11_RGBOUT_RED4 11
-#define STA2X11_GPIO12_RGBOUT_RED3 12
-#define STA2X11_GPIO13_RGBOUT_RED2 13
-#define STA2X11_GPIO14_RGBOUT_RED1 14
-#define STA2X11_GPIO15_RGBOUT_RED0 15
-#define STA2X11_GPIO16_RGBOUT_GREEN7 16
-#define STA2X11_GPIO17_RGBOUT_GREEN6 17
-#define STA2X11_GPIO18_RGBOUT_GREEN5 18
-#define STA2X11_GPIO19_RGBOUT_GREEN4 19
-#define STA2X11_GPIO20_RGBOUT_GREEN3 20
-#define STA2X11_GPIO21_RGBOUT_GREEN2 21
-#define STA2X11_GPIO22_RGBOUT_GREEN1 22
-#define STA2X11_GPIO23_RGBOUT_GREEN0 23
-#define STA2X11_GPIO24_RGBOUT_BLUE7 24
-#define STA2X11_GPIO25_RGBOUT_BLUE6 25
-#define STA2X11_GPIO26_RGBOUT_BLUE5 26
-#define STA2X11_GPIO27_RGBOUT_BLUE4 27
-#define STA2X11_GPIO28_RGBOUT_BLUE3 28
-#define STA2X11_GPIO29_RGBOUT_BLUE2 29
-#define STA2X11_GPIO30_RGBOUT_BLUE1 30
-#define STA2X11_GPIO31_RGBOUT_BLUE0 31
-#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
-#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
-#define STA2X11_GPIO34_RGBOUT_DEN 34
-#define STA2X11_GPIO35_ETH_CRS_DV 35
-#define STA2X11_GPIO36_ETH_TXD1 36
-#define STA2X11_GPIO37_ETH_TXD0 37
-#define STA2X11_GPIO38_ETH_TX_EN 38
-#define STA2X11_GPIO39_MDIO 39
-#define STA2X11_GPIO40_ETH_REF_CLK 40
-#define STA2X11_GPIO41_ETH_RXD1 41
-#define STA2X11_GPIO42_ETH_RXD0 42
-#define STA2X11_GPIO43_MDC 43
-#define STA2X11_GPIO44_CAN_TX 44
-#define STA2X11_GPIO45_CAN_RX 45
-#define STA2X11_GPIO46_MLB_DAT 46
-#define STA2X11_GPIO47_MLB_SIG 47
-#define STA2X11_GPIO48_SPI0_CLK 48
-#define STA2X11_GPIO49_SPI0_TXD 49
-#define STA2X11_GPIO50_SPI0_RXD 50
-#define STA2X11_GPIO51_SPI0_FRM 51
-#define STA2X11_GPIO52_SPI1_CLK 52
-#define STA2X11_GPIO53_SPI1_TXD 53
-#define STA2X11_GPIO54_SPI1_RXD 54
-#define STA2X11_GPIO55_SPI1_FRM 55
-#define STA2X11_GPIO56_SPI2_CLK 56
-#define STA2X11_GPIO57_SPI2_TXD 57
-#define STA2X11_GPIO58_SPI2_RXD 58
-#define STA2X11_GPIO59_SPI2_FRM 59
-#define STA2X11_GPIO60_I2C0_SCL 60
-#define STA2X11_GPIO61_I2C0_SDA 61
-#define STA2X11_GPIO62_I2C1_SCL 62
-#define STA2X11_GPIO63_I2C1_SDA 63
-#define STA2X11_GPIO64_I2C2_SCL 64
-#define STA2X11_GPIO65_I2C2_SDA 65
-#define STA2X11_GPIO66_I2C3_SCL 66
-#define STA2X11_GPIO67_I2C3_SDA 67
-#define STA2X11_GPIO68_MSP0_RCK 68
-#define STA2X11_GPIO69_MSP0_RXD 69
-#define STA2X11_GPIO70_MSP0_RFS 70
-#define STA2X11_GPIO71_MSP0_TCK 71
-#define STA2X11_GPIO72_MSP0_TXD 72
-#define STA2X11_GPIO73_MSP0_TFS 73
-#define STA2X11_GPIO74_MSP0_SCK 74
-#define STA2X11_GPIO75_MSP1_CK 75
-#define STA2X11_GPIO76_MSP1_RXD 76
-#define STA2X11_GPIO77_MSP1_FS 77
-#define STA2X11_GPIO78_MSP1_TXD 78
-#define STA2X11_GPIO79_MSP2_CK 79
-#define STA2X11_GPIO80_MSP2_RXD 80
-#define STA2X11_GPIO81_MSP2_FS 81
-#define STA2X11_GPIO82_MSP2_TXD 82
-#define STA2X11_GPIO83_MSP3_CK 83
-#define STA2X11_GPIO84_MSP3_RXD 84
-#define STA2X11_GPIO85_MSP3_FS 85
-#define STA2X11_GPIO86_MSP3_TXD 86
-#define STA2X11_GPIO87_MSP4_CK 87
-#define STA2X11_GPIO88_MSP4_RXD 88
-#define STA2X11_GPIO89_MSP4_FS 89
-#define STA2X11_GPIO90_MSP4_TXD 90
-#define STA2X11_GPIO91_MSP5_CK 91
-#define STA2X11_GPIO92_MSP5_RXD 92
-#define STA2X11_GPIO93_MSP5_FS 93
-#define STA2X11_GPIO94_MSP5_TXD 94
-#define STA2X11_GPIO95_SDIO3_DAT3 95
-#define STA2X11_GPIO96_SDIO3_DAT2 96
-#define STA2X11_GPIO97_SDIO3_DAT1 97
-#define STA2X11_GPIO98_SDIO3_DAT0 98
-#define STA2X11_GPIO99_SDIO3_CLK 99
-#define STA2X11_GPIO100_SDIO3_CMD 100
-#define STA2X11_GPIO101 101
-#define STA2X11_GPIO102 102
-#define STA2X11_GPIO103 103
-#define STA2X11_GPIO104 104
-#define STA2X11_GPIO105_SDIO2_DAT3 105
-#define STA2X11_GPIO106_SDIO2_DAT2 106
-#define STA2X11_GPIO107_SDIO2_DAT1 107
-#define STA2X11_GPIO108_SDIO2_DAT0 108
-#define STA2X11_GPIO109_SDIO2_CLK 109
-#define STA2X11_GPIO110_SDIO2_CMD 110
-#define STA2X11_GPIO111 111
-#define STA2X11_GPIO112 112
-#define STA2X11_GPIO113 113
-#define STA2X11_GPIO114 114
-#define STA2X11_GPIO115_SDIO1_DAT3 115
-#define STA2X11_GPIO116_SDIO1_DAT2 116
-#define STA2X11_GPIO117_SDIO1_DAT1 117
-#define STA2X11_GPIO118_SDIO1_DAT0 118
-#define STA2X11_GPIO119_SDIO1_CLK 119
-#define STA2X11_GPIO120_SDIO1_CMD 120
-#define STA2X11_GPIO121 121
-#define STA2X11_GPIO122 122
-#define STA2X11_GPIO123 123
-#define STA2X11_GPIO124 124
-#define STA2X11_GPIO125_UART2_TXD 125
-#define STA2X11_GPIO126_UART2_RXD 126
-#define STA2X11_GPIO127_UART3_TXD 127
-
-/*
- * The APB bridge has its own registers, needed by our users as well.
- * They are accessed with the following read/mask/write function.
- */
-static inline u32
-sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
-}
-
-/* CAN and MLB */
-#define APBREG_BSR 0x00 /* Bridge Status Reg */
-#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
-#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
-#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_CAN (1 << 1)
-#define APBREG_MLB (1 << 3)
-
-/* SARAC */
-#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
-#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
-#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
-#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_SARAC (1 << 2)
-
-/*
- * The system controller has its own registers. Some of these are accessed
- * by out users as well, using the following read/mask/write/function
- */
-static inline
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
-}
-
-#define SCTL_SCCTL 0x00 /* System controller control register */
-#define SCTL_ARMCFG 0x04 /* ARM configuration register */
-#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
-
-#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1)
-#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3)
-#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6)
-#define SCTL_SCPLLCTL_STRB_INPUT BIT(8)
-
-#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
-
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4
-
-
-#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
-
-#define SCTL_SCRESFRACT_MASK 0x0000ffff
-
-
-#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
-#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
-#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
-#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
-#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
-#define SCTL_SCGRST 0x28 /* Peripheral global reset */
-#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */
-#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
-#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
-#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
-#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
-#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
-#define SCTL_SCINTREN 0x44 /* Interrupt enable */
-#define SCTL_SCRISR 0x48 /* RAW interrupt status */
-#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
-#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
-#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
-#define SCTL_SCRSTSTA 0x58 /* Reset status register */
-
-#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
-#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
-#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
-#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
-#define SCTL_SCRESCTRL1_SATAII (1 << 4)
-#define SCTL_SCRESCTRL1_VIP (1 << 5)
-#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
-#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
-#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
-#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
-#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
-#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
-#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
-#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
-#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
-#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
-#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
-#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
-#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
-#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
-#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
-#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
-#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
-#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
-#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
-#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
-#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
-#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
-#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
-#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
-#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
-#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
-
-#define SCTL_SCPEREN0_UART0 (1 << 0)
-#define SCTL_SCPEREN0_UART1 (1 << 1)
-#define SCTL_SCPEREN0_UART2 (1 << 2)
-#define SCTL_SCPEREN0_UART3 (1 << 3)
-#define SCTL_SCPEREN0_MSP0 (1 << 4)
-#define SCTL_SCPEREN0_MSP1 (1 << 5)
-#define SCTL_SCPEREN0_MSP2 (1 << 6)
-#define SCTL_SCPEREN0_MSP3 (1 << 7)
-#define SCTL_SCPEREN0_MSP4 (1 << 8)
-#define SCTL_SCPEREN0_MSP5 (1 << 9)
-#define SCTL_SCPEREN0_SPI0 (1 << 10)
-#define SCTL_SCPEREN0_SPI1 (1 << 11)
-#define SCTL_SCPEREN0_SPI2 (1 << 12)
-#define SCTL_SCPEREN0_I2C0 (1 << 13)
-#define SCTL_SCPEREN0_I2C1 (1 << 14)
-#define SCTL_SCPEREN0_I2C2 (1 << 15)
-#define SCTL_SCPEREN0_I2C3 (1 << 16)
-#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
-#define SCTL_SCPEREN0_USB_HOST (1 << 18)
-#define SCTL_SCPEREN0_USB_OTG (1 << 19)
-#define SCTL_SCPEREN0_MCI0 (1 << 20)
-#define SCTL_SCPEREN0_MCI1 (1 << 21)
-#define SCTL_SCPEREN0_MCI2 (1 << 22)
-#define SCTL_SCPEREN0_MCI3 (1 << 23)
-#define SCTL_SCPEREN0_SATA (1 << 24)
-#define SCTL_SCPEREN0_ETHERNET (1 << 25)
-#define SCTL_SCPEREN0_VIC (1 << 26)
-#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
-#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
-#define SCTL_SCPEREN0_RAM (1 << 29)
-#define SCTL_SCPEREN0_VIP (1 << 30)
-#define SCTL_SCPEREN0_ARM (1 << 31)
-
-#define SCTL_SCPEREN1_UART0 (1 << 0)
-#define SCTL_SCPEREN1_UART1 (1 << 1)
-#define SCTL_SCPEREN1_UART2 (1 << 2)
-#define SCTL_SCPEREN1_UART3 (1 << 3)
-#define SCTL_SCPEREN1_MSP0 (1 << 4)
-#define SCTL_SCPEREN1_MSP1 (1 << 5)
-#define SCTL_SCPEREN1_MSP2 (1 << 6)
-#define SCTL_SCPEREN1_MSP3 (1 << 7)
-#define SCTL_SCPEREN1_MSP4 (1 << 8)
-#define SCTL_SCPEREN1_MSP5 (1 << 9)
-#define SCTL_SCPEREN1_SPI0 (1 << 10)
-#define SCTL_SCPEREN1_SPI1 (1 << 11)
-#define SCTL_SCPEREN1_SPI2 (1 << 12)
-#define SCTL_SCPEREN1_I2C0 (1 << 13)
-#define SCTL_SCPEREN1_I2C1 (1 << 14)
-#define SCTL_SCPEREN1_I2C2 (1 << 15)
-#define SCTL_SCPEREN1_I2C3 (1 << 16)
-#define SCTL_SCPEREN1_USB_PHY (1 << 17)
-
-/*
- * APB-SOC registers
- */
-static inline
-u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
-}
-
-#define PCIE_EP1_FUNC3_0_INTR_REG 0x000
-#define PCIE_EP1_FUNC7_4_INTR_REG 0x004
-#define PCIE_EP2_FUNC3_0_INTR_REG 0x008
-#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c
-#define PCIE_EP3_FUNC3_0_INTR_REG 0x010
-#define PCIE_EP3_FUNC7_4_INTR_REG 0x014
-#define PCIE_EP4_FUNC3_0_INTR_REG 0x018
-#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c
-#define PCIE_INTR_ENABLE0_REG 0x020
-#define PCIE_INTR_ENABLE1_REG 0x024
-#define PCIE_EP1_FUNC_TC_REG 0x028
-#define PCIE_EP2_FUNC_TC_REG 0x02c
-#define PCIE_EP3_FUNC_TC_REG 0x030
-#define PCIE_EP4_FUNC_TC_REG 0x034
-#define PCIE_EP1_FUNC_F_REG 0x038
-#define PCIE_EP2_FUNC_F_REG 0x03c
-#define PCIE_EP3_FUNC_F_REG 0x040
-#define PCIE_EP4_FUNC_F_REG 0x044
-#define PCIE_PAB_AMBA_SW_RST_REG 0x048
-#define PCIE_PM_STATUS_0_PORT_0_4 0x04c
-#define PCIE_PM_STATUS_7_0_EP1 0x050
-#define PCIE_PM_STATUS_7_0_EP2 0x054
-#define PCIE_PM_STATUS_7_0_EP3 0x058
-#define PCIE_PM_STATUS_7_0_EP4 0x05c
-#define PCIE_DEV_ID_0_EP1_REG 0x060
-#define PCIE_CC_REV_ID_0_EP1_REG 0x064
-#define PCIE_DEV_ID_1_EP1_REG 0x068
-#define PCIE_CC_REV_ID_1_EP1_REG 0x06c
-#define PCIE_DEV_ID_2_EP1_REG 0x070
-#define PCIE_CC_REV_ID_2_EP1_REG 0x074
-#define PCIE_DEV_ID_3_EP1_REG 0x078
-#define PCIE_CC_REV_ID_3_EP1_REG 0x07c
-#define PCIE_DEV_ID_4_EP1_REG 0x080
-#define PCIE_CC_REV_ID_4_EP1_REG 0x084
-#define PCIE_DEV_ID_5_EP1_REG 0x088
-#define PCIE_CC_REV_ID_5_EP1_REG 0x08c
-#define PCIE_DEV_ID_6_EP1_REG 0x090
-#define PCIE_CC_REV_ID_6_EP1_REG 0x094
-#define PCIE_DEV_ID_7_EP1_REG 0x098
-#define PCIE_CC_REV_ID_7_EP1_REG 0x09c
-#define PCIE_DEV_ID_0_EP2_REG 0x0a0
-#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4
-#define PCIE_DEV_ID_1_EP2_REG 0x0a8
-#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac
-#define PCIE_DEV_ID_2_EP2_REG 0x0b0
-#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4
-#define PCIE_DEV_ID_3_EP2_REG 0x0b8
-#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc
-#define PCIE_DEV_ID_4_EP2_REG 0x0c0
-#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4
-#define PCIE_DEV_ID_5_EP2_REG 0x0c8
-#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc
-#define PCIE_DEV_ID_6_EP2_REG 0x0d0
-#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4
-#define PCIE_DEV_ID_7_EP2_REG 0x0d8
-#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC
-#define PCIE_DEV_ID_0_EP3_REG 0x0e0
-#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4
-#define PCIE_DEV_ID_1_EP3_REG 0x0e8
-#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec
-#define PCIE_DEV_ID_2_EP3_REG 0x0f0
-#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4
-#define PCIE_DEV_ID_3_EP3_REG 0x0f8
-#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc
-#define PCIE_DEV_ID_4_EP3_REG 0x100
-#define PCIE_CC_REV_ID_4_EP3_REG 0x104
-#define PCIE_DEV_ID_5_EP3_REG 0x108
-#define PCIE_CC_REV_ID_5_EP3_REG 0x10c
-#define PCIE_DEV_ID_6_EP3_REG 0x110
-#define PCIE_CC_REV_ID_6_EP3_REG 0x114
-#define PCIE_DEV_ID_7_EP3_REG 0x118
-#define PCIE_CC_REV_ID_7_EP3_REG 0x11c
-#define PCIE_DEV_ID_0_EP4_REG 0x120
-#define PCIE_CC_REV_ID_0_EP4_REG 0x124
-#define PCIE_DEV_ID_1_EP4_REG 0x128
-#define PCIE_CC_REV_ID_1_EP4_REG 0x12c
-#define PCIE_DEV_ID_2_EP4_REG 0x130
-#define PCIE_CC_REV_ID_2_EP4_REG 0x134
-#define PCIE_DEV_ID_3_EP4_REG 0x138
-#define PCIE_CC_REV_ID_3_EP4_REG 0x13c
-#define PCIE_DEV_ID_4_EP4_REG 0x140
-#define PCIE_CC_REV_ID_4_EP4_REG 0x144
-#define PCIE_DEV_ID_5_EP4_REG 0x148
-#define PCIE_CC_REV_ID_5_EP4_REG 0x14c
-#define PCIE_DEV_ID_6_EP4_REG 0x150
-#define PCIE_CC_REV_ID_6_EP4_REG 0x154
-#define PCIE_DEV_ID_7_EP4_REG 0x158
-#define PCIE_CC_REV_ID_7_EP4_REG 0x15c
-#define PCIE_SUBSYS_VEN_ID_REG 0x160
-#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
-#define PCIE_MIPHYP_SSC_EN_REG 0x168
-#define PCIE_MIPHYP_ADDR_REG 0x16c
-#define PCIE_L1_ASPM_READY_REG 0x170
-#define PCIE_EXT_CFG_RDY_REG 0x174
-#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
-#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
-#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
-#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
-#define DMA_IP_CTRL_REG 0x324
-#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328
-#define VIP_PU_PD_CTRL_REG 0x32c
-#define USB_MLB_PU_PD_CTRL_REG 0x330
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338
-#define UART_PU_PD_CTRL_REG 0x33c
-#define ARM_Lock 0x340
-#define SYS_IO_CHAR_REG1 0x344
-#define SYS_IO_CHAR_REG2 0x348
-#define SATA_CORE_ID_REG 0x34c
-#define SATA_CTRL_REG 0x350
-#define I2C_HSFIX_MISC_REG 0x354
-#define SPARE2_RESERVED 0x358
-#define SPARE3_RESERVED 0x35c
-#define MASTER_LOCK_REG 0x368
-#define SYSTEM_CONFIG_STATUS_REG 0x36c
-#define MSP_CLK_CTRL_REG 0x39c
-#define COMPENSATION_REG1 0x3c4
-#define COMPENSATION_REG2 0x3c8
-#define COMPENSATION_REG3 0x3cc
-#define TEST_CTL_REG 0x3d0
-
-/*
- * SECR (OTP) registers
- */
-#define STA2X11_SECR_CR 0x00
-#define STA2X11_SECR_FVR0 0x10
-#define STA2X11_SECR_FVR1 0x14
-
-extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
- enum sta2x11_mfd_plat_dev index,
- void __iomem **regs,
- spinlock_t **lock);
-
-#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 06d3f11dc3c9..a592c8dc716d 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -17,20 +17,30 @@
#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */
#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */
#define STM32_LPTIM_CR 0x10 /* Control Reg */
-#define STM32_LPTIM_CMP 0x14 /* Compare Reg */
+#define STM32_LPTIM_CMP 0x14 /* Compare Reg (MP25 CCR1) */
#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */
#define STM32_LPTIM_CNT 0x1C /* Counter Reg */
+#define STM32_LPTIM_CCMR1 0x2C /* Capture/Compare Mode MP25 */
+#define STM32_LPTIM_CCR2 0x34 /* Compare Reg2 MP25 */
+
+#define STM32_LPTIM_HWCFGR2 0x3EC /* Hardware configuration register 2 - MP25 */
+#define STM32_LPTIM_HWCFGR1 0x3F0 /* Hardware configuration register 1 - MP15 */
+#define STM32_LPTIM_VERR 0x3F4 /* Version identification register - MP15 */
/* STM32_LPTIM_ISR - bit fields */
+#define STM32_LPTIM_DIEROK_ARROK (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2_ARROK (BIT(19) | BIT(4))
#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3)
#define STM32_LPTIM_ARROK BIT(4)
#define STM32_LPTIM_CMPOK BIT(3)
/* STM32_LPTIM_ICR - bit fields */
-#define STM32_LPTIM_ARRMCF BIT(1)
+#define STM32_LPTIM_DIEROKCF_ARROKCF (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2OKCF_ARROKCF (BIT(19) | BIT(4))
#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+#define STM32_LPTIM_ARRMCF BIT(1)
-/* STM32_LPTIM_IER - bit flieds */
+/* STM32_LPTIM_IER - bit fields */
#define STM32_LPTIM_ARRMIE BIT(1)
/* STM32_LPTIM_CR - bit fields */
@@ -53,16 +63,37 @@
/* STM32_LPTIM_ARR */
#define STM32_LPTIM_MAX_ARR 0xFFFF
+/* STM32_LPTIM_CCMR1 */
+#define STM32_LPTIM_CC2P GENMASK(19, 18)
+#define STM32_LPTIM_CC2E BIT(17)
+#define STM32_LPTIM_CC2SEL BIT(16)
+#define STM32_LPTIM_CC1P GENMASK(3, 2)
+#define STM32_LPTIM_CC1E BIT(1)
+#define STM32_LPTIM_CC1SEL BIT(0)
+
+/* STM32_LPTIM_HWCFGR1 */
+#define STM32_LPTIM_HWCFGR1_ENCODER BIT(16)
+
+/* STM32_LPTIM_HWCFGR2 */
+#define STM32_LPTIM_HWCFGR2_CHAN_NUM GENMASK(3, 0)
+
+/* STM32_LPTIM_VERR */
+#define STM32_LPTIM_VERR_23 0x23 /* STM32MP25 */
+
/**
* struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device
* @clk: clock reference for this instance
* @regmap: register map reference for this instance
* @has_encoder: indicates this Low-Power Timer supports encoder mode
+ * @num_cc_chans: indicates the number of capture/compare channels
+ * @version: indicates the major and minor revision of the controller
*/
struct stm32_lptimer {
struct clk *clk;
struct regmap *regmap;
bool has_encoder;
+ unsigned int num_cc_chans;
+ u32 version;
};
#endif
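The MP25 additions let the parent driver discover features at probe time instead of hardcoding them. A hedged sketch using the new registers and fields; real code would gate on the detected version before touching MP25-only registers, and error handling is trimmed for brevity:

#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/stm32-lptimer.h>

/* Sketch: fill the new struct fields from VERR and HWCFGR2. */
static void stm32_lptimer_detect(struct stm32_lptimer *ddata)
{
	unsigned int cfgr2 = 0;

	regmap_read(ddata->regmap, STM32_LPTIM_VERR, &ddata->version);
	regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &cfgr2);
	ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, cfgr2);
}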
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 9eb17481b07f..23b0cae4a9f8 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -12,97 +12,114 @@
#include <linux/dma-mapping.h>
#include <linux/regmap.h>
-#define TIM_CR1 0x00 /* Control Register 1 */
-#define TIM_CR2 0x04 /* Control Register 2 */
-#define TIM_SMCR 0x08 /* Slave mode control reg */
-#define TIM_DIER 0x0C /* DMA/interrupt register */
-#define TIM_SR 0x10 /* Status register */
-#define TIM_EGR 0x14 /* Event Generation Reg */
-#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
-#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
-#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
-#define TIM_CNT 0x24 /* Counter */
-#define TIM_PSC 0x28 /* Prescaler */
-#define TIM_ARR 0x2c /* Auto-Reload Register */
-#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
-#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
-#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
-#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
-#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
-#define TIM_DCR 0x48 /* DMA control register */
-#define TIM_DMAR 0x4C /* DMA register for transfer */
-#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCRx(x) (0x34 + 4 * ((x) - 1)) /* Capt/Comp Register x (x ∈ {1, .. 4}) */
+#define TIM_CCR1 TIM_CCRx(1) /* Capt/Comp Register 1 */
+#define TIM_CCR2 TIM_CCRx(2) /* Capt/Comp Register 2 */
+#define TIM_CCR3 TIM_CCRx(3) /* Capt/Comp Register 3 */
+#define TIM_CCR4 TIM_CCRx(4) /* Capt/Comp Register 4 */
+#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_HWCFGR2 0x3EC /* hardware configuration 2 Reg (MP25) */
+#define TIM_HWCFGR1 0x3F0 /* hardware configuration 1 Reg (MP25) */
+#define TIM_IPIDR 0x3F8 /* IP identification Reg (MP25) */
-#define TIM_CR1_CEN BIT(0) /* Counter Enable */
-#define TIM_CR1_DIR BIT(4) /* Counter Direction */
-#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
-#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
-#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
-#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
-#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
-#define TIM_DIER_UIE BIT(0) /* Update interrupt */
-#define TIM_DIER_CC1IE BIT(1) /* CC1 Interrupt Enable */
-#define TIM_DIER_CC2IE BIT(2) /* CC2 Interrupt Enable */
-#define TIM_DIER_CC3IE BIT(3) /* CC3 Interrupt Enable */
-#define TIM_DIER_CC4IE BIT(4) /* CC4 Interrupt Enable */
-#define TIM_DIER_CC_IE(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt enable */
-#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
-#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */
-#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */
-#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */
-#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */
-#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
-#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
-#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
-#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
-#define TIM_EGR_UG BIT(0) /* Update Generation */
-#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
-#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
-#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
-#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
-#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
-#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
-#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
-#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
-#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
-#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
-#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
-#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
-#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
-#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
-#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
-#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
-#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
-#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */
-#define TIM_CCER_CC2NP BIT(7) /* Capt/Comp 2N Polarity */
-#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
-#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */
-#define TIM_CCER_CC3NP BIT(11) /* Capt/Comp 3N Polarity */
-#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
-#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
-#define TIM_CCER_CC4NP BIT(15) /* Capt/Comp 4N Polarity */
-#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
-#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
-#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
-#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
-#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
-#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_CCxIE(x) BIT(1 + ((x) - 1)) /* CCx Interrupt Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1IE TIM_DIER_CCxIE(1) /* CC1 Interrupt Enable */
+#define TIM_DIER_CC2IE TIM_DIER_CCxIE(2) /* CC2 Interrupt Enable */
+#define TIM_DIER_CC3IE TIM_DIER_CCxIE(3) /* CC3 Interrupt Enable */
+#define TIM_DIER_CC4IE TIM_DIER_CCxIE(4) /* CC4 Interrupt Enable */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CCxDE(x) BIT(9 + ((x) - 1)) /* CCx DMA request Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1DE TIM_DIER_CCxDE(1) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE TIM_DIER_CCxDE(2) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE TIM_DIER_CCxDE(3) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE TIM_DIER_CCxDE(4) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
+#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
+#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
+#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
+#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
+#define TIM_CCER_CCxE(x) BIT(0 + 4 * ((x) - 1)) /* Capt/Comp x out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxP(x) BIT(1 + 4 * ((x) - 1)) /* Capt/Comp x Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNE(x) BIT(2 + 4 * ((x) - 1)) /* Capt/Comp xN out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNP(x) BIT(3 + 4 * ((x) - 1)) /* Capt/Comp xN Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CC1E TIM_CCER_CCxE(1) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P TIM_CCER_CCxP(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE TIM_CCER_CCxNE(1) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP TIM_CCER_CCxNP(1) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E TIM_CCER_CCxE(2) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P TIM_CCER_CCxP(2) /* Capt/Comp 2 Polarity */
+#define TIM_CCER_CC2NE TIM_CCER_CCxNE(2) /* Capt/Comp 2N out Ena */
+#define TIM_CCER_CC2NP TIM_CCER_CCxNP(2) /* Capt/Comp 2N Polarity */
+#define TIM_CCER_CC3E TIM_CCER_CCxE(3) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P TIM_CCER_CCxP(3) /* Capt/Comp 3 Polarity */
+#define TIM_CCER_CC3NE TIM_CCER_CCxNE(3) /* Capt/Comp 3N out Ena */
+#define TIM_CCER_CC3NP TIM_CCER_CCxNP(3) /* Capt/Comp 3N Polarity */
+#define TIM_CCER_CC4E TIM_CCER_CCxE(4) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P TIM_CCER_CCxP(4) /* Capt/Comp 4 Polarity */
+#define TIM_CCER_CC4NE TIM_CCER_CCxNE(4) /* Capt/Comp 4N out Ena */
+#define TIM_CCER_CC4NP TIM_CCER_CCxNP(4) /* Capt/Comp 4N Polarity */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_HWCFGR1_NB_OF_CC GENMASK(3, 0) /* Capture/compare channels */
+#define TIM_HWCFGR1_NB_OF_DT GENMASK(7, 4) /* Complementary outputs & dead-time generators */
+#define TIM_HWCFGR2_CNT_WIDTH GENMASK(15, 8) /* Counter width */
-#define MAX_TIM_PSC 0xFFFF
-#define MAX_TIM_ICPSC 0x3
-#define TIM_CR2_MMS_SHIFT 4
-#define TIM_CR2_MMS2_SHIFT 20
+#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_CR2_MMS2_SHIFT 20
#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
-#define TIM_SMCR_TS_SHIFT 4
-#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+
+#define STM32MP25_TIM_IPIDR 0x00120002
enum stm32_timers_dmas {
STM32_TIMERS_DMA_CH1,
@@ -142,6 +159,7 @@ struct stm32_timers_dma {
struct stm32_timers {
struct clk *clk;
+ u32 ipidr;
struct regmap *regmap;
u32 max_arr;
struct stm32_timers_dma dma; /* Only to be used by the parent */
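
The parameterized TIM_DIER_CCxIE()/TIM_CCER_CCxE() macros above let the capture/compare channel number be computed instead of hard-coded per channel. A minimal sketch of a consumer, assuming the TIM_DIER/TIM_CCER register offset macros defined elsewhere in this header (the helper name is hypothetical):

	/* Enable capture/compare output and its interrupt for channel ch (1..4). */
	static void stm32_tim_enable_cc(struct stm32_timers *ddata, unsigned int ch)
	{
		regmap_set_bits(ddata->regmap, TIM_CCER, TIM_CCER_CCxE(ch));
		regmap_set_bits(ddata->regmap, TIM_DIER, TIM_DIER_CCxIE(ch));
	}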
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index c315903f6dab..aad9c6b50463 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -28,6 +28,8 @@ struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
unsigned int *out_args);
struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
const char *property);
+int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -67,6 +69,12 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
return NULL;
}
+static inline int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
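
of_syscon_register_regmap() lets a driver that already owns a regmap publish it to the syscon framework, so that later phandle lookups resolve to that regmap. A sketch under assumed names (foo_probe() and foo_regmap_cfg are hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		void __iomem *base;
		struct regmap *map;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_cfg);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* Publish the regmap so consumers can find it via syscon lookups. */
		return of_syscon_register_regmap(pdev->dev.of_node, map);
	}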
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index e9e24f4c4578..9b9119c742a2 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -11,9 +11,11 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct regmap;
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
#define ATMEL_HSMC_SETUP(layout, cs) \
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
deleted file mode 100644
index eace8ea6cda0..000000000000
--- a/include/linux/mfd/tmio.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MFD_TMIO_H
-#define MFD_TMIO_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define tmio_ioread8(addr) readb(addr)
-#define tmio_ioread16(addr) readw(addr)
-#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
-#define tmio_ioread32(addr) \
- (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
-
-#define tmio_iowrite8(val, addr) writeb((val), (addr))
-#define tmio_iowrite16(val, addr) writew((val), (addr))
-#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
-#define tmio_iowrite32(val, addr) \
- do { \
- writew((val), (addr)); \
- writew((val) >> 16, (addr) + 2); \
- } while (0)
-
-#define sd_config_write8(base, shift, reg, val) \
- tmio_iowrite8((val), (base) + ((reg) << (shift)))
-#define sd_config_write16(base, shift, reg, val) \
- tmio_iowrite16((val), (base) + ((reg) << (shift)))
-#define sd_config_write32(base, shift, reg, val) \
- do { \
- tmio_iowrite16((val), (base) + ((reg) << (shift))); \
- tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
- } while (0)
-
-/* tmio MMC platform flags */
-/*
- * Some controllers can support a 2-byte block size when the bus width
- * is configured in 4-bit mode.
- */
-#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
-/*
- * Some controllers can support SDIO IRQ signalling.
- */
-#define TMIO_MMC_SDIO_IRQ BIT(2)
-
-/* Some features are only available or tested on R-Car Gen2 or later */
-#define TMIO_MMC_MIN_RCAR2 BIT(3)
-
-/*
- * Some controllers require waiting for the SD bus to become
- * idle before writing to some registers.
- */
-#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-
-/*
- * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
- * we don't have documentation for old variants, so we enable only known good
- * variants with this flag. Can be removed once all variants are known good.
- */
-#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
-
-/*
- * Some controllers have CMD12 automatically
- * issue/non-issue register
- */
-#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
-
-/* Controller has some SDIO status bits which must be 1 */
-#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
-
-/*
- * Some controllers have a 32-bit wide data port register
- */
-#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
-
-/*
- * Some controllers allows to set SDx actual clock
- */
-#define TMIO_MMC_CLK_ACTUAL BIT(10)
-
-/* Some controllers have a CBSY bit */
-#define TMIO_MMC_HAVE_CBSY BIT(11)
-
-struct dma_chan;
-
-/*
- * data for the MMC controller
- */
-struct tmio_mmc_data {
- void *chan_priv_tx;
- void *chan_priv_rx;
- unsigned int hclk;
- unsigned long capabilities;
- unsigned long capabilities2;
- unsigned long flags;
- u32 ocr_mask; /* available voltages */
- dma_addr_t dma_rx_offset;
- unsigned int max_blk_count;
- unsigned short max_segs;
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-};
-
-/*
- * data for the NAND controller
- */
-struct tmio_nand_data {
- struct nand_bbt_descr *badblock_pattern;
- struct mtd_partition *partition;
- unsigned int num_partitions;
- const char *const *part_parsers;
-};
-
-#define FBIO_TMIO_ACC_WRITE 0x7C639300
-#define FBIO_TMIO_ACC_SYNC 0x7C639301
-
-struct tmio_fb_data {
- int (*lcd_set_power)(struct platform_device *fb_dev,
- bool on);
- int (*lcd_mode)(struct platform_device *fb_dev,
- const struct fb_videomode *mode);
- int num_modes;
- struct fb_videomode *modes;
-
- /* in mm: size of screen */
- int height;
- int width;
-};
-
-#endif
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
index e6826e34e2a6..55234e771ba7 100644
--- a/include/linux/mfd/tps65219.h
+++ b/include/linux/mfd/tps65219.h
@@ -1,25 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Functions to access TPS65219 Power Management IC.
+ * Functions to access TPS65215/TPS65219 Power Management Integrated Circuits (PMICs)
*
* Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef MFD_TPS65219_H
#define MFD_TPS65219_H
#include <linux/bitops.h>
-#include <linux/notifier.h>
+#include <linux/regmap.h>
#include <linux/regulator/driver.h>
-struct regmap;
-struct regmap_irq_chip_data;
-
-#define TPS65219_1V35 1350000
-#define TPS65219_1V8 1800000
-
-/* TPS chip id list */
-#define TPS65219 0xF0
+/* Chip ID list */
+enum pmic_id {
+ TPS65214,
+ TPS65215,
+ TPS65219,
+};
/* I2C ID for TPS65219 part */
#define TPS65219_I2C_ID 0x24
@@ -29,15 +28,23 @@ struct regmap_irq_chip_data;
#define TPS65219_REG_NVM_ID 0x01
#define TPS65219_REG_ENABLE_CTRL 0x02
#define TPS65219_REG_BUCKS_CONFIG 0x03
+#define TPS65214_REG_LOCK 0x03
#define TPS65219_REG_LDO4_VOUT 0x04
+#define TPS65214_REG_LDO1_VOUT_STBY 0x04
#define TPS65219_REG_LDO3_VOUT 0x05
+#define TPS65215_REG_LDO2_VOUT 0x05
+#define TPS65214_REG_LDO1_VOUT 0x05
#define TPS65219_REG_LDO2_VOUT 0x06
+#define TPS65214_REG_LDO2_VOUT 0x06
#define TPS65219_REG_LDO1_VOUT 0x07
+#define TPS65214_REG_LDO2_VOUT_STBY 0x07
#define TPS65219_REG_BUCK3_VOUT 0x8
#define TPS65219_REG_BUCK2_VOUT 0x9
#define TPS65219_REG_BUCK1_VOUT 0xA
#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0xB
#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0xC
+#define TPS65215_REG_LDO2_SEQUENCE_SLOT 0xC
+#define TPS65214_REG_LDO1_SEQUENCE_SLOT 0xC
#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0xD
#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0xE
#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF
@@ -46,15 +53,21 @@ struct regmap_irq_chip_data;
#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12
#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13
#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14
+#define TPS65214_REG_GPIO_GPI_SEQUENCE_SLOT 0x14
#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15
+#define TPS65214_REG_GPO_SEQUENCE_SLOT 0x15
#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16
#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17
+/* _SLOT_DURATION_3 doesn't apply to TPS65215 */
#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18
#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19
+#define TPS65214_REG_BUCK3_VOUT_STBY 0x19
#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A
#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B
#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C
+#define TPS65214_REG_BUCK2_VOUT_STBY 0x1C
#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D
+#define TPS65214_REG_BUCK1_VOUT_STBY 0x1D
#define TPS65219_REG_GENERAL_CONFIG 0x1E
#define TPS65219_REG_MFP_1_CONFIG 0x1F
#define TPS65219_REG_MFP_2_CONFIG 0x20
@@ -72,9 +85,19 @@ struct regmap_irq_chip_data;
#define TPS65219_REG_DISCHARGE_CONFIG 0x2A
/* main irq registers */
#define TPS65219_REG_INT_SOURCE 0x2B
-/* 'sub irq' registers */
+
+/* TPS65219 'sub irq' registers */
#define TPS65219_REG_INT_LDO_3_4 0x2C
#define TPS65219_REG_INT_LDO_1_2 0x2D
+
+/* TPS65215 specific 'sub irq' registers */
+#define TPS65215_REG_INT_LDO_2 0x2C
+#define TPS65215_REG_INT_LDO_1 0x2D
+
+/* TPS65214 specific 'sub irq' register */
+#define TPS65214_REG_INT_LDO_1_2 0x2D
+
+/* Common TPS65215 & TPS65219 'sub irq' registers */
#define TPS65219_REG_INT_BUCK_3 0x2E
#define TPS65219_REG_INT_BUCK_1_2 0x2F
#define TPS65219_REG_INT_SYSTEM 0x30
@@ -91,6 +114,17 @@ struct regmap_irq_chip_data;
#define TPS65219_REG_INT_TO_RV_POS 6
#define TPS65219_REG_INT_PB_POS 7
+#define TPS65215_REG_INT_LDO_2_POS 0
+#define TPS65215_REG_INT_LDO_1_POS 1
+
+#define TPS65214_REG_INT_LDO_1_2_POS 0
+#define TPS65214_REG_INT_BUCK_3_POS 1
+#define TPS65214_REG_INT_BUCK_1_2_POS 2
+#define TPS65214_REG_INT_SYS_POS 3
+#define TPS65214_REG_INT_RV_POS 4
+#define TPS65214_REG_INT_TO_RV_POS 5
+#define TPS65214_REG_INT_PB_POS 6
+
#define TPS65219_REG_USER_NVM_CMD 0x34
#define TPS65219_REG_POWER_UP_STATUS 0x35
#define TPS65219_REG_SPARE_2 0x36
@@ -112,6 +146,8 @@ struct regmap_irq_chip_data;
#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3)
#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4)
#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5)
+#define TPS65215_ENABLE_LDO2_EN_MASK BIT(5)
+#define TPS65214_ENABLE_LDO1_EN_MASK BIT(5)
#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6)
/* power ON-OFF sequence slot */
#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0)
@@ -163,20 +199,27 @@ struct regmap_irq_chip_data;
#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1)
#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7)
/* UnderVoltage - Short to GND - OverCurrent*/
-/* LDO3-4 */
+/* LDO3-4: only for TPS65219 */
#define TPS65219_INT_LDO3_SCG_MASK BIT(0)
#define TPS65219_INT_LDO3_OC_MASK BIT(1)
#define TPS65219_INT_LDO3_UV_MASK BIT(2)
#define TPS65219_INT_LDO4_SCG_MASK BIT(3)
#define TPS65219_INT_LDO4_OC_MASK BIT(4)
#define TPS65219_INT_LDO4_UV_MASK BIT(5)
-/* LDO1-2 */
+/* LDO1-2: TPS65214 & TPS65219 */
#define TPS65219_INT_LDO1_SCG_MASK BIT(0)
#define TPS65219_INT_LDO1_OC_MASK BIT(1)
#define TPS65219_INT_LDO1_UV_MASK BIT(2)
#define TPS65219_INT_LDO2_SCG_MASK BIT(3)
#define TPS65219_INT_LDO2_OC_MASK BIT(4)
#define TPS65219_INT_LDO2_UV_MASK BIT(5)
+/* TPS65215 LDO1-2 */
+#define TPS65215_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO1_OC_MASK BIT(1)
+#define TPS65215_INT_LDO1_UV_MASK BIT(2)
+#define TPS65215_INT_LDO2_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO2_OC_MASK BIT(1)
+#define TPS65215_INT_LDO2_UV_MASK BIT(2)
/* BUCK3 */
#define TPS65219_INT_BUCK3_SCG_MASK BIT(0)
#define TPS65219_INT_BUCK3_OC_MASK BIT(1)
@@ -191,12 +234,13 @@ struct regmap_irq_chip_data;
#define TPS65219_INT_BUCK2_OC_MASK BIT(5)
#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6)
#define TPS65219_INT_BUCK2_UV_MASK BIT(7)
-/* Thermal Sensor */
+/* Thermal Sensor: TPS65219/TPS65215 */
#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0)
+#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
+/* Thermal Sensor: TPS65219/TPS65215/TPS65214 */
#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1)
#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2)
#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3)
-#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5)
#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6)
#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7)
@@ -207,6 +251,8 @@ struct regmap_irq_chip_data;
#define TPS65219_INT_LDO1_RV_MASK BIT(3)
#define TPS65219_INT_LDO2_RV_MASK BIT(4)
#define TPS65219_INT_LDO3_RV_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_MASK BIT(5)
+#define TPS65214_INT_LDO2_RV_MASK BIT(5)
#define TPS65219_INT_LDO4_RV_MASK BIT(6)
/* Residual Voltage ShutDown */
#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0)
@@ -215,6 +261,8 @@ struct regmap_irq_chip_data;
#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3)
#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4)
#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_SD_MASK BIT(5)
+#define TPS65214_INT_LDO1_RV_SD_MASK BIT(5)
#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6)
#define TPS65219_INT_TIMEOUT_MASK BIT(7)
/* Power Button */
@@ -240,7 +288,15 @@ enum {
TPS65219_INT_LDO4_SCG,
TPS65219_INT_LDO4_OC,
TPS65219_INT_LDO4_UV,
- /* LDO1-2 */
+	/* TPS65215 LDO1 */
+ TPS65215_INT_LDO1_SCG,
+ TPS65215_INT_LDO1_OC,
+ TPS65215_INT_LDO1_UV,
+	/* TPS65215 LDO2 */
+ TPS65215_INT_LDO2_SCG,
+ TPS65215_INT_LDO2_OC,
+ TPS65215_INT_LDO2_UV,
+ /* LDO1-2: TPS65219/TPS65214 */
TPS65219_INT_LDO1_SCG,
TPS65219_INT_LDO1_OC,
TPS65219_INT_LDO1_UV,
@@ -276,6 +332,8 @@ enum {
TPS65219_INT_BUCK3_RV,
TPS65219_INT_LDO1_RV,
TPS65219_INT_LDO2_RV,
+ TPS65215_INT_LDO2_RV,
+ TPS65214_INT_LDO2_RV,
TPS65219_INT_LDO3_RV,
TPS65219_INT_LDO4_RV,
/* Residual Voltage ShutDown */
@@ -283,6 +341,8 @@ enum {
TPS65219_INT_BUCK2_RV_SD,
TPS65219_INT_BUCK3_RV_SD,
TPS65219_INT_LDO1_RV_SD,
+ TPS65214_INT_LDO1_RV_SD,
+ TPS65215_INT_LDO2_RV_SD,
TPS65219_INT_LDO2_RV_SD,
TPS65219_INT_LDO3_RV_SD,
TPS65219_INT_LDO4_RV_SD,
@@ -292,6 +352,23 @@ enum {
TPS65219_INT_PB_RISING_EDGE_DETECT,
};
+enum tps65214_regulator_id {
+	/*
+	 * DCDCs are the same as on TPS65219.
+	 * LDO1 maps to TPS65219's LDO3.
+	 * LDO2 is the same as on TPS65219.
+	 */
+ TPS65214_LDO_1 = 3,
+ TPS65214_LDO_2 = 4,
+};
+
+enum tps65215_regulator_id {
+	/* DCDCs are the same as on TPS65219 */
+	/* LDO1 is the same as on TPS65219 */
+ TPS65215_LDO_2 = 4,
+};
+
enum tps65219_regulator_id {
/* DCDC's */
TPS65219_BUCK_1,
@@ -305,11 +382,40 @@ enum tps65219_regulator_id {
};
/* Number of step-down converters available */
-#define TPS65219_NUM_DCDC 3
+#define TPS6521X_NUM_BUCKS 3
/* Number of LDO voltage regulators available */
#define TPS65219_NUM_LDO 4
+#define TPS65215_NUM_LDO 2
+#define TPS65214_NUM_LDO 2
/* Number of total regulators available */
-#define TPS65219_NUM_REGULATOR (TPS65219_NUM_DCDC + TPS65219_NUM_LDO)
+#define TPS65219_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65219_NUM_LDO)
+#define TPS65215_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65215_NUM_LDO)
+#define TPS65214_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65214_NUM_LDO)
+
+/* Define the TPS65214 IRQ numbers */
+enum tps65214_irqs {
+ /* INT source registers */
+ TPS65214_TO_RV_SD_SET_IRQ,
+ TPS65214_RV_SET_IRQ,
+ TPS65214_SYS_SET_IRQ,
+ TPS65214_BUCK_1_2_SET_IRQ,
+ TPS65214_BUCK_3_SET_IRQ,
+ TPS65214_LDO_1_2_SET_IRQ,
+ TPS65214_PB_SET_IRQ = 7,
+};
+
+/* Define the TPS65215 IRQ numbers */
+enum tps65215_irqs {
+ /* INT source registers */
+ TPS65215_TO_RV_SD_SET_IRQ,
+ TPS65215_RV_SET_IRQ,
+ TPS65215_SYS_SET_IRQ,
+ TPS65215_BUCK_1_2_SET_IRQ,
+ TPS65215_BUCK_3_SET_IRQ,
+ TPS65215_LDO_1_SET_IRQ,
+ TPS65215_LDO_2_SET_IRQ,
+ TPS65215_PB_SET_IRQ,
+};
/* Define the TPS65219 IRQ numbers */
enum tps65219_irqs {
@@ -332,14 +438,12 @@ enum tps65219_irqs {
* @dev: MFD device
* @regmap: Regmap for accessing the device registers
* @irq_data: Regmap irq data used for the irq chip
- * @nb: notifier block for the restart handler
*/
struct tps65219 {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_data;
- struct notifier_block nb;
};
#endif /* MFD_TPS65219_H */
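
Because three variants share this header while some register addresses differ per chip, a driver would typically key register selection off enum pmic_id. A hedged sketch (the helper is hypothetical; the register macros are the ones defined above):

	static unsigned int ldo2_vout_reg(enum pmic_id chip)
	{
		switch (chip) {
		case TPS65214:
			return TPS65214_REG_LDO2_VOUT;	/* 0x06 */
		case TPS65215:
			return TPS65215_REG_LDO2_VOUT;	/* 0x05 */
		default:
			return TPS65219_REG_LDO2_VOUT;	/* 0x06 */
		}
	}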
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 860ec0a16c96..e5373c302722 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -314,6 +314,5 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-void tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h
index 16543fd4d83e..021db8875963 100644
--- a/include/linux/mfd/tps6594.h
+++ b/include/linux/mfd/tps6594.h
@@ -19,6 +19,7 @@ enum pmic_id {
TPS6593,
LP8764,
TPS65224,
+ TPS652G1,
};
/* Macro to get page index from register address */
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 85dc406173db..b31e07fa4d51 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -205,27 +205,6 @@ int twl_get_hfclk_rate(void);
int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
-/* Card detect Configuration for MMC1 Controller on OMAP4 */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect_config(void);
-#else
-static inline int twl6030_mmc_card_detect_config(void)
-{
- pr_debug("twl6030_mmc_card_detect_config not supported\n");
- return 0;
-}
-#endif
-
-/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect(struct device *dev, int slot);
-#else
-static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- pr_debug("Call back twl6030_mmc_card_detect not supported\n");
- return -EIO;
-}
-#endif
/*----------------------------------------------------------------------*/
/*
diff --git a/include/linux/mfd/upboard-fpga.h b/include/linux/mfd/upboard-fpga.h
new file mode 100644
index 000000000000..12231e40f5da
--- /dev/null
+++ b/include/linux/mfd/upboard-fpga.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UP Board CPLD/FPGA driver
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_UPBOARD_FPGA_H
+#define __LINUX_MFD_UPBOARD_FPGA_H
+
+#define UPBOARD_REGISTER_SIZE 16
+
+enum upboard_fpgareg {
+ UPBOARD_REG_PLATFORM_ID = 0x10,
+ UPBOARD_REG_FIRMWARE_ID = 0x11,
+ UPBOARD_REG_FUNC_EN0 = 0x20,
+ UPBOARD_REG_FUNC_EN1 = 0x21,
+ UPBOARD_REG_GPIO_EN0 = 0x30,
+ UPBOARD_REG_GPIO_EN1 = 0x31,
+ UPBOARD_REG_GPIO_EN2 = 0x32,
+ UPBOARD_REG_GPIO_DIR0 = 0x40,
+ UPBOARD_REG_GPIO_DIR1 = 0x41,
+ UPBOARD_REG_GPIO_DIR2 = 0x42,
+ UPBOARD_REG_MAX,
+};
+
+enum upboard_fpga_type {
+ UPBOARD_UP_FPGA,
+ UPBOARD_UP2_FPGA,
+};
+
+struct upboard_fpga_data {
+ enum upboard_fpga_type type;
+ const struct regmap_config *regmap_config;
+};
+
+struct upboard_fpga {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *clear_gpio;
+ struct gpio_desc *strobe_gpio;
+ struct gpio_desc *datain_gpio;
+ struct gpio_desc *dataout_gpio;
+ unsigned int firmware_version;
+ const struct upboard_fpga_data *fpga_data;
+};
+
+#endif /* __LINUX_MFD_UPBOARD_FPGA_H */
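
A short sketch of how a consumer might use the shared regmap in struct upboard_fpga (the helper name is hypothetical):

	static int upboard_cache_fw_version(struct upboard_fpga *fpga)
	{
		unsigned int fw;
		int ret;

		ret = regmap_read(fpga->regmap, UPBOARD_REG_FIRMWARE_ID, &fw);
		if (ret)
			return ret;

		fpga->firmware_version = fw;
		return 0;
	}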
diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h
deleted file mode 100644
index c28cf76d5c31..000000000000
--- a/include/linux/mfd/wl1273-core.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/linux/mfd/wl1273-core.h
- *
- * Some definitions for the wl1273 radio receiver/transmitter chip.
- *
- * Copyright (C) 2010 Nokia Corporation
- * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- */
-
-#ifndef WL1273_CORE_H
-#define WL1273_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-
-#define WL1273_FM_DRIVER_NAME "wl1273-fm"
-#define RX71_FM_I2C_ADDR 0x22
-
-#define WL1273_STEREO_GET 0
-#define WL1273_RSSI_LVL_GET 1
-#define WL1273_IF_COUNT_GET 2
-#define WL1273_FLAG_GET 3
-#define WL1273_RDS_SYNC_GET 4
-#define WL1273_RDS_DATA_GET 5
-#define WL1273_FREQ_SET 10
-#define WL1273_AF_FREQ_SET 11
-#define WL1273_MOST_MODE_SET 12
-#define WL1273_MOST_BLEND_SET 13
-#define WL1273_DEMPH_MODE_SET 14
-#define WL1273_SEARCH_LVL_SET 15
-#define WL1273_BAND_SET 16
-#define WL1273_MUTE_STATUS_SET 17
-#define WL1273_RDS_PAUSE_LVL_SET 18
-#define WL1273_RDS_PAUSE_DUR_SET 19
-#define WL1273_RDS_MEM_SET 20
-#define WL1273_RDS_BLK_B_SET 21
-#define WL1273_RDS_MSK_B_SET 22
-#define WL1273_RDS_PI_MASK_SET 23
-#define WL1273_RDS_PI_SET 24
-#define WL1273_RDS_SYSTEM_SET 25
-#define WL1273_INT_MASK_SET 26
-#define WL1273_SEARCH_DIR_SET 27
-#define WL1273_VOLUME_SET 28
-#define WL1273_AUDIO_ENABLE 29
-#define WL1273_PCM_MODE_SET 30
-#define WL1273_I2S_MODE_CONFIG_SET 31
-#define WL1273_POWER_SET 32
-#define WL1273_INTX_CONFIG_SET 33
-#define WL1273_PULL_EN_SET 34
-#define WL1273_HILO_SET 35
-#define WL1273_SWITCH2FREF 36
-#define WL1273_FREQ_DRIFT_REPORT 37
-
-#define WL1273_PCE_GET 40
-#define WL1273_FIRM_VER_GET 41
-#define WL1273_ASIC_VER_GET 42
-#define WL1273_ASIC_ID_GET 43
-#define WL1273_MAN_ID_GET 44
-#define WL1273_TUNER_MODE_SET 45
-#define WL1273_STOP_SEARCH 46
-#define WL1273_RDS_CNTRL_SET 47
-
-#define WL1273_WRITE_HARDWARE_REG 100
-#define WL1273_CODE_DOWNLOAD 101
-#define WL1273_RESET 102
-
-#define WL1273_FM_POWER_MODE 254
-#define WL1273_FM_INTERRUPT 255
-
-/* Transmitter API */
-
-#define WL1273_CHANL_SET 55
-#define WL1273_SCAN_SPACING_SET 56
-#define WL1273_REF_SET 57
-#define WL1273_POWER_ENB_SET 90
-#define WL1273_POWER_ATT_SET 58
-#define WL1273_POWER_LEV_SET 59
-#define WL1273_AUDIO_DEV_SET 60
-#define WL1273_PILOT_DEV_SET 61
-#define WL1273_RDS_DEV_SET 62
-#define WL1273_PUPD_SET 91
-#define WL1273_AUDIO_IO_SET 63
-#define WL1273_PREMPH_SET 64
-#define WL1273_MONO_SET 66
-#define WL1273_MUTE 92
-#define WL1273_MPX_LMT_ENABLE 67
-#define WL1273_PI_SET 93
-#define WL1273_ECC_SET 69
-#define WL1273_PTY 70
-#define WL1273_AF 71
-#define WL1273_DISPLAY_MODE 74
-#define WL1273_RDS_REP_SET 77
-#define WL1273_RDS_CONFIG_DATA_SET 98
-#define WL1273_RDS_DATA_SET 99
-#define WL1273_RDS_DATA_ENB 94
-#define WL1273_TA_SET 78
-#define WL1273_TP_SET 79
-#define WL1273_DI_SET 80
-#define WL1273_MS_SET 81
-#define WL1273_PS_SCROLL_SPEED 82
-#define WL1273_TX_AUDIO_LEVEL_TEST 96
-#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73
-#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
-#define WL1273_RX_ANTENNA_SELECT 87
-#define WL1273_I2C_DEV_ADDR_SET 86
-#define WL1273_REF_ERR_CALIB_PARAM_SET 88
-#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89
-#define WL1273_SOC_INT_TRIGGER 52
-#define WL1273_SOC_AUDIO_PATH_SET 83
-#define WL1273_SOC_PCMI_OVERRIDE 84
-#define WL1273_SOC_I2S_OVERRIDE 85
-#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95
-#define WL1273_RSSI_BLOCK_SCAN_START 97
-#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5
-#define WL1273_READ_FMANT_TUNE_VALUE 104
-
-#define WL1273_RDS_OFF 0
-#define WL1273_RDS_ON 1
-#define WL1273_RDS_RESET 2
-
-#define WL1273_AUDIO_DIGITAL 0
-#define WL1273_AUDIO_ANALOG 1
-
-#define WL1273_MODE_RX BIT(0)
-#define WL1273_MODE_TX BIT(1)
-#define WL1273_MODE_OFF BIT(2)
-#define WL1273_MODE_SUSPENDED BIT(3)
-
-#define WL1273_RADIO_CHILD BIT(0)
-#define WL1273_CODEC_CHILD BIT(1)
-
-#define WL1273_RX_MONO 1
-#define WL1273_RX_STEREO 0
-#define WL1273_TX_MONO 0
-#define WL1273_TX_STEREO 1
-
-#define WL1273_MAX_VOLUME 0xffff
-#define WL1273_DEFAULT_VOLUME 0x78b8
-
-/* I2S protocol, left channel first, data width 16 bits */
-#define WL1273_PCM_DEF_MODE 0x00
-
-/* Rx */
-#define WL1273_AUDIO_ENABLE_I2S BIT(0)
-#define WL1273_AUDIO_ENABLE_ANALOG BIT(1)
-
-/* Tx */
-#define WL1273_AUDIO_IO_SET_ANALOG 0
-#define WL1273_AUDIO_IO_SET_I2S 1
-
-#define WL1273_PUPD_SET_OFF 0x00
-#define WL1273_PUPD_SET_ON 0x01
-#define WL1273_PUPD_SET_RETENTION 0x10
-
-/* I2S mode */
-#define WL1273_IS2_WIDTH_32 0x0
-#define WL1273_IS2_WIDTH_40 0x1
-#define WL1273_IS2_WIDTH_22_23 0x2
-#define WL1273_IS2_WIDTH_23_22 0x3
-#define WL1273_IS2_WIDTH_48 0x4
-#define WL1273_IS2_WIDTH_50 0x5
-#define WL1273_IS2_WIDTH_60 0x6
-#define WL1273_IS2_WIDTH_64 0x7
-#define WL1273_IS2_WIDTH_80 0x8
-#define WL1273_IS2_WIDTH_96 0x9
-#define WL1273_IS2_WIDTH_128 0xa
-#define WL1273_IS2_WIDTH 0xf
-
-#define WL1273_IS2_FORMAT_STD (0x0 << 4)
-#define WL1273_IS2_FORMAT_LEFT (0x1 << 4)
-#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4)
-#define WL1273_IS2_FORMAT_USER (0x3 << 4)
-
-#define WL1273_IS2_MASTER (0x0 << 6)
-#define WL1273_IS2_SLAVEW (0x1 << 6)
-
-#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7)
-#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7)
-
-#define WL1273_IS2_SDOWS_RR (0x0 << 8)
-#define WL1273_IS2_SDOWS_RF (0x1 << 8)
-#define WL1273_IS2_SDOWS_FR (0x2 << 8)
-#define WL1273_IS2_SDOWS_FF (0x3 << 8)
-
-#define WL1273_IS2_TRI_OPT (0x0 << 10)
-#define WL1273_IS2_TRI_ALWAYS (0x1 << 10)
-
-#define WL1273_IS2_RATE_48K (0x0 << 12)
-#define WL1273_IS2_RATE_44_1K (0x1 << 12)
-#define WL1273_IS2_RATE_32K (0x2 << 12)
-#define WL1273_IS2_RATE_22_05K (0x4 << 12)
-#define WL1273_IS2_RATE_16K (0x5 << 12)
-#define WL1273_IS2_RATE_12K (0x8 << 12)
-#define WL1273_IS2_RATE_11_025 (0x9 << 12)
-#define WL1273_IS2_RATE_8K (0xa << 12)
-#define WL1273_IS2_RATE (0xf << 12)
-
-#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \
- WL1273_IS2_FORMAT_STD | \
- WL1273_IS2_MASTER | \
- WL1273_IS2_TRI_AFTER_SENDING | \
- WL1273_IS2_SDOWS_RR | \
- WL1273_IS2_TRI_OPT | \
- WL1273_IS2_RATE_48K)
-
-#define SCHAR_MIN (-128)
-#define SCHAR_MAX 127
-
-#define WL1273_FR_EVENT BIT(0)
-#define WL1273_BL_EVENT BIT(1)
-#define WL1273_RDS_EVENT BIT(2)
-#define WL1273_BBLK_EVENT BIT(3)
-#define WL1273_LSYNC_EVENT BIT(4)
-#define WL1273_LEV_EVENT BIT(5)
-#define WL1273_IFFR_EVENT BIT(6)
-#define WL1273_PI_EVENT BIT(7)
-#define WL1273_PD_EVENT BIT(8)
-#define WL1273_STIC_EVENT BIT(9)
-#define WL1273_MAL_EVENT BIT(10)
-#define WL1273_POW_ENB_EVENT BIT(11)
-#define WL1273_SCAN_OVER_EVENT BIT(12)
-#define WL1273_ERROR_EVENT BIT(13)
-
-#define TUNER_MODE_STOP_SEARCH 0
-#define TUNER_MODE_PRESET 1
-#define TUNER_MODE_AUTO_SEEK 2
-#define TUNER_MODE_AF 3
-#define TUNER_MODE_AUTO_SEEK_PI 4
-#define TUNER_MODE_AUTO_SEEK_BULK 5
-
-#define RDS_BLOCK_SIZE 3
-
-struct wl1273_fm_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (void);
- void (*disable) (void);
-
- u8 forbidden_modes;
- unsigned int children;
-};
-
-#define WL1273_FM_CORE_CELLS 2
-
-#define WL1273_BAND_OTHER 0
-#define WL1273_BAND_JAPAN 1
-
-#define WL1273_BAND_JAPAN_LOW 76000
-#define WL1273_BAND_JAPAN_HIGH 90000
-#define WL1273_BAND_OTHER_LOW 87500
-#define WL1273_BAND_OTHER_HIGH 108000
-
-#define WL1273_BAND_TX_LOW 76000
-#define WL1273_BAND_TX_HIGH 108000
-
-struct wl1273_core {
- struct mfd_cell cells[WL1273_FM_CORE_CELLS];
- struct wl1273_fm_platform_data *pdata;
-
- unsigned int mode;
- unsigned int i2s_mode;
- unsigned int volume;
- unsigned int audio_mode;
- unsigned int channel_number;
- struct mutex lock; /* for serializing fm radio operations */
-
- struct i2c_client *client;
-
- int (*read)(struct wl1273_core *core, u8, u16 *);
- int (*write)(struct wl1273_core *core, u8, u16);
- int (*write_data)(struct wl1273_core *core, u8 *, u16);
- int (*set_audio)(struct wl1273_core *core, unsigned int);
- int (*set_volume)(struct wl1273_core *core, unsigned int);
-};
-
-#endif /* ifndef WL1273_CORE_H */
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index a3241e4d7548..5f70d3b5d1b1 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -8,11 +8,12 @@
#ifndef __LINUX_MFD_WM8350_CORE_H_
#define __LINUX_MFD_WM8350_CORE_H_
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -21,6 +22,9 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
+struct device;
+struct platform_device;
+
/*
* Register values.
*/
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index b573f15762f8..dd372b0123a6 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -290,6 +290,7 @@ struct mhi_controller_config {
/**
* struct mhi_controller - Master MHI controller structure
+ * @name: Device name of the MHI controller
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
* controller (required)
* @mhi_dev: MHI device instance for the controller
@@ -367,6 +368,7 @@ struct mhi_controller_config {
* they can be populated depending on the usecase.
*/
struct mhi_controller {
+ const char *name;
struct device *cntrl_dev;
struct mhi_device *mhi_dev;
struct dentry *debugfs_dentry;
@@ -526,7 +528,7 @@ struct mhi_driver {
struct device_driver driver;
};
-#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_driver(drv) container_of_const(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
/**
@@ -719,12 +721,6 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
/**
- * mhi_device_get - Disable device low power mode
- * @mhi_dev: Device associated with the channel
- */
-void mhi_device_get(struct mhi_device *mhi_dev);
-
-/**
* mhi_device_get_sync - Disable device low power mode. Synchronously
* take the controller out of suspended state
* @mhi_dev: Device associated with the channel
@@ -775,18 +771,6 @@ int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
- * mhi_queue_dma - Send or receive DMA mapped buffers from client device
- * over MHI channel
- * @mhi_dev: Device associated with the channels
- * @dir: DMA direction for the channel
- * @mhi_buf: Buffer for holding the DMA mapped data
- * @len: Buffer length
- * @mflags: MHI transfer flags used for the transfer
- */
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
-
-/**
* mhi_queue_buf - Send or receive raw buffers from client device over MHI
* channel
* @mhi_dev: Device associated with the channels
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index 11bf3212f782..7b40fc8cbe77 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -221,7 +221,7 @@ struct mhi_ep_driver {
};
#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
-#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+#define to_mhi_ep_driver(drv) container_of_const(drv, struct mhi_ep_driver, driver)
/*
* module_mhi_ep_driver() - Helper macro for drivers that don't do
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 591bf5b5e8dc..ca691641788b 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -32,6 +32,7 @@
#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_LAN8841 0x00221650
+#define PHY_ID_LAN8842 0x002216C0
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
@@ -44,7 +45,6 @@
#define MICREL_PHY_50MHZ_CLK BIT(0)
#define MICREL_PHY_FXEN BIT(1)
#define MICREL_KSZ8_P1_ERRATA BIT(2)
-#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 2ce13e8a309b..26ca00c325d9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,14 +12,6 @@ typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
-/*
- * Return values from addresss_space_operations.migratepage():
- * - negative errno on page migration failure;
- * - zero on page migration success;
- */
-#define MIGRATEPAGE_SUCCESS 0
-#define MIGRATEPAGE_UNMAP 1
-
/**
* struct movable_operations - Driver page migration
* @isolate_page:
@@ -35,8 +27,7 @@ struct migration_target_control;
* @src page. The driver should copy the contents of the
* @src page to the @dst page and set up the fields of @dst page.
* Both pages are locked.
- * If page migration is successful, the driver should call
- * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If page migration is successful, the driver should return 0.
* If the driver cannot migrate the page at the moment, it can return
* -EAGAIN. The VM interprets this as a temporary migration failure and
* will retry it later. Any other error value is a permanent migration
@@ -63,24 +54,23 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
void putback_movable_pages(struct list_head *l);
-int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
- struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
-void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
#else
@@ -92,7 +82,9 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
+ { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{ return false; }
static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -100,53 +92,24 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
{
return -ENOSYS;
}
-
-#endif /* CONFIG_MIGRATION */
-
-#ifdef CONFIG_COMPACTION
-bool PageMovable(struct page *page);
-void __SetPageMovable(struct page *page, const struct movable_operations *ops);
-void __ClearPageMovable(struct page *page);
-#else
-static inline bool PageMovable(struct page *page) { return false; }
-static inline void __SetPageMovable(struct page *page,
- const struct movable_operations *ops)
-{
-}
-static inline void __ClearPageMovable(struct page *page)
-{
-}
-#endif
-
-static inline bool folio_test_movable(struct folio *folio)
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
- return PageMovable(&folio->page);
-}
-
-static inline
-const struct movable_operations *folio_movable_ops(struct folio *folio)
-{
- VM_BUG_ON(!__folio_test_movable(folio));
-
- return (const struct movable_operations *)
- ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
+ return -ENOSYS;
}
-static inline
-const struct movable_operations *page_movable_ops(struct page *page)
-{
- VM_BUG_ON(!__PageMovable(page));
-
- return (const struct movable_operations *)
- ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
-}
+#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
- int node);
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
#else
-static inline int migrate_misplaced_folio(struct folio *folio,
- struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
+{
+ return -EAGAIN; /* can't migrate now */
+}
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
return -EAGAIN; /* can't migrate now */
}
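
The prepare/migrate split lets the caller isolate the folio while it still holds its own locks and commit to migration only afterwards. A hedged sketch of the calling sequence (caller-side names are hypothetical):

	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
		goto out;	/* folio could not be isolated right now */
	/* ...the caller drops its page table lock here... */
	err = migrate_misplaced_folio(folio, target_nid);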
@@ -162,6 +125,7 @@ static inline int migrate_misplaced_folio(struct folio *folio,
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_WRITE (1UL << 3)
+#define MIGRATE_PFN_COMPOUND (1UL << 4)
#define MIGRATE_PFN_SHIFT 6
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
@@ -180,6 +144,7 @@ enum migrate_vma_direction {
MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+ MIGRATE_VMA_SELECT_COMPOUND = 1 << 3,
};
struct migrate_vma {
@@ -200,8 +165,8 @@ struct migrate_vma {
unsigned long end;
/*
- * Set to the owner value also stored in page->pgmap->owner for
- * migrating out of device private memory. The flags also need to
+ * Set to the owner value also stored in page_pgmap(page)->owner
+ * for migrating out of device private memory. The flags also need to
* be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
* The caller should always set this field when using mmu notifier
* callbacks to avoid device MMU invalidations for device private
@@ -222,6 +187,7 @@ void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
unsigned long npages);
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index f37cc03f9369..265c4328b36a 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -7,16 +7,11 @@
* on most operations but not ->writepage as the potential stall time
* is too significant
* MIGRATE_SYNC will block when migrating pages
- * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages
- * with the CPU. Instead, page copy happens outside the migratepage()
- * callback and is likely using a DMA engine. See migrate_vma() and HMM
- * (mm/hmm.c) for users of this mode.
*/
enum migrate_mode {
MIGRATE_ASYNC,
MIGRATE_SYNC_LIGHT,
MIGRATE_SYNC,
- MIGRATE_SYNC_NO_COPY,
};
enum migrate_reason {
@@ -29,6 +24,7 @@ enum migrate_reason {
MR_CONTIG_RANGE,
MR_LONGTERM_PIN,
MR_DEMOTION,
+ MR_DAMON,
MR_TYPES
};
diff --git a/include/linux/mii.h b/include/linux/mii.h
index d5a959ce4877..b8f26d4513c3 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -140,7 +140,7 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_ADVERTISE register.
*/
-static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -215,7 +215,8 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_CTRL1000 register when in 1000T mode.
*/
-static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+static inline u32
+linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -453,7 +454,7 @@ static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
* A small helper function that translates linkmode advertising to LVL
* pause capabilities.
*/
-static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising)
{
u32 lcl_adv = 0;
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index 26b04f73f214..3102c425c8e0 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -27,7 +27,9 @@ struct phy_device;
* as soon as a timestamp becomes available. One of the PTP_CLASS_
* values is passed in 'type'.
*
- * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ * @hwtstamp_set: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @hwtstamp_get: Handles SIOCGHWTSTAMP ioctl for hardware time stamping.
*
* @link_state: Allows the device to respond to changes in the link
* state. The caller invokes this function while holding
@@ -51,15 +53,18 @@ struct mii_timestamper {
void (*txtstamp)(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type);
- int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct kernel_hwtstamp_config *kernel_config,
- struct netlink_ext_ack *extack);
+ int (*hwtstamp_set)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
+
+ int (*hwtstamp_get)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
int (*ts_info)(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
struct device *device;
};
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
index d52daf45861b..79ddc0adbf2b 100644
--- a/include/linux/min_heap.h
+++ b/include/linux/min_heap.h
@@ -6,131 +6,472 @@
#include <linux/string.h>
#include <linux/types.h>
+/*
+ * The Min Heap API provides utilities for managing min-heaps, a binary tree
+ * structure where each node's value is less than or equal to its children's
+ * values, ensuring the smallest element is at the root.
+ *
+ * Users should avoid directly calling functions prefixed with __min_heap_*().
+ * Instead, use the provided macro wrappers.
+ *
+ * For further details and examples, refer to Documentation/core-api/min_heap.rst.
+ */
+
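
A minimal usage sketch of the macro wrappers described above (caller-side code; ARRAY_SIZE() is assumed from the usual kernel headers):

	DEFINE_MIN_HEAP(int, min_heap_int);

	static bool int_less(const void *lhs, const void *rhs, void *args)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static const struct min_heap_callbacks cbs = {
		.less = int_less,
		.swp = NULL,	/* NULL selects the built-in swap */
	};

	/* ...in a function: */
	int backing[8];
	struct min_heap_int heap;

	min_heap_init_inline(&heap, backing, ARRAY_SIZE(backing));
	min_heap_push_inline(&heap, &(int){ 3 }, &cbs, NULL);
	min_heap_push_inline(&heap, &(int){ 1 }, &cbs, NULL);
	/* *min_heap_peek_inline(&heap) == 1 */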
/**
- * struct min_heap - Data structure to hold a min-heap.
- * @data: Start of array holding the heap elements.
+ * Data structure to hold a min-heap.
* @nr: Number of elements currently in the heap.
* @size: Maximum number of elements that can be held in current storage.
+ * @data: Pointer to the start of array holding the heap elements.
+ * @preallocated: Start of the static preallocated array holding the heap elements.
*/
-struct min_heap {
- void *data;
- int nr;
- int size;
-};
+#define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \
+struct _name { \
+ size_t nr; \
+ size_t size; \
+ _type *data; \
+ _type preallocated[_nr]; \
+}
+
+#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0)
+
+typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char;
+
+#define __minheap_cast(_heap) (typeof((_heap)->data[0]) *)
+#define __minheap_obj_size(_heap) sizeof((_heap)->data[0])
/**
* struct min_heap_callbacks - Data/functions to customise the min_heap.
- * @elem_size: The nr of each element in bytes.
* @less: Partial order function for this heap.
* @swp: Swap elements function.
*/
struct min_heap_callbacks {
- int elem_size;
- bool (*less)(const void *lhs, const void *rhs);
- void (*swp)(void *lhs, void *rhs);
+ bool (*less)(const void *lhs, const void *rhs, void *args);
+ void (*swp)(void *lhs, void *rhs, void *args);
};
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address must
+ * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static __always_inline
+void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static __always_inline
+void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static __always_inline
+void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 ((void (*)(void *, void *, void *))0)
+#define SWAP_WORDS_32 ((void (*)(void *, void *, void *))1)
+#define SWAP_BYTES ((void (*)(void *, void *, void *))2)
+
+/*
+ * Selects the appropriate swap function based on the element size.
+ */
+static __always_inline
+void *select_swap_func(const void *base, size_t size)
+{
+ if (is_aligned(base, size, 8))
+ return SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ return SWAP_WORDS_32;
+ else
+ return SWAP_BYTES;
+}
+
+static __always_inline
+void do_swap(void *a, void *b, size_t size, void (*swap_func)(void *lhs, void *rhs, void *args),
+ void *priv)
+{
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, priv);
+}
+
+/**
+ * parent - given the offset of the child, find the offset of the parent.
+ * @i: the offset of the heap element whose parent is sought. Non-zero.
+ * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
+ * @size: size of each element
+ *
+ * In terms of array indexes, the parent of element j = @i/@size is simply
+ * (j-1)/2. But when working in byte offsets, we can't use implicit
+ * truncation of integer divides.
+ *
+ * Fortunately, we only need one bit of the quotient, not the full divide.
+ * @size has a least significant bit. That bit will be clear if @i is
+ * an even multiple of @size, and set if it's an odd multiple.
+ *
+ * Logically, we're doing "if (i & lsbit) i -= size;", but since the
+ * branch is unpredictable, it's done with a bit of clever branch-free
+ * code instead.
+ */
+__attribute_const__ __always_inline
+static size_t parent(size_t i, unsigned int lsbit, size_t size)
+{
+ i -= size;
+ i -= size & -(i & lsbit);
+ return i / 2;
+}
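
A worked instance of the branch-free arithmetic (the numbers are illustrative, not from the patch):

	/*
	 * With elem_size = 4 (so lsbit = 4):
	 *   child index 5 -> i = 20: 20 - 4 = 16; 16 & 4 == 0, so nothing more
	 *     is subtracted; 16 / 2 = 8, i.e. index 2 = (5 - 1) / 2.
	 *   child index 6 -> i = 24: 24 - 4 = 20; 20 & 4 == 4, so another 4 is
	 *     subtracted; 16 / 2 = 8, i.e. index 2 = (6 - 1) / 2.
	 */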
+
+/* Initialize a min-heap. */
+static __always_inline
+void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size)
+{
+ heap->nr = 0;
+ heap->size = size;
+ if (data)
+ heap->data = data;
+ else
+ heap->data = heap->preallocated;
+}
+
+#define min_heap_init_inline(_heap, _data, _size) \
+ __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+
+/* Get the minimum element from the heap. */
+static __always_inline
+void *__min_heap_peek_inline(struct min_heap_char *heap)
+{
+ return heap->nr ? heap->data : NULL;
+}
+
+#define min_heap_peek_inline(_heap) \
+ (__minheap_cast(_heap) \
+ __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr)))
+
+/* Check if the heap is full. */
+static __always_inline
+bool __min_heap_full_inline(min_heap_char *heap)
+{
+ return heap->nr == heap->size;
+}
+
+#define min_heap_full_inline(_heap) \
+ __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr))
+
/* Sift the element at pos down the heap. */
static __always_inline
-void min_heapify(struct min_heap *heap, int pos,
- const struct min_heap_callbacks *func)
+void __min_heap_sift_down_inline(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- void *left, *right;
+ const unsigned long lsbit = elem_size & -elem_size;
void *data = heap->data;
- void *root = data + pos * func->elem_size;
- int i = pos, j;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = pos * elem_size;
+ size_t b, c, d;
+ size_t n = heap->nr * elem_size;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
/* Find the sift-down path all the way to the leaves. */
- for (;;) {
- if (i * 2 + 2 >= heap->nr)
- break;
- left = data + (i * 2 + 1) * func->elem_size;
- right = data + (i * 2 + 2) * func->elem_size;
- i = func->less(left, right) ? i * 2 + 1 : i * 2 + 2;
- }
+ for (b = a; c = 2 * b + elem_size, (d = c + elem_size) < n;)
+ b = func->less(data + c, data + d, args) ? c : d;
/* Special case for the last leaf with no sibling. */
- if (i * 2 + 2 == heap->nr)
- i = i * 2 + 1;
+ if (d == n)
+ b = c;
/* Backtrack to the correct location. */
- while (i != pos && func->less(root, data + i * func->elem_size))
- i = (i - 1) / 2;
+ while (b != a && func->less(data + a, data + b, args))
+ b = parent(b, lsbit, elem_size);
/* Shift the element into its correct place. */
- j = i;
- while (i != pos) {
- i = (i - 1) / 2;
- func->swp(data + i * func->elem_size, data + j * func->elem_size);
+ c = b;
+ while (b != a) {
+ b = parent(b, lsbit, elem_size);
+ do_swap(data + b, data + c, elem_size, swp, args);
}
}
+#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \
+ __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Sift up ith element from the heap, O(log2(nr)). */
+static __always_inline
+void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ const unsigned long lsbit = elem_size & -elem_size;
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = idx * elem_size, b;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ while (a) {
+ b = parent(a, lsbit, elem_size);
+ if (func->less(data + b, data + a, args))
+ break;
+ do_swap(data + a, data + b, elem_size, swp, args);
+ a = b;
+ }
+}
+
+#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \
+ __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
/* Floyd's approach to heapification that is O(nr). */
static __always_inline
-void min_heapify_all(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- int i;
+ ssize_t i;
for (i = heap->nr / 2 - 1; i >= 0; i--)
- min_heapify(heap, i, func);
+ __min_heap_sift_down_inline(heap, i, elem_size, func, args);
}
+#define min_heapify_all_inline(_heap, _func, _args) \
+ __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
/* Remove minimum element from the heap, O(log2(nr)). */
static __always_inline
-void min_heap_pop(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
void *data = heap->data;
if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
- return;
+ return false;
/* Place last element at the root (position 0) and then sift down. */
heap->nr--;
- memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(data, data + (heap->nr * elem_size), elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
+
+ return true;
}
+#define min_heap_pop_inline(_heap, _func, _args) \
+ __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
/*
* Remove the minimum element and then push the given element. The
* implementation performs 1 sift (O(log2(nr))) and is therefore more
* efficient than a pop followed by a push that does 2.
*/
static __always_inline
-void min_heap_pop_push(struct min_heap *heap,
- const void *element,
- const struct min_heap_callbacks *func)
+void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- memcpy(heap->data, element, func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(heap->data, element, elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
}
+#define min_heap_pop_push_inline(_heap, _element, _func, _args) \
+ __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
/* Push an element on to the heap, O(log2(nr)). */
static __always_inline
-void min_heap_push(struct min_heap *heap, const void *element,
- const struct min_heap_callbacks *func)
+bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
void *data = heap->data;
- void *child, *parent;
- int pos;
+ size_t pos;
if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
- return;
+ return false;
/* Place at the end of data. */
pos = heap->nr;
- memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ memcpy(data + (pos * elem_size), element, elem_size);
heap->nr++;
/* Sift child at pos up. */
- for (; pos > 0; pos = (pos - 1) / 2) {
- child = data + (pos * func->elem_size);
- parent = data + ((pos - 1) / 2) * func->elem_size;
- if (func->less(parent, child))
- break;
- func->swp(parent, child);
- }
+ __min_heap_sift_up_inline(heap, elem_size, pos, func, args);
+
+ return true;
}
+#define min_heap_push_inline(_heap, _element, _func, _args) \
+ __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Remove the ith element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+
+	if (WARN_ONCE(heap->nr <= 0, "Deleting from an empty heap"))
+ return false;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+	/* Swap the last element into @idx, then sift to restore heap order. */
+ heap->nr--;
+ if (idx == heap->nr)
+ return true;
+ do_swap(data + (idx * elem_size), data + (heap->nr * elem_size), elem_size, swp, args);
+ __min_heap_sift_up_inline(heap, elem_size, idx, func, args);
+ __min_heap_sift_down_inline(heap, idx, elem_size, func, args);
+
+ return true;
+}
+
+#define min_heap_del_inline(_heap, _idx, _func, _args) \
+ __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
+void __min_heap_init(min_heap_char *heap, void *data, size_t size);
+void *__min_heap_peek(struct min_heap_char *heap);
+bool __min_heap_full(min_heap_char *heap);
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+
+#define min_heap_init(_heap, _data, _size) \
+ __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+#define min_heap_peek(_heap) \
+ (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr)))
+#define min_heap_full(_heap) \
+ __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr))
+#define min_heap_sift_down(_heap, _pos, _func, _args) \
+ __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_sift_up(_heap, _idx, _func, _args) \
+ __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+#define min_heapify_all(_heap, _func, _args) \
+ __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop(_heap, _func, _args) \
+ __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop_push(_heap, _element, _func, _args) \
+ __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_push(_heap, _element, _func, _args) \
+ __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_del(_heap, _idx, _func, _args) \
+ __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
#endif /* _LINUX_MIN_HEAP_H */
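
For reference, a minimal sketch of how the reworked inline API above is driven; the heap type, callback and values are illustrative, not part of this patch (DEFINE_MIN_HEAP() and struct min_heap_callbacks come from the same header):

DEFINE_MIN_HEAP(int, min_heap_int);

static bool less_than(const void *lhs, const void *rhs, void *args)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void example(void)
{
	int values[4] = { 5, 2, 7, 1 };
	int backing[8];
	struct min_heap_int heap;
	/* .swp = NULL lets the header pick a built-in swap for the element size */
	struct min_heap_callbacks cb = { .less = less_than, .swp = NULL };
	int i;

	min_heap_init(&heap, backing, ARRAY_SIZE(backing));
	for (i = 0; i < 4; i++)
		min_heap_push_inline(&heap, &values[i], &cb, NULL);
	while (heap.nr) {
		pr_info("min=%d\n", heap.data[0]);	/* 1, 2, 5, 7 */
		min_heap_pop_inline(&heap, &cb, NULL);
	}
}

min_heapify_all_inline() is the alternative when the backing array is pre-populated: set heap.nr once and heapify in O(nr) instead of doing nr pushes.
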
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 2ec559284a9f..a0158db54a04 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -8,13 +8,10 @@
#include <linux/types.h>
/*
- * min()/max()/clamp() macros must accomplish three things:
+ * min()/max()/clamp() macros must accomplish several things:
*
* - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
- * - Retain result as a constant expressions when called with only
- * constant expressions (to avoid tripping VLA warnings in stack
- * allocation usage).
* - Perform signed v unsigned type-checking (to generate compile
* errors instead of nasty runtime surprises).
* - Unsigned char/short are always promoted to signed int and can be
@@ -26,56 +23,79 @@
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-/* is_signed_type() isn't a constexpr for pointer types */
-#define __is_signed(x) \
- __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
- is_signed_type(typeof(x)), 0)
+/*
+ * __sign_use for integer expressions:
+ * bit #0 set if ok for unsigned comparisons
+ * bit #1 set if ok for signed comparisons
+ *
+ * In particular, statically non-negative signed integer expressions
+ * are ok for both.
+ *
+ * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
+ * in expressions, and are accepted for signed conversions for now.
+ * This is debatable.
+ *
+ * Note that 'x' is the original expression, and 'ux' is the unique variable
+ * that contains the value.
+ *
+ * We use 'ux' for pure type checking, and 'x' for when we need to look at the
+ * value (but without evaluating it for side effects; careful to only
+ * ever evaluate it with sizeof() or __builtin_constant_p() etc).
+ *
+ * Pointers end up being checked by the normal C type rules at the actual
+ * comparison, and these expressions only need to be careful to not cause
+ * warnings for pointer use.
+ */
+#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
+ (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
+
+/*
+ * Check whether a signed value is always non-negative.
+ *
+ * A cast is needed to avoid any warnings from values that aren't signed
+ * integer types (in which case the result doesn't matter).
+ *
+ * On 64-bit any integer or pointer type can safely be cast to 'long long'.
+ * But on 32-bit we need to avoid warnings about casting pointers to integers
+ * of different sizes without truncating 64-bit values so 'long' or 'long long'
+ * must be used depending on the size of the value.
+ *
+ * This does not work for 128-bit signed integers since the cast would truncate
+ * them, but we do not use s128 types in the kernel (we do use 'u128',
+ * but they are handled by the !is_signed_type() case).
+ */
+#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
+#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
+#else
+#define __is_nonneg(ux) statically_true( \
+ (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
+#endif
-/* True for a non-negative signed int constant */
-#define __is_noneg_int(x) \
- (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
+#define __types_ok(ux, uy) \
+ (__sign_use(ux) & __sign_use(uy))
-#define __types_ok(x, y) \
- (__is_signed(x) == __is_signed(y) || \
- __is_signed((x) + 0) == __is_signed((y) + 0) || \
- __is_noneg_int(x) || __is_noneg_int(y))
+#define __types_ok3(ux, uy, uz) \
+ (__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
#define __cmp_op_min <
#define __cmp_op_max >
#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
-#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- static_assert(__types_ok(x, y), \
- #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
- __cmp(op, unique_x, unique_y); })
-
-#define __careful_cmp(op, x, y) \
- __builtin_choose_expr(__is_constexpr((x) - (y)), \
- __cmp(op, x, y), \
- __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
+#define __cmp_once_unique(op, type, x, y, ux, uy) \
+ ({ type ux = (x); type uy = (y); __cmp(op, ux, uy); })
-#define __clamp(val, lo, hi) \
- ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+#define __cmp_once(op, type, x, y) \
+ __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
- typeof(val) unique_val = (val); \
- typeof(lo) unique_lo = (lo); \
- typeof(hi) unique_hi = (hi); \
- static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
- (lo) <= (hi), true), \
- "clamp() low limit " #lo " greater than high limit " #hi); \
- static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
- static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
- __clamp(unique_val, unique_lo, unique_hi); })
+#define __careful_cmp_once(op, x, y, ux, uy) ({ \
+ auto ux = (x); auto uy = (y); \
+ BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \
+ #op"("#x", "#y") signedness error"); \
+ __cmp(op, ux, uy); })
-#define __careful_clamp(val, lo, hi) ({ \
- __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
- __clamp(val, lo, hi), \
- __clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
- __UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
+#define __careful_cmp(op, x, y) \
+ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
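
In practice the new rules work out as below; a hedged sketch, not from this patch (the commented-out line is what BUILD_BUG_ON_MSG() now rejects at compile time):

	unsigned int u = 5;
	int s = -1;

	min(u, 10U);		/* ok: both unsigned */
	min(s, -10);		/* ok: both signed */
	min(10, sizeof(u));	/* ok: 10 is statically non-negative */
	/* min(s, u);		   rejected: signed/unsigned mix */
	umin(s, u);		/* builds: umin() forces an unsigned comparison */
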
/**
* min - return minimum of two values of the same or compatible types
@@ -108,13 +128,20 @@
#define umax(x, y) \
__careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
+ auto ux = (x); auto uy = (y); auto uz = (z); \
+ BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \
+ #op"3("#x", "#y", "#z") signedness error"); \
+ __cmp(op, ux, __cmp(op, uy, uz)); })
+
/**
* min3 - return minimum of three values
* @x: first value
* @y: second value
* @z: third value
*/
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define min3(x, y, z) \
+ __careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
* max3 - return maximum of three values
@@ -122,7 +149,24 @@
* @y: second value
* @z: third value
*/
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
+#define max3(x, y, z) \
+ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __cmp_once(min, type, x, y)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __cmp_once(max, type, x, y)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -134,39 +178,57 @@
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \
+ type uval = (val); \
+ type ulo = (lo); \
+ type uhi = (hi); \
+ BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \
+ "clamp("#val", "#lo", "#hi") signedness error"); \
+ __clamp(uval, ulo, uhi); })
+
+#define __careful_clamp(type, val, lo, hi) \
+ __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+
/**
- * clamp - return a value clamped to a given range with strict typechecking
+ * clamp - return a value clamped to a given range with typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
+ * This macro checks @val/@lo/@hi to make sure they have compatible
+ * signedness.
*/
+#define clamp(val, lo, hi) __careful_clamp(auto, val, lo, hi)
/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * @type to make all the comparisons.
*/
-#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
+#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
+ * integer type.
*/
-#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
+#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
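
The relationship between the three variants, sketched with illustrative values:

	u8 level = 200;

	clamp(level, 16, 160);		/* checked; evaluates to 160 */
	clamp_t(u8, 300, 16, 160);	/* unchecked; (u8)300 wraps to 44 first */
	clamp_val(level, 16, 160);	/* unchecked; temporaries are all u8 */
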
/*
* Do not check the array parameter using __must_be_array().
@@ -211,31 +273,6 @@
*/
#define max_array(array, len) __minmax_array(max, array, len)
-/**
- * clamp_t - return a value clamped to a given range using a given type
- * @type: the type of variable to use
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of type
- * @type to make all the comparisons.
- */
-#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
-
-/**
- * clamp_val - return a value clamped to a given range using val's type
- * @val: current value
- * @lo: minimum allowable value
- * @hi: maximum allowable value
- *
- * This macro does no typechecking and uses temporary variables of whatever
- * type the input argument @val is. This is useful when @val is an unsigned
- * type and @lo and @hi are literals that will otherwise be assigned a signed
- * integer type.
- */
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-
static inline bool in_range64(u64 val, u64 start, u64 len)
{
return (val - start) < len;
@@ -270,4 +307,13 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/*
+ * Use these carefully: no type checking, and the arguments are evaluated
+ * multiple times. Use for obvious constants only.
+ */
+#define MIN(a, b) __cmp(min, a, b)
+#define MAX(a, b) __cmp(max, a, b)
+#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
+#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
+
#endif /* _LINUX_MINMAX_H */
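
Being plain ternaries, the uppercase variants stay usable where a constant expression is required, e.g. (illustrative):

static u8 scratch[MAX(64, L1_CACHE_BYTES)];	/* fine: obvious constants */
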
diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h
new file mode 100644
index 000000000000..a81d6fa70851
--- /dev/null
+++ b/include/linux/misc/keba.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024, KEBA Industrial Automation GmbH */
+
+#ifndef _LINUX_MISC_KEBA_H
+#define _LINUX_MISC_KEBA_H
+
+#include <linux/auxiliary_bus.h>
+
+struct i2c_board_info;
+struct spi_board_info;
+
+/**
+ * struct keba_i2c_auxdev - KEBA I2C auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of I2C controller IO memory
+ * @info_size: number of I2C devices to be probed
+ * @info: I2C devices to be probed
+ */
+struct keba_i2c_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct i2c_board_info *info;
+};
+
+/**
+ * struct keba_spi_auxdev - KEBA SPI auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of SPI controller IO memory
+ * @info_size: number of SPI devices to be probed
+ * @info: SPI devices to be probed
+ */
+struct keba_spi_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct spi_board_info *info;
+};
+
+/**
+ * struct keba_fan_auxdev - KEBA fan auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of fan controller IO memory
+ */
+struct keba_fan_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_batt_auxdev - KEBA battery auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of battery controller IO memory
+ */
+struct keba_batt_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_uart_auxdev - KEBA UART auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of UART controller IO memory
+ * @irq: number of UART controller interrupt
+ */
+struct keba_uart_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ unsigned int irq;
+};
+
+#endif /* _LINUX_MISC_KEBA_H */
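
A hypothetical sketch of how a parent device driver might describe one I2C cell with these types; the chip, address and IO range are invented for illustration:

static struct i2c_board_info example_i2c_info[] = {
	{ I2C_BOARD_INFO("lm75", 0x48) },
};

static struct keba_i2c_auxdev example_i2c = {
	.io = DEFINE_RES_MEM(0x0080, 0x80),
	.info_size = ARRAY_SIZE(example_i2c_info),
	.info = example_i2c_info,
};

The embedded auxdev member is then brought up through the usual auxiliary_device_init()/auxiliary_device_add() pair.
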
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index e799b1f8d05b..0cb36a3ffc47 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -9,15 +9,20 @@
#define _MISC_CGROUP_H_
/**
- * Types of misc cgroup entries supported by the host.
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
*/
enum misc_res_type {
#ifdef CONFIG_KVM_AMD_SEV
- /* AMD SEV ASIDs resource */
+ /** @MISC_CG_RES_SEV: AMD SEV ASIDs resource */
MISC_CG_RES_SEV,
- /* AMD SEV-ES ASIDs resource */
+ /** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
MISC_CG_RES_SEV_ES,
#endif
+#ifdef CONFIG_INTEL_TDX_HOST
+ /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */
+ MISC_CG_RES_TDX,
+#endif
+ /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
MISC_CG_RES_TYPES
};
@@ -30,13 +35,17 @@ struct misc_cg;
/**
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
+ * @watermark: Historical maximum usage of the resource.
* @usage: Current usage of the resource.
* @events: Number of times the resource limit was exceeded.
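+ * @events_local: Number of times the local resource limit was exceeded.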
*/
struct misc_res {
u64 max;
+ atomic64_t watermark;
atomic64_t usage;
atomic64_t events;
+ atomic64_t events_local;
};
/**
@@ -50,11 +58,12 @@ struct misc_cg {
/* misc.events */
struct cgroup_file events_file;
+ /* misc.events.local */
+ struct cgroup_file events_local_file;
struct misc_res res[MISC_CG_RES_TYPES];
};
-u64 misc_cg_res_total_usage(enum misc_res_type type);
int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
@@ -98,11 +107,6 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
-{
- return 0;
-}
-
static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
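
Typical charge/uncharge pattern around these hooks (a sketch; SEV is chosen only as an example resource):

	struct misc_cg *cg = get_current_misc_cg();
	int ret = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);

	if (ret) {
		put_misc_cg(cg);
		return ret;
	}
	/* ... one SEV ASID is accounted to cg until ... */
	misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
	put_misc_cg(cg);
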
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index c0fea6ca5076..7d0aa718499c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -70,13 +70,19 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define EISA_EEPROM_MINOR 241
#define RFKILL_MINOR 242
-#define MISC_DYNAMIC_MINOR 255
-struct device;
-struct attribute_group;
+/*
+ * The misc char device minor code space is divided around the macro below:
+ *
+ * < 255 : fixed minor code
+ * == 255 : indicator to request a dynamic minor code
+ * > 255 : dynamically allocated minor code, 1048320 minor codes in total
+ */
+#define MISC_DYNAMIC_MINOR 255
-struct miscdevice {
+struct miscdevice {
int minor;
const char *name;
const struct file_operations *fops;
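
Registration against the minor code space described in the comment above, sketched with hypothetical names:

static const struct file_operations example_fops;	/* hypothetical */

static struct miscdevice example_misc = {
	.minor = MISC_DYNAMIC_MINOR,	/* misc core assigns a minor > 255 */
	.name = "example",
	.fops = &example_fops,
};

misc_register(&example_misc) then fills in the allocated minor; fixed minors below 255 are used verbatim.
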
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 27f42f713c89..f016263e1fcf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1135,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1415,7 +1415,6 @@ int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header);
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 991526039ccb..9d47cdc727ad 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -41,7 +41,6 @@ struct mlx5_core_cq {
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
- struct mlx5_uars_page *uar;
refcount_t refcount;
struct completion free;
unsigned vector;
@@ -184,6 +183,7 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
complete(&cq->free);
}
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index d7bb31d9a446..d7f46a8fbfa1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -211,6 +211,7 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
};
enum wqe_page_fault_type {
@@ -279,6 +280,7 @@ enum {
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_PAGE_SIZE_5 = 1ull << 42,
MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
@@ -294,6 +296,7 @@ enum {
#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
+#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
@@ -369,6 +372,7 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
};
enum {
@@ -535,6 +539,7 @@ struct mlx5_cmd_layout {
};
enum mlx5_rfr_severity_bit_offsets {
+ MLX5_CRR_BIT_OFFSET = 0x6,
MLX5_RFR_BIT_OFFSET = 0x7,
};
@@ -645,10 +650,11 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -658,6 +664,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -665,6 +672,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+			 * FW changed from specifying the fault size as a
+			 * byte count to a granularity of 4k pages. To keep
+			 * backward compatibility, the size specified in
+			 * pages uses bits 31:12.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
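
Decoding the comment above, a consumer of a memory-scheme fault EQE would recover the byte count roughly like this (a sketch; eqe is assumed to be a struct mlx5_eqe *):

	struct mlx5_eqe_page_fault *pf = &eqe->data.pg_fault;
	u32 pages = be32_to_cpu(pf->memory.demand_fault_pages) >> 12;
	u64 bytes = (u64)pages * MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
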
@@ -1223,10 +1247,13 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_SHAMPO = 0x1d,
+ MLX5_CAP_PSP = 0x1e,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
+ MLX5_CAP_ADV_RDMA = 0x28,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1242,7 +1269,8 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
- MLX5_MCAM_REGS_NUM = 0x3,
+ MLX5_MCAM_REGS_0x9180_0x91FF = 0x3,
+ MLX5_MCAM_REGS_NUM = 0x4,
};
enum mlx5_mcam_feature_groups {
@@ -1320,6 +1348,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_rx_flow_table_properties.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_tx_flow_table_properties.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
@@ -1359,6 +1393,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(adv_virtualization_cap, \
mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+#define MLX5_CAP_ADV_RDMA(mdev, cap) \
+ MLX5_GET(adv_rdma_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap)
+
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
@@ -1368,6 +1406,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
+
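
Callers can then query ODP capabilities without caring which scheme the device advertises, e.g. (illustrative; the per-transport field name is assumed):

	if (MLX5_CAP_ODP_SCHEME(mdev, ud_odp_caps.send))
		/* UD send page faults supported under either scheme */;
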
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
@@ -1391,6 +1437,10 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
+#define MLX5_CAP_MCAM_REG3(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9180_0x91FF], \
+ mng_access_reg_cap_mask.access_regs3.reg)
+
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1435,6 +1485,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_MACSEC(mdev, cap)\
MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
+#define MLX5_CAP_SHAMPO(mdev, cap) \
+ MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
+
+#define MLX5_CAP_PSP(mdev, cap)\
+ MLX5_GET(psp_cap, (mdev)->caps.hca[MLX5_CAP_PSP]->cur, cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1443,6 +1499,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
@@ -1465,7 +1522,10 @@ enum {
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
+ MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
+ MLX5_RS_FEC_HISTOGRAM_GROUP = 0x23,
};
enum {
@@ -1479,8 +1539,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
-#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 6
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 4
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 779cfdf2e9d6..1c54aa6f74fb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/pci-tph.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
@@ -45,7 +46,6 @@
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
-#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
@@ -55,7 +55,6 @@
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
-#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
#define MLX5_ADEV_NAME "mlx5_core"
@@ -131,12 +130,14 @@ enum {
MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
+ MLX5_REG_PPHCR = 0x503E,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MTCAP = 0x9009,
MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MNVDA = 0x9024,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MRTC = 0x902d,
@@ -159,9 +160,14 @@ enum {
MLX5_REG_MSECQ = 0x9155,
MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_MTPTM = 0x9180,
+ MLX5_REG_MTCTR = 0x9181,
+ MLX5_REG_MRTCQ = 0x9182,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_NIC_CAP = 0xC00D,
MLX5_REG_DTOR = 0xC00E,
+ MLX5_REG_VHCA_ICM_CTRL = 0xC010,
};
enum mlx5_qpts_trust_state {
@@ -301,6 +307,8 @@ struct mlx5_cmd {
struct semaphore sem;
struct semaphore pages_sem;
struct semaphore throttle_sem;
+ struct semaphore unprivileged_sem;
+ struct xarray privileged_uids;
} vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
@@ -393,6 +401,7 @@ struct mlx5_core_rsc_common {
enum mlx5_res_type res;
refcount_t refcount;
struct completion free;
+ bool invalid;
};
struct mlx5_uars_page {
@@ -425,7 +434,6 @@ struct mlx5_sq_bfreg {
struct mlx5_uars_page *up;
bool wc;
u32 index;
- unsigned int offset;
};
struct mlx5_core_health {
@@ -472,36 +480,6 @@ struct mlx5_core_sriov {
u16 max_ec_vfs;
};
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
-struct mlx5_fc_stats {
- spinlock_t counters_idr_lock; /* protects counters_idr */
- struct idr counters_idr;
- struct list_head counters;
- struct llist_head addlist;
- struct llist_head dellist;
-
- struct workqueue_struct *wq;
- struct delayed_work work;
- unsigned long next_query;
- unsigned long sampling_interval; /* jiffies */
- u32 *bulk_query_out;
- int bulk_query_len;
- size_t num_counters;
- bool bulk_query_alloc_failed;
- unsigned long next_bulk_query_alloc;
- struct mlx5_fc_pool fc_pool;
-};
-
struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
@@ -510,7 +488,6 @@ struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
-struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
@@ -553,6 +530,7 @@ enum {
* creation/deletion on drivers rescan. Unset during device attach.
*/
MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
};
struct mlx5_adev {
@@ -620,6 +598,7 @@ struct mlx5_priv {
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
+ struct blocking_notifier_head esw_n_head;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
@@ -628,21 +607,28 @@ struct mlx5_priv {
struct mlx5_devcom_comp_dev *hca_devcom_comp;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
- struct mlx5_fc_stats fc_stats;
+ struct mlx5_fc_stats *fc_stats;
struct mlx5_rl_table rl_table;
struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
- struct mlx5_uars_page *uar;
+ struct mlx5_sq_bfreg bfreg;
#ifdef CONFIG_MLX5_SF
- struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_nb vhca_state_nb;
+ struct blocking_notifier_head vhca_state_n_head;
+ struct notifier_block sf_dev_nb;
struct mlx5_sf_dev_table *sf_dev_table;
struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
+ struct notifier_block sf_hw_table_vhca_nb;
struct mlx5_sf_hw_table *sf_hw_table;
+ struct notifier_block sf_table_esw_nb;
+ struct notifier_block sf_table_vhca_nb;
+ struct notifier_block sf_table_mdev_nb;
struct mlx5_sf_table *sf_table;
#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
@@ -678,12 +664,14 @@ struct mlx5e_resources {
u32 pdn;
struct mlx5_td td;
u32 mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg *bfregs;
+ unsigned int num_bfregs;
#define MLX5_MAX_NUM_TC 8
u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
+ netdevice_tracker tracker;
struct mutex uplink_netdev_lock;
struct mlx5_crypto_dek_priv *dek_priv;
};
@@ -703,39 +691,14 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
-#define MAX_PIN_NUM 8
-struct mlx5_pps {
- u8 pin_caps[MAX_PIN_NUM];
- struct work_struct out_work;
- u64 start[MAX_PIN_NUM];
- u8 enabled;
- u64 min_npps_period;
- u64 min_out_pulse_duration_ns;
-};
-
-struct mlx5_timer {
- struct cyclecounter cycles;
- struct timecounter tc;
- u32 nominal_c_mult;
- unsigned long overflow_period;
- struct delayed_work overflow_work;
-};
-
-struct mlx5_clock {
- struct mlx5_nb pps_nb;
- seqlock_t lock;
- struct hwtstamp_config hwtstamp_config;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
- struct mlx5_pps pps_info;
- struct mlx5_timer timer;
-};
-
+struct mlx5_clock;
+struct mlx5_clock_dev_state;
struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+struct mlx5_st;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -766,6 +729,12 @@ struct mlx5_hca_cap {
u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
};
+enum mlx5_wc_state {
+ MLX5_WC_STATE_UNINITIALIZED,
+ MLX5_WC_STATE_UNSUPPORTED,
+ MLX5_WC_STATE_SUPPORTED,
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -799,6 +768,7 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
+ struct mlx5_st *st;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -808,7 +778,8 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
- struct mlx5_clock clock;
+ struct mlx5_clock *clock;
+ struct mlx5_clock_dev_state *clock_state;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
struct mlx5_rsc_dump *rsc_dump;
@@ -824,6 +795,9 @@ struct mlx5_core_dev {
#endif
u64 num_ipsec_offloads;
struct mlx5_sd *sd;
+ enum mlx5_wc_state wc_state;
+ /* sync write combining state */
+ struct mutex wc_state_lock;
};
struct mlx5_db {
@@ -836,6 +810,8 @@ struct mlx5_db {
int index;
};
+#define MLX5_DEFAULT_NUM_DOORBELLS 8
+
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -849,6 +825,7 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
enum {
MLX5_CMD_ENT_STATE_PENDING_COMP,
+ MLX5_CMD_ENT_STATE_TIMEDOUT,
};
struct mlx5_cmd_work_ent {
@@ -908,6 +885,7 @@ struct mlx5_hca_vport_context {
u16 qkey_violation_counter;
u16 pkey_violation_counter;
bool grh_required;
+ u8 num_plane;
};
#define STRUCT_FIELD(header, field) \
@@ -1004,6 +982,8 @@ struct mlx5_async_work {
mlx5_async_cbk_t user_callback;
u16 opcode; /* cmd opcode */
u16 op_mod; /* cmd op_mod */
+ u8 throttle_locked:1;
+ u8 unpriv_locked:1;
void *out; /* pointer to the cmd output buffer */
};
@@ -1034,6 +1014,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
@@ -1171,7 +1153,6 @@ bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
@@ -1194,6 +1175,23 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+#ifdef CONFIG_PCIE_TPH
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index);
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
+#else
+static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
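
A sketch of the intended call pattern, assuming the TPH_MEM_TYPE_VM memory type from <linux/pci-tph.h> and a caller-provided cpu_uid:

	u16 st_index;

	if (!mlx5_st_alloc_index(mdev, TPH_MEM_TYPE_VM, cpu_uid, &st_index)) {
		/* program st_index into the steered object, and eventually: */
		mlx5_st_dealloc_index(mdev, st_index);
	}
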
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
@@ -1221,6 +1219,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+ const struct mlx5_core_dev *dev2)
+{
+ return dev1->coredev_type == dev2->coredev_type;
+}
+
static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
@@ -1375,4 +1379,14 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
enum {
MLX5_OCTWORD = 16,
};
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
+#define MLX5_SW_IMAGE_GUID_MAX_BYTES 9
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index df73a2ccc9af..67256e776566 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -147,6 +147,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \
+ GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS)
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 3fb428ce7d1c..9cadb1d5e6df 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -40,6 +40,9 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16
+#define MLX5_FS_MAX_POOL_SIZE BIT(30)
+
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_NONE,
MLX5_FLOW_DESTINATION_TYPE_VPORT,
@@ -68,6 +71,7 @@ enum {
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+ MLX5_FLOW_TABLE_OTHER_ESWITCH = BIT(6),
};
#define LEFTOVERS_RULE_NUM 2
@@ -108,9 +112,12 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX,
};
enum {
+ FDB_DROP_ROOT,
FDB_BYPASS_PATH,
FDB_CRYPTO_INGRESS,
FDB_TC_OFFLOAD,
@@ -122,6 +129,24 @@ enum {
FDB_PER_VPORT,
};
+enum fs_flow_table_type {
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
+};
+
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
struct mlx5_flow_definer;
@@ -163,7 +188,7 @@ struct mlx5_flow_destination {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- u32 counter_id;
+ struct mlx5_fc *counter;
struct {
u16 num;
u16 vhca_id;
@@ -192,9 +217,9 @@ struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
-mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport);
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport_idx);
struct mlx5_flow_table_attr {
int prio;
@@ -202,6 +227,8 @@ struct mlx5_flow_table_attr {
u32 level;
u32 flags;
u16 uid;
+ u16 vport;
+ u16 esw_owner_vhca_id;
struct mlx5_flow_table *next_ft;
struct {
@@ -238,6 +265,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
struct mlx5_exe_aso {
u32 object_id;
+ int base_id;
u8 type;
u8 return_reg_id;
union {
@@ -298,10 +326,11 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
-/* As mlx5_fc_create() but doesn't queue stats refresh thread. */
-struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging);
-
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
+void mlx5_fc_local_destroy(struct mlx5_fc *counter);
+void mlx5_fc_local_get(struct mlx5_fc *counter);
+void mlx5_fc_local_put(struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
@@ -342,4 +371,11 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type);
#endif
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f468763478ae..e9dcd4bf355d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -80,23 +80,15 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
- MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
-};
-
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
- MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
- MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
- (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
- MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
-};
-
-enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
@@ -112,6 +104,16 @@ enum {
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -187,6 +189,9 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA = 0x732,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT = 0x733,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT = 0x734,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@@ -309,10 +314,14 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_PSP_GEN_SPI = 0xb10,
+ MLX5_CMD_OP_PSP_ROTATE_KEY = 0xb11,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
@@ -416,7 +425,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
/* Table 2170 - Flow Table Fields Supported 2 Format */
struct mlx5_ifc_flow_table_fields_supported_2_bits {
- u8 reserved_at_0[0x2];
+ u8 inner_l4_type_ext[0x1];
+ u8 outer_l4_type_ext[0x1];
u8 inner_l4_type[0x1];
u8 outer_l4_type[0x1];
u8 reserved_at_4[0xa];
@@ -425,7 +435,11 @@ struct mlx5_ifc_flow_table_fields_supported_2_bits {
u8 tunnel_header_0_1[0x1];
u8 reserved_at_11[0xf];
- u8 reserved_at_20[0x60];
+ u8 reserved_at_20[0xf];
+ u8 ipsec_next_header[0x1];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -477,15 +491,23 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x2];
+ u8 reformat_l2_to_l3_psp_tunnel[0x1];
+ u8 reformat_l3_psp_tunnel_to_l2[0x1];
u8 reformat_insert[0x1];
u8 reformat_remove[0x1];
u8 macsec_encrypt[0x1];
u8 macsec_decrypt[0x1];
- u8 reserved_at_66[0x2];
+ u8 psp_encrypt[0x1];
+ u8 psp_decrypt[0x1];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
- u8 reserved_at_6a[0xe];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -522,7 +544,15 @@ struct mlx5_ifc_ipv6_layout_bits {
u8 ipv6[16][0x8];
};
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
u8 reserved_at_0[0x80];
@@ -534,6 +564,13 @@ enum {
MLX5_PACKET_L4_TYPE_UDP,
};
+enum {
+ MLX5_PACKET_L4_TYPE_EXT_NONE,
+ MLX5_PACKET_L4_TYPE_EXT_TCP,
+ MLX5_PACKET_L4_TYPE_EXT_UDP,
+ MLX5_PACKET_L4_TYPE_EXT_ICMP,
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -560,10 +597,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_dport[0x10];
u8 l4_type[0x2];
- u8 reserved_at_c2[0xe];
+ u8 l4_type_ext[0x4];
+ u8 reserved_at_c6[0xa];
u8 ipv4_ihl[0x4];
- u8 reserved_at_c4[0x4];
-
+ u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
@@ -670,11 +707,10 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x8];
-
+ u8 psp_syndrome[0x8];
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
- u8 reserved_at_1b8[0x8];
+ u8 ipsec_next_header[0x8];
u8 reserved_at_1c0[0x40];
};
@@ -793,7 +829,7 @@ struct mlx5_ifc_ads_bits {
u8 reserved_at_2[0xe];
u8 pkey_index[0x10];
- u8 reserved_at_20[0x8];
+ u8 plane_index[0x8];
u8 grh[0x1];
u8 mlid[0x7];
u8 rlid[0x10];
@@ -911,7 +947,9 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_8[0x5];
u8 fdb_uplink_hairpin[0x1];
u8 fdb_multi_path_any_table_limit_regc[0x1];
- u8 reserved_at_f[0x3];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
u8 fdb_multi_path_any_table[0x1];
u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
@@ -950,6 +988,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_1900[0x6700];
};
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
MLX5_COUNTER_FLOW_ESWITCH = 0x1,
@@ -1008,7 +1113,9 @@ struct mlx5_ifc_qos_cap_bits {
u8 log_esw_max_sched_depth[0x4];
u8 reserved_at_10[0x10];
- u8 reserved_at_20[0xb];
+ u8 reserved_at_20[0x9];
+ u8 esw_cross_esw_sched[0x1];
+ u8 reserved_at_2a[0x1];
u8 log_max_qos_nic_queue_group[0x5];
u8 reserved_at_30[0x10];
@@ -1016,7 +1123,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
+ u8 reserved_at_80[0xb];
+ u8 log_esw_max_rate_limit[0x5];
u8 packet_pacing_rate_table_size[0x10];
u8 esw_element_type[0x10];
@@ -1027,7 +1135,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x20];
+ u8 nic_element_type[0x10];
+ u8 nic_tsar_type[0x10];
u8 reserved_at_120[0x3];
u8 log_meter_aso_granularity[0x5];
@@ -1093,7 +1202,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ip_over_ip_tx[0x1];
u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
- u8 reserved_at_38[0x6];
+ u8 swp_csum_l4_partial[0x1];
+ u8 reserved_at_39[0x5];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -1324,11 +1434,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -1342,7 +1454,20 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
- u8 reserved_at_120[0x6E0];
+ u8 reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
+
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
+
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_tls_cap_bits {
@@ -1390,6 +1515,21 @@ struct mlx5_ifc_macsec_cap_bits {
u8 reserved_at_40[0x7c0];
};
+struct mlx5_ifc_psp_cap_bits {
+ u8 reserved_at_0[0x1];
+ u8 psp_crypto_offload[0x1];
+ u8 reserved_at_2[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_num_of_psp_spi[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x7e0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1442,9 +1582,13 @@ enum {
};
enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1459,6 +1603,8 @@ enum {
enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
+ MLX5_UCTX_CAP_RDMA_CTRL = 1UL << 3,
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA = 1UL << 4,
};
#define MLX5_FC_BULK_SIZE_FACTOR 128
@@ -1482,12 +1628,14 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+ MLX5_STEERING_FORMAT_CONNECTX_8 = 3,
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x6];
u8 page_request_disable[0x1];
- u8 reserved_at_7[0x9];
+ u8 abs_native_port_num[0x1];
+ u8 reserved_at_8[0x8];
u8 shared_object_to_user_object_allowed[0x1];
u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
@@ -1526,8 +1674,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 ts_cqe_to_dest_cqn[0x1];
u8 reserved_at_b3[0x6];
u8 go_back_n[0x1];
- u8 shampo[0x1];
- u8 reserved_at_bb[0x5];
+ u8 reserved_at_ba[0x6];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1650,7 +1797,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pci_sync_for_fw_update_event[0x1];
u8 reserved_at_1f2[0x6];
u8 init2_lag_tx_port_affinity[0x1];
- u8 reserved_at_1fa[0x3];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1717,7 +1865,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 regexp_params[0x1];
u8 uar_sz[0x6];
u8 port_selection_cap[0x1];
- u8 reserved_at_251[0x1];
+ u8 nic_cap_reg[0x1];
u8 umem_uid_0[0x1];
u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
@@ -1731,7 +1879,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x3];
+ u8 disciplined_fr_counter[0x1];
+ u8 reserved_at_271[0x2];
u8 qp_error_syndrome[0x1];
u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
@@ -1744,7 +1893,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x10];
+ u8 reserved_at_2a0[0x7];
+ u8 mkey_pcie_tph[0x1];
+ u8 reserved_at_2a8[0x1];
+ u8 tis_tir_td_order[0x1];
+
+ u8 psp[0x1];
+ u8 shampo[0x1];
+ u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
u8 max_flow_counter_31_16[0x10];
@@ -1762,7 +1918,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0x6];
+ u8 dp_ordering_ooo_all_ud[0x1];
+ u8 dp_ordering_ooo_all_uc[0x1];
+ u8 dp_ordering_ooo_all_xrc[0x1];
+ u8 dp_ordering_ooo_all_dc[0x1];
+ u8 dp_ordering_ooo_all_rc[0x1];
+ u8 pcie_reset_using_hotreset_method[0x1];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 vport_counter_local_loopback[0x1];
@@ -1798,7 +1959,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_rqt[0x5];
u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x3];
+ u8 reserved_at_398[0x1];
+ u8 vnic_env_cnt_bar_uar_access[0x1];
+ u8 vnic_env_cnt_odp_page_fault[0x1];
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
@@ -1871,7 +2034,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x9];
+ u8 reserved_at_570[0x1];
+ u8 adv_rdma[0x1];
+ u8 reserved_at_572[0x7];
u8 adv_virtualization[0x1];
u8 reserved_at_57a[0x6];
@@ -1883,7 +2048,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5a0[0x10];
u8 enhanced_cqe_compression[0x1];
- u8 reserved_at_5b1[0x2];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1952,12 +2118,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 dynamic_msix_table_size[0xc];
u8 reserved_at_740[0xc];
u8 min_dynamic_vf_msix_table_size[0x4];
- u8 reserved_at_750[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
u8 max_dynamic_vf_msix_table_size[0xc];
u8 reserved_at_760[0x3];
u8 log_max_num_header_modify_argument[0x5];
- u8 reserved_at_768[0x4];
+ u8 log_header_modify_argument_granularity_offset[0x4];
u8 log_header_modify_argument_granularity[0x4];
u8 reserved_at_770[0x3];
u8 log_header_modify_argument_max_alloc[0x5];
@@ -1980,7 +2148,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
u8 migratable[0x1];
- u8 reserved_at_81[0x1f];
+ u8 reserved_at_81[0x7];
+ u8 dp_ordering_force[0x1];
+ u8 reserved_at_89[0x9];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
@@ -1990,9 +2164,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_c0[0x8];
u8 migration_multi_load[0x1];
u8 migration_tracking_state[0x1];
- u8 reserved_at_ca[0x6];
+ u8 multiplane_qp_ud[0x1];
+ u8 reserved_at_cb[0x5];
u8 migration_in_chunks[0x1];
- u8 reserved_at_d1[0xf];
+ u8 reserved_at_d1[0x1];
+ u8 sf_eq_usage[0x1];
+ u8 reserved_at_d3[0x5];
+ u8 multiplane[0x1];
+ u8 reserved_at_d9[0x7];
u8 cross_vhca_object_to_object_supported[0x20];
@@ -2001,11 +2180,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
- u8 reserved_at_1a8[0x3];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
u8 log_min_mkey_entity_size[0x5];
u8 reserved_at_1b0[0x10];
- u8 reserved_at_1c0[0x60];
+ u8 general_obj_types_127_64[0x40];
+ u8 reserved_at_200[0x20];
u8 reserved_at_220[0x1];
u8 sw_vhca_id_valid[0x1];
@@ -2016,11 +2197,25 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 ts_cqe_metadata_size2wqe_counter[0x5];
u8 reserved_at_250[0x10];
- u8 reserved_at_260[0x120];
- u8 reserved_at_380[0x10];
+ u8 reserved_at_260[0x20];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
+ u8 reserved_at_380[0xb];
+ u8 min_mkey_log_entity_size_fixed_buffer[0x5];
u8 ec_vf_vport_base[0x10];
- u8 reserved_at_3a0[0x10];
+ u8 reserved_at_3a0[0x2];
+ u8 max_mkey_log_entity_size_fixed_buffer[0x6];
+ u8 reserved_at_3a8[0x2];
+ u8 max_mkey_log_entity_size_mtt[0x6];
u8 max_rqt_vhca_id[0x10];
u8 reserved_at_3c0[0x20];
@@ -2029,7 +2224,33 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 pcc_ifa2[0x1];
u8 reserved_at_3f1[0xf];
- u8 reserved_at_400[0x400];
+ u8 reserved_at_400[0x1];
+ u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
+
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
+
+ u8 reserved_at_440[0x8];
+ u8 max_num_eqs_24b[0x18];
+
+ u8 reserved_at_460[0x144];
+ u8 load_balance_id[0x4];
+ u8 reserved_at_5a8[0x18];
+
+ u8 query_adjacent_functions_id[0x1];
+ u8 ingress_egress_esw_vport_connect[0x1];
+ u8 function_id_type_vhca_id[0x1];
+ u8 reserved_at_5c3[0x1];
+ u8 lag_per_mp_group[0x1];
+ u8 reserved_at_5c5[0xb];
+ u8 delegate_vhca_management_profiles[0x10];
+
+ u8 delegated_vhca_max[0x10];
+ u8 delegate_vhca_max[0x10];
+
+ u8 reserved_at_600[0x200];
};
enum mlx5_ifc_flow_destination_type {
@@ -2072,7 +2293,7 @@ struct mlx5_ifc_extended_dest_format_bits {
u8 reserved_at_60[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
};
@@ -2164,12 +2385,17 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x80];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_offset[0x40];
u8 headers_mkey[0x20];
u8 shampo_enable[0x1];
- u8 reserved_at_1e1[0x4];
+ u8 reserved_at_1e1[0x1];
+ u8 shampo_mode[0x2];
+ u8 reserved_at_1e4[0x1];
u8 log_reservation_size[0x3];
u8 reserved_at_1e8[0x5];
u8 log_max_num_of_packets_per_reservation[0x3];
@@ -2478,6 +2704,12 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits {
u8 field_select_8021qaurp[0x20];
};
+struct mlx5_ifc_phys_layer_recovery_cntrs_bits {
+ u8 total_successful_recovery_events[0x20];
+
+ u8 reserved_at_20[0x7a0];
+};
+
struct mlx5_ifc_phys_layer_cntrs_bits {
u8 time_since_last_clear_high[0x20];
@@ -2650,6 +2882,46 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 port_xmit_wait[0x20];
};
+struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits {
+ u8 reserved_at_0[0x300];
+
+ u8 port_xmit_data_high[0x20];
+
+ u8 port_xmit_data_low[0x20];
+
+ u8 port_rcv_data_high[0x20];
+
+ u8 port_rcv_data_low[0x20];
+
+ u8 port_xmit_pkts_high[0x20];
+
+ u8 port_xmit_pkts_low[0x20];
+
+ u8 port_rcv_pkts_high[0x20];
+
+ u8 port_rcv_pkts_low[0x20];
+
+ u8 reserved_at_400[0x80];
+
+ u8 port_unicast_xmit_pkts_high[0x20];
+
+ u8 port_unicast_xmit_pkts_low[0x20];
+
+ u8 port_multicast_xmit_pkts_high[0x20];
+
+ u8 port_multicast_xmit_pkts_low[0x20];
+
+ u8 port_unicast_rcv_pkts_high[0x20];
+
+ u8 port_unicast_rcv_pkts_low[0x20];
+
+ u8 port_multicast_rcv_pkts_high[0x20];
+
+ u8 port_multicast_rcv_pkts_low[0x20];
+
+ u8 reserved_at_580[0x240];
+};
+
struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits {
u8 transmit_queue_high[0x20];
@@ -3127,6 +3399,14 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_nic_cap_reg_bits {
+ u8 reserved_at_0[0x1a];
+ u8 vhca_icm_ctrl[0x1];
+ u8 reserved_at_1b[0x5];
+
+ u8 reserved_at_20[0x60];
+};
+
struct mlx5_ifc_default_timeout_bits {
u8 to_multiplier[0x3];
u8 reserved_at_3[0x9];
@@ -3163,6 +3443,18 @@ struct mlx5_ifc_dtor_reg_bits {
u8 reserved_at_1c0[0x20];
};
+struct mlx5_ifc_vhca_icm_ctrl_reg_bits {
+ u8 vhca_id_valid[0x1];
+ u8 reserved_at_1[0xf];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_20[0xa0];
+
+ u8 cur_alloc_icm[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -3337,7 +3629,8 @@ struct mlx5_ifc_qpc_bits {
u8 latency_sensitive[0x1];
u8 reserved_at_24[0x1];
u8 drain_sigerr[0x1];
- u8 reserved_at_26[0x2];
+ u8 reserved_at_26[0x1];
+ u8 dp_ordering_force[0x1];
u8 pd[0x18];
u8 mtu[0x3];
@@ -3410,7 +3703,8 @@ struct mlx5_ifc_qpc_bits {
u8 rae[0x1];
u8 reserved_at_493[0x1];
u8 page_offset[0x6];
- u8 reserved_at_49a[0x3];
+ u8 reserved_at_49a[0x2];
+ u8 dp_ordering_1[0x1];
u8 cd_slave_receive[0x1];
u8 cd_slave_send[0x1];
u8 cd_master[0x1];
@@ -3499,6 +3793,22 @@ struct mlx5_ifc_crypto_cap_bits {
u8 reserved_at_80[0x780];
};
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 shampo_header_split[0x1];
+ u8 shampo_header_split_data_merge[0x1];
+ u8 reserved_at_22[0x1];
+ u8 shampo_log_max_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
@@ -3508,6 +3818,8 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
@@ -3519,6 +3831,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
+ struct mlx5_ifc_psp_cap_bits psp_cap;
u8 reserved_at_0[0x8000];
};
@@ -3548,6 +3861,7 @@ enum {
enum {
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP = 0x2,
};
struct mlx5_ifc_vlan_bits {
@@ -3624,7 +3938,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3712,7 +4026,13 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 handled_pkt_steering_fail[0x40];
- u8 reserved_at_360[0xc80];
+ u8 bar_uar_access[0x20];
+
+ u8 odp_local_triggered_page_fault[0x20];
+
+ u8 odp_remote_triggered_page_fault[0x20];
+
+ u8 reserved_at_3c0[0xc20];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3865,7 +4185,8 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0xb];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
@@ -3905,20 +4226,65 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT = 0x5,
};
enum {
- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ ELEMENT_TYPE_CAP_MASK_RATE_LIMIT = 1 << 5,
+};
+
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB = 0x3,
+};
+
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+ TSAR_TYPE_CAP_MASK_TC_ARB = 1 << 3,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0x4];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+union mlx5_ifc_element_attributes_bits {
+ struct mlx5_ifc_tsar_element_bits tsar;
+ struct mlx5_ifc_vport_element_bits vport;
+ struct mlx5_ifc_vport_tc_element_bits vport_tc;
+ u8 reserved_at_0[0x20];
};
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
- u8 element_attributes[0x20];
+ union mlx5_ifc_element_attributes_bits element_attributes;
u8 parent_element_id[0x20];
@@ -3928,7 +4294,9 @@ struct mlx5_ifc_scheduling_context_bits {
u8 max_average_bw[0x20];
- u8 reserved_at_e0[0x120];
+ u8 max_bw_obj_id[0x20];
+
+ u8 reserved_at_100[0x100];
};
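With element_attributes now a typed union rather than an opaque dword, callers can address TSAR/vport attributes by name. A minimal sketch of programming a DWRR TSAR with the existing MLX5_SET/MLX5_ADDR_OF accessors (buffer handling elided; a sketch only, not part of the patch):

	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	void *attr;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
	attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
	MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);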
struct mlx5_ifc_rqtc_bits {
@@ -4100,6 +4468,11 @@ enum {
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
+};
+
+enum {
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
};
struct mlx5_ifc_mkc_bits {
@@ -4142,16 +4515,22 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
u8 reserved_at_1c0[0x19];
u8 relaxed_ordering_read[0x1];
- u8 reserved_at_1d9[0x1];
- u8 log_page_size[0x5];
+ u8 log_page_size[0x6];
- u8 reserved_at_1e0[0x20];
+ u8 reserved_at_1e0[0x5];
+ u8 pcie_tph_en[0x1];
+ u8 pcie_tph_ph[0x2];
+ u8 pcie_tph_steering_tag_index[0x8];
+ u8 reserved_at_1f0[0x10];
};
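The new pcie_tph_* fields let an MKEY carry PCIe TLP Processing Hints. A hedged sketch of populating them in the mkey context (mkc obtained via MLX5_ADDR_OF as in existing create_mkey paths; the hint value is illustrative):

	MLX5_SET(mkc, mkc, pcie_tph_en, 1);
	MLX5_SET(mkc, mkc, pcie_tph_ph, 2); /* illustrative hint value */
	MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
		 MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX);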
struct mlx5_ifc_pkey_bits {
@@ -4172,7 +4551,8 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
- u8 reserved_at_104[0xc];
+ u8 reserved_at_104[0x4];
+ u8 num_port_plane[0x8];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
@@ -4312,7 +4692,8 @@ struct mlx5_ifc_dctc_bits {
u8 state[0x4];
u8 reserved_at_8[0x18];
- u8 reserved_at_20[0x8];
+ u8 reserved_at_20[0x7];
+ u8 dp_ordering_force[0x1];
u8 user_index[0x18];
u8 reserved_at_40[0x8];
@@ -4327,7 +4708,9 @@ struct mlx5_ifc_dctc_bits {
u8 latency_sensitive[0x1];
u8 rlky[0x1];
u8 free_ar[0x1];
- u8 reserved_at_73[0xd];
+ u8 reserved_at_73[0x1];
+ u8 dp_ordering_1[0x1];
+ u8 reserved_at_75[0xb];
u8 reserved_at_80[0x8];
u8 cs_res[0x8];
@@ -4531,6 +4914,11 @@ union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
u8 reserved_at_0[0x20];
};
+struct mlx5_ifc_rs_histogram_cntrs_bits {
+ u8 hist[16][0x40];
+ u8 reserved_at_400[0x2c0];
+};
+
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
@@ -4541,8 +4929,11 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
+ struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs;
+ struct mlx5_ifc_rs_histogram_cntrs_bits rs_histogram_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -4602,29 +4993,6 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
-struct mlx5_ifc_vport_tc_element_bits {
- u8 traffic_class[0x4];
- u8 reserved_at_4[0xc];
- u8 vport_number[0x10];
-};
-
-struct mlx5_ifc_vport_element_bits {
- u8 reserved_at_0[0x10];
- u8 vport_number[0x10];
-};
-
-enum {
- TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
- TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
- TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
-};
-
-struct mlx5_ifc_tsar_element_bits {
- u8 reserved_at_0[0x8];
- u8 tsar_type[0x8];
- u8 reserved_at_10[0x10];
-};
-
enum {
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
@@ -4848,7 +5216,9 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 other_function[0x1];
u8 ec_vf_function[0x1];
- u8 reserved_at_42[0xe];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -4881,13 +5251,15 @@ struct mlx5_ifc_set_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -4905,6 +5277,16 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5068,6 +5450,36 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
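A sketch of filling the new input layout to query a data-direct VUID; the opcode constant name is an assumption here, and the output buffer must leave room for num_of_entries trailing 128-byte vuid entries:

	u32 in[MLX5_ST_SZ_DW(query_vuid_in)] = {};

	MLX5_SET(query_vuid_in, in, opcode, MLX5_CMD_OP_QUERY_VUID); /* assumed name */
	MLX5_SET(query_vuid_in, in, data_direct, 1);
	MLX5_SET(query_vuid_in, in, vhca_id, vhca_id); /* vhca_id: caller-supplied */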
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
@@ -5629,7 +6041,11 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0xa0];
+ u8 reserved_at_320[0x60];
+
+ u8 req_rnr_retries_exceeded[0x20];
+
+ u8 reserved_at_3a0[0x20];
u8 resp_local_length_error[0x20];
@@ -6002,7 +6418,9 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 other_function[0x1];
u8 ec_vf_function[0x1];
- u8 reserved_at_42[0xe];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -6060,6 +6478,20 @@ struct mlx5_ifc_modify_other_hca_cap_in_bits {
struct mlx5_ifc_other_hca_cap_bits other_capability;
};
+struct mlx5_ifc_sw_owner_icm_root_params_bits {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+};
+
+struct mlx5_ifc_rtc_params_bits {
+ u8 rtc_id_0[0x20];
+
+ u8 rtc_id_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_flow_table_context_bits {
u8 reformat_en[0x1];
u8 decap_en[0x1];
@@ -6067,7 +6499,8 @@ struct mlx5_ifc_flow_table_context_bits {
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_10[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
@@ -6078,10 +6511,10 @@ struct mlx5_ifc_flow_table_context_bits {
u8 reserved_at_60[0x60];
- u8 sw_owner_icm_root_1[0x40];
-
- u8 sw_owner_icm_root_0[0x40];
-
+ union {
+ struct mlx5_ifc_sw_owner_icm_root_params_bits sws;
+ struct mlx5_ifc_rtc_params_bits hws;
+ };
};
struct mlx5_ifc_query_flow_table_out_bits {
@@ -6613,6 +7046,28 @@ struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_destroy_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vport_num[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6729,6 +7184,7 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
enum {
MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START = 0x2,
MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
};
@@ -6747,6 +7203,8 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
+ MLX5_REFORMAT_TYPE_ADD_PSP_TUNNEL = 0xd,
+ MLX5_REFORMAT_TYPE_DEL_PSP_TUNNEL = 0xe,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -6873,6 +7331,7 @@ enum {
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
+ MLX5_ACTION_IN_FIELD_PSP_SYNDROME = 0x71,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7113,6 +7572,85 @@ struct mlx5_ifc_query_adapter_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_function_vhca_rid_info_reg_bits {
+ u8 host_number[0x8];
+ u8 host_pci_device_function[0x8];
+ u8 host_pci_bus[0x8];
+ u8 reserved_at_18[0x3];
+ u8 pci_bus_assigned[0x1];
+ u8 function_type[0x4];
+
+ u8 parent_pci_device_function[0x8];
+ u8 parent_pci_bus[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_delegated_function_vhca_rid_info_bits {
+ struct mlx5_ifc_function_vhca_rid_info_reg_bits function_vhca_rid_info;
+
+ u8 reserved_at_80[0x18];
+ u8 manage_profile[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_query_delegated_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 functions_count[0x10];
+
+ u8 reserved_at_80[0x80];
+
+ struct mlx5_ifc_delegated_function_vhca_rid_info_bits
+ delegated_function_vhca_rid_info[];
+};
+
+struct mlx5_ifc_query_delegated_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 vport_num[0x10];
+};
+
+struct mlx5_ifc_create_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 managed_vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_qp_2rst_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7157,6 +7695,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7173,13 +7735,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
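Since page_fault_info is now a union, resume paths pick the member matching the fault scheme; dotted field paths work with MLX5_SET because offsets are computed from the struct layout. A sketch for the transport scheme (type, wqn and token are caller-supplied):

	MLX5_SET(page_fault_resume_in, in,
		 page_fault_info.trans_page_fault_info.page_fault_type, type);
	MLX5_SET(page_fault_resume_in, in,
		 page_fault_info.trans_page_fault_info.wq_number, wqn);
	MLX5_SET(page_fault_resume_in, in,
		 page_fault_info.trans_page_fault_info.fault_token, token);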
struct mlx5_ifc_nop_out_bits {
@@ -7221,7 +7778,12 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x10];
+ u8 ingress_connect[0x1];
+ u8 egress_connect[0x1];
+ u8 ingress_connect_valid[0x1];
+ u8 egress_connect_valid[0x1];
+ u8 reserved_at_74[0x4];
u8 admin_state[0x4];
u8 reserved_at_7c[0x4];
};
@@ -7688,7 +8250,7 @@ struct mlx5_ifc_mad_ifc_in_bits {
u8 op_mod[0x10];
u8 remote_lid[0x10];
- u8 reserved_at_50[0x8];
+ u8 plane_index[0x8];
u8 port[0x8];
u8 reserved_at_60[0x20];
@@ -8249,13 +8811,15 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8280,13 +8844,15 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8425,13 +8991,15 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8863,7 +9431,9 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x60];
+ u8 wq_umem_offset[0x40];
+
+ u8 wq_umem_id[0x20];
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
@@ -8929,7 +9499,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 pg_access[0x1];
u8 mkey_umem_valid[0x1];
- u8 reserved_at_62[0x1e];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -8972,13 +9543,15 @@ struct mlx5_ifc_create_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x20];
@@ -9017,7 +9590,8 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
@@ -9025,7 +9599,7 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 table_type[0x8];
u8 reserved_at_88[0x4];
u8 group_type[0x4];
- u8 reserved_at_90[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -9611,13 +10185,19 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
+enum {
+ MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
+};
+
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
u8 an_disable_cap[0x1];
u8 reserved_at_3[0x5];
u8 local_port[0x8];
- u8 reserved_at_10[0xd];
+ u8 reserved_at_10[0x8];
+ u8 plane_ind[0x4];
+ u8 reserved_at_1c[0x1];
u8 proto_mask[0x3];
u8 an_status[0x4];
@@ -9645,7 +10225,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x1c];
+ u8 reserved_at_160[0x8];
+ u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -9832,7 +10413,21 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_200g_2x[0x10];
u8 fec_override_admin_100g_1x[0x10];
- u8 reserved_at_260[0x20];
+ u8 reserved_at_260[0x60];
+
+ u8 fec_override_cap_1600g_8x[0x10];
+ u8 fec_override_cap_800g_4x[0x10];
+
+ u8 fec_override_cap_400g_2x[0x10];
+ u8 fec_override_cap_200g_1x[0x10];
+
+ u8 fec_override_admin_1600g_8x[0x10];
+ u8 fec_override_admin_800g_4x[0x10];
+
+ u8 fec_override_admin_400g_2x[0x10];
+ u8 fec_override_admin_200g_1x[0x10];
+
+ u8 reserved_at_340[0x80];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -9843,8 +10438,10 @@ struct mlx5_ifc_ppcnt_reg_bits {
u8 grp[0x6];
u8 clr[0x1];
- u8 reserved_at_21[0x1c];
- u8 prio_tc[0x3];
+ u8 reserved_at_21[0x13];
+ u8 plane_ind[0x4];
+ u8 reserved_at_38[0x3];
+ u8 prio_tc[0x5];
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
@@ -10073,10 +10670,19 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
+enum {
+ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
+ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
+ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
+};
+
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x4];
+ u8 buf_ownership[0x2];
+ u8 reserved_at_6[0x2];
u8 local_port[0x8];
- u8 reserved_at_10[0xb];
+ u8 reserved_at_10[0xa];
+ u8 cable_length_mask[0x1];
u8 ppan_mask_n[0x1];
u8 minor_stall_mask[0x1];
u8 critical_stall_mask[0x1];
@@ -10105,7 +10711,10 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 device_stall_minor_watermark[0x10];
u8 device_stall_critical_watermark[0x10];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x18];
+ u8 cable_length[0x8];
+
+ u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -10204,9 +10813,17 @@ struct mlx5_ifc_mtutc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x48];
+ u8 reserved_at_0[0x10];
+ u8 ppcnt_recovery_counters[0x1];
+ u8 reserved_at_11[0x7];
+ u8 cable_length[0x1];
+ u8 reserved_at_19[0x4];
+ u8 fec_200G_per_lane_in_pplm[0x1];
+ u8 reserved_at_1e[0x2a];
u8 fec_100G_per_lane_in_pplm[0x1];
- u8 reserved_at_49[0x1f];
+ u8 reserved_at_49[0xa];
+ u8 buffer_ownership[0x1];
+ u8 reserved_at_54[0x14];
(Note: the renamed line above fixes the "resereved" spelling while keeping the same offset and width.)
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
@@ -10227,7 +10844,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 port_access_reg_cap_mask_63[0x1];
+ u8 pphcr[0x1];
+ u8 port_access_reg_cap_mask_61_to_36[0x1a];
u8 pplm[0x1];
u8 port_access_reg_cap_mask_34_to_32[0x3];
@@ -10308,9 +10927,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mfrl[0x1];
u8 regs_39_to_32[0x8];
- u8 regs_31_to_10[0x16];
+ u8 regs_31_to_11[0x15];
u8 mtmp[0x1];
- u8 regs_8_to_0[0x9];
+ u8 regs_9_to_0[0xa];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
@@ -10337,6 +10956,19 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits3 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_3[0x1d];
+ u8 mrtcq[0x1];
+ u8 mtctr[0x1];
+ u8 mtptm[0x1];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -10349,6 +10981,7 @@ struct mlx5_ifc_mcam_reg_bits {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
+ struct mlx5_ifc_mcam_access_reg_bits3 access_regs3;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -10728,6 +11361,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR = 0x13,
};
struct mlx5_ifc_initial_seg_bits {
@@ -10971,6 +11605,11 @@ struct mlx5_ifc_mcda_reg_bits {
};
enum {
+ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
+ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
+};
+
+enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
@@ -10997,7 +11636,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
- u8 reserved_at_28[0x4];
+ u8 pci_reset_req_method[0x3];
+ u8 reserved_at_2b[0x1];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
@@ -11102,6 +11742,56 @@ struct mlx5_ifc_mtmp_reg_bits {
u8 sensor_name_lo[0x20];
};
+struct mlx5_ifc_mtptm_reg_bits {
+ u8 reserved_at_0[0x10];
+ u8 psta[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
+};
+
+enum {
+ MLX5_MTCTR_REQUEST_NOP = 0x0,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK = 0x1,
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER = 0x2,
+ MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK = 0x3,
+};
+
+struct mlx5_ifc_mtctr_reg_bits {
+ u8 first_clock_timestamp_request[0x8];
+ u8 second_clock_timestamp_request[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 first_clock_valid[0x1];
+ u8 second_clock_valid[0x1];
+ u8 reserved_at_22[0x1e];
+
+ u8 first_clock_timestamp[0x40];
+ u8 second_clock_timestamp[0x40];
+};
+
+struct mlx5_ifc_bin_range_layout_bits {
+ u8 reserved_at_0[0xa];
+ u8 high_val[0x6];
+ u8 reserved_at_10[0xa];
+ u8 low_val[0x6];
+};
+
+struct mlx5_ifc_pphcr_reg_bits {
+ u8 active_hist_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x8];
+ u8 num_of_bins[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_bin_range_layout_bits bin_range[16];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -11166,6 +11856,9 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
+ struct mlx5_ifc_mtptm_reg_bits mtptm_reg;
+ struct mlx5_ifc_mtctr_reg_bits mtctr_reg;
+ struct mlx5_ifc_pphcr_reg_bits pphcr_reg;
u8 reserved_at_0[0x60e0];
};
@@ -11196,10 +11889,12 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 table_type[0x8];
u8 reserved_at_88[0x7];
@@ -11239,14 +11934,16 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -11938,7 +12635,9 @@ struct mlx5_ifc_mtrc_ctrl_bits {
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
- u8 reserved_at_8[0x7];
+ u8 reserved_at_8[0x5];
+ u8 host_pf_not_exist[0x1];
+ u8 reserved_at_14[0x1];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];
@@ -12060,23 +12759,36 @@ struct mlx5_ifc_affiliated_event_header_bits {
};
enum {
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24),
-};
-
-enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58,
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO),
+};
+
+enum {
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40),
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40),
+};
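general_obj_types is a bitmap indexed by object-type value, which is why the masks are now derived with BIT_ULL(type); types at 0x40 and above spill into the HCA_CAP_2 bitmap, hence the "- 0x40" rebasing. The existing check pattern for the first bitmap, as a sketch:

	if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER)
		/* sampler objects supported */;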
+
+enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
};
@@ -12452,6 +13164,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP = 0x6,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -12640,6 +13353,44 @@ struct mlx5_ifc_load_vhca_state_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_adv_rdma_cap_bits {
+ u8 rdma_transport_manager[0x1];
+ u8 rdma_transport_manager_other_eswitch[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 rcx_type[0x8];
+ u8 reserved_at_28[0x2];
+ u8 ps_entry_log_max_value[0x6];
+ u8 reserved_at_30[0x6];
+ u8 qp_max_ps_num_entry[0xa];
+
+ u8 mp_max_num_queues[0x8];
+ u8 ps_user_context_max_log_size[0x8];
+ u8 message_based_qp_and_striding_wq[0x8];
+ u8 reserved_at_58[0x8];
+
+ u8 max_receive_send_message_size_stride[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 max_receive_send_message_size_byte[0x20];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_rx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_tx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_bitmask_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_bitmask_support_2;
+
+ u8 reserved_at_800[0x3800];
+};
+
struct mlx5_ifc_adv_virtualization_cap_bits {
u8 reserved_at_0[0x3];
u8 pg_track_log_max_num[0x5];
@@ -12786,4 +13537,109 @@ struct mlx5_ifc_msees_reg_bits {
u8 reserved_at_80[0x180];
};
+struct mlx5_ifc_mrtcq_reg_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 rt_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_pcie_cong_event_obj_bits {
+ u8 modify_select_field[0x40];
+
+ u8 inbound_event_en[0x1];
+ u8 outbound_event_en[0x1];
+ u8 reserved_at_42[0x1e];
+
+ u8 reserved_at_60[0x1];
+ u8 inbound_cong_state[0x3];
+ u8 reserved_at_64[0x1];
+ u8 outbound_cong_state[0x3];
+ u8 reserved_at_68[0x18];
+
+ u8 inbound_cong_low_threshold[0x10];
+ u8 inbound_cong_high_threshold[0x10];
+
+ u8 outbound_cong_low_threshold[0x10];
+ u8 outbound_cong_high_threshold[0x10];
+
+ u8 reserved_at_e0[0x340];
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+enum mlx5e_pcie_cong_event_mod_field {
+ MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0),
+ MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
+};
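modify_select_field is a bitmask naming which object fields a modify touches. A sketch of enabling both event directions on an existing object (the general-object command header setup is elided):

	u32 in[MLX5_ST_SZ_DW(pcie_cong_event_cmd_in)] = {};

	MLX5_SET64(pcie_cong_event_cmd_in, in, cong_obj.modify_select_field,
		   MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN);
	MLX5_SET(pcie_cong_event_cmd_in, in, cong_obj.inbound_event_en, 1);
	MLX5_SET(pcie_cong_event_cmd_in, in, cong_obj.outbound_event_en, 1);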
+
+struct mlx5_ifc_psp_rotate_key_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_psp_rotate_key_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum mlx5_psp_gen_spi_in_key_size {
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128 = 0x0,
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256 = 0x1,
+};
+
+struct mlx5_ifc_key_spi_bits {
+ u8 spi[0x20];
+
+ u8 reserved_at_20[0x60];
+
+ u8 key[8][0x20];
+};
+
+struct mlx5_ifc_psp_gen_spi_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 key_size[0x2];
+ u8 reserved_at_62[0xe];
+ u8 num_of_spi[0x10];
+};
+
+struct mlx5_ifc_psp_gen_spi_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_spi[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_key_spi_bits key_spi[];
+};
+
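A sketch of requesting a batch of PSP SPIs; the opcode constant name is an assumption, and the output buffer must be sized for num_of_spi trailing key_spi entries:

	u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};

	MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI); /* assumed name */
	MLX5_SET(psp_gen_spi_in, in, key_size, MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256);
	MLX5_SET(psp_gen_spi_in, in, num_of_spi, 4);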
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 40371c916cf9..58dfa2ee7c83 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -148,7 +148,9 @@ enum {
MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION = (u64)1 << 10,
MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES = (u64)1 << 12,
MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
};
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 26092c78a985..1df9d9a57bbc 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -61,15 +61,6 @@ enum mlx5_an_status {
#define MLX5_EEPROM_PAGE_LENGTH 256
#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
-struct mlx5_module_eeprom_query_params {
- u16 size;
- u16 offset;
- u16 i2c_address;
- u32 page;
- u32 bank;
- u32 module_number;
-};
-
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
MLX5E_1000BASE_KX = 1,
@@ -115,9 +106,13 @@ enum mlx5e_ext_link_mode {
MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
+ MLX5E_200GAUI_1_200GBASE_CR1_KR1 = 14,
MLX5E_400GAUI_8_400GBASE_CR8 = 15,
MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
+ MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
+ MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
MLX5E_EXT_LINK_MODES_NUMBER,
};
@@ -142,12 +137,6 @@ enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_12X = 1 << 4,
};
-struct mlx5_port_eth_proto {
- u32 cap;
- u32 admin;
- u32 oper;
-};
-
#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
@@ -155,18 +144,12 @@ struct mlx5_port_eth_proto {
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port);
+ int ptys_size, int proto_mask,
+ u8 local_port, u8 plane_index);
int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
- u16 *proto_oper, u8 local_port);
-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status *status);
-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+ u16 *proto_oper, u8 local_port, u8 plane_index);
+
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
@@ -174,65 +157,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
- u32 *rx_pause, u32 *tx_pause);
-
-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
- u8 *pfc_en_rx);
-
-int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
- u16 stall_critical_watermark,
- u16 stall_minor_watermark);
-int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
- u16 *stall_critical_watermark, u16 *stall_minor_watermark);
-
-int mlx5_max_tc(struct mlx5_core_dev *mdev);
-
-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
- u8 prio, u8 *tc);
-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
-int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
- u8 tc, u8 *tc_group);
-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
- u8 tc, u8 *bw_pct);
-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
-
-int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
-int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
- bool *enabled);
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data);
-int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data);
-
-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
-
-int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
-int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
-int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
-int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
-
-int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
- struct mlx5_port_eth_proto *eproto);
-bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy);
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy);
-int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-
#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f0e55bf3ec8b..d67aedc6ea68 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -149,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
@@ -236,13 +237,11 @@ enum {
};
enum {
- MLX5_ETH_WQE_SVLAN = 1 << 0,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
- MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
};
enum {
@@ -252,9 +251,15 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+/* Metadata bits 0-7 are used by timestamping */
+/* Base shift for metadata bits used by IPsec and MACsec */
+#define MLX5_ETH_WQE_FT_META_SHIFT 8
+
enum {
- MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
- MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK =
+ GENMASK(5, 2) << MLX5_ETH_WQE_FT_META_SHIFT,
};
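Shifting the flags by MLX5_ETH_WQE_FT_META_SHIFT keeps them clear of the low byte used for timestamping; the MACsec fs_id then occupies bits 2..5 above the shift. A sketch of composing the metadata word (FIELD_PREP from <linux/bitfield.h>; fs_id is illustrative):

	u32 meta = MLX5_ETH_WQE_FT_META_MACSEC |
		   FIELD_PREP(MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK, fs_id);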
struct mlx5_wqe_eth_seg {
@@ -274,10 +279,6 @@ struct mlx5_wqe_eth_seg {
DECLARE_FLEX_ARRAY(u8, data);
};
} inline_hdr;
- struct {
- __be16 type;
- __be16 vlan_tci;
- } insert;
__be32 trailer;
};
};
@@ -576,9 +577,12 @@ static inline const char *mlx5_qp_state_str(int state)
static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
{
- return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
- MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
- MLX5_TIMESTAMP_FORMAT_DEFAULT;
+ u8 supported_ts_cap = mlx5_get_roce_state(dev) ?
+ MLX5_CAP_ROCE(dev, qp_ts_format) :
+ MLX5_CAP_GEN(dev, sq_ts_format);
+
+ return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
}
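The helper now consults the RoCE timestamp capability only while RoCE is enabled and otherwise falls back to the general SQ capability, defaulting to free-running when neither advertises format support. Typical use when filling a QPC, as a sketch:

	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));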
#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index c36cc6d82926..f876bfc0669c 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -73,7 +73,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
@@ -135,4 +136,6 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d4..7a1819c20643 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -12,6 +12,7 @@
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
+#include <linux/compiler.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
@@ -31,6 +32,10 @@
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
+#include <linux/cacheinfo.h>
+#include <linux/rcuwait.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
struct mempolicy;
struct anon_vma;
@@ -39,22 +44,10 @@ struct user_struct;
struct pt_regs;
struct folio_batch;
-extern int sysctl_page_lock_unfairness;
-
+void arch_mm_preinit(void);
void mm_core_init(void);
void init_mm_internals(void);
-#ifndef CONFIG_NUMA /* Don't use mapnrs, do it properly */
-extern unsigned long max_mapnr;
-
-static inline void set_max_mapnr(unsigned long limit)
-{
- max_mapnr = limit;
-}
-#else
-static inline void set_max_mapnr(unsigned long limit) { }
-#endif
-
extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
@@ -77,8 +70,15 @@ static inline void totalram_pages_add(long count)
}
extern void * high_memory;
-extern int page_cluster;
-extern const int page_cluster_max;
+
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
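A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12, so the shift is 8): PAGES_TO_MB(512) == 2 and MB_TO_PAGES(2) == 512. In code, using totalram_pages() declared above:

	unsigned long total_mb = PAGES_TO_MB(totalram_pages());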
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
@@ -97,6 +97,16 @@ extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
+#ifndef DIRECT_MAP_PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
+# define DIRECT_MAP_PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define DIRECT_MAP_PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63))
+# endif
+#endif
+
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
#include <asm/page.h>
#include <asm/processor.h>
@@ -200,23 +210,14 @@ extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
-extern int sysctl_overcommit_memory;
-extern int sysctl_overcommit_ratio;
-extern unsigned long sysctl_overcommit_kbytes;
-
-int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
-#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
#else
-#define nth_page(page,n) ((page) + (n))
-#define folio_page_idx(folio, p) ((p) - &(folio)->page)
+static inline bool page_range_contiguous(const struct page *page,
+ unsigned long nr_pages)
+{
+ return true;
+}
#endif
/* to align the pointer to the (next) page boundary */
@@ -228,6 +229,20 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
+/**
+ * folio_page_idx - Return the number of a page in a folio.
+ * @folio: The folio.
+ * @page: The folio page.
+ *
+ * This function expects that the page is actually part of the folio.
+ * The returned number is relative to the start of the folio.
+ */
+static inline unsigned long folio_page_idx(const struct folio *folio,
+ const struct page *page)
+{
+ return page - &folio->page;
+}
+
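A usage sketch (folio is assumed to span at least three pages):

	struct page *p = folio_page(folio, 2);

	VM_WARN_ON(folio_page_idx(folio, p) != 2);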
static inline struct folio *lru_to_folio(struct list_head *head)
{
return list_entry((head)->prev, struct folio, lru);
@@ -248,8 +263,6 @@ void setup_initial_init_mm(void *start_code, void *end_code,
struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);
-/* Use only if VMA has no other users */
-void __vm_area_free(struct vm_area_struct *vma);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -262,148 +275,235 @@ extern unsigned int kobjsize(const void *objp);
* vm_flags in vm_area_struct, see mm_types.h.
* When changing, update also include/trace/events/mmflags.h
*/
-#define VM_NONE 0x00000000
-#define VM_READ 0x00000001 /* currently active flags */
-#define VM_WRITE 0x00000002
-#define VM_EXEC 0x00000004
-#define VM_SHARED 0x00000008
+#define VM_NONE 0x00000000
-/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
-#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
-#define VM_MAYWRITE 0x00000020
-#define VM_MAYEXEC 0x00000040
-#define VM_MAYSHARE 0x00000080
+/**
+ * typedef vma_flag_t - specifies an individual VMA flag by bit number.
+ *
+ * This value is made type safe by sparse to avoid passing invalid flag values
+ * around.
+ */
+typedef int __bitwise vma_flag_t;
-#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define DECLARE_VMA_BIT(name, bitnum) \
+ VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
+#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
+ VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT)
+enum {
+ DECLARE_VMA_BIT(READ, 0),
+ DECLARE_VMA_BIT(WRITE, 1),
+ DECLARE_VMA_BIT(EXEC, 2),
+ DECLARE_VMA_BIT(SHARED, 3),
+ /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
+ DECLARE_VMA_BIT(MAYREAD, 4), /* limits for mprotect() etc. */
+ DECLARE_VMA_BIT(MAYWRITE, 5),
+ DECLARE_VMA_BIT(MAYEXEC, 6),
+ DECLARE_VMA_BIT(MAYSHARE, 7),
+ DECLARE_VMA_BIT(GROWSDOWN, 8), /* general info on the segment */
#ifdef CONFIG_MMU
-#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
-#else /* CONFIG_MMU */
-#define VM_MAYOVERLAY 0x00000200 /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
-#define VM_UFFD_MISSING 0
+ DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
+#else
+ /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
+ DECLARE_VMA_BIT(MAYOVERLAY, 9),
#endif /* CONFIG_MMU */
-#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
-#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
-
-#define VM_LOCKED 0x00002000
-#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
-
- /* Used by sys_madvise() */
-#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
-#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
-
-#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
-#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
-#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
-#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
-#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
-#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
-#define VM_SYNC 0x00800000 /* Synchronous page faults */
-#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
-#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
-
+ /* Page-ranges managed without "struct page", just pure PFN */
+ DECLARE_VMA_BIT(PFNMAP, 10),
+ DECLARE_VMA_BIT(MAYBE_GUARD, 11),
+ DECLARE_VMA_BIT(UFFD_WP, 12), /* wrprotect pages tracking */
+ DECLARE_VMA_BIT(LOCKED, 13),
+ DECLARE_VMA_BIT(IO, 14), /* Memory mapped I/O or similar */
+ DECLARE_VMA_BIT(SEQ_READ, 15), /* App will access data sequentially */
+ DECLARE_VMA_BIT(RAND_READ, 16), /* App will not benefit from clustered reads */
+ DECLARE_VMA_BIT(DONTCOPY, 17), /* Do not copy this vma on fork */
+ DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
+ DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
+ DECLARE_VMA_BIT(ACCOUNT, 20), /* Is a VM accounted object */
+ DECLARE_VMA_BIT(NORESERVE, 21), /* should the VM suppress accounting */
+ DECLARE_VMA_BIT(HUGETLB, 22), /* Huge TLB Page VM */
+ DECLARE_VMA_BIT(SYNC, 23), /* Synchronous page faults */
+ DECLARE_VMA_BIT(ARCH_1, 24), /* Architecture-specific flag */
+ DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
+ DECLARE_VMA_BIT(DONTDUMP, 26), /* Do not include in the core dump */
+ DECLARE_VMA_BIT(SOFTDIRTY, 27), /* NOT soft dirty clean area */
+ DECLARE_VMA_BIT(MIXEDMAP, 28), /* Can contain struct page and pure PFN pages */
+ DECLARE_VMA_BIT(HUGEPAGE, 29), /* MADV_HUGEPAGE marked this vma */
+ DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
+ DECLARE_VMA_BIT(MERGEABLE, 31), /* KSM may merge identical pages */
+ /* These bits are reused; specific uses are defined below. */
+ DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
+ DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
+ DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
+ DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
+ DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
+ DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
+ DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
+ /*
+ * This flag is used to connect VFIO to arch-specific KVM code. It
+ * indicates that the memory under this VMA is safe for use with any
+ * non-cacheable memory type inside KVM. Some VFIO devices, on some
+ * platforms, are thought to be unsafe and can cause machine crashes
+ * if KVM does not lock down the memory type.
+ */
+ DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
+#ifdef CONFIG_PPC32
+ DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
+#else
+ DECLARE_VMA_BIT(DROPPABLE, 40),
+#endif
+ DECLARE_VMA_BIT(UFFD_MINOR, 41),
+ DECLARE_VMA_BIT(SEALED, 42),
+ /* Flags that reuse flags above. */
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
+#if defined(CONFIG_X86_USER_SHADOW_STACK)
+ /*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because core mm
+ * lacks support for it.
+ *
+ * These VMAs will get a single end guard page. This helps userspace
+ * protect itself from attacks. A single page is enough for current
+ * shadow stack archs (x86). See the comments near alloc_shstk() in
+ * arch/x86/kernel/shstk.c for more details on the guard size.
+ */
+ DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
+#elif defined(CONFIG_ARM64_GCS)
+ /*
+ * arm64's Guarded Control Stack implements similar functionality and
+ * has similar constraints to shadow stacks.
+ */
+ DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
+#endif
+ DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1), /* Strong Access Ordering (powerpc) */
+ DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1), /* parisc */
+ DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1), /* sparc64 */
+ DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1), /* arm64 */
+ DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1), /* sparc64, arm64 */
+ DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1), /* !CONFIG_MMU */
+ DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4), /* arm64 */
+ DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
+#ifdef CONFIG_STACK_GROWSUP
+ DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
+ DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
+#else
+ DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
+#endif
+};
+#undef DECLARE_VMA_BIT
+#undef DECLARE_VMA_BIT_ALIAS
+
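/*
 * [Editor's illustrative sketch, not part of the patch.] After
 * preprocessing, the declarations above expand to sparse-typed bit
 * numbers, e.g.:
 *
 *	enum {
 *		VMA_READ_BIT  = ((__force vma_flag_t)0),
 *		VMA_WRITE_BIT = ((__force vma_flag_t)1),
 *		...
 *	};
 *
 * The INIT_VM_FLAG() helper defined just below converts a bit number
 * back into a mask, so VM_WRITE == BIT(1) == 0x00000002, matching the
 * old literal definitions this patch removes.
 */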
+#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
+#define VM_READ INIT_VM_FLAG(READ)
+#define VM_WRITE INIT_VM_FLAG(WRITE)
+#define VM_EXEC INIT_VM_FLAG(EXEC)
+#define VM_SHARED INIT_VM_FLAG(SHARED)
+#define VM_MAYREAD INIT_VM_FLAG(MAYREAD)
+#define VM_MAYWRITE INIT_VM_FLAG(MAYWRITE)
+#define VM_MAYEXEC INIT_VM_FLAG(MAYEXEC)
+#define VM_MAYSHARE INIT_VM_FLAG(MAYSHARE)
+#define VM_GROWSDOWN INIT_VM_FLAG(GROWSDOWN)
+#ifdef CONFIG_MMU
+#define VM_UFFD_MISSING INIT_VM_FLAG(UFFD_MISSING)
+#else
+#define VM_UFFD_MISSING VM_NONE
+#define VM_MAYOVERLAY INIT_VM_FLAG(MAYOVERLAY)
+#endif
+#define VM_PFNMAP INIT_VM_FLAG(PFNMAP)
+#define VM_MAYBE_GUARD INIT_VM_FLAG(MAYBE_GUARD)
+#define VM_UFFD_WP INIT_VM_FLAG(UFFD_WP)
+#define VM_LOCKED INIT_VM_FLAG(LOCKED)
+#define VM_IO INIT_VM_FLAG(IO)
+#define VM_SEQ_READ INIT_VM_FLAG(SEQ_READ)
+#define VM_RAND_READ INIT_VM_FLAG(RAND_READ)
+#define VM_DONTCOPY INIT_VM_FLAG(DONTCOPY)
+#define VM_DONTEXPAND INIT_VM_FLAG(DONTEXPAND)
+#define VM_LOCKONFAULT INIT_VM_FLAG(LOCKONFAULT)
+#define VM_ACCOUNT INIT_VM_FLAG(ACCOUNT)
+#define VM_NORESERVE INIT_VM_FLAG(NORESERVE)
+#define VM_HUGETLB INIT_VM_FLAG(HUGETLB)
+#define VM_SYNC INIT_VM_FLAG(SYNC)
+#define VM_ARCH_1 INIT_VM_FLAG(ARCH_1)
+#define VM_WIPEONFORK INIT_VM_FLAG(WIPEONFORK)
+#define VM_DONTDUMP INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
-# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
+#define VM_SOFTDIRTY INIT_VM_FLAG(SOFTDIRTY)
#else
-# define VM_SOFTDIRTY 0
+#define VM_SOFTDIRTY VM_NONE
#endif
-
-#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
-#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
-#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
-#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
-
-#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
-#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
-#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
-#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
-#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
-#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
-#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
-#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
-
-#ifdef CONFIG_ARCH_HAS_PKEYS
-# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
-# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
-#ifdef CONFIG_PPC
-# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
+#define VM_MIXEDMAP INIT_VM_FLAG(MIXEDMAP)
+#define VM_HUGEPAGE INIT_VM_FLAG(HUGEPAGE)
+#define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE)
+#define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE)
+#define VM_STACK INIT_VM_FLAG(STACK)
+#ifdef CONFIG_STACK_GROWSUP
+#define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY)
#else
-# define VM_PKEY_BIT4 0
+#define VM_STACK_EARLY VM_NONE
#endif
+#ifdef CONFIG_ARCH_HAS_PKEYS
+#define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
+/* Despite the naming, these are flag masks, not bit numbers. */
+#define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
+#define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
+#define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
+#if CONFIG_ARCH_PKEY_BITS > 3
+#define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
+#else
+#define VM_PKEY_BIT3 VM_NONE
+#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
+#if CONFIG_ARCH_PKEY_BITS > 4
+#define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
+#else
+#define VM_PKEY_BIT4 VM_NONE
+#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
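/*
 * [Editor's sketch; assumes a 64-bit arch with x86-style 4-bit keys,
 * mirroring the arch vma_pkey() helpers. Not part of the patch.]
 */
static inline int example_vma_pkey(vm_flags_t vm_flags)
{
	vm_flags_t mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
			  VM_PKEY_BIT2 | VM_PKEY_BIT3;

	/* Shift the masked flag bits down to recover the integer key. */
	return (int)((vm_flags & mask) >> VM_PKEY_SHIFT);
}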
-
-#ifdef CONFIG_X86_USER_SHADOW_STACK
-/*
- * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
- * support core mm.
- *
- * These VMAs will get a single end guard page. This helps userspace protect
- * itself from attacks. A single page is enough for current shadow stack archs
- * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
- * for more details on the guard size.
- */
-# define VM_SHADOW_STACK VM_HIGH_ARCH_5
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
+#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
#else
-# define VM_SHADOW_STACK VM_NONE
+#define VM_SHADOW_STACK VM_NONE
#endif
-
-#if defined(CONFIG_X86)
-# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#elif defined(CONFIG_PPC)
-# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
+#if defined(CONFIG_PPC64)
+#define VM_SAO INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
-# define VM_GROWSUP VM_ARCH_1
+#define VM_GROWSUP INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
-# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
-# define VM_ARCH_CLEAR VM_SPARC_ADI
+#define VM_SPARC_ADI INIT_VM_FLAG(SPARC_ADI)
+#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
-# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
-# define VM_ARCH_CLEAR VM_ARM64_BTI
+#define VM_ARM64_BTI INIT_VM_FLAG(ARM64_BTI)
+#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
-# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
-#endif
-
-#if defined(CONFIG_ARM64_MTE)
-# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
-# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
-#else
-# define VM_MTE VM_NONE
-# define VM_MTE_ALLOWED VM_NONE
+#define VM_MAPPED_COPY INIT_VM_FLAG(MAPPED_COPY)
#endif
-
#ifndef VM_GROWSUP
-# define VM_GROWSUP VM_NONE
+#define VM_GROWSUP VM_NONE
+#endif
+#ifdef CONFIG_ARM64_MTE
+#define VM_MTE INIT_VM_FLAG(MTE)
+#define VM_MTE_ALLOWED INIT_VM_FLAG(MTE_ALLOWED)
+#else
+#define VM_MTE VM_NONE
+#define VM_MTE_ALLOWED VM_NONE
#endif
-
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 38
-# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
-#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
-# define VM_UFFD_MINOR VM_NONE
-#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
-
-/*
- * This flag is used to connect VFIO to arch specific KVM code. It
- * indicates that the memory under this VMA is safe for use with any
- * non-cachable memory type inside KVM. Some VFIO devices, on some
- * platforms, are thought to be unsafe and can cause machine crashes
- * if KVM does not lock down the memory type.
- */
+#define VM_UFFD_MINOR INIT_VM_FLAG(UFFD_MINOR)
+#else
+#define VM_UFFD_MINOR VM_NONE
+#endif
#ifdef CONFIG_64BIT
-#define VM_ALLOW_ANY_UNCACHED_BIT 39
-#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT)
+#define VM_ALLOW_ANY_UNCACHED INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
+#define VM_SEALED INIT_VM_FLAG(SEALED)
#else
-#define VM_ALLOW_ANY_UNCACHED VM_NONE
+#define VM_ALLOW_ANY_UNCACHED VM_NONE
+#define VM_SEALED VM_NONE
+#endif
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
+#define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE)
+#else
+#define VM_DROPPABLE VM_NONE
#endif
/* Bits set in the VMA until the stack is in its final location */
@@ -429,12 +529,10 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
-#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK VM_GROWSUP
-#define VM_STACK_EARLY VM_GROWSDOWN
+#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
+#define VM_SEALED_SYSMAP VM_SEALED
#else
-#define VM_STACK VM_GROWSDOWN
-#define VM_STACK_EARLY 0
+#define VM_SEALED_SYSMAP VM_NONE
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -442,12 +540,26 @@ extern unsigned int kobjsize(const void *objp);
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
-
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it:
+ * VM_IO tells people not to look at these pages
+ * (accesses can have side effects).
+ * VM_PFNMAP tells the core MM that the base pages are just
+ * raw PFN mappings, and do not have a "struct page" associated
+ * with them.
+ * VM_DONTEXPAND
+ * Disable vma merging and expanding with mremap().
+ * VM_DONTDUMP
+ * Omit vma from core dump, even when VM_IO is turned off.
+ */
+#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
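/*
 * [Editor's sketch; hypothetical driver code, not part of the patch.]
 * A PFN-remapping path would typically mark the VMA with all four flags
 * before installing raw PFN mappings:
 *
 *	vm_flags_set(vma, VM_REMAP_FLAGS);
 *	err = remap_pfn_range(vma, vma->vm_start, pfn,
 *			      vma->vm_end - vma->vm_start,
 *			      vma->vm_page_prot);
 */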
/* This mask prevents VMA from being scanned with khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
@@ -457,13 +569,69 @@ extern unsigned int kobjsize(const void *objp);
/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+/* These flags can be updated atomically via VMA/mmap read lock. */
+#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
+
/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
-# define VM_ARCH_CLEAR VM_NONE
+#define VM_ARCH_CLEAR VM_NONE
#endif
#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
/*
+ * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
+ * possesses it but the other does not, the merged VMA should nonetheless have
+ * applied to it:
+ *
+ * VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is, has not had its
+ * references cleared via /proc/$pid/clear_refs, any merged VMA
+ * should also be considered soft-dirty, as the flag operates at
+ * VMA granularity.
+ *
+ * VM_MAYBE_GUARD - if a VMA may have guard regions in place, its mapped
+ * page tables may contain metadata not described by the VMA
+ * itself; a merged VMA may inherit that metadata, so the
+ * flag must be sticky.
+ */
+#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
+
+/*
+ * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
+ * of these flags and the other not does not preclude a merge.
+ *
+ * VM_STICKY - When merging VMAs, VMA flags must match, unless they are
+ * 'sticky'. If any sticky flags exist in either VMA, we simply
+ * set all of them on the merged VMA.
+ */
+#define VM_IGNORE_MERGE VM_STICKY
+
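/*
 * [Editor's sketch, illustrative only.] The merge behaviour implied by
 * the two masks above: non-sticky flags must match, sticky flags are
 * unioned onto the result.
 */
static inline bool example_flags_mergeable(vm_flags_t a, vm_flags_t b)
{
	/* A mismatch only precludes merging outside VM_IGNORE_MERGE. */
	return (a & ~VM_IGNORE_MERGE) == (b & ~VM_IGNORE_MERGE);
}

static inline vm_flags_t example_merged_flags(vm_flags_t a, vm_flags_t b)
{
	/* The merged VMA inherits any sticky flag set on either side. */
	return a | (b & VM_STICKY);
}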
+/*
+ * Flags which should result in page tables being copied on fork. These are
+ * flags which indicate that the VMA maps page tables which cannot be
+ * reconstituted upon page fault, and so necessitate page table copying
+ * upon fork:
+ *
+ * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
+ * reasonably reconstructed on page fault.
+ *
+ * VM_UFFD_WP - Encodes metadata about an installed uffd
+ * write protect handler, which cannot be
+ * reconstructed on page fault.
+ *
+ * We always copy pgtables when dst_vma has uffd-wp
+ * enabled, even if it is file-backed (e.g. shmem):
+ * with uffd-wp enabled the pgtable carries uffd-wp
+ * protection information that cannot be recovered
+ * from the page cache, so skipping the copy would
+ * lose it.
+ *
+ * VM_MAYBE_GUARD - Could contain page guard region markers which
+ * by design are a property of the page tables
+ * only and thus cannot be reconstructed on page
+ * fault.
+ */
+#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
+
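/*
 * [Editor's sketch; simplified from the fork-path logic, which also
 * considers anon_vma and friends.]
 */
static inline bool example_needs_pgtable_copy(const struct vm_area_struct *src)
{
	/* Any flag in VM_COPY_ON_FORK forces an eager page-table copy. */
	return !!(src->vm_flags & VM_COPY_ON_FORK);
}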
+/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
@@ -634,13 +802,21 @@ struct vm_operations_struct {
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr, pgoff_t *ilx);
#endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
/*
- * Called by vm_normal_page() for special PTEs to find the
- * page for @addr. This is useful if the default behavior
- * (using pte_page()) would not find the correct page.
+ * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+ * allows for returning a "normal" page from vm_normal_page() even
+ * though the PTE indicates that the "struct page" either does not exist
+ * or should not be touched, i.e. is "special".
+ *
+ * Do not add new users: this really only works when a "normal" page
+ * was mapped, but then the PTE got changed to something weird (+
+ * marked special) that would not make pte_pfn() identify the originally
+ * inserted page.
*/
- struct page *(*find_special_page)(struct vm_area_struct *vma,
- unsigned long addr);
+ struct page *(*find_normal_page)(struct vm_area_struct *vma,
+ unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
};
#ifdef CONFIG_NUMA_BALANCING
@@ -657,109 +833,11 @@ static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_PER_VMA_LOCK
/*
- * Try to read-lock a vma. The function is allowed to occasionally yield false
- * locked result to avoid performance overhead, in which case we fall back to
- * using mmap_lock. The function should never yield false unlocked result.
+ * These must be here rather than mmap_lock.h as dependent on vm_fault type,
+ * declared in this header.
*/
-static inline bool vma_start_read(struct vm_area_struct *vma)
-{
- /*
- * Check before locking. A race might cause false locked result.
- * We can use READ_ONCE() for the mm_lock_seq here, and don't need
- * ACQUIRE semantics, because this is just a lockless check whose result
- * we don't rely on for anything - the mm_lock_seq read against which we
- * need ordering is below.
- */
- if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
- return false;
-
- if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
- return false;
-
- /*
- * Overflow might produce false locked result.
- * False unlocked result is impossible because we modify and check
- * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
- * modification invalidates all existing locks.
- *
- * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
- * racing with vma_end_write_all(), we only start reading from the VMA
- * after it has been unlocked.
- * This pairs with RELEASE semantics in vma_end_write_all().
- */
- if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
- up_read(&vma->vm_lock->lock);
- return false;
- }
- return true;
-}
-
-static inline void vma_end_read(struct vm_area_struct *vma)
-{
- rcu_read_lock(); /* keeps vma alive till the end of up_read */
- up_read(&vma->vm_lock->lock);
- rcu_read_unlock();
-}
-
-/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
-{
- mmap_assert_write_locked(vma->vm_mm);
-
- /*
- * current task is holding mmap_write_lock, both vma->vm_lock_seq and
- * mm->mm_lock_seq can't be concurrently modified.
- */
- *mm_lock_seq = vma->vm_mm->mm_lock_seq;
- return (vma->vm_lock_seq == *mm_lock_seq);
-}
-
-/*
- * Begin writing to a VMA.
- * Exclude concurrent readers under the per-VMA lock until the currently
- * write-locked mmap_lock is dropped or downgraded.
- */
-static inline void vma_start_write(struct vm_area_struct *vma)
-{
- int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
- return;
-
- down_write(&vma->vm_lock->lock);
- /*
- * We should use WRITE_ONCE() here because we can have concurrent reads
- * from the early lockless pessimistic check in vma_start_read().
- * We don't really care about the correctness of that early check, but
- * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
- */
- WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
- up_write(&vma->vm_lock->lock);
-}
-
-static inline void vma_assert_write_locked(struct vm_area_struct *vma)
-{
- int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
-}
-
-static inline void vma_assert_locked(struct vm_area_struct *vma)
-{
- if (!rwsem_is_locked(&vma->vm_lock->lock))
- vma_assert_write_locked(vma);
-}
-
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
-{
- /* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
-}
-
+#ifdef CONFIG_PER_VMA_LOCK
static inline void release_fault_lock(struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
@@ -768,72 +846,73 @@ static inline void release_fault_lock(struct vm_fault *vmf)
mmap_read_unlock(vmf->vma->vm_mm);
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
vma_assert_locked(vmf->vma);
else
mmap_assert_locked(vmf->vma->vm_mm);
}
+#else
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
-struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
- unsigned long address);
-
-#else /* CONFIG_PER_VMA_LOCK */
+static inline void assert_fault_locked(const struct vm_fault *vmf)
+{
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+#endif /* CONFIG_PER_VMA_LOCK */
-static inline bool vma_start_read(struct vm_area_struct *vma)
- { return false; }
-static inline void vma_end_read(struct vm_area_struct *vma) {}
-static inline void vma_start_write(struct vm_area_struct *vma) {}
-static inline void vma_assert_write_locked(struct vm_area_struct *vma)
- { mmap_assert_write_locked(vma->vm_mm); }
-static inline void vma_mark_detached(struct vm_area_struct *vma,
- bool detached) {}
+static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
+{
+ return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
-static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
- unsigned long address)
+static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
{
- return NULL;
+ return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
-static inline void vma_assert_locked(struct vm_area_struct *vma)
+static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
{
- mmap_assert_locked(vma->vm_mm);
+ return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
-static inline void release_fault_lock(struct vm_fault *vmf)
+static inline void mm_flags_set(int flag, struct mm_struct *mm)
{
- mmap_read_unlock(vmf->vma->vm_mm);
+ set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void mm_flags_clear(int flag, struct mm_struct *mm)
{
- mmap_assert_locked(vmf->vma->vm_mm);
+ clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
}
-#endif /* CONFIG_PER_VMA_LOCK */
+static inline void mm_flags_clear_all(struct mm_struct *mm)
+{
+ bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
+}
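/*
 * [Editor's usage sketch; mirrors existing MMF_* call sites such as GUP's
 * pinning bookkeeping. mm_saw_first_pin() is a hypothetical helper.]
 *
 *	if (!mm_flags_test_and_set(MMF_HAS_PINNED, mm))
 *		mm_saw_first_pin(mm);
 */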
extern const struct vm_operations_struct vma_dummy_vm_ops;
-/*
- * WARNING: vma_init does not initialize vma->vm_lock.
- * Use vm_area_alloc()/vm_area_free() if vma needs locking.
- */
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
- vma_numab_state_init(vma);
+ vma_lock_init(vma, false);
}
/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
vm_flags_t flags)
{
- ACCESS_PRIVATE(vma, __vm_flags) = flags;
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
+ vma_flags_clear_all(&vma->flags);
+ vma_flags_overwrite_word(&vma->flags, flags);
}
/*
@@ -844,6 +923,7 @@ static inline void vm_flags_init(struct vm_area_struct *vma,
static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_t flags)
{
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
vma_assert_write_locked(vma);
vm_flags_init(vma, flags);
}
@@ -852,21 +932,33 @@ static inline void vm_flags_reset_once(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_assert_write_locked(vma);
- WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
+ /*
+ * If VMA flags exist beyond the first system word, clear those too. It
+ * is assumed that the write-once behaviour is required only for the
+ * first system word.
+ */
+ if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
+ unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
+
+ bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+ }
+
+ vma_flags_overwrite_word_once(&vma->flags, flags);
}
static inline void vm_flags_set(struct vm_area_struct *vma,
vm_flags_t flags)
{
vma_start_write(vma);
- ACCESS_PRIVATE(vma, __vm_flags) |= flags;
+ vma_flags_set_word(&vma->flags, flags);
}
static inline void vm_flags_clear(struct vm_area_struct *vma,
vm_flags_t flags)
{
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
vma_start_write(vma);
- ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
+ vma_flags_clear_word(&vma->flags, flags);
}
/*
@@ -890,6 +982,51 @@ static inline void vm_flags_mod(struct vm_area_struct *vma,
__vm_flags_mod(vma, set, clear);
}
+static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ const vm_flags_t mask = BIT((__force int)bit);
+
+ /* Only specific flags are permitted */
+ if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED)))
+ return false;
+
+ return true;
+}
+
+/*
+ * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
+ * valid flags are allowed to do this.
+ */
+static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
+
+ /* mmap read lock/VMA read lock must be held. */
+ if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
+ vma_assert_locked(vma);
+
+ if (__vma_flag_atomic_valid(vma, bit))
+ set_bit((__force int)bit, bitmap);
+}
+
+/*
+ * Test for VMA flag atomically. Requires no locks. Only specific valid flags
+ * are allowed to do this.
+ *
+ * This is necessarily racy, so callers must ensure that serialisation is
+ * achieved through some other means, or that races are permissible.
+ */
+static inline bool vma_flag_test_atomic(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ if (__vma_flag_atomic_valid(vma, bit))
+ return test_bit((__force int)bit, &vma->vm_flags);
+
+ return false;
+}
+
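/*
 * [Editor's usage sketch, illustrative.] VM_MAYBE_GUARD is currently the
 * only flag in VM_ATOMIC_SET_ALLOWED, so a guard-install path holding
 * only a read lock would latch it with:
 *
 *	vma_flag_set_atomic(vma, VMA_MAYBE_GUARD_BIT);
 *
 * while a lockless reader that tolerates races would check it with:
 *
 *	if (vma_flag_test_atomic(vma, VMA_MAYBE_GUARD_BIT))
 *		...page tables may contain guard markers...
 */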
static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
vma->vm_ops = NULL;
@@ -925,7 +1062,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
vma->vm_end >= vma->vm_mm->start_stack;
}
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -939,7 +1076,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
@@ -950,7 +1087,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
@@ -961,7 +1098,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
(VM_SHARED | VM_MAYWRITE);
}
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
@@ -993,27 +1130,6 @@ static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
return mas_prev(&vmi->mas, 0);
}
-static inline
-struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
-{
- return mas_prev_range(&vmi->mas, 0);
-}
-
-static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
-{
- return vmi->mas.index;
-}
-
-static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
-{
- return vmi->mas.last + 1;
-}
-static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
- unsigned long count)
-{
- return mas_expected_entries(&vmi->mas, count);
-}
-
static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
unsigned long start, unsigned long end, gfp_t gfp)
{
@@ -1040,6 +1156,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -1065,14 +1182,14 @@ static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
-bool vma_is_anon_shmem(struct vm_area_struct *vma);
+bool vma_is_shmem(const struct vm_area_struct *vma);
+bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -1080,6 +1197,25 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+extern void prep_compound_page(struct page *page, unsigned int order);
+
+static inline unsigned int folio_large_order(const struct folio *folio)
+{
+ return folio->_flags_1 & 0xff;
+}
+
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
+{
+ return folio->_nr_pages;
+}
+#else
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
+{
+ return 1L << folio_large_order(folio);
+}
+#endif
+
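/*
 * [Editor's note, illustrative arithmetic.] The order lives in the low
 * byte of _flags_1, so a PMD-sized folio on x86-64 has
 * folio_large_order() == 9 and folio_large_nr_pages() == 1UL << 9 == 512
 * pages, i.e. 2 MiB with 4 KiB base pages.
 */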
/*
* compound_order() can be called without holding a reference, which means
* that niceties like page_folio() don't work. These callers should be
@@ -1087,13 +1223,13 @@ struct inode;
* set before the order is initialised, or this may be a tail page.
* See compaction.c for some good examples.
*/
-static inline unsigned int compound_order(struct page *page)
+static inline unsigned int compound_order(const struct page *page)
{
- struct folio *folio = (struct folio *)page;
+ const struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
return 0;
- return folio->_flags_1 & 0xff;
+ return folio_large_order(folio);
}
/**
@@ -1105,11 +1241,28 @@ static inline unsigned int compound_order(struct page *page)
*
* Return: The order of the folio.
*/
-static inline unsigned int folio_order(struct folio *folio)
+static inline unsigned int folio_order(const struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
- return folio->_flags_1 & 0xff;
+ return folio_large_order(folio);
+}
+
+/**
+ * folio_reset_order - Reset the folio order and derived _nr_pages
+ * @folio: The folio.
+ *
+ * Reset the order and derived _nr_pages to 0. Must only be used in the
+ * process of splitting large folios.
+ */
+static inline void folio_reset_order(struct folio *folio)
+{
+ if (WARN_ON_ONCE(!folio_test_large(folio)))
+ return;
+ folio->_flags_1 &= ~0xffUL;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ folio->_nr_pages = 0;
+#endif
}
#include <linux/huge_mm.h>
@@ -1197,49 +1350,16 @@ static inline int is_vmalloc_or_module_addr(const void *x)
/*
* How many times the entire folio is mapped as a single unit (eg by a
* PMD or PUD entry). This is probably not what you want, except for
- * debugging purposes - it does not include PTE-mapped sub-pages; look
- * at folio_mapcount() or page_mapcount() instead.
+ * debugging purposes or implementation of other core folio_*() primitives.
*/
static inline int folio_entire_mapcount(const struct folio *folio)
{
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
+ return 0;
return atomic_read(&folio->_entire_mapcount) + 1;
}
-/*
- * The atomic page->_mapcount, starts from -1: so that transitions
- * both from it and to it can be tracked, using atomic_inc_and_test
- * and atomic_add_negative(-1).
- */
-static inline void page_mapcount_reset(struct page *page)
-{
- atomic_set(&(page)->_mapcount, -1);
-}
-
-/**
- * page_mapcount() - Number of times this precise page is mapped.
- * @page: The page.
- *
- * The number of times this page is mapped. If this page is part of
- * a large folio, it includes the number of times this page is mapped
- * as part of that folio.
- *
- * Will report 0 for pages which cannot be mapped into userspace, eg
- * slab, page tables and similar.
- */
-static inline int page_mapcount(struct page *page)
-{
- int mapcount = atomic_read(&page->_mapcount) + 1;
-
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
- mapcount = 0;
- if (unlikely(PageCompound(page)))
- mapcount += folio_entire_mapcount(page_folio(page));
-
- return mapcount;
-}
-
static inline int folio_large_mapcount(const struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
@@ -1272,8 +1392,7 @@ static inline int folio_mapcount(const struct folio *folio)
if (likely(!folio_test_large(folio))) {
mapcount = atomic_read(&folio->_mapcount) + 1;
- /* Handle page_has_type() pages */
- if (mapcount < PAGE_MAPCOUNT_RESERVE + 1)
+ if (page_mapcount_is_type(mapcount))
mapcount = 0;
return mapcount;
}
@@ -1317,15 +1436,14 @@ static inline struct folio *virt_to_folio(const void *x)
void __folio_put(struct folio *folio);
-void put_pages_list(struct list_head *pages);
-
void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
+int folio_mc_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
/* Returns the number of bytes in this potentially compound page. */
-static inline unsigned long page_size(struct page *page)
+static inline unsigned long page_size(const struct page *page)
{
return PAGE_SIZE << compound_order(page);
}
@@ -1371,7 +1489,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
@@ -1412,9 +1530,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
* the page's disk buffers. PG_private must be set to tell the VM to call
* into the filesystem to release these pages.
*
- * A page may belong to an inode's memory mapping. In this case, page->mapping
- * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_SIZE.
+ * A folio may belong to an inode's memory mapping. In this case,
+ * folio->mapping points to the inode, and folio->index is the file
+ * offset of the folio, in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
@@ -1438,25 +1556,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
* back into memory.
*/
-#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
-DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-
-bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
-static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
-{
- if (!static_branch_unlikely(&devmap_managed_key))
- return false;
- if (!folio_is_zone_device(folio))
- return false;
- return __put_devmap_managed_folio_refs(folio, refs);
-}
-#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
-{
- return false;
-}
-#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-
/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
((unsigned int) folio_ref_count(folio) + 127u <= 127u)
@@ -1477,7 +1576,12 @@ static inline void folio_get(struct folio *folio)
static inline void get_page(struct page *page)
{
- folio_get(page_folio(page));
+ struct folio *folio = page_folio(page);
+ if (WARN_ON_ONCE(folio_test_slab(folio)))
+ return;
+ if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+ return;
+ folio_get(folio);
}
static inline __must_check bool try_get_page(struct page *page)
@@ -1571,12 +1675,9 @@ static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
- /*
- * For some devmap managed pages we need to catch refcount transition
- * from 2 to 1:
- */
- if (put_devmap_managed_folio_refs(folio, 1))
+ if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
+
folio_put(folio);
}
@@ -1607,17 +1708,20 @@ static inline void put_page(struct page *page)
* issue.
*
* Locking: the lockless algorithm described in folio_try_get_rcu()
- * provides safe operation for get_user_pages(), page_mkclean() and
+ * provides safe operation for get_user_pages(), folio_mkclean() and
* other calls that race to set up page table entries.
*/
#define GUP_PIN_COUNTING_BIAS (1U << 10)
void unpin_user_page(struct page *page);
+void unpin_folio(struct folio *folio);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_folio(struct folio *folio, unsigned long npages);
+void unpin_folios(struct folio **folios, unsigned long nfolios);
static inline bool is_cow_mapping(vm_flags_t flags)
{
@@ -1653,21 +1757,26 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
*/
static inline int page_zone_id(struct page *page)
{
- return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+ return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
-int page_to_nid(const struct page *page);
+int memdesc_nid(memdesc_flags_t mdf);
#else
-static inline int page_to_nid(const struct page *page)
+static inline int memdesc_nid(memdesc_flags_t mdf)
{
- return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
+ return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
+static inline int page_to_nid(const struct page *page)
+{
+ return memdesc_nid(PF_POISONED_CHECK(page)->flags);
+}
+
static inline int folio_nid(const struct folio *folio)
{
- return page_to_nid(&folio->page);
+ return memdesc_nid(folio->flags);
}
#ifdef CONFIG_NUMA_BALANCING
@@ -1736,14 +1845,14 @@ static inline void page_cpupid_reset_last(struct page *page)
#else
static inline int folio_last_cpupid(struct folio *folio)
{
- return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
- page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+ page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
@@ -1765,6 +1874,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
+
+bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
@@ -1818,6 +1929,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
{
}
+static inline bool folio_use_access_time(struct folio *folio)
+{
+ return false;
+}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -1833,7 +1948,7 @@ static inline u8 page_kasan_tag(const struct page *page)
u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
- tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
@@ -1848,12 +1963,12 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
return;
tag ^= 0xff;
- old_flags = READ_ONCE(page->flags);
+ old_flags = READ_ONCE(page->flags.f);
do {
flags = old_flags;
flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
@@ -1884,28 +1999,33 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
-static inline struct zone *folio_zone(const struct folio *folio)
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
- return page_zone(&folio->page);
+ return NODE_DATA(folio_nid(folio));
}
-static inline pg_data_t *folio_pgdat(const struct folio *folio)
+static inline struct zone *folio_zone(const struct folio *folio)
{
- return page_pgdat(&folio->page);
+ return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
}
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+ page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
-static inline unsigned long page_to_section(const struct page *page)
+static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
- return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+ return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
-#endif
+#else /* !SECTION_IN_PAGE_FLAGS */
+static inline unsigned long memdesc_section(memdesc_flags_t mdf)
+{
+ return 0;
+}
+#endif /* SECTION_IN_PAGE_FLAGS */
/**
* folio_pfn - Return the Page Frame Number of a folio.
@@ -1916,7 +2036,7 @@ static inline unsigned long page_to_section(const struct page *page)
*
* Return: The Page Frame Number of the first page in the folio.
*/
-static inline unsigned long folio_pfn(struct folio *folio)
+static inline unsigned long folio_pfn(const struct folio *folio)
{
return page_to_pfn(&folio->page);
}
@@ -1926,6 +2046,69 @@ static inline struct folio *pfn_folio(unsigned long pfn)
return page_folio(pfn_to_page(pfn));
}
+#ifdef CONFIG_MMU
+static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
+{
+ return pfn_pte(page_to_pfn(page), pgprot);
+}
+
+/**
+ * folio_mk_pte - Create a PTE for this folio
+ * @folio: The folio to create a PTE for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_ptes().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
+{
+ return pfn_pte(folio_pfn(folio), pgprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * folio_mk_pmd - Create a PMD for this folio
+ * @folio: The folio to create a PMD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pmd_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
+{
+ return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+/**
+ * folio_mk_pud - Create a PUD for this folio
+ * @folio: The folio to create a PUD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pud_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
+{
+ return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_MMU */
+
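/*
 * [Editor's usage sketch; illustrative, with locking and error handling
 * omitted.] A fault path mapping a freshly allocated folio might build
 * and install its PTEs as:
 *
 *	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, entry, folio_nr_pages(folio));
 */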
+static inline bool folio_has_pincount(const struct folio *folio)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return folio_test_large(folio);
+ return folio_order(folio) > 1;
+}
+
/**
* folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
* @folio: The folio.
@@ -1942,18 +2125,18 @@ static inline struct folio *pfn_folio(unsigned long pfn)
* get that many refcounts, and b) all the callers of this routine are
* expected to be able to deal gracefully with a false positive.
*
- * For large folios, the result will be exactly correct. That's because
+ * For most large folios, the result will be exactly correct. That's because
* we have more tracking data available: the _pincount field is used
* instead of the GUP_PIN_COUNTING_BIAS scheme.
*
* For more information, please see Documentation/core-api/pin_user_pages.rst.
*
- * Return: True, if it is likely that the page has been "dma-pinned".
- * False, if the page is definitely not dma-pinned.
+ * Return: True, if it is likely that the folio has been "dma-pinned".
+ * False, if the folio is definitely not dma-pinned.
*/
static inline bool folio_maybe_dma_pinned(struct folio *folio)
{
- if (folio_test_large(folio))
+ if (folio_has_pincount(folio))
return atomic_read(&folio->_pincount) > 0;
/*
@@ -1968,11 +2151,6 @@ static inline bool folio_maybe_dma_pinned(struct folio *folio)
GUP_PIN_COUNTING_BIAS;
}
-static inline bool page_maybe_dma_pinned(struct page *page)
-{
- return folio_maybe_dma_pinned(page_folio(page));
-}
-
/*
* This should most likely only be called during fork() to see whether we
* should break the cow immediately for an anon page on the src mm.
@@ -1984,7 +2162,7 @@ static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
- if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+ if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
return false;
return folio_maybe_dma_pinned(folio);
@@ -2030,6 +2208,13 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
if (folio_is_device_coherent(folio))
return false;
+ /*
+ * Filesystems can only tolerate transient delays to truncate and
+ * hole-punch operations
+ */
+ if (folio_is_fsdax(folio))
+ return false;
+
/* Otherwise, non-movable zone folios can be pinned. */
return !folio_is_zone_movable(folio);
@@ -2043,14 +2228,14 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
- page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
- page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+ page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
- page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
- page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+ page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_links(struct page *page, enum zone_type zone,
@@ -2069,49 +2254,55 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*
* Return: A positive power of two.
*/
-static inline long folio_nr_pages(const struct folio *folio)
+static inline unsigned long folio_nr_pages(const struct folio *folio)
{
if (!folio_test_large(folio))
return 1;
-#ifdef CONFIG_64BIT
- return folio->_folio_nr_pages;
-#else
- return 1L << (folio->_flags_1 & 0xff);
-#endif
+ return folio_large_nr_pages(folio);
}
-/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-#define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
+#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
+/*
+ * We don't expect any folios that exceed buddy sizes (and consequently
+ * memory sections).
+ */
+#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
+#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/*
+ * Only pages within a single memory section are guaranteed to be
+ * contiguous. By limiting folios to a single memory section, all folio
+ * pages are guaranteed to be contiguous.
+ */
+#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
+#elif defined(CONFIG_HUGETLB_PAGE)
+/*
+ * There is no real limit on the folio size. We limit them to the maximum we
+ * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
+ * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
+ */
+#define MAX_FOLIO_ORDER get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
#else
-#define MAX_FOLIO_NR_PAGES MAX_ORDER_NR_PAGES
+/*
+ * Without hugetlb, gigantic folios that are bigger than a single PUD are
+ * currently impossible.
+ */
+#define MAX_FOLIO_ORDER PUD_ORDER
#endif
+#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
+
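/*
 * [Editor's note, illustrative arithmetic.] With the common
 * MAX_PAGE_ORDER of 10 and 4 KiB pages, the !CONFIG_HAVE_GIGANTIC_FOLIOS
 * case yields MAX_FOLIO_NR_PAGES == 1024, i.e. folios of at most 4 MiB.
 */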
/*
* compound_nr() returns the number of pages in this potentially compound
* page. compound_nr() can be called on a tail page, and is defined to
* return 1 in that case.
*/
-static inline unsigned long compound_nr(struct page *page)
+static inline unsigned long compound_nr(const struct page *page)
{
- struct folio *folio = (struct folio *)page;
+ const struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
return 1;
-#ifdef CONFIG_64BIT
- return folio->_folio_nr_pages;
-#else
- return 1L << (folio->_flags_1 & 0xff);
-#endif
-}
-
-/**
- * thp_nr_pages - The number of regular pages in this huge page.
- * @page: The head page of a huge page.
- */
-static inline int thp_nr_pages(struct page *page)
-{
- return folio_nr_pages((struct folio *)page);
+ return folio_large_nr_pages(folio);
}
/**
@@ -2145,7 +2336,7 @@ static inline struct folio *folio_next(struct folio *folio)
* it from being split. It is not necessary for the folio to be locked.
* Return: The base-2 logarithm of the size of this folio.
*/
-static inline unsigned int folio_shift(struct folio *folio)
+static inline unsigned int folio_shift(const struct folio *folio)
{
return PAGE_SHIFT + folio_order(folio);
}
@@ -2158,39 +2349,36 @@ static inline unsigned int folio_shift(struct folio *folio)
* it from being split. It is not necessary for the folio to be locked.
* Return: The number of bytes in this folio.
*/
-static inline size_t folio_size(struct folio *folio)
+static inline size_t folio_size(const struct folio *folio)
{
return PAGE_SIZE << folio_order(folio);
}
/**
- * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
- * tables of more than one MM
+ * folio_maybe_mapped_shared - Whether the folio is mapped into the page
+ * tables of more than one MM
* @folio: The folio.
*
- * This function checks if the folio is currently mapped into more than one
- * MM ("mapped shared"), or if the folio is only mapped into a single MM
- * ("mapped exclusively").
+ * This function checks if the folio may currently be mapped into more than one
+ * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
+ * MM ("mapped exclusively").
*
- * As precise information is not easily available for all folios, this function
- * estimates the number of MMs ("sharers") that are currently mapping a folio
- * using the number of times the first page of the folio is currently mapped
- * into page tables.
+ * For KSM folios, this function also returns "mapped shared" when a folio is
+ * mapped multiple times into the same MM, because the individual page mappings
+ * are independent.
*
- * For small anonymous folios (except KSM folios) and anonymous hugetlb folios,
- * the return value will be exactly correct, because they can only be mapped
- * at most once into an MM, and they cannot be partially mapped.
+ * For small anonymous folios and anonymous hugetlb folios, the return
+ * value will be exactly correct: non-KSM folios can only be mapped at most once
+ * into an MM, and they cannot be partially mapped. KSM folios are
+ * considered shared even if mapped multiple times into the same MM.
*
* For other folios, the result can be fuzzy:
* #. For partially-mappable large folios (THP), the return value can wrongly
- * indicate "mapped exclusively" (false negative) when the folio is
- * only partially mapped into at least one MM.
+ * indicate "mapped shared" (false positive) if a folio was mapped by
+ * more than two MMs at one point in time.
* #. For pagecache folios (including hugetlb), the return value can wrongly
* indicate "mapped shared" (false positive) when two VMAs in the same MM
* cover the same file range.
- * #. For (small) KSM folios, the return value can wrongly indicate "mapped
- * shared" (false positive), when the folio is mapped multiple times into
- * the same MM.
*
* Further, this function only considers current page table mappings that
* are tracked using the folio mapcount(s).
@@ -2204,7 +2392,7 @@ static inline size_t folio_size(struct folio *folio)
*
* Return: Whether the folio is estimated to be mapped into more than one MM.
*/
-static inline bool folio_likely_mapped_shared(struct folio *folio)
+static inline bool folio_maybe_mapped_shared(struct folio *folio)
{
int mapcount = folio_mapcount(folio);
@@ -2212,38 +2400,83 @@ static inline bool folio_likely_mapped_shared(struct folio *folio)
if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
return mapcount > 1;
- /* A single mapping implies "mapped exclusively". */
- if (mapcount <= 1)
- return false;
-
- /* If any page is mapped more than once we treat it "mapped shared". */
- if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
+ /*
+ * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
+ * simply assume "mapped shared", nobody should really care
+ * about this for arbitrary kernel allocations.
+ */
+ if (!IS_ENABLED(CONFIG_MM_ID))
return true;
- /* Let's guess based on the first subpage. */
- return atomic_read(&folio->_mapcount) > 0;
+ /*
+ * A single mapping implies "mapped exclusively", even if the
+ * folio flag says something different: it's easier to handle this
+ * case here instead of on the RMAP hot path.
+ */
+ if (mapcount <= 1)
+ return false;
+ return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
}
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
+/**
+ * folio_expected_ref_count - calculate the expected folio refcount
+ * @folio: the folio
+ *
+ * Calculate the expected folio refcount, taking references from the pagecache,
+ * swapcache, PG_private and page table mappings into account. Useful in
+ * combination with folio_ref_count() to detect unexpected references (e.g.,
+ * GUP or other temporary references).
+ *
+ * Currently does not consider references from the LRU cache. If the folio
+ * was isolated from the LRU (which is the case during migration or split),
+ * the LRU cache does not apply.
+ *
+ * Calling this function on an unmapped folio -- !folio_mapped() -- that is
+ * locked will return a stable result.
+ *
+ * Calling this function on a mapped folio will not result in a stable result,
+ * because nothing stops additional page table mappings from coming (e.g.,
+ * fork()) or going (e.g., munmap()).
+ *
+ * Calling this function without the folio lock will also not result in a
+ * stable result: for example, the folio might get dropped from the swapcache
+ * concurrently.
+ *
+ * However, even when called without the folio lock or on a mapped folio,
+ * this function can be used to detect unexpected references early (for example,
+ * to decide whether it even makes sense to lock the folio and unmap it).
+ *
+ * The caller must add any reference (e.g., from folio_try_get()) it might be
+ * holding itself to the result.
+ *
+ * Returns the expected folio refcount.
+ */
+static inline int folio_expected_ref_count(const struct folio *folio)
{
- return 0;
+ const int order = folio_order(folio);
+ int ref_count = 0;
+
+ if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
+ return 0;
+
+ if (folio_test_anon(folio)) {
+ /* One reference per page from the swapcache. */
+ ref_count += folio_test_swapcache(folio) << order;
+ } else {
+ /* One reference per page from the pagecache. */
+ ref_count += !!folio->mapping << order;
+ /* One reference from PG_private. */
+ ref_count += folio_test_private(folio);
+ }
+
+ /* One reference per page table mapping. */
+ return ref_count + folio_mapcount(folio);
}
-#endif
#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
static inline int arch_make_folio_accessible(struct folio *folio)
{
- int ret;
- long i, nr = folio_nr_pages(folio);
-
- for (i = 0; i < nr; i++) {
- ret = arch_make_page_accessible(folio_page(folio, i));
- if (ret)
- break;
- }
-
- return ret;
+ return 0;
}
#endif
@@ -2290,19 +2523,6 @@ static inline void *folio_address(const struct folio *folio)
return page_address(&folio->page);
}
-extern pgoff_t __page_file_index(struct page *page);
-
-/*
- * Return the pagecache index of the passed page. Regular pagecache pages
- * use ->index whereas swapcache pages use swp_offset(->private)
- */
-static inline pgoff_t page_index(struct page *page)
-{
- if (unlikely(PageSwapCache(page)))
- return __page_file_index(page);
- return page->index;
-}
-
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
@@ -2353,7 +2573,6 @@ static inline void clear_page_pfmemalloc(struct page *page)
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
-#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
@@ -2362,6 +2581,7 @@ extern void pagefault_out_of_memory(void);
struct zap_details {
struct folio *single_folio; /* Locked folio to be unmapped */
bool even_cows; /* Zap COWed private pages too? */
+ bool reclaim_pt; /* Need reclaim page tables? */
zap_flags_t zap_flags; /* Extra flags for zapping */
};
@@ -2375,31 +2595,6 @@ struct zap_details {
/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
-#ifdef CONFIG_SCHED_MM_CID
-void sched_mm_cid_before_execve(struct task_struct *t);
-void sched_mm_cid_after_execve(struct task_struct *t);
-void sched_mm_cid_fork(struct task_struct *t);
-void sched_mm_cid_exit_signals(struct task_struct *t);
-static inline int task_mm_cid(struct task_struct *t)
-{
- return t->mm_cid;
-}
-#else
-static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_fork(struct task_struct *t) { }
-static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
-static inline int task_mm_cid(struct task_struct *t)
-{
- /*
- * Use the processor id as a fall-back when the mm cid feature is
- * disabled. This provides functional per-cpu data structure accesses
- * in user-space, although it won't provide the memory usage benefits.
- */
- return raw_smp_processor_id();
-}
-#endif
-
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
@@ -2416,6 +2611,8 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
@@ -2428,7 +2625,7 @@ static inline void zap_vma_pages(struct vm_area_struct *vma)
}
void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
- unsigned long end, unsigned long tree_end, bool mm_wr_locked);
+ unsigned long end, unsigned long tree_end);
struct mmu_notifier_range;
@@ -2436,11 +2633,42 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+struct follow_pfnmap_args {
+ /**
+ * Inputs:
+ * @vma: Pointer to the &struct vm_area_struct to walk
+ * @address: the virtual address to walk
+ */
+ struct vm_area_struct *vma;
+ unsigned long address;
+ /**
+ * Internals:
+ *
+ * The caller shouldn't touch any of these.
+ */
+ spinlock_t *lock;
+ pte_t *ptep;
+ /**
+ * Outputs:
+ *
+ * @pfn: the PFN of the address
+ * @addr_mask: address mask covering pfn
+ * @pgprot: the pgprot_t of the mapping
+ * @writable: whether the mapping is writable
+ * @special: whether the mapping is a special mapping (real PFN maps)
+ */
+ unsigned long pfn;
+ unsigned long addr_mask;
+ pgprot_t pgprot;
+ bool writable;
+ bool special;
+};
+int follow_pfnmap_start(struct follow_pfnmap_args *args);
+void follow_pfnmap_end(struct follow_pfnmap_args *args);
+
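
(Sketch of the begin/end pattern replacing follow_pte(); assumes vma/addr are
in scope and the caller holds the mmap lock.)

	struct follow_pfnmap_args args = {
		.vma = vma,
		.address = addr,
	};
	unsigned long pfn;

	if (follow_pfnmap_start(&args))
		return -EFAULT;
	pfn = args.pfn;	/* outputs are only stable until follow_pfnmap_end() */
	follow_pfnmap_end(&args);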
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -2498,6 +2726,11 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
+#ifdef CONFIG_BPF_SYSCALL
+extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, unsigned int gup_flags);
+#endif
+
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
@@ -2545,6 +2778,10 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
+ struct folio **folios, unsigned int max_folios,
+ pgoff_t *offset);
+int folio_add_pins(struct folio *folio, unsigned int pins);
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
@@ -2554,22 +2791,18 @@ void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim);
+ const struct task_struct *task, bool bypass_rlim);
struct kvec;
-struct page *get_dump_page(unsigned long addr);
+struct page *get_dump_page(unsigned long addr, int *locked);
bool folio_mark_dirty(struct folio *folio);
+bool folio_mark_dirty_lock(struct folio *folio);
bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks, bool for_stack);
-
/*
* Flags used by change_protection(). For now we make it a bitmap so
* that we can pass in multiple flags just like parameters. However
@@ -2590,21 +2823,6 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
-bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
-static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
-{
- /*
- * We want to check manually if we can change individual PTEs writable
- * if we can't do that automatically for all PTEs in a mapping. For
- * private mappings, that's always the case when we have write
- * permissions as we properly have to handle COW.
- */
- if (vma->vm_flags & VM_SHARED)
- return vma_wants_writenotify(vma, vma->vm_page_prot);
- return !!(vma->vm_flags & VM_WRITE);
-
-}
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
extern long change_protection(struct mmu_gather *tlb,
@@ -2612,7 +2830,7 @@ extern long change_protection(struct mmu_gather *tlb,
unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
struct vm_area_struct *vma, struct vm_area_struct **pprev,
- unsigned long start, unsigned long end, unsigned long newflags);
+ unsigned long start, unsigned long end, vm_flags_t newflags);
/*
* doesn't attempt to fault and will return short.
@@ -2633,6 +2851,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
return percpu_counter_read_positive(&mm->rss_stat[member]);
}
+static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
+{
+ return percpu_counter_sum_positive(&mm->rss_stat[member]);
+}
+
void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
@@ -2692,8 +2915,8 @@ static inline void update_hiwater_rss(struct mm_struct *mm)
{
unsigned long _rss = get_mm_rss(mm);
- if ((mm)->hiwater_rss < _rss)
- (mm)->hiwater_rss = _rss;
+ if (data_race(mm->hiwater_rss) < _rss)
+ data_race(mm->hiwater_rss = _rss);
}
static inline void update_hiwater_vm(struct mm_struct *mm)
@@ -2728,12 +2951,29 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#endif
-#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
+#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
{
- return 0;
+ return false;
}
-#endif
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return false;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
@@ -2868,16 +3108,23 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
+enum pt_flags {
+ PT_kernel = PG_referenced,
+ PT_reserved = PG_reserved,
+ /* High bits are used for zone/node/section */
+};
+
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
return page_ptdesc(virt_to_page(x));
}
-static inline void *ptdesc_to_virt(const struct ptdesc *pt)
-{
- return page_to_virt(ptdesc_page(pt));
-}
-
+/**
+ * ptdesc_address - Virtual address of page table.
+ * @pt: Page table descriptor.
+ *
+ * Return: The first byte of the page table described by @pt.
+ */
static inline void *ptdesc_address(const struct ptdesc *pt)
{
return folio_address(ptdesc_folio(pt));
@@ -2885,7 +3132,47 @@ static inline void *ptdesc_address(const struct ptdesc *pt)
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
- return folio_test_reserved(ptdesc_folio(pt));
+ return test_bit(PT_reserved, &pt->pt_flags.f);
+}
+
+/**
+ * ptdesc_set_kernel - Mark a ptdesc used to map the kernel
+ * @ptdesc: The ptdesc to be marked
+ *
+ * Kernel page tables often need special handling. Set a flag so that
+ * the handling code knows this ptdesc will not be used for userspace.
+ */
+static inline void ptdesc_set_kernel(struct ptdesc *ptdesc)
+{
+ set_bit(PT_kernel, &ptdesc->pt_flags.f);
+}
+
+/**
+ * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel
+ * @ptdesc: The ptdesc to be unmarked
+ *
+ * Use when the ptdesc is no longer used to map the kernel and no longer
+ * needs special handling.
+ */
+static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc)
+{
+ /*
+ * Note: the 'PG_referenced' bit does not strictly need to be
+ * cleared before freeing the page. But this is nice for
+ * symmetry.
+ */
+ clear_bit(PT_kernel, &ptdesc->pt_flags.f);
+}
+
+/**
+ * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel
+ * @ptdesc: The ptdesc being tested
+ *
+ * Returns true if the ptdesc is used to map the kernel.
+ */
+static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
+{
+ return test_bit(PT_kernel, &ptdesc->pt_flags.f);
}
/**
@@ -2906,6 +3193,21 @@ static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int orde
}
#define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
+static inline void __pagetable_free(struct ptdesc *pt)
+{
+ struct page *page = ptdesc_page(pt);
+
+ __free_pages(page, compound_order(page));
+}
+
+#ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
+void pagetable_free_kernel(struct ptdesc *pt);
+#else
+static inline void pagetable_free_kernel(struct ptdesc *pt)
+{
+ __pagetable_free(pt);
+}
+#endif
/**
* pagetable_free - Free pagetables
* @pt: The page table descriptor
@@ -2915,12 +3217,15 @@ static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int orde
*/
static inline void pagetable_free(struct ptdesc *pt)
{
- struct page *page = ptdesc_page(pt);
-
- __free_pages(page, compound_order(page));
+ if (ptdesc_test_kernel(pt)) {
+ ptdesc_clear_kernel(pt);
+ pagetable_free_kernel(pt);
+ } else {
+ __pagetable_free(pt);
+ }
}
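
(Lifecycle sketch for a kernel page table under PT_kernel; illustrative only.
GFP_PGTABLE_KERNEL comes from asm-generic/pgalloc.h.)

	struct ptdesc *pt = pagetable_alloc(GFP_PGTABLE_KERNEL, 0);

	if (!pt)
		return -ENOMEM;
	ptdesc_set_kernel(pt);	/* free path may defer via pagetable_free_kernel() */
	/* ... install and use the page table ... */
	pagetable_free(pt);	/* clears PT_kernel before freeing */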
-#if USE_SPLIT_PTE_PTLOCKS
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
bool ptlock_alloc(struct ptdesc *ptdesc);
@@ -2955,6 +3260,13 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
+ BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
+ return ptlock_ptr(virt_to_ptdesc(pte));
+}
+
static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
@@ -2971,7 +3283,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc)
return true;
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
@@ -2979,32 +3291,61 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+ return &mm->page_table_lock;
+}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
-static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
+static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ return compound_nr(ptdesc_page(ptdesc));
+}
- if (!ptlock_init(ptdesc))
- return false;
- __folio_set_pgtable(folio);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
- return true;
+static inline void __pagetable_ctor(struct ptdesc *ptdesc)
+{
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
+
+ __SetPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
}
-static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
+static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+ __ClearPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
+}
+
+static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
+{
+ pagetable_dtor(ptdesc);
+ pagetable_free(ptdesc);
+}
+
+static inline bool pagetable_pte_ctor(struct mm_struct *mm,
+ struct ptdesc *ptdesc)
+{
+ if (mm != &init_mm && !ptlock_init(ptdesc))
+ return false;
+ __pagetable_ctor(ptdesc);
+ return true;
}
-pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
+ pmd_t *pmdvalp)
+{
+ pte_t *pte;
+
+ __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
+ return pte;
+}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
@@ -3017,12 +3358,16 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
{
pte_t *pte;
- __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
+ __cond_lock(RCU, __cond_lock(*ptlp,
+ pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
return pte;
}
-pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr, spinlock_t **ptlp);
+pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
+pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, pmd_t *pmdvalp,
+ spinlock_t **ptlp);
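
(The rw variant requires revalidating the pmd once the lock is held; a sketch
of that pattern, assuming mm/pmd/addr are in scope.)

	pmd_t pmdval;
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!pte)
		return;
	spin_lock(ptl);
	/* The table may have been freed or replaced before we took the lock. */
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		pte_unmap_unlock(pte, ptl);
		return;
	}
	/* ... read/modify PTEs under ptl ... */
	pte_unmap_unlock(pte, ptl);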
#define pte_unmap_unlock(pte, ptl) do { \
spin_unlock(ptl); \
@@ -3042,7 +3387,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
@@ -3068,14 +3413,6 @@ static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
return ptlock_init(ptdesc);
}
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
-#endif
- ptlock_free(ptdesc);
-}
-
#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -3086,7 +3423,6 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
}
static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
-static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -3099,26 +3435,16 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
+static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
+ struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- if (!pmd_ptlock_init(ptdesc))
+ if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
return false;
- __folio_set_pgtable(folio);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ ptdesc_pmd_pts_init(ptdesc);
+ __pagetable_ctor(ptdesc);
return true;
}
-static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
-{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- pmd_ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
-}
-
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -3140,18 +3466,17 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
-
- __folio_set_pgtable(folio);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ __pagetable_ctor(ptdesc);
}
-static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ __pagetable_ctor(ptdesc);
+}
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
+{
+ __pagetable_ctor(ptdesc);
}
extern void __init pagecache_init(void);
@@ -3172,22 +3497,7 @@ extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
-
- if (ref) {
- set_codetag_empty(ref);
- put_page_tag_ref(ref);
- }
- }
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- adjust_managed_page_count(page, 1);
-}
-#define free_highmem_page(page) free_reserved_page(page)
+void free_reserved_page(struct page *page);
static inline void mark_page_reserved(struct page *page)
{
@@ -3287,6 +3597,8 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
struct rb_root_cached *root);
+struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
+ unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
@@ -3314,79 +3626,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
/* mmap.c */
-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next);
-extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void unlink_file_vma(struct vm_area_struct *);
-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
-struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long vm_flags,
- struct mempolicy *policy,
- struct vm_userfaultfd_ctx uffd_ctx,
- struct anon_vma_name *anon_name);
-
-/* We are about to modify the VMA's flags. */
-static inline struct vm_area_struct
-*vma_modify_flags(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx,
- anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or anon_name. */
-static inline struct vm_area_struct
-*vma_modify_flags_name(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end,
- unsigned long new_flags,
- struct anon_vma_name *new_name)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
-}
-
-/* We are about to modify the VMA's memory policy. */
-static inline struct vm_area_struct
-*vma_modify_policy(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct mempolicy *new_pol)
-{
- return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
- new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-}
-
-/* We are about to modify the VMA's flags and/or uffd context. */
-static inline struct vm_area_struct
-*vma_modify_flags_uffd(struct vma_iterator *vmi,
- struct vm_area_struct *prev,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long new_flags,
- struct vm_userfaultfd_ctx new_ctx)
-{
- return vma_modify(vmi, prev, vma, start, end, new_flags,
- vma_policy(vma), new_ctx, anon_vma_name(vma));
-}
+bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, bool write);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3415,14 +3659,10 @@ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
-extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
- unsigned long flags,
+ vm_flags_t vm_flags,
const struct vm_special_mapping *spec);
-/* This is an obsolete alternative to _install_special_mapping. */
-extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
unsigned long randomize_page(unsigned long start, unsigned long range);
@@ -3438,9 +3678,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
}
-extern unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
- struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
@@ -3448,14 +3685,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
#ifdef CONFIG_MMU
-extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3488,10 +3725,10 @@ struct vm_unmapped_area_info {
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
-extern void truncate_inode_pages(struct address_space *, loff_t);
-extern void truncate_inode_pages_range(struct address_space *,
- loff_t lstart, loff_t lend);
-extern void truncate_inode_pages_final(struct address_space *);
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
+void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
+		loff_t lend);
+void truncate_inode_pages_final(struct address_space *mapping);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
@@ -3504,9 +3741,6 @@ extern unsigned long stack_guard_gap;
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
-/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-int expand_downwards(struct vm_area_struct *vma, unsigned long address);
-
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
@@ -3532,7 +3766,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
@@ -3544,7 +3778,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
return 0;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
@@ -3555,7 +3789,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
return vm_start;
}
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
@@ -3567,11 +3801,95 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
return vm_end;
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
+{
+ return desc->end - desc->start;
+}
+
+static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc)
+{
+ return vma_desc_size(desc) >> PAGE_SHIFT;
+}
+
+/**
+ * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN
+ * remap is required.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start: The virtual address to start the remap from, must be within the VMA.
+ * @start_pfn: The first PFN in the range to remap.
+ * @size: The size of the range to remap, in bytes, at most spanning to the end
+ * of the VMA.
+ */
+static inline void mmap_action_remap(struct vm_area_desc *desc,
+ unsigned long start,
+ unsigned long start_pfn,
+ unsigned long size)
+{
+ struct mmap_action *action = &desc->action;
+
+ /* [start, start + size) must be within the VMA. */
+ WARN_ON_ONCE(start < desc->start || start >= desc->end);
+ WARN_ON_ONCE(start + size > desc->end);
+
+ action->type = MMAP_REMAP_PFN;
+ action->remap.start = start;
+ action->remap.start_pfn = start_pfn;
+ action->remap.size = size;
+ action->remap.pgprot = desc->page_prot;
+}
+
+/**
+ * mmap_action_remap_full - helper for mmap_prepare hook to specify that the
+ * entirety of a VMA should be PFN remapped.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start_pfn: The first PFN in the range to remap.
+ */
+static inline void mmap_action_remap_full(struct vm_area_desc *desc,
+ unsigned long start_pfn)
+{
+ mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc));
+}
+
+/**
+ * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN
+ * I/O remap is required.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start: The virtual address to start the remap from, must be within the VMA.
+ * @start_pfn: The first PFN in the range to remap.
+ * @size: The size of the range to remap, in bytes, at most spanning to the end
+ * of the VMA.
+ */
+static inline void mmap_action_ioremap(struct vm_area_desc *desc,
+ unsigned long start,
+ unsigned long start_pfn,
+ unsigned long size)
+{
+ mmap_action_remap(desc, start, start_pfn, size);
+ desc->action.type = MMAP_IO_REMAP_PFN;
+}
+
+/**
+ * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the
+ * entirety of a VMA should be PFN I/O remapped.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start_pfn: The first PFN in the range to remap.
+ */
+static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
+ unsigned long start_pfn)
+{
+ mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
+}
+
+void mmap_action_prepare(struct mmap_action *action,
+ struct vm_area_desc *desc);
+int mmap_action_complete(struct mmap_action *action,
+ struct vm_area_struct *vma);
+
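
(Sketch of an mmap_prepare hook using the helpers above; the driver and its
DEMO_* resource constants are hypothetical.)

	static int demo_mmap_prepare(struct vm_area_desc *desc)
	{
		if (vma_desc_size(desc) > DEMO_RESOURCE_SIZE)
			return -EINVAL;
		/* Defer the actual remap to mmap_action_complete(). */
		mmap_action_remap_full(desc, DEMO_RESOURCE_PFN);
		return 0;
	}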
/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
@@ -3584,17 +3902,17 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
}
#ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
@@ -3613,10 +3931,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
unsigned long addr);
-int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t);
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t prot);
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t pgprot);
+
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
@@ -3624,14 +3941,16 @@ int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
+vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
+ bool write);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+ unsigned long pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn);
+ unsigned long addr, unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -3647,15 +3966,24 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-#ifndef io_remap_pfn_range
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn,
- unsigned long size, pgprot_t prot)
+#ifndef io_remap_pfn_range_pfn
+static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
+ unsigned long size)
{
- return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+ return pfn;
}
#endif
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orig_pfn,
+ unsigned long size, pgprot_t orig_prot)
+{
+ const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
+ const pgprot_t prot = pgprot_decrypted(orig_prot);
+
+ return remap_pfn_range(vma, addr, pfn, size, prot);
+}
+
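
(With this split an architecture overrides only the PFN translation, while the
generic wrapper keeps applying pgprot_decrypted(); a hypothetical override,
with an invented I/O base.)

	#define io_remap_pfn_range_pfn io_remap_pfn_range_pfn
	static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
							   unsigned long size)
	{
		return pfn | DEMO_ARCH_IO_PFN_BASE;	/* shift into the I/O window */
	}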
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
@@ -3683,9 +4011,6 @@ static inline vm_fault_t vmf_fs_error(int err)
return VM_FAULT_SIGBUS;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags);
-
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
@@ -3701,7 +4026,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
unsigned int flags)
{
/*
@@ -3776,14 +4101,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
- &init_on_free);
-}
-
-DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-static inline bool want_init_mlocked_on_free(void)
-{
- return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON,
- &init_mlocked_on_free);
+ &init_on_free);
}
extern bool _debug_pagealloc_enabled_early;
@@ -3838,7 +4156,7 @@ static inline bool debug_guardpage_enabled(void)
return static_branch_unlikely(&_debug_guardpage_enabled);
}
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
{
if (!debug_guardpage_enabled())
return false;
@@ -3869,7 +4187,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3892,13 +4210,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
-extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
-
-#ifdef CONFIG_SYSCTL
-extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-#endif
+bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
@@ -3918,17 +4230,17 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
#endif
void *sparse_buffer_alloc(unsigned long size);
+unsigned long section_map_size(void);
struct page * __populate_section_memmap(unsigned long pfn,
unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
-void pmd_init(void *addr);
-void pud_init(void *addr);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
- struct vmem_altmap *altmap, struct page *reuse);
+ struct vmem_altmap *altmap, unsigned long ptpfn,
+ unsigned long flags);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
@@ -3944,6 +4256,12 @@ int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
+int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
@@ -3951,7 +4269,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
if (altmap)
@@ -3965,7 +4283,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
altmap->alloc -= nr_pfns;
}
#else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
return 0;
}
@@ -4010,9 +4328,6 @@ static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
}
#endif
-void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
- unsigned long nr_pages);
-
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
@@ -4026,7 +4341,6 @@ enum mf_flags {
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
-extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
@@ -4040,7 +4354,6 @@ extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void num_poisoned_pages_inc(unsigned long pfn);
void num_poisoned_pages_sub(unsigned long pfn, long i);
-struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
#else
static inline void memory_failure_queue(unsigned long pfn, int flags)
{
@@ -4061,12 +4374,6 @@ static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
}
#endif
-#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_KSM)
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
- struct vm_area_struct *vma, struct list_head *to_kill,
- unsigned long ksm_addr);
-#endif
-
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
extern void memblk_nr_poison_inc(unsigned long pfn);
extern void memblk_nr_poison_sub(unsigned long pfn, long i);
@@ -4107,10 +4414,10 @@ enum mf_result {
enum mf_action_page_type {
MF_MSG_KERNEL,
MF_MSG_KERNEL_HIGH_ORDER,
- MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
+ MF_MSG_GET_HWPOISON,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
@@ -4124,13 +4431,13 @@ enum mf_action_page_type {
MF_MSG_BUDDY,
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
+ MF_MSG_ALREADY_POISONED,
+ MF_MSG_PFN_MAP,
MF_MSG_UNKNOWN,
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
- unsigned long addr_hint,
- unsigned int pages_per_huge_page);
+void folio_zero_user(struct folio *folio, unsigned long addr_hint);
int copy_user_large_folio(struct folio *dst, struct folio *src,
unsigned long addr_hint,
struct vm_area_struct *vma);
@@ -4181,73 +4488,32 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr);
#endif
-extern int sysctl_nr_trim_pages;
-
-#ifdef CONFIG_PRINTK
-void mem_dump_obj(void *object);
-#else
-static inline void mem_dump_obj(void *object) {}
-#endif
-
-/**
- * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
- * handle them.
- * @seals: the seals to check
- * @vma: the vma to operate on
- *
- * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
- * check/handling on the vma flags. Return 0 if check pass, or <0 for errors.
- */
-static inline int seal_check_write(int seals, struct vm_area_struct *vma)
-{
- if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
- /*
- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * write seals are active.
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- /*
- * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
- * MAP_SHARED and read-only, take care to not allow mprotect to
- * revert protections on such mappings. Do this only for shared
- * mappings. For private mappings, don't need to mask
- * VM_MAYWRITE as we still want them to be COW-writable.
- */
- if (vma->vm_flags & VM_SHARED)
- vm_flags_clear(vma, VM_MAYWRITE);
- }
-
- return 0;
-}
-
#ifdef CONFIG_ANON_VMA_NAME
-int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in,
- struct anon_vma_name *anon_name);
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname);
#else
-static inline int
-madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, struct anon_vma_name *anon_name) {
- return 0;
+static inline
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname)
+{
+ return -EINVAL;
}
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
-void accept_memory(phys_addr_t start, phys_addr_t end);
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
+void accept_memory(phys_addr_t start, unsigned long size);
#else
static inline bool range_contains_unaccepted_memory(phys_addr_t start,
- phys_addr_t end)
+ unsigned long size)
{
return false;
}
-static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
@@ -4255,12 +4521,125 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end)
static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+ return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
}
void vma_pgtable_walk_begin(struct vm_area_struct *vma);
void vma_pgtable_walk_end(struct vm_area_struct *vma);
+int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+int reserve_mem_release_by_name(const char *name);
+
+#ifdef CONFIG_64BIT
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
+#else
+static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+{
+ /* noop on 32 bit */
+ return 0;
+}
+#endif
+
+/*
+ * user_alloc_needs_zeroing checks whether a user folio from the page
+ * allocator still needs to be zeroed by the caller.
+ */
+static inline bool user_alloc_needs_zeroing(void)
+{
+ /*
+	 * For user folios, an arch with cache aliasing requires a cache flush,
+	 * and ARC changes folio->flags to make the icache coherent with the
+	 * dcache, so always return true to make the caller use
+	 * clear_user_page()/clear_user_highpage().
+ */
+ return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
+ !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc);
+}
+
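
(Caller-side sketch: folio/addr are assumed to be in scope; folio_zero_user()
is declared elsewhere in this patch.)

	/* Skip explicit zeroing when init_on_alloc already zeroed the folio. */
	if (user_alloc_needs_zeroing())
		folio_zero_user(folio, addr);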
+int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
+int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
+/*
+ * DMA mapping IDs for page_pool
+ *
+ * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
+ * stashes it in the upper bits of page->pp_magic. We always want to be able to
+ * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
+ * pages can have arbitrary kernel pointers stored in the same field as pp_magic
+ * (since it overlaps with page->lru.next), so we must ensure that we cannot
+ * mistake a valid kernel pointer with any of the values we write into this
+ * field.
+ *
+ * On architectures that set POISON_POINTER_DELTA, this is already ensured,
+ * since this value becomes part of PP_SIGNATURE; meaning we can just use the
+ * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
+ * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
+ * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
+ * known at compile-time.
+ *
+ * If the value of PAGE_OFFSET is not known at compile time, or if it is too
+ * small to leave at least 8 bits available above PP_SIGNATURE, we define the
+ * number of bits to be 0, which turns off the DMA index tracking altogether
+ * (see page_pool_register_dma_index()).
+ */
+#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
+#if POISON_POINTER_DELTA > 0
+/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
+ * index to not overlap with that if set
+ */
+#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
+#else
+/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
+#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
+#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
+ PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
+ !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
+ MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
+
+#endif
+
+#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
+ PP_DMA_INDEX_SHIFT)
+
+/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
+ * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
+ * the head page of compound page and bit 1 for pfmemalloc page, as well as the
+ * bits used for the DMA index. page_is_pfmemalloc() is checked in
+ * __page_pool_put_page() to avoid recycling the pfmemalloc page.
+ */
+#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
+
+#ifdef CONFIG_PAGE_POOL
+static inline bool page_pool_page_is_pp(const struct page *page)
+{
+ return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
+}
+#else
+static inline bool page_pool_page_is_pp(const struct page *page)
+{
+ return false;
+}
+#endif
+
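
(Illustrative composition of pp_magic under this scheme; FIELD_PREP() is from
linux/bitfield.h and dma_index is hypothetical.)

	/* bit 0: compound head, bit 1: pfmemalloc, then signature + DMA index */
	page->pp_magic = PP_SIGNATURE |
			 FIELD_PREP(PP_DMA_INDEX_MASK, dma_index);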
+#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
+#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
+#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
+
+struct page_snapshot {
+ struct folio folio_snapshot;
+ struct page page_snapshot;
+ unsigned long pfn;
+ unsigned long idx;
+ unsigned long flags;
+};
+
+static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
+{
+ return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
+}
+
+void snapshot_page(struct page_snapshot *ps, const struct page *page);
+
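
(Usage sketch; the dump helper is hypothetical.)

	struct page_snapshot ps;

	snapshot_page(&ps, page);
	if (snapshot_page_is_faithful(&ps))
		demo_dump_folio(&ps.folio_snapshot);	/* internally consistent copy */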
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index f4fe593c1400..fa2d6ba811b5 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -8,7 +8,7 @@
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
/**
* folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
@@ -25,7 +25,7 @@
* 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
* ram or swap backed folio.
*/
-static inline int folio_is_file_lru(struct folio *folio)
+static inline int folio_is_file_lru(const struct folio *folio)
{
return !folio_test_swapbacked(folio);
}
@@ -44,7 +44,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
lockdep_assert_held(&lruvec->lru_lock);
WARN_ON_ONCE(nr_pages != (int)nr_pages);
- __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+ mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
}
@@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
* Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
enum lru_list lru;
@@ -133,36 +133,35 @@ static inline int lru_hist_from_seq(unsigned long seq)
return seq % NR_HIST_GENS;
}
-static inline int lru_tier_from_refs(int refs)
+static inline int lru_tier_from_refs(int refs, bool workingset)
{
VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
- /* see the comment in folio_lru_refs() */
- return order_base_2(refs + 1);
+ /* see the comment on MAX_NR_TIERS */
+ return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
-static inline int folio_lru_refs(struct folio *folio)
+static inline int folio_lru_refs(const struct folio *folio)
{
- unsigned long flags = READ_ONCE(folio->flags);
- bool workingset = flags & BIT(PG_workingset);
+ unsigned long flags = READ_ONCE(folio->flags.f);
+ if (!(flags & BIT(PG_referenced)))
+ return 0;
/*
- * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
- * total number of accesses is N>1, since N=0,1 both map to the first
- * tier. lru_tier_from_refs() will account for this off-by-one. Also see
- * the comment on MAX_NR_TIERS.
+ * Return the total number of accesses including PG_referenced. Also see
+ * the comment on LRU_REFS_FLAGS.
*/
- return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
-static inline int folio_lru_gen(struct folio *folio)
+static inline int folio_lru_gen(const struct folio *folio)
{
- unsigned long flags = READ_ONCE(folio->flags);
+ unsigned long flags = READ_ONCE(folio->flags.f);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
unsigned long max_seq = lruvec->lrugen.max_seq;
@@ -218,6 +217,40 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
+ const struct folio *folio,
+ bool reclaiming)
+{
+ int gen;
+ int type = folio_is_file_lru(folio);
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ /*
+ * +-----------------------------------+-----------------------------------+
+ * | Accessed through page tables and | Accessed through file descriptors |
+ * | promoted by folio_update_gen() | and protected by folio_inc_gen() |
+ * +-----------------------------------+-----------------------------------+
+ * | PG_active (set while isolated) | |
+ * +-----------------+-----------------+-----------------+-----------------+
+ * | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS |
+ * +-----------------------------------+-----------------------------------+
+ * |<---------- MIN_NR_GENS ---------->| |
+ * |<---------------------------- MAX_NR_GENS ---------------------------->|
+ */
+ if (folio_test_active(folio))
+ gen = MIN_NR_GENS - folio_test_workingset(folio);
+ else if (reclaiming)
+ gen = MAX_NR_GENS;
+ else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ gen = MIN_NR_GENS;
+ else
+ gen = MAX_NR_GENS - folio_test_workingset(folio);
+
+ return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
+}
+
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
unsigned long seq;
@@ -231,33 +264,12 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
if (folio_test_unevictable(folio) || !lrugen->enabled)
return false;
- /*
- * There are four common cases for this page:
- * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
- * generation, and it's protected over the rest below.
- * 2. If it can't be evicted immediately, i.e., a dirty page pending
- * writeback, add it to the second youngest generation.
- * 3. If it should be evicted first, e.g., cold and clean from
- * folio_rotate_reclaimable(), add it to the oldest generation.
- * 4. Everything else falls between 2 & 3 above and is added to the
- * second oldest generation if it's considered inactive, or the
- * oldest generation otherwise. See lru_gen_is_active().
- */
- if (folio_test_active(folio))
- seq = lrugen->max_seq;
- else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
- (folio_test_reclaim(folio) &&
- (folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->max_seq - 1;
- else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
- seq = lrugen->min_seq[type];
- else
- seq = lrugen->min_seq[type] + 1;
+ seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
/* see the comment on MIN_NR_GENS about PG_active */
- set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+ set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
@@ -282,7 +294,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
/* for folio_migrate_flags() */
flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
- flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
lru_gen_update_size(lruvec, folio, gen, -1);
@@ -291,6 +303,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
+{
+ unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
+
+ set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
+}
#else /* !CONFIG_LRU_GEN */
static inline bool lru_gen_enabled(void)
@@ -313,6 +331,10 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
+{
+
#endif /* CONFIG_LRU_GEN */
static __always_inline
@@ -426,6 +448,8 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
#endif /* CONFIG_ANON_VMA_NAME */
+void pfnmap_track_ctx_release(struct kref *ref);
+
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
atomic_set(&mm->tlb_flush_pending, 0);
@@ -485,7 +509,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
/*
* Must be called after having acquired the PTL; orders against that
@@ -498,7 +522,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
/*
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
@@ -517,11 +541,11 @@ static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
* The caller should insert a new pte created with make_pte_marker().
*/
static inline pte_marker copy_pte_marker(
- swp_entry_t entry, struct vm_area_struct *dst_vma)
+ softleaf_t entry, struct vm_area_struct *dst_vma)
{
- pte_marker srcm = pte_marker_get(entry);
+ const pte_marker srcm = softleaf_to_marker(entry);
/* Always copy error entries. */
- pte_marker dstm = srcm & PTE_MARKER_POISONED;
+ pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);
/* Only copy PTE markers if UFFD register matches. */
if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
@@ -529,7 +553,6 @@ static inline pte_marker copy_pte_marker(
return dstm;
}
-#endif
/*
* If this pte is wr-protected by uffd-wp in any form, arm the special pte to
@@ -541,15 +564,17 @@ static inline pte_marker copy_pte_marker(
* Must be called with pgtable lock held so that no thread will see the none
* pte, and if they see it, they'll fault and serialize at the pgtable lock.
*
- * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
+ * Returns true if an uffd-wp pte was installed, false otherwise.
*/
-static inline void
+static inline bool
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
pte_t *pte, pte_t pteval)
{
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
bool arm_uffd_pte = false;
+ if (!uffd_supports_wp_marker())
+ return false;
+
/* The current status of the pte should be "cleared" before calling */
WARN_ON_ONCE(!pte_none(ptep_get(pte)));
@@ -560,7 +585,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
* with a swap pte. There's no way of leaking the bit.
*/
if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
- return;
+ return false;
/* A uffd-wp wr-protected normal pte */
if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
@@ -573,13 +598,16 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
if (unlikely(pte_swp_uffd_wp_any(pteval)))
arm_uffd_pte = true;
- if (unlikely(arm_uffd_pte))
+ if (unlikely(arm_uffd_pte)) {
set_pte_at(vma->vm_mm, addr, pte,
make_pte_marker(PTE_MARKER_UFFD_WP));
-#endif
+ return true;
+ }
+
+ return false;
}
-static inline bool vma_has_recency(struct vm_area_struct *vma)
+static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
@@ -589,5 +617,42 @@ static inline bool vma_has_recency(struct vm_area_struct *vma)
return true;
}
+#endif
+
+/**
+ * num_pages_contiguous() - determine the number of contiguous pages
+ * that represent contiguous PFNs
+ * @pages: an array of page pointers
+ * @nr_pages: length of the array, at least 1
+ *
+ * Determine the number of contiguous pages that represent contiguous PFNs
+ * in @pages, starting from the first page.
+ *
+ * In some kernel configs contiguous PFNs will not have contiguous struct
+ * pages. In these configurations num_pages_contiguous() will return a
+ * number smaller than the ideal one. The caller should continue to check
+ * for PFN contiguity after each call to num_pages_contiguous().
+ *
+ * Returns the number of contiguous pages.
+ */
+static inline size_t num_pages_contiguous(struct page **pages, size_t nr_pages)
+{
+ struct page *cur_page = pages[0];
+ unsigned long section = memdesc_section(cur_page->flags);
+ size_t i;
+
+ for (i = 1; i < nr_pages; i++) {
+ if (++cur_page != pages[i])
+ break;
+ /*
+ * In unproblematic kernel configs, page_to_section() == 0 and
+ * the whole check will get optimized out.
+ */
+ if (memdesc_section(cur_page->flags) != section)
+ break;
+ }
+
+ return i;
+}
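/*
 * Usage sketch (editorial example, not part of the patch): a caller wanting
 * physically contiguous ranges re-checks PFN contiguity after each call,
 * since struct-page contiguity alone is not sufficient in all configs.
 * demo_use_range() is hypothetical.
 */
#if 0 /* example only */
static void demo_walk_ranges(struct page **pages, size_t nr_pages)
{
	size_t i = 0;

	while (i < nr_pages) {
		size_t n = num_pages_contiguous(&pages[i], nr_pages - i);

		/* pages[i] .. pages[i + n - 1] have contiguous PFNs */
		demo_use_range(page_to_pfn(pages[i]), n);
		i += n;
	}
}
#endif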
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 24323c7d0bd4..9f6de068295d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -19,6 +19,9 @@
#include <linux/workqueue.h>
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
+#include <linux/types.h>
+#include <linux/rseq_types.h>
+#include <linux/bitmap.h>
#include <asm/mmu.h>
@@ -27,11 +30,15 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
-#define INIT_PASID 0
struct address_space;
+struct futex_private_hash;
struct mem_cgroup;
+typedef struct {
+ unsigned long f;
+} memdesc_flags_t;
+
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -46,9 +53,7 @@ struct mem_cgroup;
* which is guaranteed to be aligned. If you use the same storage as
* page->mapping, you must restore it to NULL before freeing the page.
*
- * If your page will not be mapped to userspace, you can also use the four
- * bytes in the mapcount union, but you must call page_mapcount_reset()
- * before freeing it.
+ * The mapcount field must not be used for your own purposes.
*
* If you want to use the refcount field, it must be used in such a way
* that other CPUs temporarily incrementing and then decrementing the
@@ -72,7 +77,7 @@ struct mem_cgroup;
#endif
struct page {
- unsigned long flags; /* Atomic flags, some possibly
+ memdesc_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
/*
* Five words (20/40 bytes) are available in this union.
@@ -90,29 +95,22 @@ struct page {
union {
struct list_head lru;
- /* Or, for the Unevictable "LRU list" slot */
- struct {
- /* Always even, to negate PageTail */
- void *__filler;
- /* Count page's or folio's mlocks */
- unsigned int mlock_count;
- };
-
/* Or, free page */
struct list_head buddy_list;
struct list_head pcp_list;
+ struct llist_node pcp_llist;
};
- /* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
union {
- pgoff_t index; /* Our offset within mapping. */
+ pgoff_t __folio_index; /* Our offset within mapping. */
unsigned long share; /* share count for fsdax */
};
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
- * Used for swp_entry_t if PageSwapCache.
- * Indicates order in the buddy system if PageBuddy.
+ * Used for swp_entry_t if swapcache flag set.
+ * Indicates order in the buddy system if PageBuddy
+ * or on pcp_llist.
*/
unsigned long private;
};
@@ -131,8 +129,11 @@ struct page {
unsigned long compound_head; /* Bit zero is set */
};
struct { /* ZONE_DEVICE pages */
- /** @pgmap: Points to the hosting device page map. */
- struct dev_pagemap *pgmap;
+ /*
+ * The first word is used for compound_head or the folio's
+ * pgmap.
+ */
+ void *_unused_pgmap_compound_head;
void *zone_device_data;
/*
* ZONE_DEVICE private pages are counted as being
@@ -152,25 +153,40 @@ struct page {
union { /* This union is 4 bytes in size. */
/*
- * If the page can be mapped to userspace, encodes the number
- * of times this page is referenced by a page table.
+ * For head pages of typed folios, the value stored here
+ * allows for determining what this page is used for. The
+ * tail pages of typed folios will not store a type
+ * (page_type == _mapcount == -1).
+ *
+ * See page-flags.h for a list of page types which are currently
+ * stored here.
+ *
+ * Owners of typed folios may reuse the lower 16 bits of the
+ * head page's page_type field after setting the page type,
+ * but must reset these 16 bits to -1 before clearing the
+ * page type.
*/
- atomic_t _mapcount;
+ unsigned int page_type;
/*
- * If the page is neither PageSlab nor mappable to userspace,
- * the value stored here may help determine what this page
- * is used for. See page-flags.h for a list of page types
- * which are currently stored here.
+ * For pages that are part of non-typed folios for which mappings
+ * are tracked via the RMAP, encodes the number of times this page
+ * is directly referenced by a page table.
+ *
+ * Note that the mapcount is always initialized to -1, so that
+ * transitions both from it and to it can be tracked, using
+ * atomic_inc_and_test() and atomic_add_negative(-1).
*/
- unsigned int page_type;
+ atomic_t _mapcount;
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
#endif
/*
@@ -271,6 +287,74 @@ typedef struct {
} swp_entry_t;
/**
+ * typedef softleaf_t - Describes a page table software leaf entry, abstracted
+ * from its architecture-specific encoding.
+ *
+ * Page table leaf entries are those which do not reference any descendant page
+ * tables but rather either reference a data page, are empty (a 'none' entry),
+ * or contain a non-present entry.
+ *
+ * If referencing another page table or a data page, the page table entry is
+ * pertinent to hardware - that is, it tells the hardware how to decode the
+ * page table entry.
+ *
+ * Otherwise it is a software-defined leaf page table entry, which this type
+ * describes. See leafops.h and specifically @softleaf_type for a list of all
+ * possible kinds of software leaf entry.
+ *
+ * A softleaf_t entry is abstracted from the hardware page table entry, so is
+ * not architecture-specific.
+ *
+ * NOTE: While we transition from the confusing swp_entry_t type used for this
+ * purpose, we simply alias this type. This will be removed once the
+ * transition is complete.
+ */
+typedef swp_entry_t softleaf_t;
+
+#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT)
+/* We have some extra room after the refcount in tail pages. */
+#define NR_PAGES_IN_LARGE_FOLIO
+#endif
+
+/*
+ * On 32bit, we can cut the required metadata in half, because:
+ * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have,
+ * so we can limit MM IDs to 15 bit (32767).
+ * (b) We don't expect folios where even a single complete PTE mapping by
+ * one MM would exceed 15 bits (order-15).
+ */
+#ifdef CONFIG_64BIT
+typedef int mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX INT_MAX
+typedef unsigned int mm_id_t;
+#else /* !CONFIG_64BIT */
+typedef short mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX SHRT_MAX
+typedef unsigned short mm_id_t;
+#endif /* CONFIG_64BIT */
+
+/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */
+#define MM_ID_DUMMY 0
+#define MM_ID_MIN (MM_ID_DUMMY + 1)
+
+/*
+ * We leave the highest bit of each MM id unused, so we can store a flag
+ * in the highest bit of each folio->_mm_id[].
+ */
+#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1)
+#define MM_ID_MASK ((1U << MM_ID_BITS) - 1)
+#define MM_ID_MAX MM_ID_MASK
+
+/*
+ * In order to use bit_spin_lock(), which requires an unsigned long, we
+ * operate on folio->_mm_ids when working on flags.
+ */
+#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS
+#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM)
+#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1)
+#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM)
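/*
 * Sketch (editorial example, not part of the patch): the lock bit carved out
 * of _mm_ids is designed to be taken with bit_spin_lock(), which operates on
 * an unsigned long. This shows the intended locking pattern only, not the
 * actual rmap implementation.
 */
#if 0 /* example only */
static void demo_lock_mm_ids(struct folio *folio)
{
	bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
	/* ... inspect or update folio->_mm_id[] under the lock ... */
	bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
}
#endif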
+
+/**
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
* @lru: Least Recently Used list; tracks how recently this folio was used.
@@ -279,6 +363,8 @@ typedef struct {
* anonymous memory.
* @index: Offset within the file, in units of pages. For anonymous memory,
* this is the index from the beginning of the mmap.
+ * @share: number of DAX mappings that reference this folio. See
+ * dax_associate_entry().
* @private: Filesystem per-folio data (see folio_attach_private()).
* @swap: Used for swp_entry_t if folio_test_swapcache().
* @_mapcount: Do not access this member directly. Use folio_mapcount() to
@@ -286,18 +372,23 @@ typedef struct {
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
+ * @pgmap: Metadata for ZONE_DEVICE mappings
* @virtual: Virtual address in the kernel direct map.
* @_last_cpupid: IDs of last CPU and last process that accessed the folio.
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_large_mapcount: Do not use directly, call folio_mapcount().
* @_nr_pages_mapped: Do not use outside of rmap and debug code.
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
- * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_mm_id: Do not use outside of rmap code.
+ * @_mm_ids: Do not use outside of rmap code.
+ * @_mm_id_mapcount: Do not use outside of rmap code.
* @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
* @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
* @_deferred_list: Folios to be split under memory pressure.
+ * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -312,28 +403,36 @@ struct folio {
union {
struct {
/* public: */
- unsigned long flags;
+ memdesc_flags_t flags;
union {
struct list_head lru;
/* private: avoid cluttering the output */
+ /* For the Unevictable "LRU list" slot */
struct {
+ /* Avoid compound_head */
void *__filler;
/* public: */
unsigned int mlock_count;
/* private: */
};
/* public: */
+ struct dev_pagemap *pgmap;
};
struct address_space *mapping;
- pgoff_t index;
+ union {
+ pgoff_t index;
+ unsigned long share;
+ };
union {
void *private;
swp_entry_t swap;
};
atomic_t _mapcount;
atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
#endif
#if defined(WANT_PAGE_VIRTUAL)
void *virtual;
@@ -349,14 +448,30 @@ struct folio {
struct {
unsigned long _flags_1;
unsigned long _head_1;
+ union {
+ struct {
/* public: */
- atomic_t _large_mapcount;
- atomic_t _entire_mapcount;
- atomic_t _nr_pages_mapped;
- atomic_t _pincount;
+ atomic_t _large_mapcount;
+ atomic_t _nr_pages_mapped;
#ifdef CONFIG_64BIT
- unsigned int _folio_nr_pages;
-#endif
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* CONFIG_64BIT */
+ mm_id_mapcount_t _mm_id_mapcount[2];
+ union {
+ mm_id_t _mm_id[2];
+ unsigned long _mm_ids;
+ };
+ /* private: the union with struct page is transitional */
+ };
+ unsigned long _usable_1[4];
+ };
+ atomic_t _mapcount_1;
+ atomic_t _refcount_1;
+ /* public: */
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ unsigned int _nr_pages;
+#endif /* NR_PAGES_IN_LARGE_FOLIO */
/* private: the union with struct page is transitional */
};
struct page __page_1;
@@ -366,20 +481,27 @@ struct folio {
unsigned long _flags_2;
unsigned long _head_2;
/* public: */
- void *_hugetlb_subpool;
- void *_hugetlb_cgroup;
- void *_hugetlb_cgroup_rsvd;
- void *_hugetlb_hwpoison;
+ struct list_head _deferred_list;
+#ifndef CONFIG_64BIT
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* !CONFIG_64BIT */
/* private: the union with struct page is transitional */
};
+ struct page __page_2;
+ };
+ union {
struct {
- unsigned long _flags_2a;
- unsigned long _head_2a;
+ unsigned long _flags_3;
+ unsigned long _head_3;
/* public: */
- struct list_head _deferred_list;
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
/* private: the union with struct page is transitional */
};
- struct page __page_2;
+ struct page __page_3;
};
};
@@ -389,7 +511,7 @@ FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
-FOLIO_MATCH(index, index);
+FOLIO_MATCH(__folio_index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
@@ -408,27 +530,36 @@ FOLIO_MATCH(_last_cpupid, _last_cpupid);
offsetof(struct page, pg) + sizeof(struct page))
FOLIO_MATCH(flags, _flags_1);
FOLIO_MATCH(compound_head, _head_1);
+FOLIO_MATCH(_mapcount, _mapcount_1);
+FOLIO_MATCH(_refcount, _refcount_1);
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
-FOLIO_MATCH(flags, _flags_2a);
-FOLIO_MATCH(compound_head, _head_2a);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 3 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_3);
+FOLIO_MATCH(compound_head, _head_3);
#undef FOLIO_MATCH
/**
* struct ptdesc - Memory descriptor for page tables.
- * @__page_flags: Same as page flags. Powerpc only.
+ * @pt_flags: enum pt_flags plus zone/node/section.
* @pt_rcu_head: For freeing page table pages.
- * @pt_list: List of used page tables. Used for s390 and x86.
+ * @pt_list: List of used page tables. Used for s390 gmap shadow pages
+ * (which are not linked into the user page tables) and x86
+ * pgds.
* @_pt_pad_1: Padding that aliases with page's compound head.
* @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
* @__page_mapping: Aliases with page->mapping. Unused for page tables.
* @pt_index: Used for s390 gmap.
* @pt_mm: Used for x86 pgds.
* @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @pt_share_count: Used for HugeTLB PMD page table share count.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
@@ -439,7 +570,7 @@ FOLIO_MATCH(compound_head, _head_2a);
* understanding of the issues.
*/
struct ptdesc {
- unsigned long __page_flags;
+ memdesc_flags_t pt_flags;
union {
struct rcu_head pt_rcu_head;
@@ -455,6 +586,9 @@ struct ptdesc {
pgoff_t pt_index;
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+ atomic_t pt_share_count;
+#endif
};
union {
@@ -474,11 +608,11 @@ struct ptdesc {
#define TABLE_MATCH(pg, pt) \
static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
-TABLE_MATCH(flags, __page_flags);
+TABLE_MATCH(flags, pt_flags);
TABLE_MATCH(compound_head, pt_list);
TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
-TABLE_MATCH(index, pt_index);
+TABLE_MATCH(__folio_index, pt_index);
TABLE_MATCH(rcu_head, pt_rcu_head);
TABLE_MATCH(page_type, __page_type);
TABLE_MATCH(_refcount, __page_refcount);
@@ -500,14 +634,42 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
const struct page *: (const struct ptdesc *)(p), \
struct page *: (struct ptdesc *)(p)))
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+ atomic_set(&ptdesc->pt_share_count, 0);
+}
+
+static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
+{
+ atomic_inc(&ptdesc->pt_share_count);
+}
+
+static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+{
+ atomic_dec(&ptdesc->pt_share_count);
+}
+
+static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
+{
+ return atomic_read(&ptdesc->pt_share_count);
+}
+
+static inline bool ptdesc_pmd_is_shared(struct ptdesc *ptdesc)
+{
+ return !!ptdesc_pmd_pts_count(ptdesc);
+}
+#else
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+}
+#endif
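/*
 * Usage sketch (editorial example, not part of the patch): how a hugetlb PMD
 * unshare path might balance the share count, assuming
 * CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING. demo_free_pmd_table() and the
 * surrounding locking are hypothetical.
 */
#if 0 /* example only */
static void demo_unshare_pmd_table(struct ptdesc *ptdesc)
{
	ptdesc_pmd_pts_dec(ptdesc);		/* one sharer went away */
	if (!ptdesc_pmd_is_shared(ptdesc))
		demo_free_pmd_table(ptdesc);	/* last user tears it down */
}
#endif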
+
/*
* Used for sizing the vmemmap region on some architectures
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
-#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
-
/*
* page_private can be used on tail pages. However, PagePrivate is only
* checked by the VM on the head page. So page_private on the tail pages
@@ -521,29 +683,20 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
-static inline void *folio_get_private(struct folio *folio)
+static inline void *folio_get_private(const struct folio *folio)
{
return folio->private;
}
-struct page_frag_cache {
- void * va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- __u16 offset;
- __u16 size;
-#else
- __u32 offset;
-#endif
- /* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_refcount every time we allocate a fragment.
- */
- unsigned int pagecnt_bias;
- bool pfmemalloc;
-};
-
typedef unsigned long vm_flags_t;
/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
+/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
* map parts of them.
@@ -599,9 +752,8 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
}
#endif
-struct vma_lock {
- struct rw_semaphore lock;
-};
+#define VMA_LOCK_OFFSET 0x40000000
+#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
struct vma_numab_state {
/*
@@ -639,11 +791,115 @@ struct vma_numab_state {
int prev_scan_seq;
};
+#ifdef __HAVE_PFNMAP_TRACKING
+struct pfnmap_track_ctx {
+ struct kref kref;
+ unsigned long pfn;
+ unsigned long size; /* in bytes */
+};
+#endif
+
+/* What action should be taken after an .mmap_prepare call is complete? */
+enum mmap_action_type {
+ MMAP_NOTHING, /* Mapping is complete, no further action. */
+ MMAP_REMAP_PFN, /* Remap PFN range. */
+ MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */
+};
+
+/*
+ * Describes an action an mmap_prepare hook can request be taken to complete
+ * the mapping of a VMA. Specified in vm_area_desc.
+ */
+struct mmap_action {
+ union {
+ /* Remap range. */
+ struct {
+ unsigned long start;
+ unsigned long start_pfn;
+ unsigned long size;
+ pgprot_t pgprot;
+ } remap;
+ };
+ enum mmap_action_type type;
+
+ /*
+ * If specified, this hook is invoked after the selected action has been
+ * successfully completed. Note that the VMA write lock is still held.
+ *
+ * The absolute minimum ought to be done here.
+ *
+ * Returns 0 on success, or an error code.
+ */
+ int (*success_hook)(const struct vm_area_struct *vma);
+
+ /*
+ * If specified, this hook is invoked when an error occurs while
+ * attempting the selected action.
+ *
+ * The hook can return an error code in order to filter the error, but
+ * it is not valid to clear the error here.
+ */
+ int (*error_hook)(int err);
+
+ /*
+ * This should be set in rare instances where the operation requires
+ * that rmap not be able to access the VMA until it is completely
+ * set up.
+ */
+ bool hide_from_rmap_until_complete :1;
+};
+
+/*
+ * Opaque type representing current VMA (vm_area_struct) flag state. Must be
+ * accessed via vma_flags_xxx() helper functions.
+ */
+#define NUM_VMA_FLAG_BITS BITS_PER_LONG
+typedef struct {
+ DECLARE_BITMAP(__vma_flags, NUM_VMA_FLAG_BITS);
+} __private vma_flags_t;
+
+/*
+ * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
+ * manipulate mutable fields which will cause those fields to be updated in the
+ * resultant VMA.
+ *
+ * Helper functions are not required for manipulating any field.
+ */
+struct vm_area_desc {
+ /* Immutable state. */
+ const struct mm_struct *const mm;
+ struct file *const file; /* May vary from vm_file in stacked callers. */
+ unsigned long start;
+ unsigned long end;
+
+ /* Mutable fields. Populated with initial state. */
+ pgoff_t pgoff;
+ struct file *vm_file;
+ union {
+ vm_flags_t vm_flags;
+ vma_flags_t vma_flags;
+ };
+ pgprot_t page_prot;
+
+ /* Write-only fields. */
+ const struct vm_operations_struct *vm_ops;
+ void *private_data;
+
+ /* Take further action? */
+ struct mmap_action action;
+};
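/*
 * Sketch (editorial example, not part of the patch): an .mmap_prepare hook
 * filling in a vm_area_desc and requesting a PFN remap via the action
 * mechanism. Field names follow the structs above; demo_vm_ops, demo_ctx and
 * demo_base_pfn are hypothetical driver state.
 */
#if 0 /* example only */
static int demo_mmap_prepare(struct vm_area_desc *desc)
{
	desc->vm_ops = &demo_vm_ops;
	desc->private_data = demo_ctx;

	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = demo_base_pfn;
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;
	return 0;
}
#endif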
+
/*
* This struct describes a virtual memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
+ *
+ * Only explicitly marked struct members may be accessed by RCU readers before
+ * getting a stable reference.
+ *
+ * WARNING: when adding new members, please update vm_area_init_from() to copy
+ * them during vm_area_struct content duplication.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
@@ -654,34 +910,35 @@ struct vm_area_struct {
unsigned long vm_start;
unsigned long vm_end;
};
-#ifdef CONFIG_PER_VMA_LOCK
- struct rcu_head vm_rcu; /* Used for deferred freeing. */
-#endif
+ freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
};
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ /*
+ * The address space we belong to.
+ * Unstable RCU readers are allowed to read this.
+ */
+ struct mm_struct *vm_mm;
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
* Flags, see mm.h.
* To modify use vm_flags_{init|reset|set|clear|mod} functions.
+ * Preferably, use vma_flags_xxx() functions.
*/
union {
+ /* Temporary while VMA flags are being converted. */
const vm_flags_t vm_flags;
- vm_flags_t __private __vm_flags;
+ vma_flags_t flags;
};
#ifdef CONFIG_PER_VMA_LOCK
- /* Flag to indicate areas detached from the mm->mm_mt tree */
- bool detached;
-
/*
* Can only be written (using WRITE_ONCE()) while holding both:
* - mmap_lock (in write mode)
- * - vm_lock->lock (in write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set
* Can be read reliably while holding one of:
* - mmap_lock (in read or write mode)
- * - vm_lock->lock (in read or write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
* Can be read unreliably (using READ_ONCE()) for pessimistic bailout
* while holding nothing (except RCU to keep the VMA struct allocated).
*
@@ -689,20 +946,8 @@ struct vm_area_struct {
* counter reuse can only lead to occasional unnecessary use of the
* slowpath.
*/
- int vm_lock_seq;
- struct vma_lock *vm_lock;
+ unsigned int vm_lock_seq;
#endif
-
- /*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree.
- *
- */
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
-
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
@@ -722,14 +967,6 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
-#ifdef CONFIG_ANON_VMA_NAME
- /*
- * For private and shared anonymous mappings, a pointer to a null
- * terminated string containing the name given to the vma, or NULL if
- * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
- */
- struct anon_vma_name *anon_name;
-#endif
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
@@ -742,21 +979,96 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
+#ifdef CONFIG_PER_VMA_LOCK
+ /* Unstable RCU readers are allowed to read this. */
+ refcount_t vm_refcnt ____cacheline_aligned_in_smp;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map vmlock_dep_map;
+#endif
+#endif
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ *
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef __HAVE_PFNMAP_TRACKING
+ struct pfnmap_track_ctx *pfnmap_track_ctx;
+#endif
} __randomize_layout;
+/* Clears all bits in the VMA flags bitmap, non-atomically. */
+static inline void vma_flags_clear_all(vma_flags_t *flags)
+{
+ bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
+}
+
+/*
+ * Copy value to the first system word of VMA flags, non-atomically.
+ *
+ * IMPORTANT: This does not overwrite bytes past the first system word. The
+ * caller must account for this.
+ */
+static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
+{
+ *ACCESS_PRIVATE(flags, __vma_flags) = value;
+}
+
+/*
+ * Copy value to the first system word of VMA flags ONCE, non-atomically.
+ *
+ * IMPORTANT: This does not overwrite bytes past the first system word. The
+ * caller must account for this.
+ */
+static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ WRITE_ONCE(*bitmap, value);
+}
+
+/* Update the first system word of VMA flags setting bits, non-atomically. */
+static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ *bitmap |= value;
+}
+
+/* Update the first system word of VMA flags clearing bits, non-atomically. */
+static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ *bitmap &= ~value;
+}
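/*
 * Sketch (editorial example, not part of the patch): the word helpers mirror
 * the legacy vm_flags bit operations on the first system word of the bitmap.
 * VM_READ/VM_WRITE/VM_EXEC are the usual flag values from mm.h.
 */
#if 0 /* example only */
static void demo_vma_flags(vma_flags_t *flags)
{
	vma_flags_clear_all(flags);			/* all bits zero */
	vma_flags_overwrite_word(flags, VM_READ | VM_WRITE);
	vma_flags_set_word(flags, VM_EXEC);		/* now R|W|X */
	vma_flags_clear_word(flags, VM_WRITE);		/* now R|X */
}
#endif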
+
#ifdef CONFIG_NUMA
#define vma_policy(vma) ((vma)->vm_policy)
#else
#define vma_policy(vma) NULL
#endif
-#ifdef CONFIG_SCHED_MM_CID
-struct mm_cid {
- u64 time;
- int cid;
-};
-#endif
+/*
+ * Opaque type representing current mm_struct flag state. Must be accessed via
+ * mm_flags_xxx() helper functions.
+ */
+#define NUM_MM_FLAG_BITS (64)
+typedef struct {
+ DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
+} __private mm_flags_t;
struct kioctx_table;
struct iommu_mm_data;
@@ -810,22 +1122,9 @@ struct mm_struct {
*/
atomic_t mm_users;
-#ifdef CONFIG_SCHED_MM_CID
- /**
- * @pcpu_cid: Per-cpu current cid.
- *
- * Keep track of the currently allocated mm_cid for each cpu.
- * The per-cpu mm_cid values are serialized by their respective
- * runqueue locks.
- */
- struct mm_cid __percpu *pcpu_cid;
- /*
- * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
- *
- * When the next mm_cid scan is due (in jiffies).
- */
- unsigned long mm_cid_next_scan;
-#endif
+ /* MM CID related storage */
+ struct mm_mm_cid mm_cid;
+
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
@@ -835,10 +1134,10 @@ struct mm_struct {
* counters
*/
/*
- * With some kernel config, the current mmap_lock's offset
- * inside 'mm_struct' is at 0x120, which is very optimal, as
+ * Typically the current mmap_lock's offset is 56 bytes from
+ * the last cacheline boundary, which is nearly optimal, as
* its two hot fields 'count' and 'owner' sit in 2 different
- * cachelines, and when mmap_lock is highly contended, both
+ * cachelines, and when mmap_lock is highly contended, both
* of the 2 fields will be accessed frequently, current layout
* will help to reduce cache bouncing.
*
@@ -854,12 +1153,16 @@ struct mm_struct {
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
+ struct rcuwait vma_writer_wait;
/*
* This field has lock-like semantics, meaning it is sometimes
* accessed with ACQUIRE/RELEASE semantics.
* Roughly speaking, incrementing the sequence number is
* equivalent to releasing locks on VMAs; reading the sequence
* number can be part of taking a read lock on a VMA.
+ * Incremented every time mmap_lock is write-locked/unlocked.
+ * Initialized to 0, therefore odd values indicate mmap_lock
+ * is write-locked and even values that it's released.
*
* Can be modified under write mmap_lock using RELEASE
* semantics.
@@ -868,9 +1171,18 @@ struct mm_struct {
* Can be read with ACQUIRE semantics if not holding write
* mmap_lock.
*/
- int mm_lock_seq;
+ seqcount_t mm_lock_seq;
+#endif
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+ struct mutex futex_hash_lock;
+ struct futex_private_hash __rcu *futex_phash;
+ struct futex_private_hash *futex_phash_new;
+ /* futex-ref */
+ unsigned long futex_batches;
+ struct rcu_head futex_rcu;
+ atomic_long_t futex_atomic;
+ unsigned int __percpu *futex_ref;
#endif
-
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
@@ -881,7 +1193,7 @@ struct mm_struct {
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ vm_flags_t def_flags;
/**
* @write_protect_seq: Locked when any thread is write
@@ -898,6 +1210,11 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ /* the ABI-related flags from the ELF header. Used for core dump */
+ unsigned long saved_e_flags;
+#endif
+
struct percpu_counter rss_stat[NR_MM_COUNTERS];
struct linux_binfmt *binfmt;
@@ -905,7 +1222,7 @@ struct mm_struct {
/* Architecture-specific MM context */
mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access */
+ mm_flags_t flags; /* Must use mm_flags_* helpers to access */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
@@ -931,7 +1248,7 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -985,7 +1302,7 @@ struct mm_struct {
* Represent how many empty pages are merged with kernel zero
* pages when enabling KSM use_zero_pages.
*/
- unsigned long ksm_zero_pages;
+ atomic_long_t ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
@@ -1003,6 +1320,9 @@ struct mm_struct {
#endif
} lru_gen;
#endif /* CONFIG_LRU_GEN_WALKS_MMU */
+#ifdef CONFIG_MM_ID
+ mm_id_t mm_id;
+#endif /* CONFIG_MM_ID */
} __randomize_layout;
/*
@@ -1012,6 +1332,36 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+/* Copy value to the first system word of mm flags, non-atomically. */
+static inline void __mm_flags_overwrite_word(struct mm_struct *mm, unsigned long value)
+{
+ *ACCESS_PRIVATE(&mm->flags, __mm_flags) = value;
+}
+
+/* Obtain a read-only view of the mm flags bitmap. */
+static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm)
+{
+ return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags);
+}
+
+/* Read the first system word of mm flags, non-atomically. */
+static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm)
+{
+ return *__mm_flags_get_bitmap(mm);
+}
+
+/*
+ * Update the first system word of mm flags ONLY, applying the specified mask to
+ * it, then setting all flags specified by bits.
+ */
+static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
+ unsigned long mask, unsigned long bits)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
+
+ set_mask_bits(bitmap, mask, bits);
+}
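/*
 * Sketch (editorial example, not part of the patch): reading and rewriting
 * part of the first mm-flags word, e.g. resetting the coredump filter bits.
 * The MMF_* masks are defined further down in this header.
 */
#if 0 /* example only */
static void demo_reset_dump_filter(struct mm_struct *mm)
{
	if (__mm_flags_get_word(mm) & MMF_DUMP_FILTER_MASK)
		__mm_flags_set_mask_bits_word(mm, MMF_DUMP_FILTER_MASK,
					      MMF_DUMP_FILTER_DEFAULT);
}
#endif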
+
#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
@@ -1112,84 +1462,56 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
}
#ifdef CONFIG_SCHED_MM_CID
-
-enum mm_cid_state {
- MM_CID_UNSET = -1U, /* Unset state has lazy_put flag set. */
- MM_CID_LAZY_PUT = (1U << 31),
-};
-
-static inline bool mm_cid_is_unset(int cid)
-{
- return cid == MM_CID_UNSET;
-}
-
-static inline bool mm_cid_is_lazy_put(int cid)
-{
- return !mm_cid_is_unset(cid) && (cid & MM_CID_LAZY_PUT);
-}
-
-static inline bool mm_cid_is_valid(int cid)
+/*
+ * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
+ */
+static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
{
- return !(cid & MM_CID_LAZY_PUT);
-}
+ unsigned long bitmap = (unsigned long)mm;
-static inline int mm_cid_set_lazy_put(int cid)
-{
- return cid | MM_CID_LAZY_PUT;
-}
-
-static inline int mm_cid_clear_lazy_put(int cid)
-{
- return cid & ~MM_CID_LAZY_PUT;
+ bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ bitmap += cpumask_size();
+ return (struct cpumask *)bitmap;
}
/* Accessor for struct mm_struct's cidmask. */
-static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+static inline unsigned long *mm_cidmask(struct mm_struct *mm)
{
- unsigned long cid_bitmap = (unsigned long)mm;
+ unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
- cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
- /* Skip cpu_bitmap */
+ /* Skip mm_cpus_allowed */
cid_bitmap += cpumask_size();
- return (struct cpumask *)cid_bitmap;
+ return (unsigned long *)cid_bitmap;
}
-static inline void mm_init_cid(struct mm_struct *mm)
-{
- int i;
-
- for_each_possible_cpu(i) {
- struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);
-
- pcpu_cid->cid = MM_CID_UNSET;
- pcpu_cid->time = 0;
- }
- cpumask_clear(mm_cidmask(mm));
-}
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p);
-static inline int mm_alloc_cid_noprof(struct mm_struct *mm)
+static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
{
- mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
- if (!mm->pcpu_cid)
+ mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
+ if (!mm->mm_cid.pcpu)
return -ENOMEM;
- mm_init_cid(mm);
+ mm_init_cid(mm, p);
return 0;
}
#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
static inline void mm_destroy_cid(struct mm_struct *mm)
{
- free_percpu(mm->pcpu_cid);
- mm->pcpu_cid = NULL;
+ free_percpu(mm->mm_cid.pcpu);
+ mm->mm_cid.pcpu = NULL;
}
static inline unsigned int mm_cid_size(void)
{
- return cpumask_size();
+ /* mm_cpus_allowed(), mm_cidmask(). */
+ return cpumask_size() + bitmap_size(num_possible_cpus());
}
+
#else /* CONFIG_SCHED_MM_CID */
-static inline void mm_init_cid(struct mm_struct *mm) { }
-static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
+static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
static inline void mm_destroy_cid(struct mm_struct *mm) { }
static inline unsigned int mm_cid_size(void)
{
@@ -1297,6 +1619,9 @@ struct vm_special_mapping {
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
+
+ void (*close)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma);
};
enum tlb_flush_reason {
@@ -1305,6 +1630,7 @@ enum tlb_flush_reason {
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
+ TLB_REMOTE_WRONG_CPU,
NR_TLB_FLUSH_REASONS,
};
@@ -1469,4 +1795,91 @@ enum {
/* See also internal only FOLL flags in mm/internal.h */
};
+/* mm flags */
+
+/*
+ * The first two bits represent core dump modes for set-user-ID,
+ * the modes are SUID_DUMP_* defined in linux/sched/coredump.h
+ */
+#define MMF_DUMPABLE_BITS 2
+#define MMF_DUMPABLE_MASK (BIT(MMF_DUMPABLE_BITS) - 1)
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE 2
+#define MMF_DUMP_ANON_SHARED 3
+#define MMF_DUMP_MAPPED_PRIVATE 4
+#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_ELF_HEADERS 6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED 8
+#define MMF_DUMP_DAX_PRIVATE 9
+#define MMF_DUMP_DAX_SHARED 10
+
+#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS 9
+#define MMF_DUMP_FILTER_MASK \
+ ((BIT(MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+ (BIT(MMF_DUMP_ANON_PRIVATE) | BIT(MMF_DUMP_ANON_SHARED) | \
+ BIT(MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF BIT(MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
+ /* leave room for more dump flags */
+#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
+
+#define MMF_HUGE_ZERO_FOLIO 18 /* mm has ever used the global huge zero folio */
+
+#define MMF_HAS_UPROBES 19 /* has uprobes */
+#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
+#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
+#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
+#define MMF_DISABLE_THP_EXCEPT_ADVISED 23 /* no THP except when advised (e.g., VM_HUGEPAGE) */
+#define MMF_DISABLE_THP_COMPLETELY 24 /* no THP for all VMAs */
+#define MMF_DISABLE_THP_MASK (BIT(MMF_DISABLE_THP_COMPLETELY) | \
+ BIT(MMF_DISABLE_THP_EXCEPT_ADVISED))
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
+
+#define MMF_HAS_MDWE 28
+#define MMF_HAS_MDWE_MASK BIT(MMF_HAS_MDWE)
+
+#define MMF_HAS_MDWE_NO_INHERIT 29
+
+#define MMF_VM_MERGE_ANY 30
+#define MMF_VM_MERGE_ANY_MASK BIT(MMF_VM_MERGE_ANY)
+
+#define MMF_TOPDOWN 31 /* mm searches top down by default */
+#define MMF_TOPDOWN_MASK BIT(MMF_TOPDOWN)
+
+#define MMF_INIT_LEGACY_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
+ MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
+
+/* Legacy flags must fit within 32 bits. */
+static_assert((u64)MMF_INIT_LEGACY_MASK <= (u64)UINT_MAX);
+
+/*
+ * Initialise legacy flags according to masks, propagating selected flags on
+ * fork. Further flag manipulation can be performed by the caller.
+ */
+static inline unsigned long mmf_init_legacy_flags(unsigned long flags)
+{
+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
+ flags &= ~((1UL << MMF_HAS_MDWE) |
+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
+ return flags & MMF_INIT_LEGACY_MASK;
+}
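/*
 * Usage sketch (editorial example, not part of the patch): how a fork path
 * might seed a child mm's first flag word from the parent while honouring
 * the MDWE no-inherit semantics encoded above.
 */
#if 0 /* example only */
static void demo_fork_mm_flags(struct mm_struct *child,
			       const struct mm_struct *parent)
{
	__mm_flags_overwrite_word(child,
			mmf_init_legacy_flags(__mm_flags_get_word(parent)));
}
#endif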
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index a2f6179b672b..a82aa80c0ba4 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -8,6 +8,7 @@
* (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
*/
+#include <linux/align.h>
#include <linux/types.h>
#include <asm/page.h>
@@ -16,9 +17,6 @@
#include <asm/tlbbatch.h>
#endif
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
@@ -46,6 +44,26 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+struct page_frag_cache {
+ /* encoded_page consists of the virtual address, pfmemalloc bit and
+ * order of a page.
+ */
+ unsigned long encoded_page;
+
+ /* we maintain a pagecount bias, so that we don't dirty the cache line
+ * containing page->_refcount every time we allocate a fragment.
+ */
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
+ __u16 offset;
+ __u16 pagecnt_bias;
+#else
+ __u32 offset;
+ __u32 pagecnt_bias;
+#endif
+};
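/*
 * Sketch (editorial example, not part of the patch): one plausible decoding
 * of encoded_page, assuming the page-aligned virtual address sits in the
 * high bits while the low bits carry the order and pfmemalloc flag. The real
 * accessors live in the page_frag implementation; this layout is an
 * illustrative assumption, not its API.
 */
#if 0 /* example only */
static void *demo_frag_virt(const struct page_frag_cache *nc)
{
	/* Low-order bits assumed to hold order/pfmemalloc metadata. */
	return (void *)(nc->encoded_page & PAGE_MASK);
}
#endif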
+
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
diff --git a/include/linux/mman.h b/include/linux/mman.h
index bcb201ab7a41..0ba8a7e8b90a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu_counter.h>
@@ -58,8 +59,6 @@
| MAP_HUGE_1GB)
extern int sysctl_overcommit_memory;
-extern int sysctl_overcommit_ratio;
-extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;
#ifdef CONFIG_SMP
@@ -94,7 +93,7 @@ static inline void vm_unacct_memory(long pages)
#endif
#ifndef arch_calc_vm_flag_bits
-#define arch_calc_vm_flag_bits(flags) 0
+#define arch_calc_vm_flag_bits(file, flags) 0
#endif
#ifndef arch_validate_prot
@@ -138,7 +137,7 @@ static inline bool arch_validate_flags(unsigned long flags)
/*
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
@@ -150,14 +149,16 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
/*
* Combine the mmap "flags" argument into "vm_flags" used internally.
*/
-static inline unsigned long
-calc_vm_flag_bits(unsigned long flags)
+static inline vm_flags_t
+calc_vm_flag_bits(struct file *file, unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
_calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
- arch_calc_vm_flag_bits(flags);
+#endif
+ arch_calc_vm_flag_bits(file, flags);
}
unsigned long vm_commit_limit(void);
@@ -188,16 +189,31 @@ static inline bool arch_memory_deny_write_exec_supported(void)
*
* d) mmap(PROT_READ | PROT_EXEC)
* mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if proposed change is OK, true if not ok and should be denied.
*/
-static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
- if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+ /* If MDWE is disabled, we have nothing to deny. */
+ if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
+ return false;
+
+ /* If the new VMA is not executable, we have nothing to deny. */
+ if (!(new & VM_EXEC))
return false;
- if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+ /* Under MDWE we do not accept newly writably executable VMAs... */
+ if (new & VM_WRITE)
return true;
- if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+ /* ...nor previously non-executable VMAs becoming executable. */
+ if (!(old & VM_EXEC))
return true;
return false;
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index de9dc20b01ba..d53f72dba7fe 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,12 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H
+/* Avoid a dependency loop by declaring here. */
+extern int rcuwait_wake_up(struct rcuwait *w);
+
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
+#include <linux/cleanup.h>
+#include <linux/sched/mm.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
@@ -71,39 +77,263 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
}
#ifdef CONFIG_PER_VMA_LOCK
+
+static inline void mm_lock_seqcount_init(struct mm_struct *mm)
+{
+ seqcount_init(&mm->mm_lock_seq);
+}
+
+static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
+{
+ do_raw_write_seqcount_begin(&mm->mm_lock_seq);
+}
+
+static inline void mm_lock_seqcount_end(struct mm_struct *mm)
+{
+ ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
+ do_raw_write_seqcount_end(&mm->mm_lock_seq);
+}
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+ /*
+ * Since mmap_lock is a sleeping lock, and waiting for it to become
+ * unlocked is more or less equivalent to taking it ourselves, don't
+ * bother with the speculative path if mmap_lock is already write-locked
+ * and take the slow path, which takes the lock.
+ */
+ return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+ return read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
+
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static struct lock_class_key lockdep_key;
+
+ lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+#endif
+ if (reset_refcnt)
+ refcount_set(&vma->vm_refcnt, 0);
+ vma->vm_lock_seq = UINT_MAX;
+}
+
+static inline bool is_vma_writer_only(int refcnt)
+{
+ /*
+ * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
+ * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
+ * a detached vma happens only in vma_mark_detached() and is a rare
+ * case, therefore most of the time there will be no unnecessary wakeup.
+ */
+ return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1;
+}
+
+static inline void vma_refcount_put(struct vm_area_struct *vma)
+{
+ /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
+ struct mm_struct *mm = vma->vm_mm;
+ int oldcnt;
+
+ rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+ if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
+ if (is_vma_writer_only(oldcnt - 1))
+ rcuwait_wake_up(&mm->vma_writer_wait);
+ }
+}
+
/*
- * Drop all currently-held per-VMA locks.
- * This is called from the mmap_lock implementation directly before releasing
- * a write-locked mmap_lock (or downgrading it to read-locked).
- * This should normally NOT be called manually from other places.
- * If you want to call this manually anyway, keep in mind that this will release
- * *all* VMA write locks, including ones from further up the stack.
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * will not fail (nobody can concurrently write-lock the vma).
+ * vma_start_read() should not be used in such cases because it might fail
+ * due to mm_lock_seq overflow. This functionality is used to obtain a vma
+ * read lock and then drop the mmap read lock.
*/
-static inline void vma_end_write_all(struct mm_struct *mm)
+static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
{
- mmap_assert_write_locked(mm);
+ int oldcnt;
+
+ mmap_assert_locked(vma->vm_mm);
+ if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
+ VMA_REF_LIMIT)))
+ return false;
+
+ rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ return true;
+}
+
+/*
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * will not fail (nobody can concurrently write-lock the vma).
+ * vma_start_read() should not be used in such cases because it might fail
+ * due to mm_lock_seq overflow. This functionality is used to obtain a vma
+ * read lock and then drop the mmap read lock.
+ */
+static inline bool vma_start_read_locked(struct vm_area_struct *vma)
+{
+ return vma_start_read_locked_nested(vma, 0);
+}
+
+static inline void vma_end_read(struct vm_area_struct *vma)
+{
+ vma_refcount_put(vma);
+}
+
+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+
/*
- * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
- * mmap_lock being held.
- * We need RELEASE semantics here to ensure that preceding stores into
- * the VMA take effect before we unlock it with this store.
- * Pairs with ACQUIRE semantics in vma_start_read().
+ * current task is holding mmap_write_lock, both vma->vm_lock_seq and
+ * mm->mm_lock_seq can't be concurrently modified.
*/
- smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
+ return (vma->vm_lock_seq == *mm_lock_seq);
}
-#else
-static inline void vma_end_write_all(struct mm_struct *mm) {}
-#endif
-static inline void mmap_init_lock(struct mm_struct *mm)
+int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
+ int state);
+
+/*
+ * Begin writing to a VMA.
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ */
+static inline void vma_start_write(struct vm_area_struct *vma)
{
- init_rwsem(&mm->mmap_lock);
+ unsigned int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return;
+
+ __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+}
+
+/**
+ * vma_start_write_killable - Begin writing to a VMA.
+ * @vma: The VMA we are going to modify.
+ *
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ *
+ * Context: May sleep while waiting for readers to drop the vma read lock.
+ * Caller must already hold the mmap_lock for write.
+ *
+ * Return: 0 for a successful acquisition. -EINTR if a fatal signal was
+ * received.
+ */
+static inline __must_check
+int vma_start_write_killable(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return 0;
+ return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+}
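/*
 * Usage sketch (editorial example, not part of the patch): a write-side path
 * that prefers to remain killable while waiting for per-VMA readers to drain.
 */
#if 0 /* example only */
static int demo_modify_vma(struct vm_area_struct *vma)
{
	int err;

	mmap_assert_write_locked(vma->vm_mm);
	err = vma_start_write_killable(vma);
	if (err)
		return err;	/* fatal signal received while waiting */

	/* ... modify the VMA; the lock drops with the mmap write lock ... */
	return 0;
}
#endif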
+
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
+ !__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
+/*
+ * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
+ * assertions should be made either under mmap_write_lock or when the object
+ * has been isolated under mmap_write_lock, ensuring no competing writers.
+ */
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_detached(vma);
+ refcount_set_release(&vma->vm_refcnt, 1);
+}
+
+void vma_mark_detached(struct vm_area_struct *vma);
+
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address);
+
+/*
+ * Locks next vma pointed by the iterator. Confirms the locked vma has not
+ * been modified and will retry under mmap_lock protection if modification
+ * was detected. Should be called from read RCU section.
+ * Returns either a valid locked VMA, NULL if no more VMAs or -EINTR if the
+ * process was interrupted.
+ */
+struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
+ struct vma_iterator *iter,
+ unsigned long address);
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
+static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
+static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+ return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+ return true;
+}
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
+static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline __must_check
+int vma_start_write_killable(struct vm_area_struct *vma) { return 0; }
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ { mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
+
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ mmap_assert_locked(vma->vm_mm);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
static inline void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -111,6 +341,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
__mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -120,10 +351,26 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
__mmap_lock_trace_start_locking(mm, true);
ret = down_write_killable(&mm->mmap_lock);
+ if (!ret)
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
return ret;
}
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
+static inline void vma_end_write_all(struct mm_struct *mm)
+{
+ mmap_assert_write_locked(mm);
+ mm_lock_seqcount_end(mm);
+}
+
static inline void mmap_write_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, true);
@@ -171,6 +418,9 @@ static inline void mmap_read_unlock(struct mm_struct *mm)
up_read(&mm->mmap_lock);
}
+DEFINE_GUARD(mmap_read_lock, struct mm_struct *,
+ mmap_read_lock(_T), mmap_read_unlock(_T))
+
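
With the guard defined above, scope-based cleanup from linux/cleanup.h can
replace explicit unlock paths. A minimal sketch (example_mm_has_vma() is
hypothetical):

        static bool example_mm_has_vma(struct mm_struct *mm, unsigned long addr)
        {
                guard(mmap_read_lock)(mm);      /* dropped on every return path */

                return find_vma(mm, addr) != NULL;
        }
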
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f34407cc2788..e9e964c20e53 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -35,7 +35,7 @@ struct mmc_csd {
unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
- unsigned int capacity;
+ sector_t capacity;
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
@@ -182,12 +182,17 @@ struct sd_switch_caps {
#define SD_SET_CURRENT_LIMIT_400 1
#define SD_SET_CURRENT_LIMIT_600 2
#define SD_SET_CURRENT_LIMIT_800 3
-#define SD_SET_CURRENT_NO_CHANGE (-1)
#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
+
+#define SD4_SET_POWER_LIMIT_0_72W 0
+#define SD4_SET_POWER_LIMIT_1_44W 1
+#define SD4_SET_POWER_LIMIT_2_16W 2
+#define SD4_SET_POWER_LIMIT_2_88W 3
+#define SD4_SET_POWER_LIMIT_1_80W 4
};
struct sd_ext_reg {
@@ -209,6 +214,34 @@ struct sd_ext_reg {
#define SD_EXT_PERF_CMD_QUEUE (1<<4)
};
+struct sd_uhs2_config {
+ u32 node_id;
+
+ u32 n_fcu;
+ u32 maxblk_len;
+ u8 n_lanes;
+ u8 dadr_len;
+ u8 app_type;
+ u8 phy_minor_rev;
+ u8 phy_major_rev;
+ u8 can_hibernate;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_minor_rev;
+ u8 link_major_rev;
+ u8 dev_type;
+ u8 n_data_gap;
+
+ u32 n_fcu_set;
+ u32 maxblk_len_set;
+ u8 n_lanes_set;
+ u8 speed_range_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
+};
+
struct sdio_cccr {
unsigned int sdio_vsn;
unsigned int sd_vsn;
@@ -294,6 +327,8 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
+#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
@@ -319,6 +354,8 @@ struct mmc_card {
struct sd_ext_reg ext_power; /* SD extension reg for PM */
struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
+ struct sd_uhs2_config uhs2_config; /* SD UHS-II config */
+
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2c7928a50907..01e0f591a20b 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -11,16 +11,18 @@
struct mmc_data;
struct mmc_request;
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
+#define UHS2_MAX_PAYLOAD_LEN 2
+#define UHS2_MAX_RESP_LEN 20
+
+struct uhs2_command {
+ u16 header;
+ u16 arg;
+ __be32 payload[UHS2_MAX_PAYLOAD_LEN];
+ u8 payload_len;
+ u8 packet_len;
+ u8 tmode_half_duplex;
+ u8 uhs2_resp[UHS2_MAX_RESP_LEN]; /* UHS2 native cmd resp */
+ u8 uhs2_resp_len; /* UHS2 native cmd resp len */
};
struct mmc_command {
@@ -55,6 +57,7 @@ struct mmc_command {
#define MMC_RSP_NONE (0)
#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
+#define MMC_RSP_R1B_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE|MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
#define MMC_RSP_R4 (MMC_RSP_PRESENT)
@@ -62,9 +65,6 @@ struct mmc_command {
#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-/* Can be used by core to poll after switch to MMC HS mode */
-#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE)
-
#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
/*
@@ -108,6 +108,12 @@ struct mmc_command {
unsigned int busy_timeout; /* busy detect timeout in ms */
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
+
+ struct uhs2_command *uhs2_cmd; /* UHS2 command */
+
+ /* for SDUC */
+ bool has_ext_addr;
+ u8 ext_addr;
};
struct mmc_data {
@@ -166,6 +172,7 @@ struct mmc_request {
const struct bio_crypt_ctx *crypto_ctx;
int crypto_key_slot;
#endif
+ struct uhs2_command uhs2_cmd;
};
struct mmc_card;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 88c6a76042ee..e0e2c265e5d1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,12 +10,14 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/blk-crypto-profile.h>
+#include <linux/mmc/sd_uhs2.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -63,6 +65,10 @@ struct mmc_ios {
#define MMC_TIMING_MMC_HS400 10
#define MMC_TIMING_SD_EXP 11
#define MMC_TIMING_SD_EXP_1_2V 12
+#define MMC_TIMING_UHS2_SPEED_A 13
+#define MMC_TIMING_UHS2_SPEED_A_HD 14
+#define MMC_TIMING_UHS2_SPEED_B 15
+#define MMC_TIMING_UHS2_SPEED_B_HD 16
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -70,6 +76,9 @@ struct mmc_ios {
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2
+ unsigned char vqmmc2_voltage;
+#define MMC_VQMMC2_VOLTAGE_180 0
+
unsigned char drv_type; /* driver type (A, B, C, D) */
#define MMC_SET_DRIVER_TYPE_B 0
@@ -91,6 +100,43 @@ struct mmc_clk_phase_map {
struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};
+struct sd_uhs2_caps {
+ u32 dap;
+ u32 gap;
+ u32 group_desc;
+ u32 maxblk_len;
+ u32 n_fcu;
+ u8 n_lanes;
+ u8 addr64;
+ u8 card_type;
+ u8 phy_rev;
+ u8 speed_range;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_rev;
+ u8 host_type;
+ u8 n_data_gap;
+
+ u32 maxblk_len_set;
+ u32 n_fcu_set;
+ u8 n_lanes_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
+};
+
+enum sd_uhs2_operation {
+ UHS2_PHY_INIT = 0,
+ UHS2_SET_CONFIG,
+ UHS2_ENABLE_INT,
+ UHS2_DISABLE_INT,
+ UHS2_ENABLE_CLK,
+ UHS2_DISABLE_CLK,
+ UHS2_CHECK_DORMANT,
+ UHS2_SET_IOS,
+};
+
struct mmc_host;
enum mmc_err_stat {
@@ -218,6 +264,14 @@ struct mmc_host_ops {
/* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /*
+ * The uhs2_control callback is used to execute SD UHS-II specific
+ * operations. It is mandatory to implement for hosts that support the
+ * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are a
+ * negative errno in case of a failure or zero for success.
+ */
+ int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op);
};
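
A sketch of how a host driver might wire up the new callback (the driver
function and its PHY handling are hypothetical; only the signature, the
enum and MMC_CAP2_SD_UHS2 come from this patch):

        static int example_uhs2_control(struct mmc_host *host,
                                        enum sd_uhs2_operation op)
        {
                switch (op) {
                case UHS2_PHY_INIT:
                        /* ... bring up the UHS-II PHY ... */
                        return 0;
                default:
                        return -EOPNOTSUPP;
                }
        }

        /* Set .uhs2_control = example_uhs2_control in struct mmc_host_ops
         * and MMC_CAP2_SD_UHS2 in host->caps2. */
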
struct mmc_cqe_ops {
@@ -264,16 +318,6 @@ struct mmc_cqe_ops {
void (*cqe_recovery_finish)(struct mmc_host *host);
};
-struct mmc_async_req {
- /* active mmc request */
- struct mmc_request *mrq;
- /*
- * Check error status of completed mmc request.
- * Returns 0 if success otherwise non zero.
- */
- enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
-};
-
/**
* struct mmc_slot - MMC slot functions
*
@@ -291,26 +335,17 @@ struct mmc_slot {
void *handler_priv;
};
-/**
- * mmc_context_info - synchronization details for mmc context
- * @is_done_rcv wake up reason was done request
- * @is_new_req wake up reason was new request
- * @is_waiting_last_req mmc context waiting for single running request
- * @wait wait queue
- */
-struct mmc_context_info {
- bool is_done_rcv;
- bool is_new_req;
- bool is_waiting_last_req;
- wait_queue_head_t wait;
-};
-
struct regulator;
struct mmc_pwrseq;
+struct notifier_block;
struct mmc_supply {
struct regulator *vmmc; /* Card power supply */
struct regulator *vqmmc; /* Optional Vccq supply */
+ struct regulator *vqmmc2; /* Optional supply for phy */
+
+ struct notifier_block vmmc_nb; /* Notifier for vmmc */
+ struct work_struct uv_work; /* Undervoltage work */
};
struct mmc_ctx {
@@ -402,6 +437,7 @@ struct mmc_host {
MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
+#define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
@@ -428,6 +464,10 @@ struct mmc_host {
#endif
#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
+ bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */
+ bool uhs2_app_cmd; /* UHS-II flag for APP command */
+ struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */
+
int fixed_drv_type; /* fixed driver type for non-removable media */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -458,6 +498,13 @@ struct mmc_host {
unsigned int can_dma_map_merge:1; /* merging can be used */
unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
+ /*
+ * Indicates whether an undervoltage event has already been handled.
+ * This prevents repeated REGULATOR_EVENT_UNDER_VOLTAGE notifications
+ * from triggering the undervoltage handling more than once.
+ */
+ unsigned int undervoltage:1; /* Undervoltage state */
+
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -529,6 +576,7 @@ struct mmc_host {
int hsq_depth;
u32 err_stats[MMC_ERR_MAX];
+ u32 max_sd_hs_hz;
unsigned long private[] ____cacheline_aligned;
};
@@ -554,6 +602,14 @@ static inline struct mmc_host *mmc_from_priv(void *priv)
return container_of(priv, struct mmc_host, private);
}
+#ifdef CONFIG_MMC_CRYPTO
+static inline struct mmc_host *
+mmc_from_crypto_profile(struct blk_crypto_profile *profile)
+{
+ return container_of(profile, struct mmc_host, crypto_profile);
+}
+#endif
+
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
@@ -590,6 +646,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
+int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
@@ -603,6 +660,12 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
{
return -EINVAL;
}
+
+static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ return -EINVAL;
+}
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
@@ -638,6 +701,14 @@ static inline int mmc_card_uhs(struct mmc_card *card)
card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}
+static inline bool mmc_card_uhs2(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
@@ -672,10 +743,18 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
-int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+static inline bool mmc_card_uhs2_hd_mode(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
+ u8 value, u8 *resp);
int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 6727576a8755..af5fc70e09a2 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -15,6 +15,9 @@
#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
#define SD_SWITCH_VOLTAGE 11 /* ac R1 */
+/* Class 2 */
+#define SD_ADDR_EXT 22 /* ac [5:0] R1 */
+
/* class 10 */
#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
@@ -36,6 +39,7 @@
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
+#define SD_OCR_2T (1 << 27) /* HO2T/CO2T - SDUC support */
#define SD_OCR_XPC (1 << 28) /* SDXC power control */
#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */
diff --git a/include/linux/mmc/sd_uhs2.h b/include/linux/mmc/sd_uhs2.h
new file mode 100644
index 000000000000..7abe9bd870c7
--- /dev/null
+++ b/include/linux/mmc/sd_uhs2.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Header file for UHS-II packets, Host Controller registers and I/O
+ * accessors.
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ */
+#ifndef LINUX_MMC_UHS2_H
+#define LINUX_MMC_UHS2_H
+
+/* LINK Layer definition */
+/*
+ * UHS2 Header:
+ * Refer to UHS-II Addendum Version 1.02, Figure 5-2; the format of the CCMD Header is described below:
+ * bit [3:0] : DID(Destination ID = Node ID of UHS2 card)
+ * bit [6:4] : TYP(Packet Type)
+ * 000b: CCMD(Control command packet)
+ * 001b: DCMD(Data command packet)
+ * 010b: RES(Response packet)
+ * 011b: DATA(Data payload packet)
+ * 111b: MSG(Message packet)
+ * Others: Reserved
+ * bit [7] : NP(Native Packet)
+ * bit [10:8] : TID(Transaction ID)
+ * bit [11] : Reserved
+ * bit [15:12]: SID(Source ID = Node ID of Host, i.e. 0)
+ *
+ * Broadcast CCMD issued by Host is represented as DID=SID=0.
+ */
+/*
+ * UHS2 Argument:
+ * Refer to UHS-II Addendum Version 1.02, Figure 6-5; the format of the CCMD Argument is described below:
+ * bit [3:0] : MSB of IOADR
+ * bit [5:4] : PLEN(Payload Length)
+ * 00b: 0 byte
+ * 01b: 4 bytes
+ * 10b: 8 bytes
+ * 11b: 16 bytes
+ * bit [6] : Reserved
+ * bit [7] : R/W(Read/Write)
+ * 0: Control read command
+ * 1: Control write command
+ * bit [15:8] : LSB of IOADR
+ *
+ * I/O Address specifies the address of a register in the UHS-II I/O space accessed by CCMD.
+ * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last.
+ */
+#define UHS2_NATIVE_PACKET_POS 7
+#define UHS2_NATIVE_PACKET (1 << UHS2_NATIVE_PACKET_POS)
+
+#define UHS2_PACKET_TYPE_POS 4
+#define UHS2_PACKET_TYPE_CCMD (0 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DCMD (1 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_RES (2 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DATA (3 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_MSG (7 << UHS2_PACKET_TYPE_POS)
+
+#define UHS2_DEST_ID_MASK 0x0F
+#define UHS2_DEST_ID 0x1
+
+#define UHS2_SRC_ID_POS 12
+#define UHS2_SRC_ID_MASK 0xF000
+
+#define UHS2_TRANS_ID_POS 8
+#define UHS2_TRANS_ID_MASK 0x0700
+
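
A sketch of composing a native CCMD header from the layout documented above
(DID in bits 3:0, TYP in bits 6:4, NP in bit 7; the host's SID is 0, so no
bits are set for it). The local variable is illustrative:

        u16 header = UHS2_NATIVE_PACKET |                /* NP  = 1    */
                     UHS2_PACKET_TYPE_CCMD |             /* TYP = 000b */
                     (UHS2_DEST_ID & UHS2_DEST_ID_MASK); /* DID = card node ID */
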
+/* UHS2 MSG */
+#define UHS2_MSG_CTG_POS 5
+#define UHS2_MSG_CTG_LMSG 0x00
+#define UHS2_MSG_CTG_INT 0x60
+#define UHS2_MSG_CTG_AMSG 0x80
+
+#define UHS2_MSG_CTG_FCREQ 0x00
+#define UHS2_MSG_CTG_FCRDY 0x01
+#define UHS2_MSG_CTG_STAT 0x02
+
+#define UHS2_MSG_CODE_POS 8
+#define UHS2_MSG_CODE_FC_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_RECOVER_ERR 0x1
+
+/* TRANS Layer definition */
+
+/* Native packets */
+#define UHS2_NATIVE_CMD_RW_POS 7
+#define UHS2_NATIVE_CMD_WRITE (1 << UHS2_NATIVE_CMD_RW_POS)
+#define UHS2_NATIVE_CMD_READ (0 << UHS2_NATIVE_CMD_RW_POS)
+
+#define UHS2_NATIVE_CMD_PLEN_POS 4
+#define UHS2_NATIVE_CMD_PLEN_4B (1 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_8B (2 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_16B (3 << UHS2_NATIVE_CMD_PLEN_POS)
+
+#define UHS2_NATIVE_CCMD_GET_MIOADR_MASK 0xF00
+#define UHS2_NATIVE_CCMD_MIOADR_MASK 0x0F
+
+#define UHS2_NATIVE_CCMD_LIOADR_POS 8
+#define UHS2_NATIVE_CCMD_GET_LIOADR_MASK 0x0FF
+
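
A sketch of encoding a control-write CCMD argument for a 12-bit IOADR per
the layout documented earlier (MSB nibble of IOADR in bits 3:0, PLEN in
bits 5:4, R/W in bit 7, LSB byte of IOADR in bits 15:8); ioadr is
illustrative:

        u16 arg = ((ioadr & UHS2_NATIVE_CCMD_GET_MIOADR_MASK) >> 8) |
                  UHS2_NATIVE_CMD_PLEN_4B |
                  UHS2_NATIVE_CMD_WRITE |
                  ((ioadr & UHS2_NATIVE_CCMD_GET_LIOADR_MASK) <<
                   UHS2_NATIVE_CCMD_LIOADR_POS);
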
+#define UHS2_CCMD_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_INIT_PAYLOAD_LEN 1
+#define UHS2_DEV_INIT_RESP_LEN 6
+#define UHS2_DEV_ENUM_PAYLOAD_LEN 1
+#define UHS2_DEV_ENUM_RESP_LEN 8
+#define UHS2_CFG_WRITE_PAYLOAD_LEN 2
+#define UHS2_CFG_WRITE_PHY_SET_RESP_LEN 4
+#define UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN 5
+#define UHS2_GO_DORMANT_PAYLOAD_LEN 1
+
+/*
+ * UHS2 Argument:
+ * Refer to UHS-II Addendum Version 1.02, Figure 6-8; the format of the DCMD Argument is described below:
+ * bit [2:0] : Reserved
+ * bit [6:3] : TMODE(Transfer Mode)
+ * bit 3: DAM(Data Access Mode)
+ * bit 4: TLUM(TLEN Unit Mode)
+ * bit 5: LM(Length Mode)
+ * bit 6: DM(Duplex Mode)
+ * bit [7] : R/W(Read/Write)
+ * 0: Control read command
+ * 1: Control write command
+ * bit [15:8] : Reserved
+ *
+ * I/O Address specifies the address of a register in the UHS-II I/O space accessed by CCMD.
+ * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last.
+ */
+#define UHS2_DCMD_DM_POS 6
+#define UHS2_DCMD_2L_HD_MODE (1 << UHS2_DCMD_DM_POS)
+#define UHS2_DCMD_LM_POS 5
+#define UHS2_DCMD_LM_TLEN_EXIST (1 << UHS2_DCMD_LM_POS)
+#define UHS2_DCMD_TLUM_POS 4
+#define UHS2_DCMD_TLUM_BYTE_MODE (1 << UHS2_DCMD_TLUM_POS)
+#define UHS2_NATIVE_DCMD_DAM_POS 3
+#define UHS2_NATIVE_DCMD_DAM_IO (1 << UHS2_NATIVE_DCMD_DAM_POS)
+
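
Similarly, a sketch of a DCMD argument for a half-duplex write with an
explicit byte-mode TLEN (the combination of TMODE bits is illustrative):

        u16 arg = UHS2_NATIVE_CMD_WRITE |       /* R/W  = 1                */
                  UHS2_DCMD_2L_HD_MODE |        /* DM   = 1: half duplex   */
                  UHS2_DCMD_LM_TLEN_EXIST |     /* LM   = 1: TLEN present  */
                  UHS2_DCMD_TLUM_BYTE_MODE;     /* TLUM = 1: TLEN in bytes */
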
+#define UHS2_RES_NACK_POS 7
+#define UHS2_RES_NACK_MASK (0x1 << UHS2_RES_NACK_POS)
+
+#define UHS2_RES_ECODE_POS 4
+#define UHS2_RES_ECODE_MASK 0x7
+#define UHS2_RES_ECODE_COND 1
+#define UHS2_RES_ECODE_ARG 2
+#define UHS2_RES_ECODE_GEN 3
+
+/* IOADR of device registers */
+#define UHS2_IOADR_GENERIC_CAPS 0x00
+#define UHS2_IOADR_PHY_CAPS 0x02
+#define UHS2_IOADR_LINK_CAPS 0x04
+#define UHS2_IOADR_RSV_CAPS 0x06
+#define UHS2_IOADR_GENERIC_SETTINGS 0x08
+#define UHS2_IOADR_PHY_SETTINGS 0x0A
+#define UHS2_IOADR_LINK_SETTINGS 0x0C
+#define UHS2_IOADR_PRESET 0x40
+
+/* SD application packets */
+#define UHS2_SD_CMD_INDEX_POS 8
+
+#define UHS2_SD_CMD_APP_POS 14
+#define UHS2_SD_CMD_APP (1 << UHS2_SD_CMD_APP_POS)
+
+/* UHS-II Device Registers */
+#define UHS2_DEV_CONFIG_REG 0x000
+
+/* General Caps and Settings registers */
+#define UHS2_DEV_CONFIG_GEN_CAPS (UHS2_DEV_CONFIG_REG + 0x000)
+#define UHS2_DEV_CONFIG_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_N_LANES_MASK 0x3F
+#define UHS2_DEV_CONFIG_2L_HD_FD 0x1
+#define UHS2_DEV_CONFIG_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_1D2U_FD 0x4
+#define UHS2_DEV_CONFIG_2D2U_FD 0x8
+#define UHS2_DEV_CONFIG_DADR_POS 14
+#define UHS2_DEV_CONFIG_DADR_MASK 0x1
+#define UHS2_DEV_CONFIG_APP_POS 16
+#define UHS2_DEV_CONFIG_APP_MASK 0xFF
+#define UHS2_DEV_CONFIG_APP_SD_MEM 0x1
+
+#define UHS2_DEV_CONFIG_GEN_SET (UHS2_DEV_CONFIG_REG + 0x008)
+#define UHS2_DEV_CONFIG_GEN_SET_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD 0x0
+#define UHS2_DEV_CONFIG_GEN_SET_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_GEN_SET_1D2U_FD 0x3
+#define UHS2_DEV_CONFIG_GEN_SET_2D2U_FD 0x4
+#define UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE BIT(31)
+
+/* PHY Caps and Settings registers */
+#define UHS2_DEV_CONFIG_PHY_CAPS (UHS2_DEV_CONFIG_REG + 0x002)
+#define UHS2_DEV_CONFIG_PHY_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_PHY_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_PHY_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_CAN_HIBER_POS 15
+#define UHS2_DEV_CONFIG_CAN_HIBER_MASK 0x1
+#define UHS2_DEV_CONFIG_PHY_CAPS1 (UHS2_DEV_CONFIG_REG + 0x003)
+#define UHS2_DEV_CONFIG_N_LSS_SYN_MASK 0xF
+#define UHS2_DEV_CONFIG_N_LSS_DIR_POS 4
+#define UHS2_DEV_CONFIG_N_LSS_DIR_MASK 0xF
+
+#define UHS2_DEV_CONFIG_PHY_SET (UHS2_DEV_CONFIG_REG + 0x00A)
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_POS 6
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_A 0x0
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_B 0x1
+
+/* LINK-TRAN Caps and Settings registers */
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS (UHS2_DEV_CONFIG_REG + 0x004)
+#define UHS2_DEV_CONFIG_LT_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_LT_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_LT_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_N_FCU_POS 8
+#define UHS2_DEV_CONFIG_N_FCU_MASK 0xFF
+#define UHS2_DEV_CONFIG_DEV_TYPE_POS 16
+#define UHS2_DEV_CONFIG_DEV_TYPE_MASK 0x7
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_POS 20
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK 0xFFF
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS1 (UHS2_DEV_CONFIG_REG + 0x005)
+#define UHS2_DEV_CONFIG_N_DATA_GAP_MASK 0xFF
+
+#define UHS2_DEV_CONFIG_LINK_TRAN_SET (UHS2_DEV_CONFIG_REG + 0x00C)
+#define UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN 0x200
+#define UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS 16
+
+/* Preset register */
+#define UHS2_DEV_CONFIG_PRESET (UHS2_DEV_CONFIG_REG + 0x040)
+
+#define UHS2_DEV_INT_REG 0x100
+
+#define UHS2_DEV_STATUS_REG 0x180
+
+#define UHS2_DEV_CMD_REG 0x200
+#define UHS2_DEV_CMD_FULL_RESET (UHS2_DEV_CMD_REG + 0x000)
+#define UHS2_DEV_CMD_GO_DORMANT_STATE (UHS2_DEV_CMD_REG + 0x001)
+#define UHS2_DEV_CMD_DORMANT_HIBER BIT(7)
+#define UHS2_DEV_CMD_DEVICE_INIT (UHS2_DEV_CMD_REG + 0x002)
+#define UHS2_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_CMD_ENUMERATE (UHS2_DEV_CMD_REG + 0x003)
+#define UHS2_DEV_CMD_TRANS_ABORT (UHS2_DEV_CMD_REG + 0x004)
+
+#define UHS2_RCLK_MAX 52000000
+#define UHS2_RCLK_MIN 26000000
+
+#endif /* LINUX_MMC_UHS2_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 7cddfdac2f57..673cbdf43453 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -76,7 +76,8 @@
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
-#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
+#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
+#define SDIO_DEVICE_ID_BROADCOM_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 274a2767ea49..23ac5696fa38 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -22,10 +22,9 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, unsigned int debounce);
int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
-void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
-bool mmc_can_gpio_cd(struct mmc_host *host);
-bool mmc_can_gpio_ro(struct mmc_host *host);
+bool mmc_host_can_gpio_cd(struct mmc_host *host);
+bool mmc_host_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 39a7714605a7..14a45979cccc 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,10 +9,12 @@ struct page;
struct vm_area_struct;
struct mm_struct;
struct vma_iterator;
+struct vma_merge_struct;
void dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void dump_vmg(const struct vma_merge_struct *vmg, const char *reason);
void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
@@ -46,7 +48,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
} \
} while (0)
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
- static bool __section(".data.once") __warned; \
+ static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -66,7 +68,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
unlikely(__ret_warn); \
})
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
- static bool __section(".data.once") __warned; \
+ static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -77,7 +79,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
unlikely(__ret_warn_once); \
})
#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \
- static bool __section(".data.once") __warned; \
+ static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -87,6 +89,26 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_vma(vma); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_VMG(cond, vmg) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_vmg(vmg, "VM_WARN_ON_VMG(" __stringify(cond)")"); \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
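
A one-line usage sketch (the checked condition is hypothetical); the VMA is
dumped at most once if the warning ever fires:

        VM_WARN_ON_ONCE_VMA(vma->vm_start >= vma->vm_end, vma);
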
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -104,9 +126,11 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
-#endif
+#endif /* CONFIG_DEBUG_VM */
#ifdef CONFIG_DEBUG_VM_IRQSOFF
#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index bbaec80c78c5..ac01dc4eb2ce 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -24,6 +24,7 @@ static inline void leave_mm(void) { }
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
#else
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d39ebb10caeb..d1094c2d5fb6 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -43,10 +43,10 @@ struct mmu_interval_notifier;
* a device driver to possibly ignore the invalidation if the
* owner field matches the driver's device private pgmap owner.
*
- * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
- * longer have exclusive access to the page. When sent during creation of an
- * exclusive range the owner will be initialised to the value provided by the
- * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
+ * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive.
+ * The owner is initialized to the value provided by the caller of
+ * make_device_exclusive(), such that this caller can filter out these
+ * events.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -606,6 +606,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
+static inline int mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+
static inline int mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address)
{
@@ -647,9 +654,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
-#define ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
-#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
static inline void mmu_notifier_synchronize(void)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8f9c9590a42c..4398e027f450 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -37,6 +37,22 @@
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+/* Defines the order for the number of pages that have a migrate type. */
+#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
+#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
+#else
+#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
+#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */
+
+/*
+ * MAX_PAGE_ORDER, which defines the max order of pages to be allocated by
+ * the buddy allocator, has to be larger than or equal to PAGE_BLOCK_MAX_ORDER,
+ * which defines the order for the number of pages that can have a migrate type.
+ */
+#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
+#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
+#endif
+
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
@@ -63,6 +79,9 @@ enum migratetype {
* __free_pageblock_cma() function.
*/
MIGRATE_CMA,
+ __MIGRATE_TYPE_END = MIGRATE_CMA,
+#else
+ __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
@@ -76,8 +95,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
-# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \
- get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
+/*
+ * __dump_folio() in mm/debug.c passes a pointer to an on-stack struct folio,
+ * so folio_pfn() cannot be used and the pfn must be passed in.
+ */
+# define is_migrate_cma_folio(folio, pfn) \
+ (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
@@ -106,14 +129,12 @@ static inline bool migratetype_is_mergeable(int mt)
extern int page_group_by_mobility_disabled;
-#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_migratetype(page, page_to_pfn(page))
-#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pageblock_migratetype(&folio->page)
-#define folio_migratetype(folio) \
- get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
- MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -138,6 +159,7 @@ enum numa_stat_item {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
+ NR_FREE_PAGES_BLOCKS,
NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
@@ -147,7 +169,6 @@ enum zone_stat_item {
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
/* Second 128 byte cacheline */
- NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
@@ -185,7 +206,6 @@ enum node_stat_item {
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
@@ -214,12 +234,32 @@ enum node_stat_item {
#endif
#ifdef CONFIG_NUMA_BALANCING
PGPROMOTE_SUCCESS, /* promote successfully */
- PGPROMOTE_CANDIDATE, /* candidate pages to promote */
+ /*
+ * Candidate pages for promotion based on hint fault latency. This
+ * counter is used to control the promotion rate and adjust the hot
+ * threshold.
+ */
+ PGPROMOTE_CANDIDATE,
+ /*
+ * Not rate-limited (NRL) candidate pages: pages that can be promoted
+ * without considering the hot threshold, because the fast-tier node
+ * has enough free pages. These promotions bypass the regular hotness
+ * checks and do NOT influence the promotion rate-limiter or
+ * threshold-adjustment logic. This counter exists for
+ * statistics/monitoring purposes.
+ */
+ PGPROMOTE_CANDIDATE_NRL,
#endif
/* PGDEMOTE_*: pages demoted */
PGDEMOTE_KSWAPD,
PGDEMOTE_DIRECT,
PGDEMOTE_KHUGEPAGED,
+ PGDEMOTE_PROACTIVE,
+#ifdef CONFIG_HUGETLB_PAGE
+ NR_HUGETLB,
+#endif
+ NR_BALLOON_PAGES,
+ NR_KERNEL_FILE_PAGES,
NR_VM_NODE_STAT_ITEMS
};
@@ -329,66 +369,88 @@ enum lruvec_flags {
#endif /* !__GENERATING_BOUNDS_H */
/*
- * Evictable pages are divided into multiple generations. The youngest and the
+ * Evictable folios are divided into multiple generations. The youngest and the
* oldest generation numbers, max_seq and min_seq, are monotonically increasing.
* They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
* offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
* corresponding generation. The gen counter in folio->flags stores gen+1 while
- * a page is on one of lrugen->folios[]. Otherwise it stores 0.
+ * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
*
- * A page is added to the youngest generation on faulting. The aging needs to
- * check the accessed bit at least twice before handing this page over to the
- * eviction. The first check takes care of the accessed bit set on the initial
- * fault; the second check makes sure this page hasn't been used since then.
- * This process, AKA second chance, requires a minimum of two generations,
- * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
- * LRU, e.g., /proc/vmstat, these two generations are considered active; the
- * rest of generations, if they exist, are considered inactive. See
- * lru_gen_is_active().
+ * After a folio is faulted in, the aging needs to check the accessed bit at
+ * least twice before handing this folio over to the eviction. The first check
+ * clears the accessed bit from the initial fault; the second check makes sure
+ * this folio hasn't been used since then. This process, AKA second chance,
+ * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
+ * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
+ * generations are considered active; the rest of generations, if they exist,
+ * are considered inactive. See lru_gen_is_active().
*
- * PG_active is always cleared while a page is on one of lrugen->folios[] so
- * that the aging needs not to worry about it. And it's set again when a page
- * considered active is isolated for non-reclaiming purposes, e.g., migration.
- * See lru_gen_add_folio() and lru_gen_del_folio().
+ * PG_active is always cleared while a folio is on one of lrugen->folios[] so
+ * that the sliding window need not worry about it. And it's set again when
+ * a folio considered active is isolated for non-reclaiming purposes, e.g.,
+ * migration. See lru_gen_add_folio() and lru_gen_del_folio().
*
* MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
- * in folio->flags.
+ * in folio->flags, masked by LRU_GEN_MASK.
*/
#define MIN_NR_GENS 2U
#define MAX_NR_GENS 4U
/*
- * Each generation is divided into multiple tiers. A page accessed N times
- * through file descriptors is in tier order_base_2(N). A page in the first tier
- * (N=0,1) is marked by PG_referenced unless it was faulted in through page
- * tables or read ahead. A page in any other tier (N>1) is marked by
- * PG_referenced and PG_workingset. This implies a minimum of two tiers is
- * supported without using additional bits in folio->flags.
+ * Each generation is divided into multiple tiers. A folio accessed N times
+ * through file descriptors is in tier order_base_2(N). A folio in the first
+ * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
+ * PG_workingset. A folio in any other tier (1<N<5) between the first and last
+ * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
*
* In contrast to moving across generations which requires the LRU lock, moving
* across tiers only involves atomic operations on folio->flags and therefore
* has a negligible cost in the buffered access path. In the eviction path,
- * comparisons of refaulted/(evicted+protected) from the first tier and the
- * rest infer whether pages accessed multiple times through file descriptors
- * are statistically hot and thus worth protecting.
+ * comparisons of refaulted/(evicted+protected) from the first tier and the rest
+ * infer whether folios accessed multiple times through file descriptors are
+ * statistically hot and thus worth protecting.
*
* MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
* number of categories of the active/inactive LRU when keeping track of
* accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
- * folio->flags.
+ * folio->flags, masked by LRU_REFS_MASK.
*/
#define MAX_NR_TIERS 4U
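
A worked example of the tier mapping, assuming order_base_2() rounds up to
the next power of two: N=0,1 land in tier 0, N=2 in tier 1, N=3,4 in tier 2,
and N>=5 saturates in tier 3, i.e. MAX_NR_TIERS-1.
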
#ifndef __GENERATING_BOUNDS_H
-struct lruvec;
-struct page_vma_mapped_walk;
-
#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+/*
+ * For folios accessed multiple times through file descriptors,
+ * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
+ * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
+ * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
+ * promoted into the second oldest generation in the eviction path. And when
+ * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
+ * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
+ * only valid when PG_referenced is set.
+ *
+ * For folios accessed multiple times through page tables, folio_update_gen()
+ * from a page table walk or lru_gen_set_refs() from a rmap walk sets
+ * PG_referenced after the accessed bit is cleared for the first time.
+ * Thereafter, those two paths set PG_workingset and promote folios to the
+ * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
+ * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
+ *
+ * For both cases above, after PG_workingset is set on a folio, it remains until
+ * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
+ * can be set again if lru_gen_test_recent() returns true upon a refault.
+ */
+#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
#ifdef CONFIG_LRU_GEN
enum {
@@ -416,12 +478,11 @@ enum {
/*
* The youngest generation number is stored in max_seq for both anon and file
* types as they are aged on an equal footing. The oldest generation numbers are
- * stored in min_seq[] separately for anon and file types as clean file pages
- * can be evicted regardless of swap constraints.
- *
- * Normally anon and file min_seq are in sync. But if swapping is constrained,
- * e.g., out of swap space, file min_seq is allowed to advance and leave anon
- * min_seq behind.
+ * stored in min_seq[] separately for anon and file types so that they can be
+ * incremented independently. Ideally min_seq[] are kept in sync when both anon
+ * and file types are evictable. However, to adapt to situations like extreme
+ * swappiness, they are allowed to be out of sync by at most
+ * MAX_NR_GENS-MIN_NR_GENS-1.
*
* The number of pages in each generation is eventually consistent and therefore
* can be transiently negative when reset_batch_size() is pending.
@@ -441,8 +502,8 @@ struct lru_gen_folio {
unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
/* the exponential moving average of evicted+protected */
unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
- /* the first tier doesn't need protection, hence the minus one */
- unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can only be modified under the LRU lock */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* can be modified without holding the LRU lock */
atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
@@ -458,9 +519,7 @@ struct lru_gen_folio {
enum {
MM_LEAF_TOTAL, /* total leaf entries */
- MM_LEAF_OLD, /* old leaf entries */
MM_LEAF_YOUNG, /* young leaf entries */
- MM_NONLEAF_TOTAL, /* total non-leaf entries */
MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
NR_MM_STATS
@@ -495,7 +554,7 @@ struct lru_gen_mm_walk {
int mm_stats[NR_MM_STATS];
/* total batched items */
int batched;
- bool can_swap;
+ int swappiness;
bool force_scan;
};
@@ -557,7 +616,7 @@ struct lru_gen_memcg {
void lru_gen_init_pgdat(struct pglist_data *pgdat);
void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
@@ -576,8 +635,9 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
{
}
-static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
+ return false;
}
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
@@ -654,24 +714,18 @@ enum zone_watermarks {
};
/*
- * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
- * for THP which will usually be GFP_MOVABLE. Even if it is another type,
- * it should not contribute to serious fragmentation causing THP allocation
- * failures.
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
+ * are added for THP. One PCP list is used by movable (__GFP_MOVABLE)
+ * allocations, and the other PCP list by unmovable and reclaimable ones.
*/
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define NR_PCP_THP 1
+#define NR_PCP_THP 2
#else
#define NR_PCP_THP 0
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
-#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-
/*
* Flags used in pcp->flags field.
*
@@ -830,6 +884,7 @@ struct zone {
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
+ unsigned long nr_free_highatomic;
/*
* We don't know if the memory that we're going to allocate will be
@@ -946,6 +1001,9 @@ struct zone {
#ifdef CONFIG_UNACCEPTED_MEMORY
/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
struct list_head unaccepted_pages;
+
+ /* To be called once the last page in the zone is accepted */
+ struct work_struct unaccepted_cleanup;
#endif
/* zone flags, see below */
@@ -954,6 +1012,9 @@ struct zone {
/* Primarily protects free_area */
spinlock_t lock;
+ /* Pages to be freed when next trylock succeeds */
+ struct llist_head trylock_free_pages;
+
/* Write-intensive fields used by compaction and vmstats. */
CACHELINE_PADDING(_pad2_);
@@ -999,10 +1060,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
- PGDAT_DIRTY, /* reclaim scanning has recently found
- * many dirty file pages at the tail
- * of the LRU.
- */
PGDAT_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback
*/
@@ -1017,7 +1074,33 @@ enum zone_flags {
ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long wmark_pages(const struct zone *z,
+ enum zone_watermarks w)
+{
+ return z->_watermark[w] + z->watermark_boost;
+}
+
+static inline unsigned long min_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_MIN);
+}
+
+static inline unsigned long low_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_LOW);
+}
+
+static inline unsigned long high_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_HIGH);
+}
+
+static inline unsigned long promo_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_PROMO);
+}
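
The converted helpers read like the old macros at their call sites. A
minimal sketch (the surrounding check is hypothetical):

        bool can_promote = zone_page_state(zone, NR_FREE_PAGES) >=
                           promo_wmark_pages(zone);
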
+
+static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
@@ -1041,12 +1124,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
@@ -1097,21 +1180,32 @@ static inline bool zone_is_empty(struct zone *zone)
#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags)
+{
+ ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT);
+ return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
static inline enum zone_type page_zonenum(const struct page *page)
{
- ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ return memdesc_zonenum(page->flags);
}
static inline enum zone_type folio_zonenum(const struct folio *folio)
{
- return page_zonenum(&folio->page);
+ return memdesc_zonenum(folio->flags);
}
#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
- return page_zonenum(page) == ZONE_DEVICE;
+ return memdesc_zonenum(mdf) == ZONE_DEVICE;
+}
+
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page);
+ return page_folio(page)->pgmap;
}
/*
@@ -1125,17 +1219,17 @@ static inline bool is_zone_device_page(const struct page *page)
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
const struct page *b)
{
- if (is_zone_device_page(a) != is_zone_device_page(b))
+ if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags))
return false;
- if (!is_zone_device_page(a))
+ if (!memdesc_is_zone_device(a->flags))
return true;
- return a->pgmap == b->pgmap;
+ return page_pgmap(a) == page_pgmap(b);
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
unsigned long, struct dev_pagemap *);
#else
-static inline bool is_zone_device_page(const struct page *page)
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
return false;
}
@@ -1144,11 +1238,20 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
{
return true;
}
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ return NULL;
+}
#endif
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return memdesc_is_zone_device(page->flags);
+}
+
static inline bool folio_is_zone_device(const struct folio *folio)
{
- return is_zone_device_page(&folio->page);
+ return memdesc_is_zone_device(folio->flags);
}
static inline bool is_zone_movable_page(const struct page *page)
@@ -1166,7 +1269,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -1333,7 +1436,7 @@ typedef struct pglist_data {
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
- int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+ atomic_t kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
@@ -1439,8 +1542,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int highest_zoneidx);
/*
* Memory initialization context, use to differentiate memory added by
* the platform statically or via memory hotplug interface.
@@ -1476,12 +1577,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
@@ -1493,19 +1594,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
@@ -1515,7 +1616,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
@@ -1542,7 +1643,7 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *zone)
{
return is_highmem_idx(zone_idx(zone));
}
@@ -1608,12 +1709,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -1689,7 +1790,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
- for (zone = z->zone; \
+ for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
@@ -1725,7 +1826,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
nid = first_node(*nodes);
zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
- return (!z->zone) ? true : false;
+ return !zonelist_zone(z);
}
@@ -1890,6 +1991,9 @@ enum {
#ifdef CONFIG_ZONE_DEVICE
SECTION_TAINT_ZONE_DEVICE_BIT,
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+ SECTION_IS_VMEMMAP_PREINIT_BIT,
+#endif
SECTION_MAP_LAST_BIT,
};
@@ -1900,6 +2004,9 @@ enum {
#ifdef CONFIG_ZONE_DEVICE
#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
+#endif
#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
@@ -1910,7 +2017,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -1920,12 +2027,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -1935,25 +2042,49 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
return 0;
}
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+static inline int preinited_vmemmap_section(const struct mem_section *section)
+{
+ return (section &&
+ (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
+}
+
+void sparse_vmemmap_init_nid_early(int nid);
+void sparse_vmemmap_init_nid_late(int nid);
+
+#else
+static inline int preinited_vmemmap_section(const struct mem_section *section)
+{
+ return 0;
+}
+static inline void sparse_vmemmap_init_nid_early(int nid)
+{
+}
+
+static inline void sparse_vmemmap_init_nid_late(int nid)
+{
+}
+#endif
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
@@ -1980,16 +2111,46 @@ static inline int subsection_map_index(unsigned long pfn)
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
+ struct mem_section_usage *usage = READ_ONCE(ms->usage);
- return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+ return usage ? test_bit(idx, usage->subsection_map) : 0;
+}
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+ struct mem_section_usage *usage = READ_ONCE(ms->usage);
+ int idx = subsection_map_index(*pfn);
+ unsigned long bit;
+
+ if (!usage)
+ return false;
+
+ if (test_bit(idx, usage->subsection_map))
+ return true;
+
+ /* Find the next subsection that exists */
+ bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
+ if (bit == SUBSECTIONS_PER_SECTION)
+ return false;
+
+ *pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
+ return true;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
return 1;
}
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+ return true;
+}
#endif
+void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
+ unsigned long flags);
+
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
/**
* pfn_valid - check if there is a valid memory map entry for a PFN
@@ -2033,6 +2194,58 @@ static inline int pfn_valid(unsigned long pfn)
return ret;
}
+
+/* Returns end_pfn or higher if no valid PFN remains in the range */
+static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)
+{
+ unsigned long nr = pfn_to_section_nr(pfn);
+
+ rcu_read_lock_sched();
+
+ while (nr <= __highest_present_section_nr && pfn < end_pfn) {
+ struct mem_section *ms = __pfn_to_section(pfn);
+
+ if (valid_section(ms) &&
+ (early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
+ rcu_read_unlock_sched();
+ return pfn;
+ }
+
+ /* Nothing left in this section? Skip to next section */
+ nr++;
+ pfn = section_nr_to_pfn(nr);
+ }
+
+ rcu_read_unlock_sched();
+ return end_pfn;
+}
+
+static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
+{
+ pfn++;
+
+ if (pfn >= end_pfn)
+ return end_pfn;
+
+ /*
+ * Either every PFN within the section (or subsection for VMEMMAP) is
+ * valid, or none of them are. So there's no point repeating the check
+ * for every PFN; only call first_valid_pfn() again when crossing a
+ * (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)).
+ */
+ if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
+ PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK))
+ return pfn;
+
+ return first_valid_pfn(pfn, end_pfn);
+}
+
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
+ for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn)); \
+ (_pfn) < (_end_pfn); \
+ (_pfn) = next_valid_pfn((_pfn), (_end_pfn)))
+
#endif
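
The iterator is easiest to follow with a caller in front of it. A minimal
sketch, assuming a SPARSEMEM configuration where this header is in scope;
count_valid_pages() is illustrative, not part of the patch:

#include <linux/mmzone.h>

/* Count PFNs with a valid memmap entry in [start_pfn, end_pfn). */
static unsigned long count_valid_pages(unsigned long start_pfn,
				       unsigned long end_pfn)
{
	unsigned long pfn, nr = 0;

	/*
	 * first_valid_pfn()/next_valid_pfn() skip ahead a whole
	 * (sub)section at a time, so a sparse hole costs one lookup per
	 * section rather than one pfn_valid() call per PFN.
	 */
	for_each_valid_pfn(pfn, start_pfn, end_pfn)
		nr++;

	return nr;
}
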
static inline int pfn_in_present_section(unsigned long pfn)
@@ -2052,6 +2265,11 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
return -1;
}
+#define for_each_present_section_nr(start, section_nr) \
+ for (section_nr = next_present_section_nr(start - 1); \
+ section_nr != -1; \
+ section_nr = next_present_section_nr(section_nr))
+
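
A similarly hedged sketch of the new present-section iterator;
walk_present_sections() is hypothetical:

#include <linux/mmzone.h>
#include <linux/printk.h>

/* Visit every present memory section, starting the scan at section 0. */
static void walk_present_sections(void)
{
	unsigned long section_nr;

	/* next_present_section_nr() returns -1 once no sections remain. */
	for_each_present_section_nr(0, section_nr)
		pr_debug("section %lu is present\n", section_nr);
}
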
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
@@ -2071,10 +2289,22 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
+#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
+/*
+ * Fallback case for when the architecture provides its own pfn_valid() but
+ * not a corresponding for_each_valid_pfn().
+ */
+#ifndef for_each_valid_pfn
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
+ for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \
+ if (pfn_valid(_pfn))
+#endif
+
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index cd4d5c8781f5..e71a6070a8f8 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -9,6 +9,7 @@ struct mnt_idmap;
struct user_namespace;
extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
extern struct user_namespace init_user_ns;
typedef struct {
@@ -24,6 +25,11 @@ static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+static inline bool is_valid_mnt_idmap(const struct mnt_idmap *idmap)
+{
+ return idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap;
+}
+
#ifdef CONFIG_MULTIUSER
static inline uid_t __vfsuid_val(vfsuid_t uid)
{
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 8f882f5881e8..0acd1089d149 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -3,14 +3,20 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
struct ns_common;
-extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
+extern struct mnt_namespace init_mnt_ns;
+
+extern struct mnt_namespace *copy_mnt_ns(u64, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+DEFINE_FREE(put_mnt_ns, struct mnt_namespace *, if (!IS_ERR_OR_NULL(_T)) put_mnt_ns(_T))
extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
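
The DEFINE_FREE() hook pairs with the __free() attribute from
<linux/cleanup.h> to drop the namespace reference at scope exit. A hedged
sketch of the resulting pattern; grab_mnt_ns() is a hypothetical source of
a referenced namespace:

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/mnt_namespace.h>

extern struct mnt_namespace *grab_mnt_ns(void);	/* hypothetical */

static int use_mnt_ns(void)
{
	/* put_mnt_ns() runs automatically when ns goes out of scope. */
	struct mnt_namespace *ns __free(put_mnt_ns) = grab_mnt_ns();

	if (IS_ERR_OR_NULL(ns))
		return ns ? PTR_ERR(ns) : -ENOENT;

	/* ... use ns; no explicit put_mnt_ns() on any return path ... */
	return 0;
}
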
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 4338b1b4ac44..24eb5a88a5c5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -340,7 +340,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x10
+#define INPUT_DEVICE_ID_SW_MAX 0x11
#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
@@ -601,7 +601,7 @@ struct dmi_system_id {
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
-#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_NAME_SIZE 24
#define PLATFORM_MODULE_PREFIX "platform:"
struct platform_device_id {
@@ -692,6 +692,7 @@ struct x86_cpu_id {
__u16 feature; /* bit index */
/* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
__u16 flags;
+ __u8 type;
kernel_ulong_t driver_data;
};
@@ -700,7 +701,10 @@ struct x86_cpu_id {
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
#define X86_STEPPING_ANY 0
+#define X86_STEP_MIN 0
+#define X86_STEP_MAX 0xf
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+#define X86_CPU_TYPE_ANY 0
/*
* Generic table type for matching CPU features.
@@ -863,7 +867,7 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
-#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_NAME_SIZE 40
#define AUXILIARY_MODULE_PREFIX "auxiliary:"
struct auxiliary_device_id {
diff --git a/include/linux/module.h b/include/linux/module.h
index ffa1c603163c..d80c3ea57472 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -14,6 +14,7 @@
#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
+#include <linux/cleanup.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/elf.h>
@@ -32,7 +33,7 @@
#include <linux/percpu.h>
#include <asm/module.h>
-#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
+#define MODULE_NAME_LEN __MODULE_NAME_LEN
struct modversion_info {
unsigned long crc;
@@ -52,9 +53,9 @@ struct module_kobject {
struct module_attribute {
struct attribute attr;
- ssize_t (*show)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*show)(const struct module_attribute *, struct module_kobject *,
char *);
- ssize_t (*store)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*store)(const struct module_attribute *, struct module_kobject *,
const char *, size_t count);
void (*setup)(struct module *, const char *);
int (*test)(struct module *);
@@ -67,10 +68,10 @@ struct module_version_attribute {
const char *version;
};
-extern ssize_t __modver_version_show(struct module_attribute *,
+extern ssize_t __modver_version_show(const struct module_attribute *,
struct module_kobject *, char *);
-extern struct module_attribute module_uevent;
+extern const struct module_attribute module_uevent;
/* These are either module local, or the kernel's dummy ones. */
extern int init_module(void);
@@ -162,8 +163,7 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Generic info of form tag = "info" */
-#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+struct module_kobject *lookup_or_create_module_kobject(const char *name);
/* For userspace: you can also call me... */
#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
@@ -174,6 +174,12 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * Weak module dependencies. See man modprobe.d for details.
+ * Example: MODULE_WEAKDEP("module-foo")
+ */
+#define MODULE_WEAKDEP(_weakdep) MODULE_INFO(weakdep, _weakdep)
+
+/*
* MODULE_FILE is used for generating modules.builtin
* So, make it no-op when this is being built as a module
*/
@@ -238,14 +244,23 @@ extern void cleanup_module(void);
/* What your module does. */
#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
-#ifdef MODULE
+/*
+ * Format: __mod_device_table__kmod_<modname>__<type>__<name>
+ * Parts of the string `__kmod_` and `__` are used as delimiters when parsing
+ * a symbol in file2alias.c
+ */
+#define __mod_device_table(type, name) \
+	__PASTE(__mod_device_table__, \
+	__PASTE(__KBUILD_MODNAME, \
+	__PASTE(__, \
+	__PASTE(type, \
+	__PASTE(__, name)))))
+
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
-extern typeof(name) __mod_##type##__##name##_device_table \
- __attribute__ ((unused, alias(__stringify(name))))
-#else /* !MODULE */
-#define MODULE_DEVICE_TABLE(type, name)
-#endif
+static typeof(name) __mod_device_table(type, name) \
+ __attribute__ ((used, alias(__stringify(name))))
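
To make the naming scheme concrete, a hedged example for a hypothetical
driver built as module 'foo'; kbuild defines __KBUILD_MODNAME as the
identifier kmod_<modname>, which is where the kmod_ part of the symbol
comes from:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },		/* hypothetical */
	{ }
};
/*
 * For __KBUILD_MODNAME == kmod_foo this emits, roughly:
 *   static typeof(foo_of_match)
 *   __mod_device_table__kmod_foo__of__foo_of_match
 *           __attribute__((used, alias("foo_of_match")));
 * file2alias.c then recovers module name, type and table name by
 * splitting the symbol on "__kmod_" and "__".
 */
MODULE_DEVICE_TABLE(of, foo_of_match);
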
/* Version of form [<epoch>:]<version>[-<extra-version>].
* Or for CVS/RCS ID version, everything but the number is stripped.
@@ -269,7 +284,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
#else
#define MODULE_VERSION(_version) \
MODULE_INFO(version, _version); \
- static struct module_version_attribute __modver_attr \
+ static const struct module_version_attribute __modver_attr \
__used __section("__modver") \
__aligned(__alignof__(struct module_version_attribute)) \
= { \
@@ -290,25 +305,10 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, __stringify(ns))
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, ns)
struct notifier_block;
-#ifdef CONFIG_MODULES
-
-extern int modules_disabled; /* for sysctl */
-/* Get/put a kernel symbol (calls must be symmetric) */
-void *__symbol_get(const char *symbol);
-void *__symbol_get_gpl(const char *symbol);
-#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x))))
-
-/* modules using other modules: kdb wants to see this. */
-struct module_use {
- struct list_head source_list;
- struct list_head target_list;
- struct module *source, *target;
-};
-
enum module_state {
MODULE_STATE_LIVE, /* Normal state. */
MODULE_STATE_COMING, /* Full formed, running module_init. */
@@ -361,6 +361,7 @@ enum mod_mem_type {
struct module_memory {
void *base;
+ bool is_rox;
unsigned int size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
@@ -422,7 +423,7 @@ struct module {
/* Exported symbols */
const struct kernel_symbol *syms;
- const s32 *crcs;
+ const u32 *crcs;
unsigned int num_syms;
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
@@ -440,7 +441,7 @@ struct module {
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
- const s32 *gpl_crcs;
+ const u32 *gpl_crcs;
bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
@@ -509,7 +510,9 @@ struct module {
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
unsigned int btf_data_size;
+ unsigned int btf_base_data_size;
void *btf_data;
+ void *btf_base_data;
#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
@@ -525,7 +528,7 @@ struct module {
struct trace_eval_map **trace_evals;
unsigned int num_trace_evals;
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
@@ -590,6 +593,16 @@ struct module {
#define MODULE_ARCH_INIT {}
#endif
+#ifdef CONFIG_MODULES
+
+/* Get/put a kernel symbol (calls must be symmetric) */
+void *__symbol_get(const char *symbol);
+void *__symbol_get_gpl(const char *symbol);
+#define symbol_get(x) ({ \
+ static const char __notrim[] \
+ __used __section(".no_trim_symbol") = __stringify(x); \
+ (typeof(&x))(__symbol_get(__stringify(x))); })
+
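
A hedged sketch of symbol_get() under the new definition; foo_optional_op()
is a hypothetical symbol exported by another module:

#include <linux/errno.h>
#include <linux/module.h>

extern int foo_optional_op(int arg);	/* hypothetical */

static int call_optional_op(int arg)
{
	int (*op)(int) = symbol_get(foo_optional_op);
	int ret = -ENOENT;

	if (op) {
		/*
		 * The .no_trim_symbol entry emitted by symbol_get() tells
		 * the unused-symbol trimming machinery that
		 * "foo_optional_op" is looked up at runtime, so the
		 * export must not be discarded.
		 */
		ret = op(arg);
		symbol_put(foo_optional_op);
	}
	return ret;
}
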
#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
{
@@ -653,7 +666,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must be in a RCU-sched critical section. */
+/* Search for module by name: must be in a RCU critical section. */
struct module *find_module(const char *name);
extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
@@ -759,6 +772,8 @@ static inline bool is_livepatch_module(struct module *mod)
void set_module_sig_enforced(void);
+void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data);
+
#else /* !CONFIG_MODULES... */
static inline struct module *__module_address(unsigned long addr)
@@ -866,6 +881,10 @@ static inline bool module_is_coming(struct module *mod)
{
return false;
}
+
+static inline void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data)
+{
+}
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
@@ -931,11 +950,11 @@ int module_kallsyms_on_each_symbol(const char *modname,
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
* found, otherwise NULL.
*/
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname, const unsigned char **modbuildid,
- char *namebuf);
+int module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr,
unsigned long *size,
@@ -964,14 +983,14 @@ static inline int module_kallsyms_on_each_symbol(const char *modname,
}
/* For kallsyms to ask for address resolution. NULL means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
+static inline int module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
const unsigned char **modbuildid,
char *namebuf)
{
- return NULL;
+ return 0;
}
static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
@@ -999,4 +1018,7 @@ static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+/* Define __free(module_put) macro for struct module *. */
+DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
+
#endif /* _LINUX_MODULE_H */
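
As with put_mnt_ns() in mnt_namespace.h above, this enables scope-based
reference dropping. A minimal hedged sketch; find_some_module() stands in
for any source of a held module reference:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/module.h>

extern struct module *find_some_module(void);	/* hypothetical */

static int with_module(void)
{
	/* module_put() runs at scope exit iff mod is non-NULL. */
	struct module *mod __free(module_put) = find_some_module();

	if (!mod)
		return -ENOENT;

	/* ... use mod; no manual module_put() on early returns ... */
	return 0;
}
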
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index bfb85fd13e1f..915f32f7d888 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -6,6 +6,13 @@
#include <linux/stringify.h>
#include <linux/kernel.h>
+/*
+ * The maximum module name length, including the NUL byte.
+ * Chosen so that structs with an unsigned long line up, specifically
+ * modversion_info.
+ */
+#define __MODULE_NAME_LEN (64 - sizeof(unsigned long))
+
/* You can override this manually, but generally this should match the
module name. */
#ifdef MODULE
@@ -17,21 +24,22 @@
#define __MODULE_INFO_PREFIX KBUILD_MODNAME "."
#endif
-/* Chosen so that structs with an unsigned long line up. */
-#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-
-#define __MODULE_INFO(tag, name, info) \
- static const char __UNIQUE_ID(name)[] \
+/* Generic info of form tag = "info" */
+#define MODULE_INFO(tag, info) \
+ static_assert( \
+ sizeof(info) - 1 == __builtin_strlen(info), \
+ "MODULE_INFO(" #tag ", ...) contains embedded NUL byte"); \
+ static const char __UNIQUE_ID(modinfo)[] \
__used __section(".modinfo") __aligned(1) \
= __MODULE_INFO_PREFIX __stringify(tag) "=" info
#define __MODULE_PARM_TYPE(name, _type) \
- __MODULE_INFO(parmtype, name##type, #name ":" _type)
+ MODULE_INFO(parmtype, #name ":" _type)
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
- __MODULE_INFO(parm, _parm, #_parm ":" desc)
+ MODULE_INFO(parm, #_parm ":" desc)
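
The static_assert above works because sizeof() counts everything up to and
including the terminating NUL while __builtin_strlen() stops at the first
NUL, so any embedded NUL makes the two sides disagree. A hedged
illustration with a made-up tag:

#include <linux/module.h>

MODULE_INFO(testtag, "ok");	/* sizeof("ok") - 1 == 2 == strlen("ok") */

/*
 * This would now fail to compile:
 *   MODULE_INFO(testtag, "bad\0tail");
 * since sizeof("bad\0tail") - 1 == 8 but __builtin_strlen() == 3.
 */
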
struct kernel_param;
@@ -282,10 +290,9 @@ struct kparam_array
#define __moduleparam_const const
#endif
-/* This is the fundamental function for registering boot/module
- parameters. */
+/* This is the fundamental function for registering boot/module parameters. */
#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
- /* Default value instead of permissions? */ \
+ static_assert(sizeof(""prefix) - 1 <= __MODULE_NAME_LEN); \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used __section("__param") \
@@ -345,6 +352,19 @@ static inline void kernel_param_unlock(struct module *mod)
__module_param_call("", name, &param_ops_##type, &var, perm, \
-1, KERNEL_PARAM_FL_UNSAFE)
+/**
+ * __core_param_cb - like core_param, but takes set/get ops instead of a type.
+ * @name: the name of the cmdline and sysfs parameter (often the same as the
+ *        backing variable)
+ * @ops: the set & get operations for this parameter.
+ * @arg: opaque argument passed to @ops (usually a pointer to the variable)
+ * @perm: visibility in sysfs
+ *
+ * Ideally this would be called 'core_param_cb', but that name has already
+ * been used for module core parameters, hence the '__' prefix.
+ */
+#define __core_param_cb(name, ops, arg, perm) \
+ __module_param_call("", name, ops, arg, perm, -1, 0)
+
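
A hedged sketch of registering a prefix-less boot parameter through the new
macro for built-in code (the macro lives in the !MODULE branch); the
variable and ops are hypothetical, reusing the stock integer handlers:

#include <linux/moduleparam.h>

static int example_level;	/* hypothetical boot-time tunable */

static const struct kernel_param_ops example_level_ops = {
	.set = param_set_int,	/* stock integer setter */
	.get = param_get_int,	/* stock integer getter */
};

/* Accepts "example_level=<n>" on the command line; 0644 in sysfs. */
__core_param_cb(example_level, &example_level_ops, &example_level, 0644);
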
#endif /* !MODULE */
/**
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c34c18b4e8f3..acfe7ef86a1b 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -22,49 +22,38 @@ struct fs_context;
struct file;
struct path;
-#define MNT_NOSUID 0x01
-#define MNT_NODEV 0x02
-#define MNT_NOEXEC 0x04
-#define MNT_NOATIME 0x08
-#define MNT_NODIRATIME 0x10
-#define MNT_RELATIME 0x20
-#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
-#define MNT_NOSYMFOLLOW 0x80
-
-#define MNT_SHRINKABLE 0x100
-#define MNT_WRITE_HOLD 0x200
-
-#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
-#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
-/*
- * MNT_SHARED_MASK is the set of flags that should be cleared when a
- * mount becomes shared. Currently, this is only the flag that says a
- * mount cannot be bind mounted, since this is how we create a mount
- * that shares events with another mount. If you add a new MNT_*
- * flag, consider how it interacts with shared mounts.
- */
-#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
- | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY | MNT_NOSYMFOLLOW)
-#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
-
-#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
-
-#define MNT_INTERNAL 0x4000
-
-#define MNT_LOCK_ATIME 0x040000
-#define MNT_LOCK_NOEXEC 0x080000
-#define MNT_LOCK_NOSUID 0x100000
-#define MNT_LOCK_NODEV 0x200000
-#define MNT_LOCK_READONLY 0x400000
-#define MNT_LOCKED 0x800000
-#define MNT_DOOMED 0x1000000
-#define MNT_SYNC_UMOUNT 0x2000000
-#define MNT_MARKED 0x4000000
-#define MNT_UMOUNT 0x8000000
-#define MNT_ONRB 0x10000000
+enum mount_flags {
+ MNT_NOSUID = 0x01,
+ MNT_NODEV = 0x02,
+ MNT_NOEXEC = 0x04,
+ MNT_NOATIME = 0x08,
+ MNT_NODIRATIME = 0x10,
+ MNT_RELATIME = 0x20,
+ MNT_READONLY = 0x40, /* does the user want this to be r/o? */
+ MNT_NOSYMFOLLOW = 0x80,
+
+ MNT_SHRINKABLE = 0x100,
+
+ MNT_INTERNAL = 0x4000,
+
+ MNT_LOCK_ATIME = 0x040000,
+ MNT_LOCK_NOEXEC = 0x080000,
+ MNT_LOCK_NOSUID = 0x100000,
+ MNT_LOCK_NODEV = 0x200000,
+ MNT_LOCK_READONLY = 0x400000,
+ MNT_LOCKED = 0x800000,
+ MNT_DOOMED = 0x1000000,
+ MNT_SYNC_UMOUNT = 0x2000000,
+ MNT_UMOUNT = 0x8000000,
+
+ MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME
+ | MNT_READONLY | MNT_NOSYMFOLLOW,
+ MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
+
+ MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED |
+ MNT_SYNC_UMOUNT | MNT_LOCKED
+};
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
@@ -76,7 +65,7 @@ struct vfsmount {
static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
/* Pairs with smp_store_release() in do_idmap_mount(). */
- return smp_load_acquire(&mnt->mnt_idmap);
+ return READ_ONCE(mnt->mnt_idmap);
}
extern int mnt_want_write(struct vfsmount *mnt);
@@ -87,7 +76,7 @@ extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
-extern bool __mnt_is_readonly(struct vfsmount *mnt);
+extern bool __mnt_is_readonly(const struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
@@ -95,13 +84,11 @@ int mnt_get_write_access(struct vfsmount *mnt);
void mnt_put_write_access(struct vfsmount *mnt);
extern struct vfsmount *fc_mount(struct fs_context *fc);
+extern struct vfsmount *fc_mount_longterm(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data);
-extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
- struct file_system_type *type,
- const char *name, void *data);
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
@@ -114,12 +101,10 @@ extern struct vfsmount *kern_mount(struct file_system_type *);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
+int do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
+extern const struct path *collect_paths(const struct path *, struct path *, unsigned);
+extern void drop_collected_paths(const struct path *, const struct path *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
extern int cifs_root_data(char **dev, char **opts);
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index ac577699edfd..dfa4800306ee 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -61,13 +61,8 @@ struct moxtet_driver {
struct device_driver driver;
};
-static inline struct moxtet_driver *
-to_moxtet_driver(struct device_driver *drv)
-{
- if (!drv)
- return NULL;
- return container_of(drv, struct moxtet_driver, driver);
-}
+#define to_moxtet_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct moxtet_driver, driver) : NULL )
extern int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv);
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index eb0d1c1db208..47be46f36435 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,79 +40,26 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-static inline MPI mpi_new(unsigned int nbits)
-{
- return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
-}
-
MPI mpi_copy(MPI a);
-MPI mpi_alloc_like(MPI a);
-void mpi_snatch(MPI w, MPI u);
-MPI mpi_set(MPI w, MPI u);
-MPI mpi_set_ui(MPI w, unsigned long u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
-
-/* Constants used to return constant MPIs. See mpi_init if you
- * want to add more constants.
- */
-#define MPI_NUMBER_OF_CONSTANTS 6
-enum gcry_mpi_constants {
- MPI_C_ZERO,
- MPI_C_ONE,
- MPI_C_TWO,
- MPI_C_THREE,
- MPI_C_FOUR,
- MPI_C_EIGHT
-};
-
-MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
-
-/* Different formats of external big integer representation. */
-enum gcry_mpi_format {
- GCRYMPI_FMT_NONE = 0,
- GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
- GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
- GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
- GCRYMPI_FMT_HEX = 4, /* Hex format. */
- GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
- GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
-};
-
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
-int mpi_fromstr(MPI val, const char *str);
-MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
- size_t buflen, size_t *nwritten, MPI a);
/*-- mpi-mod.c --*/
-void mpi_mod(MPI rem, MPI dividend, MPI divisor);
-
-/* Context used with Barrett reduction. */
-struct barrett_ctx_s;
-typedef struct barrett_ctx_s *mpi_barrett_t;
-
-mpi_barrett_t mpi_barrett_init(MPI m, int copy);
-void mpi_barrett_free(mpi_barrett_t ctx);
-void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
-void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
+int mpi_mod(MPI rem, MPI dividend, MPI divisor);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@@ -129,138 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
int mpi_test_bit(MPI a, unsigned int n);
-void mpi_set_bit(MPI a, unsigned int n);
-void mpi_set_highbit(MPI a, unsigned int n);
-void mpi_clear_highbit(MPI a, unsigned int n);
-void mpi_clear_bit(MPI a, unsigned int n);
-void mpi_rshift_limbs(MPI a, unsigned int count);
-void mpi_rshift(MPI x, MPI a, unsigned int n);
-void mpi_lshift_limbs(MPI a, unsigned int count);
-void mpi_lshift(MPI x, MPI a, unsigned int n);
+int mpi_set_bit(MPI a, unsigned int n);
+int mpi_rshift(MPI x, MPI a, unsigned int n);
/*-- mpi-add.c --*/
-void mpi_add_ui(MPI w, MPI u, unsigned long v);
-void mpi_add(MPI w, MPI u, MPI v);
-void mpi_sub(MPI w, MPI u, MPI v);
-void mpi_addm(MPI w, MPI u, MPI v, MPI m);
-void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+int mpi_add(MPI w, MPI u, MPI v);
+int mpi_sub(MPI w, MPI u, MPI v);
+int mpi_addm(MPI w, MPI u, MPI v, MPI m);
+int mpi_subm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-mul.c --*/
-void mpi_mul(MPI w, MPI u, MPI v);
-void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+int mpi_mul(MPI w, MPI u, MPI v);
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-div.c --*/
-void mpi_tdiv_r(MPI rem, MPI num, MPI den);
-void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI a, MPI n);
-
-/*-- ec.c --*/
-
-/* Object to represent a point in projective coordinates */
-struct gcry_mpi_point {
- MPI x;
- MPI y;
- MPI z;
-};
-
-typedef struct gcry_mpi_point *MPI_POINT;
-
-/* Models describing an elliptic curve */
-enum gcry_mpi_ec_models {
- /* The Short Weierstrass equation is
- * y^2 = x^3 + ax + b
- */
- MPI_EC_WEIERSTRASS = 0,
- /* The Montgomery equation is
- * by^2 = x^3 + ax^2 + x
- */
- MPI_EC_MONTGOMERY,
- /* The Twisted Edwards equation is
- * ax^2 + y^2 = 1 + bx^2y^2
- * Note that we use 'b' instead of the commonly used 'd'.
- */
- MPI_EC_EDWARDS
-};
-
-/* Dialects used with elliptic curves */
-enum ecc_dialects {
- ECC_DIALECT_STANDARD = 0,
- ECC_DIALECT_ED25519,
- ECC_DIALECT_SAFECURVE
-};
-
-/* This context is used with all our EC functions. */
-struct mpi_ec_ctx {
- enum gcry_mpi_ec_models model; /* The model describing this curve. */
- enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
- int flags; /* Public key flags (not always used). */
- unsigned int nbits; /* Number of bits. */
-
- /* Domain parameters. Note that they may not all be set and if set
- * the MPIs may be flagged as constant.
- */
- MPI p; /* Prime specifying the field GF(p). */
- MPI a; /* First coefficient of the Weierstrass equation. */
- MPI b; /* Second coefficient of the Weierstrass equation. */
- MPI_POINT G; /* Base point (generator). */
- MPI n; /* Order of G. */
- unsigned int h; /* Cofactor. */
-
- /* The actual key. May not be set. */
- MPI_POINT Q; /* Public key. */
- MPI d; /* Private key. */
-
- const char *name; /* Name of the curve. */
-
- /* This structure is private to mpi/ec.c! */
- struct {
- struct {
- unsigned int a_is_pminus3:1;
- unsigned int two_inv_p:1;
- } valid; /* Flags to help setting the helper vars below. */
-
- int a_is_pminus3; /* True if A = P - 3. */
-
- MPI two_inv_p;
-
- mpi_barrett_t p_barrett;
-
- /* Scratch variables. */
- MPI scratch[11];
-
- /* Helper for fast reduction. */
- /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
- /* MPI s[10]; */
- /* MPI c; */
- } t;
-
- /* Curve specific computation routines for the field. */
- void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
- void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
- void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
-};
-
-void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
- enum ecc_dialects dialect,
- int flags, MPI p, MPI a, MPI b);
-void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
-MPI_POINT mpi_point_new(unsigned int nbits);
-void mpi_point_release(MPI_POINT p);
-void mpi_point_init(MPI_POINT p);
-void mpi_point_free_parts(MPI_POINT p);
-int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
-void mpi_ec_add_points(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx);
-void mpi_ec_mul_point(MPI_POINT result,
- MPI scalar, MPI_POINT point,
- struct mpi_ec_ctx *ctx);
-int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
+int mpi_tdiv_r(MPI rem, MPI num, MPI den);
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
/* inline functions */
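
With the arithmetic helpers now returning int instead of void, callers are
expected to propagate allocation failures from the MPI core. A hedged
sketch of the resulting call pattern:

#include <linux/mpi.h>

/* w = (u + v) mod m, forwarding any -ENOMEM from the MPI layer. */
static int example_addm(MPI w, MPI u, MPI v, MPI m)
{
	int ret = mpi_addm(w, u, v, m);

	if (ret)
		return ret;	/* previously a silent void failure */
	return 0;
}
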
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 63ef5191cc57..fddafdc168f7 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -31,6 +31,7 @@ extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
+extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
extern void ip6_mr_cleanup(void);
int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
@@ -58,6 +59,12 @@ static inline int ip6_mr_init(void)
return 0;
}
+static inline int
+ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return ip6_output(net, sk, skb);
+}
+
static inline void ip6_mr_cleanup(void)
{
return;
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 9dd4bf157255..0075f6e5c3da 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -146,9 +146,9 @@ struct mr_mfc {
unsigned long last_assert;
int minvif;
int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
+ atomic_long_t bytes;
+ atomic_long_t pkt;
+ atomic_long_t wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
@@ -262,6 +262,11 @@ struct mr_table {
int mroute_reg_vif_num;
};
+static inline bool mr_can_free_table(struct net *net)
+{
+ return !check_net(net) || !net_initialized(net);
+}
+
#ifdef CONFIG_IP_MROUTE_COMMON
void vif_device_init(struct vif_device *v,
struct net_device *dev,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index dc27cf3903d5..8003e3218c46 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -19,13 +19,9 @@
*/
#include <linux/irqdomain_defs.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/msi_api.h>
-#include <linux/xarray.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
#include <linux/irq.h>
-#include <linux/bits.h>
#include <asm/msi.h>
@@ -77,11 +73,9 @@ struct msi_msg {
};
};
-extern int pci_msi_ignore_mask;
/* Helper functions */
struct msi_desc;
struct pci_dev;
-struct platform_msi_priv_data;
struct device_attribute;
struct irq_domain;
struct irq_affinity_desc;
@@ -171,6 +165,10 @@ struct msi_desc_data {
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
+ * Only used if iommu_msi_shift != 0
+ * @iommu_msi_shift: Indicates how many bits of the original address should be
+ * preserved when using iommu_msi_iova.
* @sysfs_attr: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
@@ -189,7 +187,8 @@ struct msi_desc {
struct msi_msg msg;
struct irq_affinity_desc *affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
- const void *iommu_cookie;
+ u64 iommu_msi_iova : 58;
+ u64 iommu_msi_shift : 6;
#endif
#ifdef CONFIG_SYSFS
struct device_attribute *sysfs_attrs;
@@ -228,26 +227,13 @@ struct msi_dev_domain {
struct irq_domain *domain;
};
-/**
- * msi_device_data - MSI per device data
- * @properties: MSI properties which are interesting to drivers
- * @platform_data: Platform-MSI specific data
- * @mutex: Mutex protecting the MSI descriptor store
- * @__domains: Internal data for per device MSI domains
- * @__iter_idx: Index to search the next entry for iterators
- */
-struct msi_device_data {
- unsigned long properties;
- struct platform_msi_priv_data *platform_data;
- struct mutex mutex;
- struct msi_dev_domain __domains[MSI_MAX_DEVICE_IRQDOMAINS];
- unsigned long __iter_idx;
-};
-
int msi_setup_device_data(struct device *dev);
-void msi_lock_descs(struct device *dev);
-void msi_unlock_descs(struct device *dev);
+void __msi_lock_descs(struct device *dev);
+void __msi_unlock_descs(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
+ __msi_unlock_descs(_T->lock));
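
The lock guard turns the old msi_lock_descs()/msi_unlock_descs() pairing
into a scope-based pattern. A hedged sketch:

#include <linux/cleanup.h>
#include <linux/msi.h>
#include <linux/printk.h>

static void walk_msi_descs(struct device *dev)
{
	struct msi_desc *desc;

	/* __msi_lock_descs() here, __msi_unlock_descs() at scope exit. */
	guard(msi_descs_lock)(dev);

	msi_for_each_desc(desc, dev, MSI_DESC_ALL)
		pr_debug("virq %u\n", desc->irq);
}
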
struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
enum msi_desc_filter filter);
@@ -306,28 +292,42 @@ struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
#define msi_desc_to_dev(desc) ((desc)->dev)
-#ifdef CONFIG_IRQ_MSI_IOMMU
-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
+ unsigned int msi_shift)
{
- return desc->iommu_cookie;
-}
-
-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
- const void *iommu_cookie)
-{
- desc->iommu_cookie = iommu_cookie;
-}
-#else
-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
-{
- return NULL;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ desc->iommu_msi_iova = msi_iova >> msi_shift;
+ desc->iommu_msi_shift = msi_shift;
+#endif
}
-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
- const void *iommu_cookie)
+/**
+ * msi_msg_set_addr() - Set MSI address in an MSI message
+ *
+ * @desc: MSI descriptor that may carry an IOVA base address for MSI via @iommu_msi_iova/shift
+ * @msg: Target MSI message to set its address_hi and address_lo
+ * @msi_addr: Physical address to set the MSI message
+ *
+ * Notes:
+ * - Overrides @msi_addr with the IOVA base address in @desc if @iommu_msi_shift is set
+ * - Otherwise, simply writes @msi_addr into @msg
+ */
+static inline void msi_msg_set_addr(struct msi_desc *desc, struct msi_msg *msg,
+ phys_addr_t msi_addr)
{
-}
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ if (desc->iommu_msi_shift) {
+ u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
+
+ msg->address_hi = upper_32_bits(msi_iova);
+ msg->address_lo = lower_32_bits(msi_iova) |
+ (msi_addr & ((1 << desc->iommu_msi_shift) - 1));
+ return;
+ }
#endif
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
+}
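
A worked example of the composition, with hypothetical values: a 4K-aligned
IOVA window at 0x00a00000 standing in for the real IOMMU mapping, and
0xfee00040 for the doorbell's physical address:

/*
 * With CONFIG_IRQ_MSI_IOMMU enabled:
 *
 *   msi_desc_set_iommu_msi_iova(desc, 0x00a00000, 12);
 *       desc->iommu_msi_iova  = 0x00a00000 >> 12 = 0xa00
 *       desc->iommu_msi_shift = 12
 *
 *   msi_msg_set_addr(desc, &msg, 0xfee00040);
 *       msi_iova        = 0xa00 << 12 = 0x00a00000
 *       msg->address_lo = 0x00a00000 | (0xfee00040 & 0xfff) = 0x00a00040
 *       msg->address_hi = 0x0
 *
 * The IOMMU-provided IOVA page replaces the physical page of the doorbell
 * while the low offset bits (0x040) are preserved.
 */
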
int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
struct msi_desc *init_desc);
@@ -423,6 +423,7 @@ struct msi_domain_info;
* @msi_init: Domain specific init function for MSI interrupts
* @msi_free: Domain specific function to free a MSI interrupts
* @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_teardown: Reverse the effects of @msi_prepare
* @prepare_desc: Optional function to prepare the allocated MSI descriptor
* in the domain
* @set_desc: Set the msi descriptor for an interrupt
@@ -430,16 +431,15 @@ struct msi_domain_info;
* function.
* @domain_free_irqs: Optional function to override the default free
* function.
- * @msi_post_free: Optional function which is invoked after freeing
- * all interrupts.
* @msi_translate: Optional translate callback to support the odd wire to
* MSI bridges, e.g. MBIGEN
*
* @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
* irqdomain.
*
- * @msi_check, @msi_prepare, @prepare_desc and @set_desc are callbacks used by the
- * msi_domain_alloc/free_irqs*() variants.
+ * @msi_check, @msi_prepare, @msi_teardown, @prepare_desc and
+ * @set_desc are callbacks used by the msi_domain_alloc/free_irqs*()
+ * variants.
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -461,6 +461,8 @@ struct msi_domain_ops {
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
+ void (*msi_teardown)(struct irq_domain *domain,
+ msi_alloc_info_t *arg);
void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
@@ -469,8 +471,6 @@ struct msi_domain_ops {
struct device *dev, int nvec);
void (*domain_free_irqs)(struct irq_domain *domain,
struct device *dev);
- void (*msi_post_free)(struct irq_domain *domain,
- struct device *dev);
int (*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
irq_hw_number_t *hwirq, unsigned int *type);
};
@@ -484,11 +484,13 @@ struct msi_domain_ops {
* gets initialized to the maximum software index limit
* by the domain creation code.
* @ops: The callback data structure
+ * @dev: Device which creates the domain
* @chip: Optional: associated interrupt chip
* @chip_data: Optional: associated interrupt chip data
* @handler: Optional: associated interrupt flow handler
* @handler_data: Optional: associated interrupt flow handler data
* @handler_name: Optional: associated interrupt flow handler name
+ * @alloc_data: Optional: associated interrupt allocation data
* @data: Optional: domain specific data
*/
struct msi_domain_info {
@@ -496,11 +498,13 @@ struct msi_domain_info {
enum irq_domain_bus_token bus_token;
unsigned int hwsize;
struct msi_domain_ops *ops;
+ struct device *dev;
struct irq_chip *chip;
void *chip_data;
irq_flow_handler_t handler;
void *handler_data;
const char *handler_name;
+ msi_alloc_info_t *alloc_data;
void *data;
};
@@ -510,12 +514,14 @@ struct msi_domain_info {
* @chip: Interrupt chip for this domain
* @ops: MSI domain ops
* @info: MSI domain info data
+ * @alloc_info: MSI domain allocation data (architecture specific)
*/
struct msi_domain_template {
char name[48];
struct irq_chip chip;
struct msi_domain_ops ops;
struct msi_domain_info info;
+ msi_alloc_info_t alloc_info;
};
/*
@@ -556,6 +562,10 @@ enum {
MSI_FLAG_USE_DEV_FWNODE = (1 << 7),
/* Set parent->dev into domain->pm_dev on device domain creation */
MSI_FLAG_PARENT_PM_DEV = (1 << 8),
+ /* Support for parent mask/unmask */
+ MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
+ /* Support for parent startup/shutdown */
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT = (1 << 10),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
@@ -573,6 +583,18 @@ enum {
MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
/* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* PCI MSIs cannot be steered separately to CPU cores */
+ MSI_FLAG_NO_AFFINITY = (1 << 21),
+ /* Inhibit usage of entry masking */
+ MSI_FLAG_NO_MASK = (1 << 22),
+};
+
+/*
+ * Flags for msi_parent_ops::chip_flags
+ */
+enum {
+ MSI_CHIP_FLAG_SET_EOI = (1 << 0),
+ MSI_CHIP_FLAG_SET_ACK = (1 << 1),
};
/**
@@ -580,6 +602,8 @@ enum {
*
* @supported_flags: Required: The supported MSI flags of the parent domain
* @required_flags: Optional: The required MSI flags of the parent MSI domain
+ * @chip_flags: Optional: Select MSI chip callbacks to update with defaults
+ * in msi_lib_init_dev_msi_info().
* @bus_select_token: Optional: The bus token of the real parent domain for
* irq_domain::select()
* @bus_select_mask: Optional: A mask of supported BUS_DOMAINs for
@@ -592,6 +616,7 @@ enum {
struct msi_parent_ops {
u32 supported_flags;
u32 required_flags;
+ u32 chip_flags;
u32 bus_select_token;
u32 bus_select_mask;
const char *prefix;
@@ -611,6 +636,10 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
+struct irq_domain_info;
+struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info,
+ const struct msi_parent_ops *msi_parent_ops);
+
bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
const struct msi_domain_template *template,
unsigned int hwsize, void *domain_data,
@@ -639,35 +668,6 @@ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
-struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent);
-
-/* When an MSI domain is used as an intermediate domain */
-int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *args);
-int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
- int virq, int nvec, msi_alloc_info_t *args);
-void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);
-
-struct irq_domain *
-__platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
-
-#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
-#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
-
-int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
-void *platform_msi_get_host_data(struct irq_domain *domain);
/* Per device platform MSI */
int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg);
@@ -701,11 +701,11 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 947410faf9e2..35ca19ae21ae 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -308,32 +308,32 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0];
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0] & 0xff;
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
void cfi_udelay(int us);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index b4fa92a6e44b..75b0b2abc880 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -8,16 +8,17 @@
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-
-#include <asm/unaligned.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#include <asm/barrier.h>
+struct device_node;
+struct module;
+
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
@@ -188,6 +189,7 @@ typedef union {
of living.
*/
+struct mtd_chip_driver;
struct map_info {
const char *name;
unsigned long size;
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
index b125926e458c..0da4b2999576 100644
--- a/include/linux/mtd/nand-ecc-mxic.h
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -16,7 +16,7 @@ struct mxic_ecc_engine;
#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
-struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
@@ -24,7 +24,7 @@ int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
#else /* !CONFIG_MTD_NAND_ECC_MXIC */
-static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+static inline const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
return NULL;
}
diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
new file mode 100644
index 000000000000..e8201d1b7cf9
--- /dev/null
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * QCOM QPIC common APIs header file
+ *
+ * Copyright (c) 2023 Qualcomm Inc.
+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com>
+ *
+ */
+#ifndef __MTD_NAND_QPIC_COMMON_H__
+#define __MTD_NAND_QPIC_COMMON_H__
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_AUTO_STATUS_EN 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+/* dummy register offsets, used by qcom_write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
+#define CW_PER_PAGE_MASK GENMASK(8, 6)
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
+#define STATUS_BFR_READ BIT(30)
+#define SET_RD_MODE_AFTER_STATUS BIT(31)
+
+/* NAND_DEVn_CFG0 bits */
+#define DEV0_CFG1_ECC_DISABLE BIT(0)
+#define WIDE_FLASH BIT(1)
+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
+#define CS_ACTIVE_BSY BIT(5)
+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
+#define ENABLE_BCH_ECC BIT(27)
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE BIT(0)
+#define ECC_SW_RESET BIT(1)
+#define ECC_MODE_MASK GENMASK(5, 4)
+#define ECC_MODE_4BIT 0
+#define ECC_MODE_8BIT 1
+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+#define ECC_FORCE_CLK_OPEN BIT(30)
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR_MASK GENMASK(7, 0)
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS_MASK GENMASK(9, 0)
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET_MASK GENMASK(9, 0)
+#define READ_LOCATION_SIZE_MASK GENMASK(25, 16)
+#define READ_LOCATION_LAST_MASK BIT(31)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * the largest page size we support is 8K; this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * The erased codeword status is used twice in a single transfer, so this
+ * flag determines the current value of the erased codeword status register
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+#define MAX_ADDRESS_CYCLE 5
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
+ * @bam_ce_nitems - the number of elements in the @bam_ce array
+ * @cmd_sgl_nitems - the number of elements in the @cmd_sgl array
+ * @data_sgl_nitems - the number of elements in the @data_sgl array
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start of the command
+ * elements for the current sgl; used for size
+ * calculation of the current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+
+ unsigned int bam_ce_nitems;
+ unsigned int cmd_sgl_nitems;
+ unsigned int data_sgl_nitems;
+
+ struct_group(bam_positions,
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+ );
+};
+
+/*
+ * This data type corresponds to the NAND DMA descriptor
+ * @dma_desc - low level DMA engine descriptor
+ * @node - list node linking this desc_info into the controller's desc_list
+ *
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dir - DMA transfer direction
+ */
+struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct list_head node;
+
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ enum dma_data_direction dir;
+};
+
+/*
+ * Holds the current register values that we want to write. Acts as a
+ * contiguous chunk of memory which we use to write the controller registers
+ * through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
+ __le32 spi_cfg;
+ __le32 num_addr_cycle;
+ __le32 busy_wait_cnt;
+ __le32 flash_feature;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @dev: parent device
+ *
+ * @base: MMIO base
+ *
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ * @iomacro_clk: io macro clock
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes; contains the register values to be
+ * written to the controller
+ * @bam_txn: the current BAM transaction bookkeeping structure
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @qspi: qpic spi structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ *
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @reg_read_dma: contains dma address for register read buffer
+ *
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @max_cwperpage: maximum number of QPIC codewords required,
+ * calculated from the page size of all connected
+ * NAND devices
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select the correct number of codewords
+ * while reading status
+ */
+struct qcom_nand_controller {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller *controller;
+ struct qpic_spi_nand *qspi;
+ struct list_head host_list;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ int reg_read_pos;
+
+ u32 cmd1, vld;
+ bool exec_opwrite;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties which vary
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @bam_offset - BAM registers offset within the controller's register space
+ * @supports_bam - whether NAND controller is using BAM
+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+ * @qpic_version2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+ u32 bam_offset;
+ bool supports_bam;
+ bool nandc_part_of_qpic;
+ bool qpic_version2;
+ bool use_codeword_fixup;
+};
+
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_qpic_bam_dma_done(void *data);
+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags);
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size, unsigned int flags);
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags);
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
+ const void *vaddr, int size, bool flow_control);
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
+ unsigned int flags);
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
+ int num_regs, unsigned int flags);
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_submit_descs(struct qcom_nand_controller *nandc);
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
+#endif
+
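The helpers declared above are the DMA toolkit the QPIC raw-NAND and SPI-NAND front ends share. As a rough illustration of how they compose — a hedged sketch, not part of the patch — the usual pattern is: stage register values in nandc->regs, queue them with qcom_write_reg_dma()/qcom_read_reg_dma(), kick the whole descriptor list with qcom_submit_descs(), then convert the DMA-read registers with qcom_nandc_dev_to_mem(). NAND_FLASH_CMD, NAND_EXEC_CMD and NAND_FLASH_STATUS are assumed to be register offsets defined earlier in this header.

static int example_exec_and_poll(struct qcom_nand_controller *nandc)
{
	int ret;

	/* start from a clean register-read buffer and BAM transaction */
	qcom_clear_read_regs(nandc);
	qcom_clear_bam_transaction(nandc);

	/* queue writes of the pre-staged command and exec registers */
	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1,
			   NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1,
			   NAND_BAM_NEXT_SGL);

	/* queue a read-back of the flash status register */
	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* submit every queued descriptor and wait for completion */
	ret = qcom_submit_descs(nandc);
	if (ret)
		return ret;

	/* make the DMA-read register contents CPU-accessible */
	qcom_nandc_dev_to_mem(nandc, true);

	return 0;
}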
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index b2996dc987ff..09c8c93e4dba 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,7 +21,7 @@ struct nand_device;
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
- * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: no need to start over the operation at the end of each page;
+ * the NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
+ bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
@@ -290,7 +293,7 @@ enum nand_ecc_engine_integration {
struct nand_ecc_engine {
struct device *dev;
struct list_head node;
- struct nand_ecc_engine_ops *ops;
+ const struct nand_ecc_engine_ops *ops;
enum nand_ecc_engine_integration integration;
void *priv;
};
@@ -906,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nanddev_io_page_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
+ * @reqtype: type of I/O request
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
*/
-static inline void nanddev_io_iter_init(struct nand_device *nand,
- enum nand_page_io_req_type reqtype,
- loff_t offs, struct mtd_oob_ops *req,
- struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
@@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nanddev_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @reqtype: type of I/O request
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB).
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
}
/**
@@ -963,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
}
/**
+ * nanddev_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
 * nanddev_io_iter_end - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
@@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
- for (nanddev_io_iter_init(nand, type, start, req, iter); \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
+/**
+ * nanddev_io_for_each_block - Iterate over all NAND pages contained in an MTD
+ * I/O request, one block at a time
+ * @nand: NAND device
+ * @type: I/O request type
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
+
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
@@ -1062,4 +1136,9 @@ static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int threshold);
+
#endif /* __LINUX_MTD_NAND_H */
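For context, a hedged sketch of how a read path might consume the new block iterator (the function and its bad-block policy are illustrative, not from the patch): the request is walked one eraseblock at a time, with iter.req.datalen clamped to what remains of the current block, and the actual transfer is left to the controller-specific code.

static int example_block_read(struct nand_device *nand, loff_t from,
			      struct mtd_oob_ops *ops)
{
	struct nand_io_iter iter;

	nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
		/* a real caller would skip or remap bad blocks */
		if (nanddev_isbad(nand, &iter.req.pos))
			return -EBADMSG;

		/*
		 * iter.req now describes up to one full eraseblock with
		 * iter.req.continuous set; the controller-specific
		 * transfer of iter.req.datalen bytes would happen here.
		 */
	}

	return 0;
}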
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index e84522e31301..d30bdc3fcfd7 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1519,11 +1519,6 @@ int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc);
void rawnand_sw_bch_cleanup(struct nand_chip *chip);
-int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int threshold);
-
int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail);
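Moving nand_check_erased_ecc_chunk() from rawnand.h to nand.h lets generic and SPI-NAND code reuse the long-standing raw-NAND idiom for uncorrectable ECC errors. A hedged sketch of the typical caller (the 512/16 buffer sizes are illustrative): when ECC reports an uncorrectable chunk, check whether it is really just an erased chunk with a few bitflips before declaring data loss.

static int example_handle_ecc_failure(struct nand_device *nand,
				      u8 *data, u8 *oob)
{
	int ret;

	/*
	 * Returns the number of bitflips (and rewrites the buffers to
	 * 0xff) if the chunk is effectively erased, or -EBADMSG if it
	 * is genuinely corrupted.
	 */
	ret = nand_check_erased_ecc_chunk(data, 512, oob, 16, NULL, 0,
					  nanddev_get_ecc_conf(nand)->strength);
	if (ret < 0)
		return -EBADMSG;

	return ret;	/* report as corrected bitflips */
}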
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
index 581603ac1277..871634862627 100644
--- a/include/linux/mtd/spear_smi.h
+++ b/include/linux/mtd/spear_smi.h
@@ -31,12 +31,12 @@
* struct spear_smi_flash_info - platform structure for passing flash
* information
*
- * name: name of the serial nor flash for identification
- * mem_base: the memory base on which the flash is mapped
- * size: size of the flash in bytes
- * partitions: parition details
- * nr_partitions: number of partitions
- * fast_mode: whether flash supports fast mode
+ * @name: name of the serial nor flash for identification
+ * @mem_base: the memory base on which the flash is mapped
+ * @size: size of the flash in bytes
+ * @partitions: partition details
+ * @nr_partitions: number of partitions
+ * @fast_mode: whether flash supports fast mode
*/
struct spear_smi_flash_info {
@@ -51,9 +51,10 @@ struct spear_smi_flash_info {
/**
* struct spear_smi_plat_data - platform structure for configuring smi
*
- * clk_rate: clk rate at which SMI must operate
- * num_flashes: number of flashes present on board
- * board_flash_info: specific details of each flash present on board
+ * @clk_rate: clk rate at which SMI must operate
+ * @num_flashes: number of flashes present on board
+ * @board_flash_info: specific details of each flash present on board
+ * @np: array of DT node pointers for all possible flash chip devices
*/
struct spear_smi_plat_data {
unsigned long clk_rate;
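With the kernel-doc now matching the structures, a board file would tie them together roughly as below — a sketch with made-up values, using only the field names from the kernel-doc above (SZ_8M comes from <linux/sizes.h>):

static struct spear_smi_flash_info example_flash_info[] = {
	{
		.name		= "m25p64",
		.mem_base	= 0xf8000000,
		.size		= SZ_8M,
		.fast_mode	= 1,
	},
};

static struct spear_smi_plat_data example_smi_pdata = {
	.clk_rate	= 50000000,	/* 50 MHz */
	.num_flashes	= ARRAY_SIZE(example_flash_info),
	.board_flash_info = example_flash_info,
};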
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 5c19ead60499..ce76f5c632e1 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -20,126 +20,218 @@
* Standard SPI NAND flash operations
*/
-#define SPINAND_RESET_OP \
+#define SPINAND_RESET_1S_0_0_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_WR_EN_DIS_OP(enable) \
+#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
+#define SPINAND_READID_1S_1S_1S_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+#define SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
-#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+#define SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, valptr, 1))
-#define SPINAND_BLK_ERASE_OP(addr) \
+#define SPINAND_BLK_ERASE_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_OP(addr) \
+#define SPINAND_PAGE_READ_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
- SPI_MEM_OP_ADDR(3, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
- SPI_MEM_OP_ADDR(2, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
-
-#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x8b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xcb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_1S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 1))
-#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_4S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 4))
+#define SPINAND_PROG_LOAD_1S_1S_8S_OP(addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x82, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
+#define SPINAND_PROG_LOAD_1S_8S_8S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0xc2 : 0xc4, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
/**
* Standard SPI NAND flash commands
*/
@@ -262,12 +354,15 @@ struct spinand_manufacturer {
/* SPI NAND manufacturers */
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_8c_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer fmsh_spinand_manufacturer;
extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer skyhigh_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
extern const struct spinand_manufacturer xtx_spinand_manufacturer;
@@ -312,6 +407,9 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
+#define SPINAND_NO_RAW_ACCESS BIT(4)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
@@ -323,6 +421,67 @@ struct spinand_ondie_ecc_conf {
};
/**
+ * struct spinand_otp_layout - structure to describe the SPI NAND OTP area
+ * @npages: number of pages in the OTP
+ * @start_page: start page of the user/factory OTP area.
+ */
+struct spinand_otp_layout {
+ unsigned int npages;
+ unsigned int start_page;
+};
+
+/**
+ * struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area
+ * @info: get the OTP area information
+ * @read: read from the SPI NAND OTP area
+ */
+struct spinand_fact_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+};
+
+/**
+ * struct spinand_user_otp_ops - SPI NAND OTP methods for user area
+ * @info: get the OTP area information
+ * @lock: lock an OTP region
+ * @erase: erase an OTP region
+ * @read: read from the SPI NAND OTP area
+ * @write: write to the SPI NAND OTP area
+ */
+struct spinand_user_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*lock)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*erase)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+ int (*write)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, const u8 *buf);
+};
+
+/**
+ * struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_fact_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_fact_otp_ops *ops;
+};
+
+/**
+ * struct spinand_user_otp - SPI NAND OTP grouping structure for user area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_user_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_user_otp_ops *ops;
+};
+
+/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
* @devid: device ID
@@ -336,6 +495,12 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
+ * @set_cont_read: enable/disable continuous cached reads
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: enable/disable read retry for data recovery
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -354,6 +519,14 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+ struct spinand_fact_otp fact_otp;
+ struct spinand_user_otp user_otp;
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int read_retry);
};
#define SPINAND_ID(__method, ...) \
@@ -377,7 +550,35 @@ struct spinand_info {
}
#define SPINAND_SELECT_TARGET(__func) \
- .select_target = __func,
+ .select_target = __func
+
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read
+
+#define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \
+ .fact_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \
+ .user_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \
+ .read_retries = __read_retries, \
+ .set_read_retry = __set_read_retry
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
@@ -422,7 +623,18 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't based the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
+ * @cont_read_possible: Field filled by the core once the whole system
+ * configuration is known to tell whether continuous reads are
+ * suitable to use or not in general with this chip/configuration.
+ * A per-transfer check must of course be done to ensure it is
+ * actually relevant to enable this feature.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: Enable/disable the read retry feature
*/
struct spinand_device {
struct nand_device base;
@@ -451,6 +663,18 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ int (*configure_chip)(struct spinand_device *spinand);
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+
+ const struct spinand_fact_otp *fact_otp;
+ const struct spinand_user_otp *user_otp;
+
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int retry_mode);
};
/**
@@ -517,6 +741,31 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s);
+
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+size_t spinand_otp_page_size(struct spinand_device *spinand);
+size_t spinand_fact_otp_size(struct spinand_device *spinand);
+size_t spinand_user_otp_size(struct spinand_device *spinand);
+
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf);
+
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand);
+
#endif /* __LINUX_MTD_SPINAND_H */
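The renamed SPINAND_*_1S_1S_1S_OP-style macros encode the command/address/data bus widths (and DTR phases) directly in the name, and every read macro now takes a trailing maximum frequency, with 0 meaning no constraint. A manufacturer driver would use them roughly like this sketch (the variant lists are illustrative; SPINAND_OP_VARIANTS is defined elsewhere in this header):

static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
		SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
		SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));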
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 562f92504f2b..c3f79c4be1cc 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -250,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
int ubi_sync(int ubi_num);
-int ubi_flush(int ubi_num, int vol_id, int lnum);
/*
* This function is the same as the 'ubi_leb_read()' function, but it does not
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a561c629d89f..bf535f0118bb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -49,7 +49,6 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
-#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -65,6 +64,18 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
+/**
+ * mutex_init_with_key - initialize a mutex with a given lockdep key
+ * @mutex: the mutex to be initialized
+ * @key: the lockdep key to be associated with the mutex
+ *
+ * Initialize the mutex to the unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
+
+#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -75,8 +86,23 @@ do { \
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-extern void __mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_init_lockdep(lock, name, key);
+}
+#else
+extern void mutex_init_generic(struct mutex *lock);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_init_generic(lock);
+}
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
/**
* mutex_is_locked - is the mutex locked
@@ -100,32 +126,36 @@ extern bool mutex_is_locked(struct mutex *lock);
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-extern void __mutex_rt_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
-
#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
-#define __mutex_init(mutex, name, key) \
-do { \
- rt_mutex_base_init(&(mutex)->rtmutex); \
- __mutex_rt_init((mutex), name, key); \
-} while (0)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_rt_init_lockdep(struct mutex *mutex, const char *name,
+ struct lock_class_key *key);
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_rt_init_lockdep(lock, name, key);
+}
+
+#else
+extern void mutex_rt_init_generic(struct mutex *mutex);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_rt_init_generic(lock);
+}
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* CONFIG_PREEMPT_RT */
#ifdef CONFIG_DEBUG_MUTEXES
-int __devm_mutex_init(struct device *dev, struct mutex *lock);
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
#else
-static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
{
/*
* When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so
@@ -136,14 +166,17 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
#endif
-#define devm_mutex_init(dev, mutex) \
+#define __mutex_init_ret(mutex) \
({ \
typeof(mutex) mutex_ = (mutex); \
\
mutex_init(mutex_); \
- __devm_mutex_init(dev, mutex_); \
+ mutex_; \
})
+#define devm_mutex_init(dev, mutex) \
+ __devm_mutex_init(dev, __mutex_init_ret(mutex))
+
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
@@ -151,16 +184,15 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
- unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+ unsigned int subclass, struct lockdep_map *nest_lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
#define mutex_lock_nest_lock(lock, nest_lock) \
@@ -169,6 +201,15 @@ do { \
_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
+#define mutex_lock_killable_nest_lock(lock, nest_lock) \
+( \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+ _mutex_lock_killable(lock, 0, &(nest_lock)->dep_map) \
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+ _mutex_lock_killable(lock, subclass, NULL)
+
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -178,6 +219,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
@@ -188,13 +230,30 @@ extern void mutex_lock_io(struct mutex *lock);
*
* Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+#define mutex_trylock_nest_lock(lock, nest_lock) \
+( \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+ _mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map) \
+)
+
+#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
+#else
extern int mutex_trylock(struct mutex *lock);
+#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
+#endif
+
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+
+extern unsigned long mutex_get_owner(struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
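The new *_nest_lock() variants exist for the pattern where many mutexes are all serialized by one outer lock: lockdep is told about the outer dependency instead of flagging a false deadlock when the whole set is taken. A hedged sketch (the pool type and its size are illustrative):

struct example_pool {
	struct mutex outer;		/* serializes whole-pool lockers */
	struct mutex member[8];
};

static int example_lock_all(struct example_pool *p)
{
	int i;

	mutex_lock(&p->outer);
	for (i = 0; i < ARRAY_SIZE(p->member); i++) {
		/* lockdep treats p->outer as the nest lock, not a new class */
		if (mutex_lock_killable_nest_lock(&p->member[i], &p->outer)) {
			while (--i >= 0)
				mutex_unlock(&p->member[i]);
			mutex_unlock(&p->outer);
			return -EINTR;
		}
	}

	return 0;
}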
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 18824064f8c0..e58e59354e23 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -56,18 +56,18 @@ struct mux_control {
/**
* struct mux_chip - Represents a chip holding mux controllers.
* @controllers: Number of mux controllers handled by the chip.
- * @mux: Array of mux controllers that are handled.
* @dev: Device structure.
* @id: Used to identify the device internally.
* @ops: Mux controller operations.
+ * @mux: Array of mux controllers that are handled.
*/
struct mux_chip {
unsigned int controllers;
- struct mux_control *mux;
struct device dev;
int id;
const struct mux_control_ops *ops;
+ struct mux_control mux[] __counted_by(controllers);
};
#define to_mux_chip(x) container_of((x), struct mux_chip, dev)
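Turning @mux into a trailing flexible array means the chip and its controllers become a single allocation sized with struct_size(), and __counted_by(controllers) gives the compiler (and fortified accessors) the array bound. Roughly — a sketch; the real allocation lives in the mux core:

static struct mux_chip *example_alloc_mux_chip(unsigned int controllers)
{
	struct mux_chip *chip;

	/* one allocation for the chip and all trailing controllers */
	chip = kzalloc(struct_size(chip, mux, controllers), GFP_KERNEL);
	if (!chip)
		return NULL;

	/* set the __counted_by() field before touching chip->mux[] */
	chip->controllers = controllers;

	return chip;
}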
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
deleted file mode 100644
index 000b126acfb6..000000000000
--- a/include/linux/mv643xx.h
+++ /dev/null
@@ -1,921 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mv643xx.h - MV-643XX Internal registers definition file.
- *
- * Copyright 2002 Momentum Computer, Inc.
- * Author: Matthew Dharm <mdharm@momenco.com>
- * Copyright 2002 GALILEO TECHNOLOGY, LTD.
- */
-#ifndef __ASM_MV643XX_H
-#define __ASM_MV643XX_H
-
-#include <asm/types.h>
-#include <linux/mv643xx_eth.h>
-#include <linux/mv643xx_i2c.h>
-
-/****************************************/
-/* Processor Address Space */
-/****************************************/
-
-/* DDR SDRAM BAR and size registers */
-
-#define MV64340_CS_0_BASE_ADDR 0x008
-#define MV64340_CS_0_SIZE 0x010
-#define MV64340_CS_1_BASE_ADDR 0x208
-#define MV64340_CS_1_SIZE 0x210
-#define MV64340_CS_2_BASE_ADDR 0x018
-#define MV64340_CS_2_SIZE 0x020
-#define MV64340_CS_3_BASE_ADDR 0x218
-#define MV64340_CS_3_SIZE 0x220
-
-/* Devices BAR and size registers */
-
-#define MV64340_DEV_CS0_BASE_ADDR 0x028
-#define MV64340_DEV_CS0_SIZE 0x030
-#define MV64340_DEV_CS1_BASE_ADDR 0x228
-#define MV64340_DEV_CS1_SIZE 0x230
-#define MV64340_DEV_CS2_BASE_ADDR 0x248
-#define MV64340_DEV_CS2_SIZE 0x250
-#define MV64340_DEV_CS3_BASE_ADDR 0x038
-#define MV64340_DEV_CS3_SIZE 0x040
-#define MV64340_BOOTCS_BASE_ADDR 0x238
-#define MV64340_BOOTCS_SIZE 0x240
-
-/* PCI 0 BAR and size registers */
-
-#define MV64340_PCI_0_IO_BASE_ADDR 0x048
-#define MV64340_PCI_0_IO_SIZE 0x050
-#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058
-#define MV64340_PCI_0_MEMORY0_SIZE 0x060
-#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080
-#define MV64340_PCI_0_MEMORY1_SIZE 0x088
-#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258
-#define MV64340_PCI_0_MEMORY2_SIZE 0x260
-#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280
-#define MV64340_PCI_0_MEMORY3_SIZE 0x288
-
-/* PCI 1 BAR and size registers */
-#define MV64340_PCI_1_IO_BASE_ADDR 0x090
-#define MV64340_PCI_1_IO_SIZE 0x098
-#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0
-#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8
-#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0
-#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8
-#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0
-#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8
-#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0
-#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8
-
-/* SRAM base address */
-#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268
-
-/* internal registers space base address */
-#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068
-
-/* Enables the CS , DEV_CS , PCI 0 and PCI 1
- windows above */
-#define MV64340_BASE_ADDR_ENABLE 0x278
-
-/****************************************/
-/* PCI remap registers */
-/****************************************/
- /* PCI 0 */
-#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0
-#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8
-#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320
-#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100
-#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328
-#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8
-#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330
-#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300
-#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338
- /* PCI 1 */
-#define MV64340_PCI_1_IO_ADDR_REMAP 0x108
-#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110
-#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340
-#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118
-#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348
-#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310
-#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350
-#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318
-#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358
-
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8
-#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0
-#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8
-
-/****************************************/
-/* CPU Control Registers */
-/****************************************/
-
-#define MV64340_CPU_CONFIG 0x000
-#define MV64340_CPU_MODE 0x120
-#define MV64340_CPU_MASTER_CONTROL 0x160
-#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
-#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
-#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168
-
-/****************************************/
-/* SMP RegisterS */
-/****************************************/
-
-#define MV64340_SMP_WHO_AM_I 0x200
-#define MV64340_SMP_CPU0_DOORBELL 0x214
-#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
-#define MV64340_SMP_CPU1_DOORBELL 0x224
-#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
-#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
-#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
-#define MV64340_SMP_SEMAPHOR0 0x244
-#define MV64340_SMP_SEMAPHOR1 0x24c
-#define MV64340_SMP_SEMAPHOR2 0x254
-#define MV64340_SMP_SEMAPHOR3 0x25c
-#define MV64340_SMP_SEMAPHOR4 0x264
-#define MV64340_SMP_SEMAPHOR5 0x26c
-#define MV64340_SMP_SEMAPHOR6 0x274
-#define MV64340_SMP_SEMAPHOR7 0x27c
-
-/****************************************/
-/* CPU Sync Barrier Register */
-/****************************************/
-
-#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
-#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
-#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
-#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8
-
-/****************************************/
-/* CPU Access Protect */
-/****************************************/
-
-#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
-#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
-#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
-#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
-#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
-#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
-#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
-#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8
-
-
-/****************************************/
-/* CPU Error Report */
-/****************************************/
-
-#define MV64340_CPU_ERROR_ADDR_LOW 0x070
-#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
-#define MV64340_CPU_ERROR_DATA_LOW 0x128
-#define MV64340_CPU_ERROR_DATA_HIGH 0x130
-#define MV64340_CPU_ERROR_PARITY 0x138
-#define MV64340_CPU_ERROR_CAUSE 0x140
-#define MV64340_CPU_ERROR_MASK 0x148
-
-/****************************************/
-/* CPU Interface Debug Registers */
-/****************************************/
-
-#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
-#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
-#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
-#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
-#define MV64340_PUNIT_MMASK 0x3e4
-
-/****************************************/
-/* Integrated SRAM Registers */
-/****************************************/
-
-#define MV64340_SRAM_CONFIG 0x380
-#define MV64340_SRAM_TEST_MODE 0X3F4
-#define MV64340_SRAM_ERROR_CAUSE 0x388
-#define MV64340_SRAM_ERROR_ADDR 0x390
-#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
-#define MV64340_SRAM_ERROR_DATA_LOW 0x398
-#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
-#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8
-
-/****************************************/
-/* SDRAM Configuration */
-/****************************************/
-
-#define MV64340_SDRAM_CONFIG 0x1400
-#define MV64340_D_UNIT_CONTROL_LOW 0x1404
-#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
-#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408
-#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c
-#define MV64340_SDRAM_ADDR_CONTROL 0x1410
-#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414
-#define MV64340_SDRAM_OPERATION 0x1418
-#define MV64340_SDRAM_MODE 0x141c
-#define MV64340_EXTENDED_DRAM_MODE 0x1420
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434
-#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438
-#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0
-#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4
-
-/****************************************/
-/* SDRAM Error Report */
-/****************************************/
-
-#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444
-#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440
-#define MV64340_SDRAM_ERROR_ADDR 0x1450
-#define MV64340_SDRAM_RECEIVED_ECC 0x1448
-#define MV64340_SDRAM_CALCULATED_ECC 0x144c
-#define MV64340_SDRAM_ECC_CONTROL 0x1454
-#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458
-
-/******************************************/
-/* Controlled Delay Line (CDL) Registers */
-/******************************************/
-
-#define MV64340_DFCDL_CONFIG0 0x1480
-#define MV64340_DFCDL_CONFIG1 0x1484
-#define MV64340_DLL_WRITE 0x1488
-#define MV64340_DLL_READ 0x148c
-#define MV64340_SRAM_ADDR 0x1490
-#define MV64340_SRAM_DATA0 0x1494
-#define MV64340_SRAM_DATA1 0x1498
-#define MV64340_SRAM_DATA2 0x149c
-#define MV64340_DFCL_PROBE 0x14a0
-
-/******************************************/
-/* Debug Registers */
-/******************************************/
-
-#define MV64340_DUNIT_DEBUG_LOW 0x1460
-#define MV64340_DUNIT_DEBUG_HIGH 0x1464
-#define MV64340_DUNIT_MMASK 0X1b40
-
-/****************************************/
-/* Device Parameters */
-/****************************************/
-
-#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c
-#define MV64340_DEVICE_BANK1_PARAMETERS 0x460
-#define MV64340_DEVICE_BANK2_PARAMETERS 0x464
-#define MV64340_DEVICE_BANK3_PARAMETERS 0x468
-#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c
-#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4
-
-/****************************************/
-/* Device interrupt registers */
-/****************************************/
-
-#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0
-#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4
-#define MV64340_DEVICE_ERROR_ADDR 0x4d8
-#define MV64340_DEVICE_ERROR_DATA 0x4dc
-#define MV64340_DEVICE_ERROR_PARITY 0x4e0
-
-/****************************************/
-/* Device debug registers */
-/****************************************/
-
-#define MV64340_DEVICE_DEBUG_LOW 0x4e4
-#define MV64340_DEVICE_DEBUG_HIGH 0x4e8
-#define MV64340_RUNIT_MMASK 0x4f0
-
-/****************************************/
-/* PCI Slave Address Decoding registers */
-/****************************************/
-
-#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08
-#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88
-#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08
-#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88
-#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c
-#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c
-#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c
-#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c
-#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10
-#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90
-#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10
-#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90
-#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18
-#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98
-#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14
-#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94
-#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14
-#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94
-#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c
-#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20
-#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0
-#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24
-#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4
-#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28
-#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8
-#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00
-#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80
-#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c
-#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c
-#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc
-#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48
-#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8
-#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48
-#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8
-#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c
-#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc
-#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c
-#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc
-#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04
-#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84
-#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08
-#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88
-#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C
-#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C
-#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10
-#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90
-#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50
-#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0
-#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50
-#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0
-#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58
-#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8
-#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54
-#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4
-#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54
-#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8
-#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c
-#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4
-#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00
-#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80
-#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38
-#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8
-#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c
-#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc
-#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40
-#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0
-#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44
-#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4
-#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48
-#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8
-
-/***********************************/
-/* PCI Control Register Map */
-/***********************************/
-
-#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20
-#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0
-#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C
-#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C
-#define MV64340_PCI_0_COMMAND 0xc00
-#define MV64340_PCI_1_COMMAND 0xc80
-#define MV64340_PCI_0_MODE 0xd00
-#define MV64340_PCI_1_MODE 0xd80
-#define MV64340_PCI_0_RETRY 0xc04
-#define MV64340_PCI_1_RETRY 0xc84
-#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04
-#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84
-#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38
-#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8
-#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00
-#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c
-#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04
-#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84
-#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18
-#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98
-#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10
-#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90
-#define MV64340_PCI_0_P2P_CONFIG 0x1d14
-#define MV64340_PCI_1_P2P_CONFIG 0x1d94
-
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58
-
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8
-
-/****************************************/
-/* PCI Configuration Access Registers */
-/****************************************/
-
-#define MV64340_PCI_0_CONFIG_ADDR 0xcf8
-#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc
-#define MV64340_PCI_1_CONFIG_ADDR 0xc78
-#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c
-#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34
-#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4
-
-/****************************************/
-/* PCI Error Report Registers */
-/****************************************/
-
-#define MV64340_PCI_0_SERR_MASK 0xc28
-#define MV64340_PCI_1_SERR_MASK 0xca8
-#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40
-#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0
-#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44
-#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4
-#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48
-#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8
-#define MV64340_PCI_0_ERROR_COMMAND 0x1d50
-#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0
-#define MV64340_PCI_0_ERROR_CAUSE 0x1d58
-#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8
-#define MV64340_PCI_0_ERROR_MASK 0x1d5c
-#define MV64340_PCI_1_ERROR_MASK 0x1ddc
-
-/****************************************/
-/* PCI Debug Registers */
-/****************************************/
-
-#define MV64340_PCI_0_MMASK 0X1D24
-#define MV64340_PCI_1_MMASK 0X1DA4
-
-/*********************************************/
-/* PCI Configuration, Function 0, Registers */
-/*********************************************/
-
-#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000
-#define MV64340_PCI_STATUS_AND_COMMAND 0x004
-#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008
-#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C
-
-#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010
-#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014
-#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018
-#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024
-#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c
-#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
-#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034
-#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C
- /* capability list */
-#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040
-#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044
-#define MV64340_PCI_VPD_ADDR 0x048
-#define MV64340_PCI_VPD_DATA 0x04c
-#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050
-#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054
-#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058
-#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c
-#define MV64340_PCI_X_COMMAND 0x060
-#define MV64340_PCI_X_STATUS 0x064
-#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068
-
-/***********************************************/
-/* PCI Configuration, Function 1, Registers */
-/***********************************************/
-
-#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110
-#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114
-#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118
-#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124
-
-/***********************************************/
-/* PCI Configuration, Function 2, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 3, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
-#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 4, Registers */
-/***********************************************/
-
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
-#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
-#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424
-
-/****************************************/
-/* Messaging Unit Registers (I20) */
-/****************************************/
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C
-
-/****************************************/
-/* Ethernet Unit Registers */
-/****************************************/
-
-/*******************************************/
-/* CUNIT Registers */
-/*******************************************/
-
- /* Address Decoding Register Map */
-
-#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200
-#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208
-#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210
-#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218
-#define MV64340_CUNIT_SIZE0 0xf204
-#define MV64340_CUNIT_SIZE1 0xf20c
-#define MV64340_CUNIT_SIZE2 0xf214
-#define MV64340_CUNIT_SIZE3 0xf21c
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244
-#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250
-#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254
-#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258
-#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C
-
- /* Error Report Registers */
-
-#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310
-#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314
-#define MV64340_CUNIT_ERROR_ADDR 0xf318
-
- /* Cunit Control Registers */
-
-#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300
-#define MV64340_CUNIT_CONFIG_REG 0xb40c
-#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304
-
- /* Cunit Debug Registers */
-
-#define MV64340_CUNIT_DEBUG_LOW 0xf340
-#define MV64340_CUNIT_DEBUG_HIGH 0xf344
-#define MV64340_CUNIT_MMASK 0xf380
-
- /* MPSCs Clocks Routing Registers */
-
-#define MV64340_MPSC_ROUTING_REG 0xb400
-#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404
-#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408
-
- /* MPSCs Interrupts Registers */
-
-#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3))
-#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3))
-
-#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12))
-#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12))
-#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12))
-
- /* MPSC0 Registers */
-
-
-/***************************************/
-/* SDMA Registers */
-/***************************************/
-
-#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13))
-#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13))
-#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13))
-#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13))
-#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13))
-
-#define MV64340_SDMA_CAUSE_REG 0xb800
-#define MV64340_SDMA_MASK_REG 0xb880
-
-/* BRG Interrupts */
-
-#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3))
-#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3))
-#define MV64340_BRG_CAUSE_REG 0xb834
-#define MV64340_BRG_MASK_REG 0xb8b4
-
-/****************************************/
-/* DMA Channel Control */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_CONTROL 0x840
-#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880
-#define MV64340_DMA_CHANNEL1_CONTROL 0x844
-#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884
-#define MV64340_DMA_CHANNEL2_CONTROL 0x848
-#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888
-#define MV64340_DMA_CHANNEL3_CONTROL 0x84C
-#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C
-
-
-/****************************************/
-/* IDMA Registers */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800
-#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804
-#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808
-#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C
-#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810
-#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814
-#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818
-#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c
-#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820
-#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824
-#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828
-#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C
-#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830
-#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834
-#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838
-#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C
-#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870
-#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874
-#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878
-#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C
-
- /* IDMA Address Decoding Base Address Registers */
-
-#define MV64340_DMA_BASE_ADDR_REG0 0xa00
-#define MV64340_DMA_BASE_ADDR_REG1 0xa08
-#define MV64340_DMA_BASE_ADDR_REG2 0xa10
-#define MV64340_DMA_BASE_ADDR_REG3 0xa18
-#define MV64340_DMA_BASE_ADDR_REG4 0xa20
-#define MV64340_DMA_BASE_ADDR_REG5 0xa28
-#define MV64340_DMA_BASE_ADDR_REG6 0xa30
-#define MV64340_DMA_BASE_ADDR_REG7 0xa38
-
- /* IDMA Address Decoding Size Address Register */
-
-#define MV64340_DMA_SIZE_REG0 0xa04
-#define MV64340_DMA_SIZE_REG1 0xa0c
-#define MV64340_DMA_SIZE_REG2 0xa14
-#define MV64340_DMA_SIZE_REG3 0xa1c
-#define MV64340_DMA_SIZE_REG4 0xa24
-#define MV64340_DMA_SIZE_REG5 0xa2c
-#define MV64340_DMA_SIZE_REG6 0xa34
-#define MV64340_DMA_SIZE_REG7 0xa3C
-
- /* IDMA Address Decoding High Address Remap and Access
- Protection Registers */
-
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C
-#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80
-#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70
-#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74
-#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78
-#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c
-#define MV64340_DMA_ARBITER_CONTROL 0x860
-#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0
-
- /* IDMA Headers Retarget Registers */
-
-#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84
-#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88
-
- /* IDMA Interrupt Register */
-
-#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0
-#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4
-#define MV64340_DMA_ERROR_ADDR 0x8c8
-#define MV64340_DMA_ERROR_SELECT 0x8cc
-
- /* IDMA Debug Register ( for internal use ) */
-
-#define MV64340_DMA_DEBUG_LOW 0x8e0
-#define MV64340_DMA_DEBUG_HIGH 0x8e4
-#define MV64340_DMA_SPARE 0xA8C
-
-/****************************************/
-/* Timer_Counter */
-/****************************************/
-
-#define MV64340_TIMER_COUNTER0 0x850
-#define MV64340_TIMER_COUNTER1 0x854
-#define MV64340_TIMER_COUNTER2 0x858
-#define MV64340_TIMER_COUNTER3 0x85C
-#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c
-
-/****************************************/
-/* Watchdog registers */
-/****************************************/
-
-#define MV64340_WATCHDOG_CONFIG_REG 0xb410
-#define MV64340_WATCHDOG_VALUE_REG 0xb414
-
-/****************************************/
-/* I2C Registers */
-/****************************************/
-
-#define MV64XXX_I2C_OFFSET 0xc000
-#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020
-
-/****************************************/
-/* GPP Interface Registers */
-/****************************************/
-
-#define MV64340_GPP_IO_CONTROL 0xf100
-#define MV64340_GPP_LEVEL_CONTROL 0xf110
-#define MV64340_GPP_VALUE 0xf104
-#define MV64340_GPP_INTERRUPT_CAUSE 0xf108
-#define MV64340_GPP_INTERRUPT_MASK0 0xf10c
-#define MV64340_GPP_INTERRUPT_MASK1 0xf114
-#define MV64340_GPP_VALUE_SET 0xf118
-#define MV64340_GPP_VALUE_CLEAR 0xf11c
-
-/****************************************/
-/* Interrupt Controller Registers */
-/****************************************/
-
-/****************************************/
-/* Interrupts */
-/****************************************/
-
-#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004
-#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c
-#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014
-#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c
-#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024
-#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034
-#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c
-#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044
-#define MV64340_INTERRUPT0_MASK_0_LOW 0x054
-#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c
-#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064
-#define MV64340_INTERRUPT1_MASK_0_LOW 0x074
-#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c
-#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084
-
-/****************************************/
-/* MPP Interface Registers */
-/****************************************/
-
-#define MV64340_MPP_CONTROL0 0xf000
-#define MV64340_MPP_CONTROL1 0xf004
-#define MV64340_MPP_CONTROL2 0xf008
-#define MV64340_MPP_CONTROL3 0xf00c
-
-/****************************************/
-/* Serial Initialization registers */
-/****************************************/
-
-#define MV64340_SERIAL_INIT_LAST_DATA 0xf324
-#define MV64340_SERIAL_INIT_CONTROL 0xf328
-#define MV64340_SERIAL_INIT_STATUS 0xf32c
-
-extern void mv64340_irq_init(unsigned int base);
-
-#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 967aa9ea9f96..58600cf234bc 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,6 +7,7 @@
#include <linux/path.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
+#include <linux/fs_struct.h>
enum { MAX_NESTED_LINKS = 8 };
@@ -18,73 +19,150 @@ enum { MAX_NESTED_LINKS = 8 };
enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* pathwalk mode */
-#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
-#define LOOKUP_DIRECTORY 0x0002 /* require a directory */
-#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
-#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
-#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
-#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
-
-#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
-#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_FOLLOW BIT(0) /* follow links at the end */
+#define LOOKUP_DIRECTORY BIT(1) /* require a directory */
+#define LOOKUP_AUTOMOUNT BIT(2) /* force terminal automount */
+#define LOOKUP_EMPTY BIT(3) /* accept empty path [user_... only] */
+#define LOOKUP_LINKAT_EMPTY BIT(4) /* Linkat request with empty path. */
+#define LOOKUP_DOWN BIT(5) /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT BIT(6) /* follow mounts in the end */
+#define LOOKUP_REVAL BIT(7) /* tell ->d_revalidate() to trust no cache */
+#define LOOKUP_RCU BIT(8) /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_CACHED BIT(9) /* Only do cached lookup */
+#define LOOKUP_PARENT BIT(10) /* Looking up final parent in path */
+/* 5 spare bits for pathwalk */
/* These tell filesystem methods that we are dealing with the final component... */
-#define LOOKUP_OPEN 0x0100 /* ... in open */
-#define LOOKUP_CREATE 0x0200 /* ... in object creation */
-#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */
-#define LOOKUP_RENAME_TARGET 0x0800 /* ... in destination of rename() */
+#define LOOKUP_OPEN BIT(16) /* ... in open */
+#define LOOKUP_CREATE BIT(17) /* ... in object creation */
+#define LOOKUP_EXCL BIT(18) /* ... in target must not exist */
+#define LOOKUP_RENAME_TARGET BIT(19) /* ... in destination of rename() */
-/* internal use only */
-#define LOOKUP_PARENT 0x0010
+/* 4 spare bits for intent */
/* Scoping flags for lookup. */
-#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
-#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
-#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
-#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
-#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
-#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
-#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */
+#define LOOKUP_NO_SYMLINKS BIT(24) /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS BIT(25) /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV BIT(26) /* No mountpoint crossing. */
+#define LOOKUP_BENEATH BIT(27) /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT BIT(28) /* Treat dirfd as fs root. */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+/* 3 spare bits for scoping */
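/* Aside, a quick sketch (assuming BIT(n) from <linux/bits.h> expands to
 * (1UL << (n))): only the first three pathwalk flags keep their historical
 * numeric values; the intent and scoping groups move up to bits 16+ and 24+:
 */
static_assert(LOOKUP_FOLLOW == 0x0001);		/* BIT(0), value unchanged */
static_assert(LOOKUP_EMPTY == 0x0008);		/* BIT(3), was 0x4000 */
static_assert(LOOKUP_OPEN == 0x10000);		/* BIT(16), was 0x0100 */
static_assert(LOOKUP_NO_SYMLINKS == 0x1000000);	/* BIT(24), was 0x010000 */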
extern int path_pts(struct path *path);
-extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-
-static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
- struct path *path)
-{
- return user_path_at_empty(dfd, name, flags, path, NULL);
-}
+extern int user_path_at(int, const char __user *, unsigned, struct path *);
struct dentry *lookup_one_qstr_excl(const struct qstr *name,
struct dentry *base,
unsigned int flags);
extern int kern_path(const char *, unsigned, struct path *);
-
-extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
-extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
-extern void done_path_create(struct path *, struct dentry *);
-extern struct dentry *kern_path_locked(const char *, struct path *);
-extern struct dentry *user_path_locked_at(int , const char __user *, struct path *);
+struct dentry *kern_path_parent(const char *name, struct path *parent);
+
+extern struct dentry *start_creating_path(int, const char *, struct path *, unsigned int);
+extern struct dentry *start_creating_user_path(int, const char __user *, struct path *, unsigned int);
+extern void end_creating_path(const struct path *, struct dentry *);
+extern struct dentry *start_removing_path(const char *, struct path *);
+extern struct dentry *start_removing_user_path_at(int, const char __user *, struct path *);
+static inline void end_removing_path(const struct path *path, struct dentry *dentry)
+{
+ end_creating_path(path, dentry);
+}
int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
struct path *parent, struct qstr *last, int *type,
const struct path *root);
int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
unsigned int, struct path *);
-extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
-extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
-struct dentry *lookup_one(struct mnt_idmap *, const char *, struct dentry *, int);
+extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *);
+struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *);
struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
- const char *name, struct dentry *base,
- int len);
+ struct qstr *name, struct dentry *base);
struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
- const char *name,
- struct dentry *base, int len);
+ struct qstr *name,
+ struct dentry *base);
+struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base);
+
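/* A minimal usage sketch for the renamed lookup helpers (assuming the QSTR()
 * initializer from <linux/dcache.h>); "example" is a hypothetical child name.
 * The noperm variants skip permission checks, so the caller must already be
 * entitled to search the parent:
 */
static int example_find_child(struct dentry *parent)
{
	struct qstr name = QSTR("example");
	struct dentry *child;

	child = lookup_noperm_unlocked(&name, parent);
	if (IS_ERR(child))
		return PTR_ERR(child);
	/* ... use child ... */
	dput(child);
	return 0;
}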
+struct dentry *start_creating(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_removing(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_creating_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_removing_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_creating_noperm(struct dentry *parent, struct qstr *name);
+struct dentry *start_removing_noperm(struct dentry *parent, struct qstr *name);
+struct dentry *start_creating_dentry(struct dentry *parent,
+ struct dentry *child);
+struct dentry *start_removing_dentry(struct dentry *parent,
+ struct dentry *child);
+
+/**
+ * end_creating - finish action started with start_creating
+ * @child: dentry returned by start_creating() or vfs_mkdir()
+ *
+ * Unlock and release the child. This can be called after
+ * start_creating() whether that function succeeded or not,
+ * but it is not needed on failure.
+ *
+ * If vfs_mkdir() was called then the value returned from that function
+ * should be given for @child rather than the original dentry, as vfs_mkdir()
+ * may have provided a new dentry.
+ *
+ * If vfs_mkdir() was not called, then @child will be a valid dentry.
+ */
+static inline void end_creating(struct dentry *child)
+{
+ end_dirop(child);
+}
+
+/**
+ * end_creating_keep - finish action started with start_creating() and return result
+ * @child: dentry returned by start_creating() or vfs_mkdir()
+ *
+ * Unlock and return the child. This can be called after
+ * start_creating() whether that function succeeded or not,
+ * but it is not needed on failure.
+ *
+ * If vfs_mkdir() was called then the value returned from that function
+ * should be given for @child rather than the original dentry, as vfs_mkdir()
+ * may have provided a new dentry.
+ *
+ * Returns: @child, which may be a dentry or an error.
+ */
+static inline struct dentry *end_creating_keep(struct dentry *child)
+{
+ if (!IS_ERR(child))
+ dget(child);
+ end_dirop(child);
+ return child;
+}
+
+/**
+ * end_removing - finish action started with start_removing
+ * @child: dentry returned by start_removing()
+ * @parent: dentry given to start_removing()
+ *
+ * Unlock and release the child.
+ *
+ * This is identical to end_dirop(). It can be passed the result of
+ * start_removing() whether that was successful or not, but it is not
+ * needed if start_removing() failed.
+ */
+static inline void end_removing(struct dentry *child)
+{
+ end_dirop(child);
+}
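/* A hedged sketch of the intended create-side calling pattern, mirroring the
 * old kern_path_create()/done_path_create() flow; it assumes
 * end_creating_path(), like end_creating(), tolerates an ERR_PTR child, and
 * the pathname and mode are illustrative:
 */
static int example_mkdir(const char *pathname)
{
	struct path parent;
	struct dentry *dentry;
	int err;

	dentry = start_creating_path(AT_FDCWD, pathname, &parent,
				     LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	/* vfs_mkdir() may return a different dentry; pass that one on */
	dentry = vfs_mkdir(mnt_idmap(parent.mnt), d_inode(parent.dentry),
			   dentry, 0755);
	err = PTR_ERR_OR_ZERO(dentry);
	end_creating_path(&parent, dentry);
	return err;
}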
extern int follow_down_one(struct path *);
extern int follow_down(struct path *path, unsigned int flags);
@@ -93,6 +171,13 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+int start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last);
+int start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last);
+int start_renaming_two_dentries(struct renamedata *rd,
+ struct dentry *old_dentry, struct dentry *new_dentry);
+void end_renaming(struct renamedata *rd);
/**
* mode_strip_umask - handle vfs umask stripping
diff --git a/include/linux/nd.h b/include/linux/nd.h
index b9771ba1ef87..fa099e295f78 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -84,11 +84,7 @@ struct nd_device_driver {
void (*notify)(struct device *dev, enum nvdimm_event event);
};
-static inline struct nd_device_driver *to_nd_device_driver(
- struct device_driver *drv)
-{
- return container_of(drv, struct nd_device_driver, drv);
-};
+#define to_nd_device_driver(__drv) container_of_const(__drv, struct nd_device_driver, drv)
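/* Why container_of_const(): unlike the old inline helper it preserves the
 * constness of its argument, so const callers no longer need a cast. A
 * minimal sketch:
 */
static void example_const_safe(const struct device_driver *cdrv)
{
	const struct nd_device_driver *nd_drv = to_nd_device_driver(cdrv);

	(void)nd_drv;	/* the const qualifier survives the conversion */
}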
/**
* struct nd_namespace_common - core infrastructure of a namespace
diff --git a/include/linux/net.h b/include/linux/net.h
index 688320b79fcc..f58b38ab37f8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -36,14 +36,13 @@ struct net;
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
* Eventually all flags will be in sk->sk_wq->flags.
*/
-#define SOCKWQ_ASYNC_NOSPACE 0
-#define SOCKWQ_ASYNC_WAITDATA 1
-#define SOCK_NOSPACE 2
-#define SOCK_PASSCRED 3
-#define SOCK_PASSSEC 4
-#define SOCK_SUPPORT_ZC 5
-#define SOCK_CUSTOM_SOCKOPT 6
-#define SOCK_PASSPIDFD 7
+enum socket_flags {
+ SOCKWQ_ASYNC_NOSPACE,
+ SOCKWQ_ASYNC_WAITDATA,
+ SOCK_NOSPACE,
+ SOCK_SUPPORT_ZC,
+ SOCK_CUSTOM_SOCKOPT,
+};
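/* These enumerators are bit numbers, not masks: they are used with the
 * atomic bitops on socket->flags, e.g. (sketch):
 */
static inline bool example_supports_zerocopy(const struct socket *sock)
{
	return test_bit(SOCK_SUPPORT_ZC, &sock->flags);
}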
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -70,6 +69,7 @@ enum sock_type {
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
+#endif /* ARCH_HAS_SOCKET_TYPES */
#define SOCK_MAX (SOCK_PACKET + 1)
/* Mask which covers at least up to SOCK_MASK-1. The
@@ -81,8 +81,7 @@ enum sock_type {
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
#endif
-
-#endif /* ARCH_HAS_SOCKET_TYPES */
+#define SOCK_COREDUMP O_NOCTTY
/**
* enum sock_shutdown_cmd - Shutdown types
@@ -149,7 +148,6 @@ typedef struct {
struct vm_area_struct;
struct page;
-struct sockaddr;
struct msghdr;
struct module;
struct sk_buff;
@@ -164,10 +162,10 @@ struct proto_ops {
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
- struct sockaddr *myaddr,
+ struct sockaddr_unsized *myaddr,
int sockaddr_len);
int (*connect) (struct socket *sock,
- struct sockaddr *vaddr,
+ struct sockaddr_unsized *vaddr,
int sockaddr_len, int flags);
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
@@ -322,17 +320,34 @@ static inline bool sendpage_ok(struct page *page)
return !PageSlab(page) && page_count(page) >= 1;
}
+/*
+ * Check sendpage_ok on contiguous pages.
+ */
+static inline bool sendpages_ok(struct page *page, size_t len, size_t offset)
+{
+ struct page *p = page + (offset >> PAGE_SHIFT);
+ size_t count = 0;
+
+ while (count < len) {
+ if (!sendpage_ok(p))
+ return false;
+
+ p++;
+ count += PAGE_SIZE;
+ }
+
+ return true;
+}
+
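/* Usage sketch: a transmit path can vet a whole multi-page span once, rather
 * than checking each page, before opting into page splicing (using
 * MSG_SPLICE_PAGES as the opt-in here is illustrative):
 */
static unsigned int example_msg_flags(struct page *page, size_t len,
				      size_t offset)
{
	/* splice only if no page in the span is slab-backed or refcount-free */
	return sendpages_ok(page, len, offset) ? MSG_SPLICE_PAGES : 0;
}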
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
-int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
-int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_bind(struct socket *sock, struct sockaddr_unsized *addr, int addrlen);
int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
-int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+int kernel_connect(struct socket *sock, struct sockaddr_unsized *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h
deleted file mode 100644
index 1c1332e4df26..000000000000
--- a/include/linux/net/intel/iidc.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2021, Intel Corporation. */
-
-#ifndef _IIDC_H_
-#define _IIDC_H_
-
-#include <linux/auxiliary_bus.h>
-#include <linux/dcbnl.h>
-#include <linux/device.h>
-#include <linux/if_ether.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-
-enum iidc_event_type {
- IIDC_EVENT_BEFORE_MTU_CHANGE,
- IIDC_EVENT_AFTER_MTU_CHANGE,
- IIDC_EVENT_BEFORE_TC_CHANGE,
- IIDC_EVENT_AFTER_TC_CHANGE,
- IIDC_EVENT_CRIT_ERR,
- IIDC_EVENT_NBITS /* must be last */
-};
-
-enum iidc_reset_type {
- IIDC_PFR,
- IIDC_CORER,
- IIDC_GLOBR,
-};
-
-enum iidc_rdma_protocol {
- IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
- IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
-};
-
-#define IIDC_MAX_USER_PRIORITY 8
-#define IIDC_MAX_DSCP_MAPPING 64
-#define IIDC_DSCP_PFC_MODE 0x1
-
-/* Struct to hold per RDMA Qset info */
-struct iidc_rdma_qset_params {
- /* Qset TEID returned to the RDMA driver in
- * ice_add_rdma_qset and used by RDMA driver
- * for calls to ice_del_rdma_qset
- */
- u32 teid; /* Qset TEID */
- u16 qs_handle; /* RDMA driver provides this */
- u16 vport_id; /* VSI index */
- u8 tc; /* TC branch the Qset should belong to */
-};
-
-struct iidc_qos_info {
- u64 tc_ctx;
- u8 rel_bw;
- u8 prio_type;
- u8 egress_virt_up;
- u8 ingress_virt_up;
-};
-
-/* Struct to pass QoS info */
-struct iidc_qos_params {
- struct iidc_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
- u8 up2tc[IIDC_MAX_USER_PRIORITY];
- u8 vport_relative_bw;
- u8 vport_priority_type;
- u8 num_tc;
- u8 pfc_mode;
- u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
-};
-
-struct iidc_event {
- DECLARE_BITMAP(type, IIDC_EVENT_NBITS);
- u32 reg;
-};
-
-struct ice_pf;
-
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset);
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type);
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable);
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos);
-
-/* Structure representing auxiliary driver tailored information about the core
- * PCI dev, each auxiliary driver using the IIDC interface will have an
- * instance of this struct dedicated to it.
- */
-
-struct iidc_auxiliary_dev {
- struct auxiliary_device adev;
- struct ice_pf *pf;
-};
-
-/* structure representing the auxiliary driver. This struct is to be
- * allocated and populated by the auxiliary driver's owner. The core PCI
- * driver will access these ops by performing a container_of on the
- * auxiliary_device->dev.driver.
- */
-struct iidc_auxiliary_drv {
- struct auxiliary_driver adrv;
- /* This event_handler is meant to be a blocking call. For instance,
- * when a BEFORE_MTU_CHANGE event comes in, the event_handler will not
- * return until the auxiliary driver is ready for the MTU change to
- * happen.
- */
- void (*event_handler)(struct ice_pf *pf, struct iidc_event *event);
-};
-
-#endif /* _IIDC_H_*/
diff --git a/include/linux/net/intel/iidc_rdma.h b/include/linux/net/intel/iidc_rdma.h
new file mode 100644
index 000000000000..8baad1082042
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_H_
+#define _IIDC_RDMA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/dscp.h>
+
+enum iidc_rdma_event_type {
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE,
+ IIDC_RDMA_EVENT_BEFORE_TC_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_TC_CHANGE,
+ IIDC_RDMA_EVENT_WARN_RESET,
+ IIDC_RDMA_EVENT_CRIT_ERR,
+ IIDC_RDMA_EVENT_NBITS /* must be last */
+};
+
+struct iidc_rdma_event {
+ DECLARE_BITMAP(type, IIDC_RDMA_EVENT_NBITS);
+ u32 reg;
+};
+
+enum iidc_rdma_reset_type {
+ IIDC_FUNC_RESET,
+ IIDC_DEV_RESET,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+/* Structure to be populated by core LAN PCI driver */
+struct iidc_rdma_core_dev_info {
+ struct pci_dev *pdev; /* PCI device corresponding to the main function */
+ struct auxiliary_device *adev;
+ /* Current active RDMA protocol */
+ enum iidc_rdma_protocol rdma_protocol;
+ void *iidc_priv; /* elements unique to each driver */
+};
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev, each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+struct iidc_rdma_core_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+};
+
+/* structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_rdma_core_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_event *event);
+};
+
+#endif /* _IIDC_RDMA_H_*/
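/* A hedged sketch of an RDMA auxiliary driver consuming these types; the
 * "example_rdma" name and the handler body are illustrative, and probe/remove
 * are omitted:
 */
static void example_event_handler(struct iidc_rdma_core_dev_info *cdev,
				  struct iidc_rdma_event *event)
{
	if (test_bit(IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE, event->type)) {
		/* block here until the device is ready for the MTU change */
	}
}

static struct iidc_rdma_core_auxiliary_drv example_drv = {
	.adrv.name	= "example_rdma",
	.event_handler	= example_event_handler,
};
/* registered via auxiliary_driver_register(&example_drv.adrv) */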
diff --git a/include/linux/net/intel/iidc_rdma_ice.h b/include/linux/net/intel/iidc_rdma_ice.h
new file mode 100644
index 000000000000..b40eed0e13fe
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_ice.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_ICE_H_
+#define _IIDC_RDMA_ICE_H_
+
+#include <linux/dcbnl.h>
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/**
+ * struct iidc_rdma_qset_params - Struct to hold per RDMA Qset info
+ * @teid: TEID of the Qset node
+ * @qs_handle: SW index of the Qset, RDMA provides this
+ * @vport_id: VSI index
+ * @tc: Traffic Class branch the QSet should belong to
+ */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid;
+ u16 qs_handle;
+ u16 vport_id;
+ u8 tc;
+};
+
+struct iidc_rdma_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_rdma_qos_params {
+ struct iidc_rdma_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[DSCP_MAX];
+};
+
+struct iidc_rdma_priv_dev_info {
+ u8 pf_id;
+ u16 vport_id;
+ struct net_device *netdev;
+ struct iidc_rdma_qos_params qos_info;
+ u8 __iomem *hw_addr;
+};
+
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, u16 vsi_id,
+ bool enable);
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+
+#endif /* _IIDC_RDMA_ICE_H_*/
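/* Usage sketch, assuming @cdev was obtained from the auxiliary device and
 * that the qs_handle/vport_id/tc values are placeholders:
 */
static int example_add_qset(struct iidc_rdma_core_dev_info *cdev)
{
	struct iidc_rdma_qset_params qset = {
		.qs_handle = 0,	/* SW index chosen by the RDMA driver */
		.vport_id = 0,
		.tc = 0,
	};
	int err;

	err = ice_add_rdma_qset(cdev, &qset);
	if (err)
		return err;
	/* qset.teid is now valid; keep it for the ice_del_rdma_qset() call */
	return 0;
}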
diff --git a/include/linux/net/intel/iidc_rdma_idpf.h b/include/linux/net/intel/iidc_rdma_idpf.h
new file mode 100644
index 000000000000..bab697e18fd6
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_idpf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Intel Corporation. */
+
+#ifndef _IIDC_RDMA_IDPF_H_
+#define _IIDC_RDMA_IDPF_H_
+
+#include <linux/auxiliary_bus.h>
+
+/* struct to be populated by core LAN PCI driver */
+struct iidc_rdma_vport_dev_info {
+ struct auxiliary_device *adev;
+ struct auxiliary_device *core_adev;
+ struct net_device *netdev;
+ u16 vport_id;
+};
+
+struct iidc_rdma_vport_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+};
+
+struct iidc_rdma_vport_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_vport_dev_info *vdev,
+ struct iidc_rdma_event *event);
+};
+
+/* function type of the core PCI device (PF or VF) */
+enum iidc_function_type {
+ IIDC_FUNCTION_TYPE_PF,
+ IIDC_FUNCTION_TYPE_VF,
+};
+
+struct iidc_rdma_lan_mapped_mem_region {
+ u8 __iomem *region_addr;
+ __le64 size;
+ __le64 start_offset;
+};
+
+struct iidc_rdma_priv_dev_info {
+ struct msix_entry *msix_entries;
+ u16 msix_count; /* How many vectors are reserved for this device */
+ enum iidc_function_type ftype;
+ __le16 num_memory_regions;
+ struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
+};
+
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* _IIDC_RDMA_IDPF_H_ */
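/* A hedged sketch of the synchronous mailbox helper; the payload layout is an
 * opaque RDMA<->idpf ABI, and the 16/64-byte sizes are arbitrary:
 */
static int example_vc_roundtrip(struct iidc_rdma_core_dev_info *cdev)
{
	u8 req[16] = {};
	u8 resp[64];
	u16 resp_len = sizeof(resp);

	return idpf_idc_rdma_vc_send_sync(cdev, req, sizeof(req),
					  resp, &resp_len);
}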
diff --git a/include/linux/net/intel/libie/adminq.h b/include/linux/net/intel/libie/adminq.h
new file mode 100644
index 000000000000..ab13bd777a28
--- /dev/null
+++ b/include/linux/net/intel/libie/adminq.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_ADMINQ_H
+#define __LIBIE_ADMINQ_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#define LIBIE_CHECK_STRUCT_LEN(n, X) \
+ static_assert((n) == sizeof(struct X))
+#define LIBIE_AQ_MAX_BUF_LEN 4096
+
+/**
+ * struct libie_aqc_generic - Generic structure used in adminq communication
+ * @param0: generic parameter high 32bit
+ * @param1: generic parameter lower 32bit
+ * @addr_high: generic address high 32bit
+ * @addr_low: generic address lower 32bit
+ */
+struct libie_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_generic);
+
+/**
+ * struct libie_aqc_get_ver - Used in command get version (direct 0x0001)
+ * @rom_ver: rom version
+ * @fw_build: number corresponding to firmware build
+ * @fw_branch: branch identifier of firmware version
+ * @fw_major: major number of firmware version
+ * @fw_minor: minor number of firmware version
+ * @fw_patch: patch of firmware version
+ * @api_branch: branch identifier of API version
+ * @api_major: major number of API version
+ * @api_minor: minor number of API version
+ * @api_patch: patch of API version
+ */
+struct libie_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_get_ver);
+
+/**
+ * struct libie_aqc_driver_ver - Used in command send driver version
+ * (indirect 0x0002)
+ * @major_ver: driver major version
+ * @minor_ver: driver minor version
+ * @build_ver: driver build version
+ * @subbuild_ver: driver subbuild version
+ * @reserved: for future use
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ */
+struct libie_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_driver_ver);
+
+enum libie_aq_res_id {
+ LIBIE_AQC_RES_ID_NVM = 1,
+ LIBIE_AQC_RES_ID_SDP = 2,
+ LIBIE_AQC_RES_ID_CHNG_LOCK = 3,
+ LIBIE_AQC_RES_ID_GLBL_LOCK = 4,
+};
+
+enum libie_aq_res_access_type {
+ LIBIE_AQC_RES_ACCESS_READ = 1,
+ LIBIE_AQC_RES_ACCESS_WRITE = 2,
+};
+
+#define LIBIE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define LIBIE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define LIBIE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define LIBIE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+
+#define LIBIE_AQ_RES_GLBL_SUCCESS 0
+#define LIBIE_AQ_RES_GLBL_IN_PROG 1
+#define LIBIE_AQ_RES_GLBL_DONE 2
+
+/**
+ * struct libie_aqc_req_res - Request resource ownership
+ * @res_id: resource ID (look at enum definition above)
+ * @access_type: read or write (enum definition above)
+ * @timeout: Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided in
+ * milliseconds.
+ * @res_number: for SDP, this is the pin ID of the SDP
+ * @status: status only used for LIBIE_AQC_RES_ID_GLBL_LOCK, for others reserved
+ * @reserved: reserved for future use
+ *
+ * Used in commands:
+ * request resource ownership (direct 0x0008)
+ * release resource ownership (direct 0x0009)
+ */
+struct libie_aqc_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ __le32 timeout;
+ __le32 res_number;
+ __le16 status;
+ u8 reserved[2];
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_req_res);
+
+/**
+ * struct libie_aqc_list_caps - Getting capabilities
+ * @cmd_flags: command flags
+ * @pf_index: index of PF to get caps from
+ * @reserved: reserved for future use
+ * @count: number of capabilities records
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ *
+ * Used in commands:
+ * get function capabilities (indirect 0x000A)
+ * get device capabilities (indirect 0x000B)
+ */
+struct libie_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+#define LIBIE_AQC_CAPS_SWITCH_MODE 0x0001
+#define LIBIE_AQC_CAPS_MNG_MODE 0x0002
+#define LIBIE_AQC_CAPS_NPAR_ACTIVE 0x0003
+#define LIBIE_AQC_CAPS_OS2BMC_CAP 0x0004
+#define LIBIE_AQC_CAPS_VALID_FUNCTIONS 0x0005
+#define LIBIE_AQC_MAX_VALID_FUNCTIONS 0x8
+#define LIBIE_AQC_CAPS_SRIOV 0x0012
+#define LIBIE_AQC_CAPS_VF 0x0013
+#define LIBIE_AQC_CAPS_VMDQ 0x0014
+#define LIBIE_AQC_CAPS_8021QBG 0x0015
+#define LIBIE_AQC_CAPS_8021QBR 0x0016
+#define LIBIE_AQC_CAPS_VSI 0x0017
+#define LIBIE_AQC_CAPS_DCB 0x0018
+#define LIBIE_AQC_CAPS_FCOE 0x0021
+#define LIBIE_AQC_CAPS_ISCSI 0x0022
+#define LIBIE_AQC_CAPS_RSS 0x0040
+#define LIBIE_AQC_CAPS_RXQS 0x0041
+#define LIBIE_AQC_CAPS_TXQS 0x0042
+#define LIBIE_AQC_CAPS_MSIX 0x0043
+#define LIBIE_AQC_CAPS_VF_MSIX 0x0044
+#define LIBIE_AQC_CAPS_FD 0x0045
+#define LIBIE_AQC_CAPS_1588 0x0046
+#define LIBIE_AQC_CAPS_MAX_MTU 0x0047
+#define LIBIE_AQC_CAPS_NVM_VER 0x0048
+#define LIBIE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define LIBIE_AQC_CAPS_OROM_VER 0x004A
+#define LIBIE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define LIBIE_AQC_CAPS_NET_VER 0x004C
+#define LIBIE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define LIBIE_AQC_CAPS_RDMA 0x0051
+#define LIBIE_AQC_CAPS_LED 0x0061
+#define LIBIE_AQC_CAPS_SDP 0x0062
+#define LIBIE_AQC_CAPS_MDIO 0x0063
+#define LIBIE_AQC_CAPS_WSR_PROT 0x0064
+#define LIBIE_AQC_CAPS_SENSOR_READING 0x0067
+#define LIBIE_AQC_INLINE_IPSEC 0x0070
+#define LIBIE_AQC_CAPS_NUM_ENABLED_PORTS 0x0072
+#define LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define LIBIE_AQC_CAPS_NVM_MGMT 0x0080
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define LIBIE_AQC_CAPS_NAC_TOPOLOGY 0x0087
+#define LIBIE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define LIBIE_AQC_BIT_ROCEV2_LAG BIT(0)
+#define LIBIE_AQC_BIT_SRIOV_LAG BIT(1)
+#define LIBIE_AQC_BIT_SRIOV_AA_LAG BIT(2)
+#define LIBIE_AQC_CAPS_FLEX10 0x00F1
+#define LIBIE_AQC_CAPS_CEM 0x00F2
+
+/**
+ * struct libie_aqc_list_caps_elem - Getting list of caps elements
+ * @cap: one from the defines list above
+ * @major_ver: major version
+ * @minor_ver: minor version
+ * @number: number of resources described by this capability
+ * @logical_id: logical ID, only meaningful for some types of resources
+ * @phys_id: physical ID, only meaningful for some types of resources
+ * @rsvd1: reserved for future use
+ * @rsvd2: reserved for future use
+ */
+struct libie_aqc_list_caps_elem {
+ __le16 cap;
+
+ u8 major_ver;
+ u8 minor_ver;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aqc_list_caps_elem);
+
+/* Admin Queue command opcodes */
+enum libie_adminq_opc {
+ /* FW Logging Commands */
+ libie_aqc_opc_fw_logs_config = 0xFF30,
+ libie_aqc_opc_fw_logs_register = 0xFF31,
+ libie_aqc_opc_fw_logs_query = 0xFF32,
+ libie_aqc_opc_fw_logs_event = 0xFF33,
+};
+
+enum libie_aqc_fw_logging_mod {
+ LIBIE_AQC_FW_LOG_ID_GENERAL = 0,
+ LIBIE_AQC_FW_LOG_ID_CTRL,
+ LIBIE_AQC_FW_LOG_ID_LINK,
+ LIBIE_AQC_FW_LOG_ID_LINK_TOPO,
+ LIBIE_AQC_FW_LOG_ID_DNL,
+ LIBIE_AQC_FW_LOG_ID_I2C,
+ LIBIE_AQC_FW_LOG_ID_SDP,
+ LIBIE_AQC_FW_LOG_ID_MDIO,
+ LIBIE_AQC_FW_LOG_ID_ADMINQ,
+ LIBIE_AQC_FW_LOG_ID_HDMA,
+ LIBIE_AQC_FW_LOG_ID_LLDP,
+ LIBIE_AQC_FW_LOG_ID_DCBX,
+ LIBIE_AQC_FW_LOG_ID_DCB,
+ LIBIE_AQC_FW_LOG_ID_XLR,
+ LIBIE_AQC_FW_LOG_ID_NVM,
+ LIBIE_AQC_FW_LOG_ID_AUTH,
+ LIBIE_AQC_FW_LOG_ID_VPD,
+ LIBIE_AQC_FW_LOG_ID_IOSF,
+ LIBIE_AQC_FW_LOG_ID_PARSER,
+ LIBIE_AQC_FW_LOG_ID_SW,
+ LIBIE_AQC_FW_LOG_ID_SCHEDULER,
+ LIBIE_AQC_FW_LOG_ID_TXQ,
+ LIBIE_AQC_FW_LOG_ID_RSVD,
+ LIBIE_AQC_FW_LOG_ID_POST,
+ LIBIE_AQC_FW_LOG_ID_WATCHDOG,
+ LIBIE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ LIBIE_AQC_FW_LOG_ID_MNG,
+ LIBIE_AQC_FW_LOG_ID_SYNCE,
+ LIBIE_AQC_FW_LOG_ID_HEALTH,
+ LIBIE_AQC_FW_LOG_ID_TSDRV,
+ LIBIE_AQC_FW_LOG_ID_PFREG,
+ LIBIE_AQC_FW_LOG_ID_MDLVER,
+ LIBIE_AQC_FW_LOG_ID_MAX
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+#define LIBIE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define LIBIE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define LIBIE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define LIBIE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define LIBIE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define LIBIE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+#define LIBIE_AQC_FW_LOG_MIN_RESOLUTION 1
+#define LIBIE_AQC_FW_LOG_MAX_RESOLUTION 128
+
+struct libie_aqc_fw_log {
+ u8 cmd_flags;
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct libie_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
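/* A minimal sketch of filling one entry of the indirect 0xFF30 buffer (one
 * libie_aqc_fw_log_cfg_resp per module being configured); level 3 is assumed
 * to map to a "normal" verbosity per the fwlog levels:
 */
static void example_fill_cfg_entry(struct libie_aqc_fw_log_cfg_resp *e)
{
	e->module_identifier = cpu_to_le16(LIBIE_AQC_FW_LOG_ID_GENERAL);
	e->log_level = 3;
	e->rsvd0 = 0;
}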
+
+/**
+ * struct libie_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: LIBIE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are little-endian, laid out as
+ * 32-bit words.
+ */
+struct libie_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct libie_aqc_generic generic;
+ struct libie_aqc_get_ver get_ver;
+ struct libie_aqc_driver_ver driver_ver;
+ struct libie_aqc_req_res res_owner;
+ struct libie_aqc_list_caps get_cap;
+ struct libie_aqc_fw_log fw_log;
+ } params;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aq_desc);
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define LIBIE_AQ_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+#define LIBIE_AQ_FLAG_DD BIT(0) /* 0x1 */
+#define LIBIE_AQ_FLAG_CMP BIT(1) /* 0x2 */
+#define LIBIE_AQ_FLAG_ERR BIT(2) /* 0x4 */
+#define LIBIE_AQ_FLAG_VFE BIT(3) /* 0x8 */
+#define LIBIE_AQ_FLAG_LB BIT(9) /* 0x200 */
+#define LIBIE_AQ_FLAG_RD BIT(10) /* 0x400 */
+#define LIBIE_AQ_FLAG_VFC BIT(11) /* 0x800 */
+#define LIBIE_AQ_FLAG_BUF BIT(12) /* 0x1000 */
+#define LIBIE_AQ_FLAG_SI BIT(13) /* 0x2000 */
+#define LIBIE_AQ_FLAG_EI BIT(14) /* 0x4000 */
+#define LIBIE_AQ_FLAG_FE BIT(15) /* 0x8000 */
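As a worked example of the flag bits, here is a hedged sketch of preparing an indirect command that attaches a driver-owned buffer. It mirrors how the ice driver uses the equivalent bits (BUF for an attached buffer, RD when the buffer carries command data, LB for buffers larger than LIBIE_AQ_LG_BUF), which may differ here; opcode and length are placeholders:

/* Sketch: fill a descriptor for an indirect command. */
static void prep_indirect_cmd(struct libie_aq_desc *desc, u16 opcode, u16 len)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->datalen = cpu_to_le16(len);
	desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_RD);
	if (len > LIBIE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
}

On completion, firmware writes a status into desc->retval, which libie_aq_str() below can turn into a printable string.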
+
+/* error codes */
+enum libie_aq_err {
+ LIBIE_AQ_RC_OK = 0, /* Success */
+ LIBIE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ LIBIE_AQ_RC_ENOENT = 2, /* No such element */
+ LIBIE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ LIBIE_AQ_RC_EIO = 5, /* I/O error */
+ LIBIE_AQ_RC_EAGAIN = 8, /* Try again */
+ LIBIE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ LIBIE_AQ_RC_EACCES = 10, /* Permission denied */
+ LIBIE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ LIBIE_AQ_RC_EEXIST = 13, /* Object already exists */
+ LIBIE_AQ_RC_EINVAL = 14, /* Invalid argument */
+ LIBIE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ LIBIE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ LIBIE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ LIBIE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ LIBIE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ LIBIE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ LIBIE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ LIBIE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+};
+
+static inline void *libie_aq_raw(struct libie_aq_desc *desc)
+{
+ return &desc->params.raw;
+}
+
+const char *libie_aq_str(enum libie_aq_err err);
+
+#endif /* __LIBIE_ADMINQ_H */
diff --git a/include/linux/net/intel/libie/fwlog.h b/include/linux/net/intel/libie/fwlog.h
new file mode 100644
index 000000000000..7273c78c826b
--- /dev/null
+++ b/include/linux/net/intel/libie/fwlog.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _LIBIE_FWLOG_H_
+#define _LIBIE_FWLOG_H_
+
+#include <linux/net/intel/libie/adminq.h>
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if the log level is set to LIBIE_FWLOG_LEVEL_VERBOSE, then
+ * all other log levels are included (except LIBIE_FWLOG_LEVEL_NONE)
+ */
+enum libie_fwlog_level {
+ LIBIE_FWLOG_LEVEL_NONE = 0,
+ LIBIE_FWLOG_LEVEL_ERROR = 1,
+ LIBIE_FWLOG_LEVEL_WARNING = 2,
+ LIBIE_FWLOG_LEVEL_NORMAL = 3,
+ LIBIE_FWLOG_LEVEL_VERBOSE = 4,
+ LIBIE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
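The cumulative semantics described above reduce to a single comparison on the consumer side. A minimal sketch, assuming a hypothetical helper name:

/* Hypothetical helper: an event at 'event' level is emitted when the
 * configured level is at or above it, unless logging is fully off.
 */
static bool fwlog_level_enabled(enum libie_fwlog_level cfg,
				enum libie_fwlog_level event)
{
	return cfg != LIBIE_FWLOG_LEVEL_NONE && event <= cfg;
}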
+
+struct libie_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct libie_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct libie_fwlog_module_entry module_entries[LIBIE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define LIBIE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define LIBIE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling libie_fwlog_init() so the PF registers for
+ * firmware logging on initialization
+ */
+#define LIBIE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the libie_aq_fwlog_get() response if the PF is registered for
+ * FW logging events over ARQ
+ */
+#define LIBIE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct libie_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct libie_fwlog_ring {
+ struct libie_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define LIBIE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define LIBIE_FWLOG_RING_SIZE_DFLT 256
+#define LIBIE_FWLOG_RING_SIZE_MAX 512
+
+struct libie_fwlog {
+ struct libie_fwlog_cfg cfg;
+ bool supported; /* does hardware support FW logging? */
+ struct libie_fwlog_ring ring;
+ struct dentry *debugfs;
+	/* keep track of all the dentries for FW log modules */
+ struct dentry **debugfs_modules;
+ struct_group_tagged(libie_fwlog_api, api,
+ struct pci_dev *pdev;
+ int (*send_cmd)(void *, struct libie_aq_desc *, void *, u16);
+ void *priv;
+ struct dentry *debugfs_root;
+ );
+};
+
+#if IS_ENABLED(CONFIG_LIBIE_FWLOG)
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api);
+void libie_fwlog_deinit(struct libie_fwlog *fwlog);
+void libie_fwlog_reregister(struct libie_fwlog *fwlog);
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len);
+#else
+static inline int libie_fwlog_init(struct libie_fwlog *fwlog,
+ struct libie_fwlog_api *api)
+{
+ return -EOPNOTSUPP;
+}
+static inline void libie_fwlog_deinit(struct libie_fwlog *fwlog) { }
+static inline void libie_fwlog_reregister(struct libie_fwlog *fwlog) { }
+static inline void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf,
+ u16 len) { }
+#endif /* CONFIG_LIBIE_FWLOG */
+#endif /* _LIBIE_FWLOG_H_ */
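A hedged usage sketch of the entry points above: a driver wires its Admin Queue send routine into the library at probe time, calls libie_fwlog_deinit() on remove, and libie_fwlog_reregister() after a reset. The my_* names and driver types are hypothetical:

/* Sketch: adapt the driver's AQ send routine to the library callback. */
static int my_send_cmd(void *priv, struct libie_aq_desc *desc,
		       void *buf, u16 buf_size)
{
	struct my_hw *hw = priv;		/* hypothetical driver type */

	return my_aq_send(hw, desc, buf, buf_size);	/* hypothetical */
}

static int my_fwlog_probe(struct my_pf *pf)
{
	struct libie_fwlog_api api = {
		.pdev = pf->pdev,
		.send_cmd = my_send_cmd,
		.priv = &pf->hw,
		.debugfs_root = pf->debugfs_root,
	};

	return libie_fwlog_init(&pf->fwlog, &api);
}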
diff --git a/include/linux/net/intel/libie/pctype.h b/include/linux/net/intel/libie/pctype.h
new file mode 100644
index 000000000000..d783417fbf36
--- /dev/null
+++ b/include/linux/net/intel/libie/pctype.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_PCTYPE_H
+#define __LIBIE_PCTYPE_H
+
+/* Packet Classifier Type indexes, used to set the xxQF_HENA registers. Also
+ * communicated over the virtchnl API as part of struct virtchnl_rss_hena.
+ */
+enum libie_filter_pctype {
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+	 */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV4 = 36,
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+	 */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ LIBIE_FILTER_PCTYPE_FCOE_OX = 48,
+ LIBIE_FILTER_PCTYPE_FCOE_RX = 49,
+ LIBIE_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ LIBIE_FILTER_PCTYPE_L2_PAYLOAD = 63
+};
+
+#endif /* __LIBIE_PCTYPE_H */
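These values are consumed as bit positions when building a hash-enable word, whether written to the HENA registers or sent over virtchnl. A minimal sketch:

/* Sketch: enable RSS hashing for IPv4 and IPv6 TCP flows. */
static u64 my_default_hena(void)
{
	return BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) |
	       BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP);
}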
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index eb01c37e71e0..f4936d9c2b3c 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -4,10 +4,41 @@
#define _LINUX_NET_TIMESTAMPING_H_
#include <uapi/linux/net_tstamp.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
-enum hwtstamp_source {
- HWTSTAMP_SOURCE_NETDEV,
- HWTSTAMP_SOURCE_PHYLIB,
+#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \
+ SOF_TIMESTAMPING_TX_SOFTWARE | \
+ SOF_TIMESTAMPING_SOFTWARE)
+
+#define SOF_TIMESTAMPING_HARDWARE_MASK (SOF_TIMESTAMPING_RX_HARDWARE | \
+ SOF_TIMESTAMPING_TX_HARDWARE | \
+ SOF_TIMESTAMPING_RAW_HARDWARE)
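A minimal sketch of how these masks classify a timestamping request word (the helper names are assumptions):

/* Sketch: split SOF_TIMESTAMPING flags into software/hardware requests. */
static bool wants_hw_tstamp(unsigned int flags)
{
	return flags & SOF_TIMESTAMPING_HARDWARE_MASK;
}

static bool wants_sw_tstamp(unsigned int flags)
{
	return flags & SOF_TIMESTAMPING_SOFTWARE_MASK;
}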
+
+/**
+ * struct hwtstamp_provider_desc - hwtstamp provider description
+ *
+ * @index: index of the hwtstamp provider.
+ * @qualifier: hwtstamp provider qualifier.
+ */
+struct hwtstamp_provider_desc {
+ int index;
+ enum hwtstamp_provider_qualifier qualifier;
+};
+
+/**
+ * struct hwtstamp_provider - hwtstamp provider object
+ *
+ * @rcu_head: RCU callback used to free the struct.
+ * @source: source of the hwtstamp provider.
+ * @phydev: pointer to the phydev source, in case the PTP clock comes
+ *	    from phylib
+ * @desc: hwtstamp provider description.
+ */
+struct hwtstamp_provider {
+ struct rcu_head rcu_head;
+ enum hwtstamp_source source;
+ struct phy_device *phydev;
+ struct hwtstamp_provider_desc desc;
};
/**
@@ -22,6 +53,7 @@ enum hwtstamp_source {
* copied the ioctl request back to user space
* @source: indication whether timestamps should come from the netdev or from
* an attached phylib PHY
+ * @qualifier: qualifier of the hwtstamp provider
*
* Prefer using this structure for in-kernel processing of hardware
* timestamping configuration, over the inextensible struct hwtstamp_config
@@ -34,6 +66,7 @@ struct kernel_hwtstamp_config {
struct ifreq *ifr;
bool copied_to_user;
enum hwtstamp_source source;
+ enum hwtstamp_provider_qualifier qualifier;
};
static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7c2d77d75a88..93e4da7046a1 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -24,9 +24,8 @@ enum {
NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
NETIF_F_GSO_BIT, /* Enable software GSO. */
- NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
- /* do not use LLTX in new drivers */
- NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
+ __UNUSED_NETIF_F_12,
+ __UNUSED_NETIF_F_13,
NETIF_F_GRO_BIT, /* Generic receive offload */
NETIF_F_LRO_BIT, /* large receive offload */
@@ -54,12 +53,12 @@ enum {
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
+ NETIF_F_GSO_ACCECN_BIT, /* TCP AccECN w/ TSO (no clear CWR) */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_FRAGLIST_BIT,
+ NETIF_F_GSO_ACCECN_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
- NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
NETIF_F_RXHASH_BIT, /* Receive hashing offload */
NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
@@ -106,7 +105,6 @@ enum {
#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
-#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
@@ -120,10 +118,8 @@ enum {
#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX)
#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
-#define NETIF_F_LLTX __NETIF_F(LLTX)
#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
#define NETIF_F_LRO __NETIF_F(LRO)
-#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
@@ -132,6 +128,7 @@ enum {
#define NETIF_F_SG __NETIF_F(SG)
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
+#define NETIF_F_GSO_ACCECN __NETIF_F(GSO_ACCECN)
#define NETIF_F_TSO __NETIF_F(TSO)
#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
@@ -192,8 +189,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_NEVER_CHANGE NETIF_F_VLAN_CHALLENGED
/* remember that ((t)1 << t_BITS) is undefined in C99 */
#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
@@ -214,11 +210,9 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
-#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
- NETIF_F_FSO)
-
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
+ NETIF_F_GSO_ACCECN | NETIF_F_GSO_SCTP | \
NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
@@ -261,4 +255,29 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
+/* virtual device features */
+#define MASTER_UPPER_DEV_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+#define MASTER_UPPER_DEV_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_PARTIAL)
+
+#define MASTER_UPPER_DEV_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_GSO_SOFTWARE)
+
+#define MASTER_UPPER_DEV_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+#define MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
+
+static inline netdev_features_t netdev_base_features(netdev_features_t features)
+{
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+ return features;
+}
+
#endif /* _LINUX_NETDEV_FEATURES_H */
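A hedged sketch of how a stacked device might combine netdev_base_features() with the MASTER_UPPER_DEV_* masks when recomputing features from a lower device; this loosely mirrors the bonding/team pattern, and the function name is a placeholder:

/* Sketch: start from a sane base, then fold in one lower device. */
static netdev_features_t
compute_master_features(struct net_device *master, struct net_device *lower)
{
	netdev_features_t features;

	features = netdev_base_features(master->hw_features);
	return netdev_increment_features(features, lower->features,
					 MASTER_UPPER_DEV_VLAN_FEATURES);
}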
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20c6c99eb88..5870a9e514a5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -40,9 +40,9 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
-
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
+#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
@@ -52,6 +52,7 @@
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
+#include <net/neighbour_tables.h>
struct netpoll_info;
struct device;
@@ -62,6 +63,7 @@ struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
+struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
@@ -79,6 +81,9 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
+struct ethtool_netdev_state;
+struct phy_link_topology;
+struct hwtstamp_provider;
typedef u32 xdp_features_t;
@@ -335,15 +340,45 @@ struct gro_list {
};
/*
- * size of gro hash buckets, must less than bit number of
- * napi_struct::gro_bitmask
+ * size of gro hash buckets, must be <= the number of bits in
+ * gro_node::bitmask
*/
#define GRO_HASH_BUCKETS 8
+/**
+ * struct gro_node - structure to support Generic Receive Offload
+ * @bitmask: bitmask to indicate used buckets in @hash
+ * @hash: hashtable of pending aggregated skbs, separated by flows
+ * @rx_list: list of pending ``GRO_NORMAL`` skbs
+ * @rx_count: cached current length of @rx_list
+ * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
+ */
+struct gro_node {
+ unsigned long bitmask;
+ struct gro_list hash[GRO_HASH_BUCKETS];
+ struct list_head rx_list;
+ u32 rx_count;
+ u32 cached_napi_id;
+};
+
+/*
+ * Structure for per-NAPI config
+ */
+struct napi_config {
+ u64 gro_flush_timeout;
+ u64 irq_suspend_timeout;
+ u32 defer_hard_irqs;
+ cpumask_t affinity_mask;
+ u8 threaded;
+ unsigned int napi_id;
+};
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
+ /* This field should be first or softnet_data.backlog needs tweaks. */
+ unsigned long state;
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
@@ -352,10 +387,8 @@ struct napi_struct {
*/
struct list_head poll_list;
- unsigned long state;
int weight;
- int defer_hard_irqs_count;
- unsigned long gro_bitmask;
+ u32 defer_hard_irqs_count;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
/* CPU actively polling if netpoll is configured */
@@ -364,17 +397,23 @@ struct napi_struct {
/* CPU on which NAPI has been scheduled for processing */
int list_owner;
struct net_device *dev;
- struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
- struct list_head rx_list; /* Pending GRO_NORMAL skbs */
- int rx_count; /* length of rx_list */
- unsigned int napi_id;
+ struct gro_node gro;
struct hrtimer timer;
+ /* all fields past this point are write-protected by netdev_lock */
struct task_struct *thread;
+ unsigned long gro_flush_timeout;
+ unsigned long irq_suspend_timeout;
+ u32 defer_hard_irqs;
/* control-path-only fields follow */
+ u32 napi_id;
struct list_head dev_list;
struct hlist_node napi_hash_node;
int irq;
+ struct irq_affinity_notify notify;
+ int napi_rmap_idx;
+ int index;
+ struct napi_config *config;
};
enum {
@@ -384,10 +423,12 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_IN_BUSY_POLL, /* Do not rearm NAPI interrupt */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
+ NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */
+ NAPI_STATE_THREADED_BUSY_POLL, /* The threaded NAPI poller will busy poll */
};
enum {
@@ -401,6 +442,8 @@ enum {
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
+ NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER),
+ NAPIF_STATE_THREADED_BUSY_POLL = BIT(NAPI_STATE_THREADED_BUSY_POLL),
};
enum gro_result {
@@ -491,7 +534,7 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n)
* is scheduled for example in the context of delayed timer
* that can be skipped if a NAPI is already scheduled.
*
- * Return True if NAPI is scheduled, False otherwise.
+ * Return: True if NAPI is scheduled, False otherwise.
*/
static inline bool napi_is_scheduled(struct napi_struct *n)
{
@@ -506,7 +549,7 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
- * Return true if we schedule a NAPI or false if not.
+ * Return: true if we schedule a NAPI or false if not.
* Refer to napi_schedule_prep() for additional reason on why
* a NAPI might not be scheduled.
*/
@@ -540,7 +583,7 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
* Mark NAPI processing as complete. Should only be called if poll budget
* has not been completely consumed.
* Prefer over napi_complete().
- * Return false if device should avoid rearming interrupts.
+ * Return: false if device should avoid rearming interrupts.
*/
bool napi_complete_done(struct napi_struct *n, int work_done);
@@ -549,18 +592,15 @@ static inline bool napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0);
}
-int dev_set_threaded(struct net_device *dev, bool threaded);
+void netif_threaded_enable(struct net_device *dev);
+int dev_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded);
-/**
- * napi_disable - prevent NAPI from scheduling
- * @n: NAPI context
- *
- * Stop NAPI from being scheduled on this context.
- * Waits till any outstanding processing completes.
- */
void napi_disable(struct napi_struct *n);
+void napi_disable_locked(struct napi_struct *n);
void napi_enable(struct napi_struct *n);
+void napi_enable_locked(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -642,9 +682,7 @@ struct netdev_queue {
struct Qdisc __rcu *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
-#endif
-#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
- int numa_node;
+ const struct attribute_group **groups;
#endif
unsigned long tx_maxrate;
/*
@@ -656,15 +694,16 @@ struct netdev_queue {
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
+ /* "ops protected", see comment about net_device::lock */
struct xsk_buff_pool *pool;
#endif
- /* NAPI instance for the queue
- * Readers and writers must hold RTNL
- */
- struct napi_struct *napi;
+
/*
* write-mostly part
*/
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
@@ -674,8 +713,16 @@ struct netdev_queue {
unsigned long state;
-#ifdef CONFIG_BQL
- struct dql dql;
+/*
+ * slow- / control-path part
+ */
+ /* NAPI instance for the queue
+ * "ops protected", see comment about net_device::lock
+ */
+ struct napi_struct *napi;
+
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ int numa_node;
#endif
} ____cacheline_aligned_in_smp;
@@ -830,6 +877,7 @@ enum net_device_path_type {
DEV_PATH_PPPOE,
DEV_PATH_DSA,
DEV_PATH_MTK_WDMA,
+ DEV_PATH_TUN,
};
struct net_device_path {
@@ -842,6 +890,18 @@ struct net_device_path {
u8 h_dest[ETH_ALEN];
} encap;
struct {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+
+ u8 l3_proto;
+ } tun;
+ struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
@@ -972,9 +1032,13 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack);
- void (*xdo_dev_state_delete) (struct xfrm_state *x);
- void (*xdo_dev_state_free) (struct xfrm_state *x);
+ int (*xdo_dev_state_add)(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
+ void (*xdo_dev_state_delete)(struct net_device *dev,
+ struct xfrm_state *x);
+ void (*xdo_dev_state_free)(struct net_device *dev,
+ struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
@@ -1064,8 +1128,8 @@ struct netdev_net_notifier {
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Old-style ioctl entry point. This is used internally by the
- * appletalk and ieee802154 subsystems but is no longer called by
- * the device ioctl handler.
+ * ieee802154 subsystem but is no longer called by the device
+ * ioctl handler.
*
* int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Used by the bonding driver for its device specific ioctls:
@@ -1225,12 +1289,17 @@ struct netdev_net_notifier {
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid, u16 flags,
- * struct netlink_ext_ack *extack);
+ * bool *notified, struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s). Otherwise core will send a generic one.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid)
- * Deletes the FDB entry from dev coresponding to addr.
+ *	      const unsigned char *addr, u16 vid,
+ * bool *notified, struct netlink_ext_ack *extack);
+ * Deletes the FDB entry from dev corresponding to addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s). Otherwise core will send a generic one.
* int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
@@ -1405,8 +1474,7 @@ struct net_device_ops {
__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
- int (*ndo_netpoll_setup)(struct net_device *dev,
- struct netpoll_info *info);
+ int (*ndo_netpoll_setup)(struct net_device *dev);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1503,12 +1571,15 @@ struct net_device_ops {
const unsigned char *addr,
u16 vid,
u16 flags,
+ bool *notified,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid, struct netlink_ext_ack *extack);
+ u16 vid,
+ bool *notified,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
struct net_device *dev,
struct netlink_ext_ack *extack);
@@ -1596,6 +1667,14 @@ struct net_device_ops {
int (*ndo_hwtstamp_set)(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_config,
struct netlink_ext_ack *extack);
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_ops: Device shaping offload operations
+ * see include/net/net_shapers.h
+ */
+ const struct net_shaper_ops *net_shaper_ops;
+#endif
};
/**
@@ -1606,7 +1685,8 @@ struct net_device_ops {
* userspace; this means that the order of these flags can change
* during any kernel release.
*
- * You should have a pretty good reason to be extending these flags.
+ * You should add bitfield booleans after either net_device::priv_flags
+ * (hotpath) or ::threaded (slowpath) instead of extending these flags.
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
@@ -1645,10 +1725,6 @@ struct net_device_ops {
* @IFF_NO_ADDRCONF: prevent ipv6 addrconf
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
- * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
- * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to
- * ndo_hwtstamp_set() for all timestamp requests regardless of source,
- * even if those aren't HWTSTAMP_SOURCE_NETDEV.
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1683,42 +1759,8 @@ enum netdev_priv_flags {
IFF_L3MDEV_RX_HANDLER = 1<<29,
IFF_NO_ADDRCONF = BIT_ULL(30),
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
- IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
- IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33),
};
-#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
-#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_BONDING IFF_BONDING
-#define IFF_ISATAP IFF_ISATAP
-#define IFF_WAN_HDLC IFF_WAN_HDLC
-#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
-#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
-#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
-#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
-#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
-#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
-#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
-#define IFF_UNICAST_FLT IFF_UNICAST_FLT
-#define IFF_TEAM_PORT IFF_TEAM_PORT
-#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
-#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
-#define IFF_MACVLAN IFF_MACVLAN
-#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
-#define IFF_NO_QUEUE IFF_NO_QUEUE
-#define IFF_OPENVSWITCH IFF_OPENVSWITCH
-#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
-#define IFF_TEAM IFF_TEAM
-#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
-#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
-#define IFF_MACSEC IFF_MACSEC
-#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
-#define IFF_FAILOVER IFF_FAILOVER
-#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
-#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
-#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
-
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
@@ -1748,6 +1790,13 @@ enum netdev_reg_state {
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
+ * @priv_flags: flags invisible to userspace defined as bits, see
+ * enum netdev_priv_flags for the definitions
+ *	@lltx:		device supports lockless Tx. Deprecated for real HW
+ *			drivers. Mainly used by logical interfaces, such as
+ *			bonding and tunnels.
+ *	@netmem_tx:	device supports netmem TX.
+ *
* @name: This is the first field of the "visible" part of this structure
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
@@ -1797,7 +1846,6 @@ enum netdev_reg_state {
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
- * @wireless_data: Instance data managed by the core of wireless extensions
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
@@ -1814,10 +1862,9 @@ enum netdev_reg_state {
*
* @flags: Interface flags (a la BSD)
* @xdp_features: XDP capability supported by the device
- * @priv_flags: Like 'flags' but invisible to userspace,
- * see if.h for the definitions
* @gflags: Global flags ( kept as legacy )
- * @padded: How much padding added by alloc_netdev()
+ * @priv_len: Size of the ->priv flexible array
+ * @priv: Flexible array containing private data
* @operstate: RFC2863 operstate
* @link_mode: Mapping policy to operstate
* @if_port: Selectable AUI, TP, ...
@@ -1842,6 +1889,7 @@ enum netdev_reg_state {
* @addr_len: Hardware address length
* @upper_level: Maximum depth level of upper devices.
* @lower_level: Maximum depth level of lower devices.
+ * @threaded: napi threaded state.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
@@ -1874,6 +1922,7 @@ enum netdev_reg_state {
* device struct
* @mpls_ptr: mpls_dev struct pointer
* @mctp_ptr: MCTP specific data
+ * @psp_dev: PSP crypto device registered for this netdev
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1883,9 +1932,6 @@ enum netdev_reg_state {
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
- * @gro_flush_timeout: timeout for GRO layer in NAPI
- * @napi_defer_hard_irqs: If not zero, provides a counter that would
- * allow to avoid NIC hard IRQ, on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
@@ -1926,13 +1972,11 @@ enum netdev_reg_state {
*
* @reg_state: Register/unregister state machine
* @dismantle: Device is going to be freed
- * @rtnl_link_state: This enum represents the phases of creating
- * a new link
- *
* @needs_free_netdev: Should unregister perform free_netdev?
* @priv_destructor: Called from unregister
* @npinfo: XXX: need comments on this one
* @nd_net: Network namespace this network device is inside
+ * protected by @lock
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
@@ -1975,6 +2019,7 @@ enum netdev_reg_state {
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
* @priomap: XXX: need comments on this one
+ * @link_topo: Physical link topology tracking attached PHYs
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
@@ -1985,9 +2030,22 @@ enum netdev_reg_state {
* switch driver and used to set the phys state of the
* switch port.
*
- * @wol_enabled: Wake-on-LAN is enabled
+ *	@irq_affinity_auto: driver wants the core to store and re-assign the IRQ
+ *			    affinity. Set by netif_set_affinity_auto(); the
+ *			    driver must then create a persistent NAPI with
+ *			    netif_napi_add_config() and finally bind the NAPI
+ *			    to an IRQ (via netif_napi_set_irq()).
+ *
+ * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
+ * Set by calling netif_enable_cpu_rmap().
*
- * @threaded: napi threaded mode is enabled
+ * @see_all_hwtstamp_requests: device wants to see calls to
+ * ndo_hwtstamp_set() for all timestamp requests
+ * regardless of source, even if those aren't
+ * HWTSTAMP_SOURCE_NETDEV
+ * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
+ * @netns_immutable: interface can't change network namespaces
+ * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
*
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
@@ -1998,6 +2056,7 @@ enum netdev_reg_state {
* @udp_tunnel_nic_info: static structure describing the UDP tunnel
* offload capabilities of the device
* @udp_tunnel_nic: UDP tunnel offload state
+ * @ethtool: ethtool related state
* @xdp_state: stores info on attached XDP BPF programs
*
* @nested_level: Used as a parameter of spin_lock_nested() of
@@ -2026,6 +2085,19 @@ enum netdev_reg_state {
* @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
* where the clock is recovered.
*
+ * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
+ * @napi_config: An array of napi_config structures containing per-NAPI
+ * settings.
+ * @num_napi_configs: number of allocated NAPI config structs,
+ * always >= max(num_rx_queues, num_tx_queues).
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
+ *	@napi_defer_hard_irqs: if not zero, a counter used to defer rearming
+ *				the NIC hard IRQ on busy queues.
+ *
+ * @neighbours: List heads pointing to this device's neighbours'
+ * dev_list, one per address-family.
+ * @hwprov: Tracks which PTP performs hardware packet time stamping.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2038,7 +2110,11 @@ struct net_device {
/* TX read-mostly hotpath */
__cacheline_group_begin(net_device_read_tx);
- unsigned long long priv_flags;
+ struct_group(priv_flags_fast,
+ unsigned long priv_flags:32;
+ unsigned long lltx:1;
+ unsigned long netmem_tx:1;
+ );
const struct net_device_ops *netdev_ops;
const struct header_ops *header_ops;
struct netdev_queue *_tx;
@@ -2088,8 +2164,6 @@ struct net_device {
int ifindex;
unsigned int real_num_rx_queues;
struct netdev_rx_queue *_rx;
- unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
unsigned int gro_max_size;
unsigned int gro_ipv4_max_size;
rx_handler_func_t __rcu *rx_handler;
@@ -2164,7 +2238,6 @@ struct net_device {
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
- struct iw_public_data *wireless_data;
#endif
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -2194,14 +2267,15 @@ struct net_device {
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+ u8 threaded;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
- unsigned short padded;
+ int irq;
+ u32 priv_len;
spinlock_t addr_list_lock;
- int irq;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
@@ -2223,6 +2297,9 @@ struct net_device {
/* Protocol-specific pointers */
struct in_device __rcu *ip_ptr;
+ /** @fib_nh_head: nexthops associated with this netdev */
+ struct hlist_head fib_nh_head;
+
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -2236,7 +2313,7 @@ struct net_device {
void *atalk_ptr;
#endif
#if IS_ENABLED(CONFIG_AX25)
- void *ax25_ptr;
+ struct ax25_dev __rcu *ax25_ptr;
#endif
#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
@@ -2250,6 +2327,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_MCTP)
struct mctp_dev __rcu *mctp_ptr;
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_dev __rcu *psp_dev;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -2309,10 +2389,10 @@ struct net_device {
bool dismantle;
- enum {
- RTNL_LINK_INITIALIZED,
- RTNL_LINK_INITIALIZING,
- } rtnl_link_state:16;
+ /** @moving_ns: device is changing netns, protected by @lock */
+ bool moving_ns;
+ /** @rtnl_link_initializing: Device being created, suppress events */
+ bool rtnl_link_initializing;
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
@@ -2333,7 +2413,7 @@ struct net_device {
struct dm_hw_stat_delta __rcu *dm_private;
#endif
struct device dev;
- const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_groups[5];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
@@ -2367,12 +2447,19 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
+ struct phy_link_topology *link_topo;
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
bool proto_down;
- bool threaded;
- unsigned wol_enabled:1;
+ bool irq_affinity_auto;
+ bool rx_cpu_rmap_auto;
+
+ /* priv_flags_slow, ungrouped to save space */
+ unsigned long see_all_hwtstamp_requests:1;
+ unsigned long change_proto_down:1;
+ unsigned long netns_immutable:1;
+ unsigned long fcoe_mtu:1;
struct list_head net_notifier_list;
@@ -2383,6 +2470,16 @@ struct net_device {
const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;
+ /** @cfg: net_device queue-related configuration */
+ struct netdev_config *cfg;
+ /**
+ * @cfg_pending: same as @cfg but when device is being actively
+ * reconfigured includes any changes to the configuration
+ * requested by the user, but which may or may not be rejected.
+ */
+ struct netdev_config *cfg_pending;
+ struct ethtool_netdev_state *ethtool;
+
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
@@ -2401,7 +2498,86 @@ struct net_device {
/** @page_pools: page pools created for this netdevice */
struct hlist_head page_pools;
#endif
-};
+
+ /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
+ struct dim_irq_moder *irq_moder;
+
+ u64 max_pacing_offload_horizon;
+ struct napi_config *napi_config;
+ u32 num_napi_configs;
+ u32 napi_defer_hard_irqs;
+ unsigned long gro_flush_timeout;
+
+ /**
+ * @up: copy of @state's IFF_UP, but safe to read with just @lock.
+ * May report false negatives while the device is being opened
+ * or closed (@lock does not protect .ndo_open, or .ndo_close).
+ */
+ bool up;
+
+ /**
+ * @request_ops_lock: request the core to run all @netdev_ops and
+ * @ethtool_ops under the @lock.
+ */
+ bool request_ops_lock;
+
+ /**
+ * @lock: netdev-scope lock, protects a small selection of fields.
+ * Should always be taken using netdev_lock() / netdev_unlock() helpers.
+ * Drivers are free to use it for other protection.
+ *
+ * For the drivers that implement shaper or queue API, the scope
+ * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
+ * operations. Drivers may opt-in to this behavior by setting
+ * @request_ops_lock.
+ *
+ * @lock protection mixes with rtnl_lock in multiple ways, fields are
+ * either:
+ *
+ * - simply protected by the instance @lock;
+ *
+ * - double protected - writers hold both locks, readers hold either;
+ *
+ * - ops protected - protected by the lock held around the NDOs
+ * and other callbacks, that is the instance lock on devices for
+ * which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
+ *
+ * - double ops protected - always protected by rtnl_lock but for
+ * devices for which netdev_need_ops_lock() returns true - also
+ * the instance lock.
+ *
+ * Simply protects:
+ * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
+ * @net_shaper_hierarchy, @reg_state, @threaded
+ *
+ * Double protects:
+ * @up, @moving_ns, @nd_net, @xdp_features
+ *
+ * Double ops protects:
+ * @real_num_rx_queues, @real_num_tx_queues
+ *
+ * Also protects some fields in:
+ * struct napi_struct, struct netdev_queue, struct netdev_rx_queue
+ *
+ * Ordering: take after rtnl_lock.
+ */
+ struct mutex lock;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_hierarchy: data tracking the current shaper status
+ * see include/net/net_shapers.h
+ */
+ struct net_shaper_hierarchy *net_shaper_hierarchy;
+#endif
+
+ struct hlist_head neighbours[NEIGH_NR_TABLES];
+
+ struct hwtstamp_provider __rcu *hwprov;
+
+ u8 priv[] ____cacheline_aligned
+ __counted_by(priv_len);
+} ____cacheline_aligned;
#define to_net_dev(d) container_of(d, struct net_device, dev)
/*
@@ -2504,21 +2680,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
-#define netdev_lockdep_set_classes(dev) \
-{ \
- static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_xmit_lock_key; \
- static struct lock_class_key dev_addr_list_lock_key; \
- unsigned int i; \
- \
- (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
- for (i = 0; i < (dev)->num_tx_queues; i++) \
- lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
- &qdisc_xmit_lock_key); \
-}
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -2578,6 +2739,12 @@ struct net *dev_net(const struct net_device *dev)
}
static inline
+struct net *dev_net_rcu(const struct net_device *dev)
+{
+ return read_pnet_rcu(&dev->nd_net);
+}
+
+static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
write_pnet(&dev->nd_net, net);
@@ -2591,7 +2758,7 @@ void dev_net_set(struct net_device *dev, struct net *net)
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
+ return (void *)dev->priv;
}
/* Set the sysfs physical device reference for the network logical device
@@ -2609,9 +2776,24 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
+static inline void netdev_lock(struct net_device *dev)
+{
+ mutex_lock(&dev->lock);
+}
+
+static inline void netdev_unlock(struct net_device *dev)
+{
+ mutex_unlock(&dev->lock);
+}
+/* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
+
+void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
+
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
{
- napi->irq = irq;
+ netdev_lock(napi->dev);
+ netif_napi_set_irq_locked(napi, irq);
+ netdev_unlock(napi->dev);
}
/* Default NAPI poll() weight
@@ -2619,8 +2801,19 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
*/
#define NAPI_POLL_WEIGHT 64
-void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
+
+static inline void
+netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netdev_lock(dev);
+ netif_napi_add_weight_locked(dev, napi, poll, weight);
+ netdev_unlock(dev);
+}
/**
* netif_napi_add() - initialize a NAPI context
@@ -2639,6 +2832,13 @@ netif_napi_add(struct net_device *dev, struct napi_struct *napi,
}
static inline void
+netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
netif_napi_add_tx_weight(struct net_device *dev,
struct napi_struct *napi,
int (*poll)(struct napi_struct *, int),
@@ -2648,6 +2848,31 @@ netif_napi_add_tx_weight(struct net_device *dev,
netif_napi_add_weight(dev, napi, poll, weight);
}
+static inline void
+netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ napi->index = index;
+ napi->config = &dev->napi_config[index];
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+/**
+ * netif_napi_add_config - initialize a NAPI context with persistent config
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
+ * @index: the NAPI index
+ */
+static inline void
+netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ netdev_lock(dev);
+ netif_napi_add_config_locked(dev, napi, poll, index);
+ netdev_unlock(dev);
+}
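A hedged driver-side sketch of the persistent-config path: each queue's NAPI is registered with a stable index so its settings in dev->napi_config survive teardown and re-creation (the my_* types are hypothetical):

/* Sketch: key each RX queue's NAPI to a persistent config slot. */
static void my_init_napis(struct my_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		netif_napi_add_config(priv->netdev, &priv->rxq[i].napi,
				      my_poll, i);
}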
+
/**
* netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
* @dev: network device
@@ -2665,6 +2890,8 @@ static inline void netif_napi_add_tx(struct net_device *dev,
netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
+void __netif_napi_del_locked(struct napi_struct *napi);
+
/**
* __netif_napi_del - remove a NAPI context
* @napi: NAPI context
@@ -2673,7 +2900,18 @@ static inline void netif_napi_add_tx(struct net_device *dev,
* containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one.
*/
-void __netif_napi_del(struct napi_struct *napi);
+static inline void __netif_napi_del(struct napi_struct *napi)
+{
+ netdev_lock(napi->dev);
+ __netif_napi_del_locked(napi);
+ netdev_unlock(napi->dev);
+}
+
+static inline void netif_napi_del_locked(struct napi_struct *napi)
+{
+ __netif_napi_del_locked(napi);
+ synchronize_net();
+}
/**
* netif_napi_del - remove a NAPI context
@@ -2687,6 +2925,9 @@ static inline void netif_napi_del(struct napi_struct *napi)
synchronize_net();
}
+int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
+void netif_set_affinity_auto(struct net_device *dev);
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
@@ -2731,12 +2972,12 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_dstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_drops;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_drops;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t rx_drops;
+ u64_stats_t tx_drops;
struct u64_stats_sync syncp;
} __aligned(8 * sizeof(u64));
@@ -2780,6 +3021,56 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
u64_stats_update_end(&lstats->syncp);
}
+static inline void dev_dstats_rx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_rx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
+ unsigned int packets)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_add(&dstats->rx_drops, packets);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
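On the read side, these u64_stats_t counters are snapshotted per CPU under the syncp sequence and summed; a hedged ndo_get_stats64-style sketch (RX fields only):

/* Sketch: fold pcpu_dstats into rtnl_link_stats64, retrying on races. */
static void my_get_dstats(const struct net_device *dev,
			  struct rtnl_link_stats64 *s)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_dstats *ds = per_cpu_ptr(dev->dstats, cpu);
		unsigned int start;
		u64 rxp, rxb;

		do {
			start = u64_stats_fetch_begin(&ds->syncp);
			rxp = u64_stats_read(&ds->rx_packets);
			rxb = u64_stats_read(&ds->rx_bytes);
		} while (u64_stats_fetch_retry(&ds->syncp, start));

		s->rx_packets += rxp;
		s->rx_bytes += rxb;
	}
}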
+
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -3016,12 +3307,13 @@ int call_netdevice_notifiers_info(unsigned long val,
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
- for_each_netdev_rcu(&init_net, slave) \
+ for_each_netdev_rcu(dev_net_rcu(bond), slave) \
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
#define for_each_netdev_dump(net, d, ifindex) \
- xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
+ for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
+ ULONG_MAX, XA_PRESENT)); ifindex++)
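The xa_find()-based form lets a dump resume exactly where it left off; a hedged sketch of a netlink dump callback that persists ifindex across invocations (my_fill_dev() is hypothetical, locking simplified):

/* Sketch: resumable per-device dump keyed by ifindex. */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long ifindex = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ifindex) {
		err = my_fill_dev(skb, dev);
		if (err)
			break;
	}
	rtnl_unlock();

	cb->args[0] = ifindex;	/* resume point for the next call */
	return err;
}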
static inline struct net_device *next_net_device(struct net_device *dev)
{
@@ -3049,14 +3341,9 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
-static inline struct net_device *first_net_device_rcu(struct net *net)
-{
- struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
-
- return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
-}
-
int netdev_boot_setup_check(struct net_device *dev);
+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
+ const char *hwaddr);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
@@ -3070,22 +3357,21 @@ int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack);
-struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
- unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
+int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
+void netif_close(struct net_device *dev);
void dev_close(struct net_device *dev);
-void dev_close_many(struct list_head *head, bool unlink);
+void netif_close_many(struct list_head *head, bool unlink);
+void netif_disable_lro(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
@@ -3121,8 +3407,6 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
-void netdev_freemem(struct net_device *dev);
-void init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
@@ -3133,10 +3417,12 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
+ unsigned short flags, unsigned short mask);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-struct net_device *dev_get_by_napi_id(unsigned int napi_id);
void netdev_copy_name(struct net_device *dev, char *name);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -3194,12 +3480,39 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
+struct numa_drop_counters {
+ atomic_t drops0 ____cacheline_aligned_in_smp;
+ atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
+static inline int numa_drop_read(const struct numa_drop_counters *ndc)
+{
+ return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
+}
+
+static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
+{
+ int n = numa_node_id() % 2;
+
+ if (n)
+ atomic_add(val, &ndc->drops1);
+ else
+ atomic_add(val, &ndc->drops0);
+}
+
+static inline void numa_drop_reset(struct numa_drop_counters *ndc)
+{
+ atomic_set(&ndc->drops0, 0);
+ atomic_set(&ndc->drops1, 0);
+}
+
/*
* Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
struct sk_buff_head process_queue;
+ local_lock_t process_queue_bh_lock;
/* stats */
unsigned int processed;
@@ -3222,13 +3535,7 @@ struct softnet_data {
struct sk_buff_head xfrm_backlog;
#endif
/* written and read only by owning cpu: */
- struct {
- u16 recursion;
- u8 more;
-#ifdef CONFIG_NET_EGRESS
- u8 skip_txqueue;
-#endif
- } xmit;
+ struct netdev_xmit xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3239,27 +3546,45 @@ struct softnet_data {
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
+
+ /* We force a cacheline alignment from here, to hold together
+ * input_queue_tail, input_pkt_queue and backlog.state.
+ * We add holes so that backlog.state is the last field
+ * of this cache line.
+ */
+ long pad[3] ____cacheline_aligned_in_smp;
unsigned int input_queue_tail;
#endif
struct sk_buff_head input_pkt_queue;
+
struct napi_struct backlog;
- atomic_t dropped ____cacheline_aligned_in_smp;
+ struct numa_drop_counters drop_counters;
- /* Another possibly contended cache line */
- spinlock_t defer_lock ____cacheline_aligned_in_smp;
- int defer_count;
- int defer_ipi_scheduled;
- struct sk_buff *defer_list;
+ int defer_ipi_scheduled ____cacheline_aligned_in_smp;
call_single_data_t defer_csd;
};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+struct page_pool_bh {
+ struct page_pool *pool;
+ local_lock_t bh_lock;
+};
+DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
+
+#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
+#else
+static inline int dev_recursion_level(void)
+{
+ return current->net_xmit.recursion;
+}
+
+#endif
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -3324,6 +3649,12 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
/* Must be an atomic op see netif_txq_try_stop() */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -3450,6 +3781,12 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
if (likely(dql_avail(&dev_queue->dql) >= 0))
return;
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
/*
@@ -3457,7 +3794,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
* because in netdev_tx_completed_queue we update the dql_completed
* before checking the XOFF flag.
*/
- smp_mb();
+ smp_mb__after_atomic();
/* check again in case another CPU has just made room avail */
if (unlikely(dql_avail(&dev_queue->dql) >= 0))
@@ -3527,7 +3864,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
dql_completed(&dev_queue->dql, bytes);
/*
- * Without the memory barrier there is a small possiblity that
+ * Without the memory barrier there is a small possibility that
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
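Example (editor's sketch, not part of the patch): the two BQL call sites these paired barriers coordinate; the pkts_done/bytes_done bookkeeping is driver-specific and illustrative:

/* transmit path, after posting the descriptor */
netdev_tx_sent_queue(txq, skb->len);

/* completion path (e.g. TX interrupt), after reclaiming descriptors */
netdev_tx_completed_queue(txq, pkts_done, bytes_done);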
@@ -3566,6 +3903,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
}
/**
+ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
+ * @dev: network device
+ * @qid: stack index of the queue to reset
+ */
+static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
+ u32 qid)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
+}
+
+/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
*
@@ -3574,7 +3922,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
- netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+ netdev_tx_reset_subqueue(dev_queue, 0);
}
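Example (editor's sketch, not part of the patch): the new per-subqueue variant in a ring-reconfiguration path; my_free_ring() is hypothetical:

unsigned int i;

for (i = 0; i < dev->real_num_tx_queues; i++) {
        my_free_ring(priv, i);
        netdev_tx_reset_subqueue(dev, i);       /* drop stale BQL state */
}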
/**
@@ -3712,7 +4060,7 @@ static inline bool netif_attr_test_mask(unsigned long j,
* @online_mask: bitmask for CPUs/Rx queues that are online
* @nr_bits: number of bits in the bitmask
*
- * Returns true if a CPU/Rx queue is online.
+ * Returns: true if a CPU/Rx queue is online.
*/
static inline bool netif_attr_test_online(unsigned long j,
const unsigned long *online_mask,
@@ -3732,7 +4080,8 @@ static inline bool netif_attr_test_online(unsigned long j,
* @srcp: the cpumask/Rx queue mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set.
+ * Returns: next (after n) CPU/Rx queue index in the mask;
+ * >= nr_bits if no further CPUs/Rx queues set.
*/
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
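Example (editor's sketch, not part of the patch): with the clarified return convention, a walk over set bits reads as below; mask and nr_bits are assumed to come from the caller:

unsigned int j = netif_attrmask_next(-1, mask, nr_bits);

while (j < nr_bits) {
        /* handle CPU/Rx queue j */
        j = netif_attrmask_next(j, mask, nr_bits);
}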
@@ -3754,7 +4103,8 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
* @src2p: the second CPUs/Rx queues mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ * Returns: next (after n) CPU/Rx queue index set in both masks;
+ * >= nr_bits if no further CPUs/Rx queues set in both.
*/
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
@@ -3801,17 +4151,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
}
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-
-#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
-#else
-static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxqs)
-{
- dev->real_num_rx_queues = rxqs;
- return 0;
-}
-#endif
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq);
@@ -3860,9 +4200,9 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog);
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
+ const struct bpf_prog *xdp_prog);
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
@@ -3870,10 +4210,15 @@ int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
-gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
-void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);
+
+static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
+ struct sk_buff *skb)
+{
+ return gro_receive_skb(&napi->gro, skb);
+}
+
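Example (editor's sketch, not part of the patch): driver-visible behaviour is unchanged — a NAPI poll loop keeps calling napi_gro_receive(), which now forwards to the napi's embedded gro_node. my_fetch_rx_skb() is hypothetical:

static int my_poll(struct napi_struct *napi, int budget)
{
        struct sk_buff *skb;
        int work = 0;

        while (work < budget && (skb = my_fetch_rx_skb(napi))) {
                napi_gro_receive(napi, skb);
                work++;
        }
        if (work < budget)
                napi_complete_done(napi, work);
        return work;
}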
struct sk_buff *napi_get_frags(struct napi_struct *napi);
-void napi_get_frags_check(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline void napi_free_frags(struct napi_struct *napi)
@@ -3898,41 +4243,43 @@ int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int dev_eth_ioctl(struct net_device *dev,
+ struct ifreq *ifr, unsigned int cmd);
int generic_hwtstamp_get_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg);
int generic_hwtstamp_set_lower(struct net_device *dev,
struct kernel_hwtstamp_config *kernel_cfg,
struct netlink_ext_ack *extack);
-int dev_set_hwtstamp_phylib(struct net_device *dev,
- struct kernel_hwtstamp_config *cfg,
- struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
-unsigned int dev_get_flags(const struct net_device *);
+unsigned int netif_get_flags(const struct net_device *dev);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
+int netif_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
+int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
- const char *pat, int new_ifindex);
-static inline
+ const char *pat, int new_ifindex,
+ struct netlink_ext_ack *extack);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
- const char *pat)
-{
- return __dev_change_net_namespace(dev, net, pat, 0);
-}
-int __dev_set_mtu(struct net_device *, int);
+ const char *pat);
+int __netif_set_mtu(struct net_device *dev, int new_mtu);
+int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
-int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
- struct netlink_ext_ack *extack);
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
-int dev_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid, bool recurse);
+int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
+int netif_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
@@ -3941,8 +4288,13 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
+int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
+u8 dev_xdp_sb_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
+u32 dev_get_min_mp_channel_count(const struct net_device *dev);
+
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
@@ -4005,7 +4357,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
return 0;
}
-bool dev_nit_active(struct net_device *dev);
+bool dev_nit_active_rcu(const struct net_device *dev);
+static inline bool dev_nit_active(const struct net_device *dev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = dev_nit_active_rcu(dev);
+ rcu_read_unlock();
+ return ret;
+}
+
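Example (editor's sketch, not part of the patch): callers that already run under rcu_read_lock() can use the new _rcu variant directly and skip the extra lock/unlock; the wrapper preserves the old calling convention:

static void my_deliver_to_taps(struct sk_buff *skb, struct net_device *dev)
{
        rcu_read_lock();
        if (dev_nit_active_rcu(dev))
                dev_queue_xmit_nit(skb, dev);   /* clone to packet taps */
        rcu_read_unlock();
}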
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
static inline void __dev_put(struct net_device *dev)
@@ -4136,6 +4498,7 @@ void linkwatch_fire_event(struct net_device *dev);
* pending work list (if queued).
*/
void linkwatch_sync_dev(struct net_device *dev);
+void __linkwatch_sync_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -4150,7 +4513,7 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
unsigned long dev_trans_start(struct net_device *dev);
-void __netdev_watchdog_up(struct net_device *dev);
+void netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
void netif_carrier_off(struct net_device *dev);
@@ -4395,9 +4758,10 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
/*
* txq->trans_start can be read locklessly from dev_watchdog()
*/
-static inline void txq_trans_update(struct netdev_queue *txq)
+static inline void txq_trans_update(const struct net_device *dev,
+ struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
+ if (!dev->lltx)
WRITE_ONCE(txq->trans_start, jiffies);
}
@@ -4440,7 +4804,7 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_LOCK(dev, txq, cpu) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_lock(txq, cpu); \
} else { \
__netif_tx_acquire(txq); \
@@ -4448,12 +4812,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_TRYLOCK(dev, txq) \
- (((dev->features & NETIF_F_LLTX) == 0) ? \
+ (!(dev)->lltx ? \
__netif_tx_trylock(txq) : \
__netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_unlock(txq); \
} else { \
__netif_tx_release(txq); \
@@ -4544,6 +4908,9 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev);
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
+int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
@@ -4598,7 +4965,7 @@ void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
/**
- * __dev_uc_sync - Synchonize device's unicast list
+ * __dev_uc_sync - Synchronize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
@@ -4642,7 +5009,7 @@ void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
/**
- * __dev_mc_sync - Synchonize device's multicast list
+ * __dev_mc_sync - Synchronize device's multicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
@@ -4675,8 +5042,11 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
+int netif_set_promiscuity(struct net_device *dev, int inc);
int dev_set_promiscuity(struct net_device *dev, int inc);
+int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
+void netif_state_change(struct net_device *dev);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
@@ -4817,10 +5187,9 @@ void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+void ethtool_notify(struct net_device *dev, unsigned int cmd);
#else
-static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
{
}
#endif
@@ -4874,18 +5243,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev,
return hwtstamps->hwtstamp;
}
-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
- struct sk_buff *skb, struct net_device *dev,
- bool more)
+#ifndef CONFIG_PREEMPT_RT
+static inline void netdev_xmit_set_more(bool more)
{
__this_cpu_write(softnet_data.xmit.more, more);
- return ops->ndo_start_xmit(skb, dev);
}
static inline bool netdev_xmit_more(void)
{
return __this_cpu_read(softnet_data.xmit.more);
}
+#else
+static inline void netdev_xmit_set_more(bool more)
+{
+ current->net_xmit.more = more;
+}
+
+static inline bool netdev_xmit_more(void)
+{
+ return current->net_xmit.more;
+}
+#endif
+
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ netdev_xmit_set_more(more);
+ return ops->ndo_start_xmit(skb, dev);
+}
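Example (editor's sketch, not part of the patch): the split accessors preserve the usual driver pattern of deferring the doorbell while more packets are queued in the same batch. my_post_descriptor() and my_ring_doorbell() are hypothetical:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        my_post_descriptor(priv, skb);
        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                my_ring_doorbell(priv);         /* last skb of the batch */
        return NETDEV_TX_OK;
}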
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
@@ -4895,7 +5281,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
- txq_trans_update(txq);
+ txq_trans_update(dev, txq);
return rc;
}
@@ -4943,6 +5329,7 @@ static inline netdev_features_t netdev_add_tso_features(netdev_features_t featur
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
@@ -4955,13 +5342,18 @@ void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature;
+
+ if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
+ gso_type |= __SKB_GSO_TCP_FIXEDID;
+
+ feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
@@ -4977,6 +5369,8 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
+ (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -5000,6 +5394,24 @@ void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
const struct net_device *from);
+static inline unsigned int
+netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
+{
+ /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gro_max_size) :
+ READ_ONCE(dev->gro_ipv4_max_size);
+}
+
+static inline unsigned int
+netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
+{
+ /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gso_max_size) :
+ READ_ONCE(dev->gso_ipv4_max_size);
+}
+
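Example (editor's sketch, not part of the patch): consuming the protocol-aware limit; the predicate is illustrative, not an in-tree helper:

static bool my_within_gso_limit(const struct net_device *dev,
                                const struct sk_buff *skb)
{
        /* IPv6 uses gso_max_size, IPv4 uses gso_ipv4_max_size */
        return skb->len <= netif_get_gso_max_size(dev, skb);
}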
static inline bool netif_is_macsec(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACSEC;
diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h
new file mode 100644
index 000000000000..cc232508e695
--- /dev/null
+++ b/include/linux/netdevice_xmit.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_NETDEVICE_XMIT_H
+#define _LINUX_NETDEVICE_XMIT_H
+
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+#define MIRRED_NEST_LIMIT 4
+#endif
+
+struct net_device;
+
+struct netdev_xmit {
+ u16 recursion;
+ u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+ u8 sched_mirred_nest;
+ struct net_device *sched_mirred_dev[MIRRED_NEST_LIMIT];
+#endif
+#if IS_ENABLED(CONFIG_NF_DUP_NETDEV)
+ u8 nf_dup_skb_recursion;
+#endif
+};
+
+#endif
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 2683b2b77612..efbbfa770d66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -92,9 +92,13 @@ enum nf_hook_ops_type {
NF_HOOK_OP_UNDEFINED,
NF_HOOK_OP_NF_TABLES,
NF_HOOK_OP_BPF,
+ NF_HOOK_OP_NFT_FT,
};
struct nf_hook_ops {
+ struct list_head list;
+ struct rcu_head rcu;
+
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
@@ -376,15 +380,11 @@ int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
-enum ip_conntrack_dir;
struct nf_nat_hook {
int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
const struct nlattr *attr);
void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
- unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir);
void (*remove_nat_bysrc)(struct nf_conn *ct);
};
@@ -474,6 +474,7 @@ struct nf_ct_hook {
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
void (*set_closing)(struct nf_conntrack *nfct);
int (*confirm)(struct sk_buff *skb);
+ u32 (*get_id)(const struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
@@ -502,17 +503,6 @@ extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;
/*
- * nf_skb_duplicated - TEE target has sent a packet
- *
- * When a xtables target sends a packet, the OUTPUT and POSTROUTING
- * hooks are traversed again, i.e. nft and xtables are invoked recursively.
- *
- * This is used by xtables TEE target to prevent the duplicated skb from
- * being duplicated again.
- */
-DECLARE_PER_CPU(bool, nf_skb_duplicated);
-
-/*
* Contains bitmask of ctnetlink event subscribers, if any.
* Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
*/
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
deleted file mode 100644
index c509ed76e714..000000000000
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_CONNTRACK_DCCP_H
-#define _NF_CONNTRACK_DCCP_H
-
-/* Exposed to userspace over nfnetlink */
-enum ct_dccp_states {
- CT_DCCP_NONE,
- CT_DCCP_REQUEST,
- CT_DCCP_RESPOND,
- CT_DCCP_PARTOPEN,
- CT_DCCP_OPEN,
- CT_DCCP_CLOSEREQ,
- CT_DCCP_CLOSING,
- CT_DCCP_TIMEWAIT,
- CT_DCCP_IGNORE,
- CT_DCCP_INVALID,
- __CT_DCCP_MAX
-};
-#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
-
-enum ct_dccp_roles {
- CT_DCCP_ROLE_CLIENT,
- CT_DCCP_ROLE_SERVER,
- __CT_DCCP_ROLE_MAX
-};
-#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
-
-#include <linux/netfilter/nf_conntrack_tuple_common.h>
-
-struct nf_ct_dccp {
- u_int8_t role[IP_CT_DIR_MAX];
- u_int8_t state;
- u_int8_t last_pkt;
- u_int8_t last_dir;
- u_int64_t handshake_seq;
-};
-
-#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 5897f3dbaf7c..77c778d84d4c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -51,21 +51,11 @@ static inline struct net_device *xt_in(const struct xt_action_param *par)
return par->state->in;
}
-static inline const char *xt_inname(const struct xt_action_param *par)
-{
- return par->state->in->name;
-}
-
static inline struct net_device *xt_out(const struct xt_action_param *par)
{
return par->state->out;
}
-static inline const char *xt_outname(const struct xt_action_param *par)
-{
- return par->state->out->name;
-}
-
static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
return par->state->hook;
@@ -357,7 +347,7 @@ extern struct static_key xt_tee_enabled;
* Begin packet processing : all readers must wait the end
* 1) Must be called with preemption disabled
* 2) softirqs must be disabled too (or we should use this_cpu_add())
- * Returns :
+ * Returns:
* 1 if no recursion on this cpu
* 0 if recursion detected
*/
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
index 8676316547cc..3175073a66ba 100644
--- a/include/linux/netfilter_netdev.h
+++ b/include/linux/netfilter_netdev.h
@@ -66,7 +66,6 @@ static inline bool nf_hook_egress_active(void)
* @rc: result code which shall be returned by __dev_queue_xmit() on failure
* @dev: netdev whose egress hooks shall be applied to @skb
*
- * Returns @skb on success or %NULL if the packet was consumed or filtered.
* Caller must hold rcu_read_lock.
*
* On ingress, packets are classified first by tc, then by netfilter.
@@ -81,6 +80,8 @@ static inline bool nf_hook_egress_active(void)
* called recursively by tunnel drivers such as vxlan, the flag is reverted to
* false after sch_handle_egress(). This ensures that netfilter is applied
* both on the overlay and underlying network.
+ *
+ * Returns: @skb on success or %NULL if the packet was consumed or filtered.
*/
static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
struct net_device *dev)
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index d2d291a9cdad..72ee7d210a74 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -18,9 +18,11 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
+#include <linux/rolling_buffer.h>
enum netfs_sreq_ref_trace;
-typedef struct mempool_s mempool_t;
+typedef struct mempool mempool_t;
+struct folio_queue;
/**
* folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
@@ -38,22 +40,17 @@ static inline void folio_start_private_2(struct folio *folio)
folio_set_private_2(folio);
}
-/* Marks used on xarray-based buffers */
-#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
-#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
-
enum netfs_io_source {
+ NETFS_SOURCE_UNKNOWN,
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
NETFS_INVALID_READ,
NETFS_UPLOAD_TO_SERVER,
NETFS_WRITE_TO_CACHE,
- NETFS_INVALID_WRITE,
} __mode(byte);
-typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
- bool was_async);
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error);
/*
* Per-inode context. This wraps the VFS inode.
@@ -68,12 +65,12 @@ struct netfs_inode {
loff_t remote_i_size; /* Size of the remote file */
loff_t zero_point; /* Size after which we assume there's no data
* on the server */
+ atomic_t io_count; /* Number of outstanding reqs */
unsigned long flags;
#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
-#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
-#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
- * write to cache on read */
+#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
+#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */
};
/*
@@ -134,9 +131,11 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio)
struct netfs_io_stream {
/* Submission tracking */
struct netfs_io_subrequest *construct; /* Op being constructed */
+ size_t sreq_max_len; /* Maximum size of a subrequest */
+ unsigned int sreq_max_segs; /* 0 or max number of segments in an iterator */
unsigned int submit_off; /* Folio offset we're submitting from */
unsigned int submit_len; /* Amount of data left to submit */
- unsigned int submit_max_len; /* Amount I/O can be rounded up to */
+ unsigned int submit_extendable_to; /* Amount I/O can be rounded up to */
void (*prepare_write)(struct netfs_io_subrequest *subreq);
void (*issue_write)(struct netfs_io_subrequest *subreq);
/* Collection tracking */
@@ -144,13 +143,14 @@ struct netfs_io_stream {
struct netfs_io_subrequest *front; /* Op being collected */
unsigned long long collected_to; /* Position we've collected results to */
size_t transferred; /* The amount transferred from this stream */
- enum netfs_io_source source; /* Where to read from/write to */
unsigned short error; /* Aggregate error for the stream */
+ enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* Index of stream in parent table */
bool avail; /* T if stream is available */
bool active; /* T if stream is active */
bool need_retry; /* T if this stream needs retrying */
bool failed; /* T if this stream failed */
+ bool transferred_valid; /* T if ->transferred is valid */
};
/*
@@ -177,40 +177,41 @@ struct netfs_io_subrequest {
struct list_head rreq_link; /* Link in rreq->subrequests */
struct iov_iter io_iter; /* Iterator for this subrequest */
unsigned long long start; /* Where to start the I/O */
- size_t max_len; /* Maximum size of the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
unsigned int nr_segs; /* Number of segs in io_iter */
- unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
+ u8 retry_count; /* The number of retries (0 on initial pass) */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* I/O stream this belongs to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */
-#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
-#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */
#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */
#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
-#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */
-#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */
+#define NETFS_SREQ_FAILED 10 /* Set if the subreq failed unretryably */
};
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
+ NETFS_READ_SINGLE, /* This read should be treated as a single object */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
- NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */
+ NETFS_UNBUFFERED_READ, /* This is an unbuffered read */
+ NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
- NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_DIO_WRITE, /* This is a direct I/O write */
+ NETFS_PGPRIV2_COPY_TO_CACHE, /* [DEPRECATED] This is writing read data to the cache */
nr__netfs_io_origin
} __mode(byte);
@@ -220,65 +221,65 @@ enum netfs_io_origin {
*/
struct netfs_io_request {
union {
- struct work_struct work;
+ struct work_struct cleanup_work; /* Deferred cleanup work */
struct rcu_head rcu;
};
+ struct work_struct work; /* Result collector work */
struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */
struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */
+#ifdef CONFIG_PROC_FS
struct list_head proc_link; /* Link in netfs_iorequests */
- struct list_head subrequests; /* Contributory I/O operations */
+#endif
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */
- struct iov_iter iter; /* Unencrypted-side iterator */
- struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
+ struct rolling_buffer buffer; /* Unencrypted buffer */
+#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1
+#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2
+ wait_queue_head_t waitq; /* Processor waiter */
void *netfs_priv; /* Private data for the netfs */
void *netfs_priv2; /* Private data for the netfs */
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
- unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
- unsigned int debug_id;
- unsigned int rsize; /* Maximum read size (0 for none) */
- unsigned int wsize; /* Maximum write size (0 for none) */
- atomic_t subreq_counter; /* Next subreq->debug_index */
- unsigned int nr_group_rel; /* Number of refs to release on ->group */
- spinlock_t lock; /* Lock for queuing subreqs */
- atomic_t nr_outstanding; /* Number of ops in progress */
- atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
- size_t upper_len; /* Length can be extended to here */
unsigned long long submitted; /* Amount submitted for I/O so far */
unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
- short error; /* 0 or error that occurred */
- enum netfs_io_origin origin; /* Origin of the request */
- bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
+ long error; /* 0 or error that occurred */
unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */
- unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */
unsigned long long collected_to; /* Point we've collected to */
unsigned long long cleaned_to; /* Position we've cleaned folios to */
+ unsigned long long abandon_to; /* Position to abandon folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
+ unsigned int debug_id;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ atomic_t subreq_counter; /* Next subreq->debug_index */
+ unsigned int nr_group_rel; /* Number of refs to release on ->group */
+ spinlock_t lock; /* Lock for queuing subreqs */
+ unsigned char front_folio_order; /* Order (size) of front folio */
+ enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
-#define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */
-#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
-#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
-#define NETFS_RREQ_FAILED 4 /* The request failed */
-#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
-#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
-#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
-#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
-#define NETFS_RREQ_BLOCKED 10 /* We blocked */
-#define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */
+#define NETFS_RREQ_IN_PROGRESS 0 /* Unlocked when the request completes (has ref) */
+#define NETFS_RREQ_ALL_QUEUED 1 /* All subreqs are now queued */
+#define NETFS_RREQ_PAUSE 2 /* Pause subrequest generation */
+#define NETFS_RREQ_FAILED 3 /* The request failed */
+#define NETFS_RREQ_RETRYING 4 /* Set if we're in the retry path */
+#define NETFS_RREQ_SHORT_TRANSFER 5 /* Set if we have a short transfer */
+#define NETFS_RREQ_OFFLOAD_COLLECTION 8 /* Offload collection to workqueue */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 9 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 10 /* Copy current folio to cache from read */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 11 /* Need to write to the server */
#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
-#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */
#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
* write to cache on read */
const struct netfs_request_ops *netfs_ops;
- void (*cleanup)(struct netfs_io_request *req);
};
/*
@@ -293,7 +294,7 @@ struct netfs_request_ops {
/* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
- bool (*clamp_length)(struct netfs_io_subrequest *subreq);
+ int (*prepare_read)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
bool (*is_still_valid)(struct netfs_io_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
@@ -317,7 +318,6 @@ struct netfs_request_ops {
*/
enum netfs_read_from_hole {
NETFS_READ_HOLE_IGNORE,
- NETFS_READ_HOLE_CLEAR,
NETFS_READ_HOLE_FAIL,
};
@@ -404,6 +404,13 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
struct netfs_group *netfs_group);
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+/* Single, monolithic object read/write API. */
+void netfs_single_mark_inode_dirty(struct inode *inode);
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter);
+
/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
@@ -423,20 +430,19 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
/* (Sub)request management API. */
-void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
- bool was_async, enum netfs_sreq_ref_trace what);
+ enum netfs_sreq_ref_trace what);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new,
iov_iter_extraction_t extraction_flags);
size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
size_t max_size, size_t max_segs);
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
- bool was_async);
-void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);
int netfs_start_io_read(struct inode *inode);
void netfs_end_io_read(struct inode *inode);
@@ -445,6 +451,18 @@ void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int trace /*enum netfs_folioq_trace*/);
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int trace /*enum netfs_trace_folioq*/);
+
+/* Buffer wrangling helpers API. */
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp);
+void netfs_free_folioq_buffer(struct folio_queue *fq);
+
/**
* netfs_inode - Get the netfs inode context from the inode
* @inode: The inode to query
@@ -474,6 +492,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx,
ctx->remote_i_size = i_size_read(&ctx->inode);
ctx->zero_point = LLONG_MAX;
ctx->flags = 0;
+ atomic_set(&ctx->io_count, 0);
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
@@ -517,4 +536,20 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
#endif
}
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @inode: The netfs inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete. This is intended
+ * to be called from inode eviction routines. This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
+
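Example (editor's sketch, not part of the patch): the intended call site is a filesystem's ->evict_inode; my_evict_inode() and the ordering relative to truncation are illustrative:

static void my_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        netfs_wait_for_outstanding_io(inode);   /* drain before teardown */
        clear_inode(inode);
}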
#endif /* _LINUX_NETFS_H */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 5df7340d4dab..882e9c1b6c1d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,6 +34,7 @@ struct netlink_skb_parms {
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
+#define NETLINK_CTX_SIZE 48
void netlink_table_grab(void);
@@ -47,7 +48,6 @@ struct netlink_kernel_cfg {
unsigned int groups;
unsigned int flags;
void (*input)(struct sk_buff *skb);
- struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
void (*release) (struct sock *sk, unsigned long *groups);
@@ -63,7 +63,7 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
}
/* this can be increased when necessary - don't expose to userland */
-#define NETLINK_MAX_COOKIE_LEN 20
+#define NETLINK_MAX_COOKIE_LEN 8
#define NETLINK_MAX_FMTMSG_LEN 80
/**
@@ -212,6 +212,7 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
{
if (!extack)
return;
+ BUILD_BUG_ON(sizeof(extack->cookie) < sizeof(cookie));
memcpy(extack->cookie, &cookie, sizeof(cookie));
extack->cookie_len = sizeof(cookie);
}
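Example (editor's sketch, not part of the patch): the new BUILD_BUG_ON holds because NETLINK_MAX_COOKIE_LEN is now exactly sizeof(u64). A hedged usage sketch, returning an object id to userspace:

static void my_report_id(struct netlink_ext_ack *extack, u64 obj_id)
{
        /* a NULL extack is tolerated by the helper itself */
        nl_set_extack_cookie_u64(extack, obj_id);
}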
@@ -240,7 +241,7 @@ int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
-struct sock *netlink_getsockbyfilp(struct file *filp);
+struct sock *netlink_getsockbyfd(int fd);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
@@ -294,7 +295,7 @@ struct netlink_callback {
int flags;
bool strict_check;
union {
- u8 ctx[48];
+ u8 ctx[NETLINK_CTX_SIZE];
/* args is deprecated. Cast a struct over ctx instead
* for proper type safety.
@@ -303,7 +304,7 @@ struct netlink_callback {
};
};
-#define NL_ASSERT_DUMP_CTX_FITS(type_name) \
+#define NL_ASSERT_CTX_FITS(type_name) \
BUILD_BUG_ON(sizeof(type_name) > \
sizeof_field(struct netlink_callback, ctx))
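Example (editor's sketch, not part of the patch): the renamed assertion in a dump callback; struct my_dump_ctx and the resume logic are hypothetical:

struct my_dump_ctx {
        u32 next_ifindex;
        u32 flags;
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct my_dump_ctx *ctx = (struct my_dump_ctx *)cb->ctx;

        NL_ASSERT_CTX_FITS(struct my_dump_ctx);
        ctx->next_ifindex++;    /* resume point for the next invocation */
        return skb->len;
}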
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bd19c4b91e31..f22eec466040 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -15,25 +15,37 @@
#include <linux/refcount.h>
union inet_addr {
- __u32 all[4];
__be32 ip;
- __be32 ip6[4];
- struct in_addr in;
struct in6_addr in6;
};
struct netpoll {
struct net_device *dev;
netdevice_tracker dev_tracker;
+ /*
+ * Either dev_name or dev_mac can be used to specify the local
+ * interface - dev_name is used if it is a nonempty string, else
+ * dev_mac is used.
+ */
char dev_name[IFNAMSIZ];
+ u8 dev_mac[ETH_ALEN];
const char *name;
union inet_addr local_ip, remote_ip;
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
+ struct sk_buff_head skb_pool;
+ struct work_struct refill_wq;
};
+#define np_info(np, fmt, ...) \
+ pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+ pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
struct netpoll_info {
refcount_t refcnt;
@@ -43,7 +55,6 @@ struct netpoll_info {
struct delayed_work tx_work;
- struct netpoll *netpoll;
struct rcu_head rcu;
};
@@ -56,14 +67,12 @@ static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
#endif
-void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
-void netpoll_print_options(struct netpoll *np);
-int netpoll_parse_options(struct netpoll *np, char *opt);
+int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
-void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
+void do_netpoll_cleanup(struct netpoll *np);
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
@@ -71,7 +80,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- if (dev && dev->npinfo) {
+ if (dev && rcu_access_pointer(dev->npinfo)) {
int owner = smp_processor_id();
while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index ceb70a926b95..0906a0b40c6a 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,11 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
@@ -46,7 +55,6 @@ enum nfs3_stable_how {
NFS_INVALID_STABLE_HOW = -1
};
-#ifdef CONFIG_CRC32
/**
* nfs_fhandle_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
@@ -58,10 +66,4 @@ static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
{
return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
}
-#else /* CONFIG_CRC32 */
-static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
-{
- return 0;
-}
-#endif /* CONFIG_CRC32 */
#endif /* _LINUX_NFS_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0d896ce296ce..e947af6a3684 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -17,6 +17,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
@@ -46,6 +47,7 @@ struct nfs4_acl {
struct nfs4_label {
uint32_t lfs;
uint32_t pi;
+ u32 lsmid;
u32 len;
char *label;
};
@@ -70,6 +72,7 @@ struct nfs4_stateid_struct {
NFS4_LAYOUT_STATEID_TYPE,
NFS4_PNFS_DS_STATEID_TYPE,
NFS4_REVOKED_STATEID_TYPE,
+ NFS4_FREED_STATEID_TYPE,
} type;
};
@@ -281,20 +284,24 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_UNION_NOTSUPP = 10090,
- NFS4ERR_OFFLOAD_DENIED = 10091,
- NFS4ERR_WRONG_LFS = 10092,
- NFS4ERR_BADLABEL = 10093,
- NFS4ERR_OFFLOAD_NO_REQS = 10094,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
/* xattr (RFC8276) */
- NFS4ERR_NOXATTR = 10095,
- NFS4ERR_XATTR2BIG = 10096,
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
+
+ /* can be used for internal errors */
+ NFS4ERR_FIRST_FREE
};
/* error codes for internal client use */
#define NFS4ERR_RESET_TO_MDS 12001
#define NFS4ERR_RESET_TO_PNFS 12002
+#define NFS4ERR_FATAL_IOERROR 12003
static inline bool seqid_mutating_err(u32 err)
{
@@ -362,11 +369,13 @@ enum limit_by4 {
NFS4_LIMIT_BLOCKS = 2
};
-enum open_delegation_type4 {
+enum nfs4_open_delegation_type4 {
NFS4_OPEN_DELEGATE_NONE = 0,
NFS4_OPEN_DELEGATE_READ = 1,
NFS4_OPEN_DELEGATE_WRITE = 2,
NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
+ NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
};
enum why_no_delegation4 { /* new to v4.1 */
@@ -586,6 +595,9 @@ enum {
#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
+#define FATTR4_WORD2_TIME_DELEG_ACCESS BIT(FATTR4_TIME_DELEG_ACCESS - 64)
+#define FATTR4_WORD2_TIME_DELEG_MODIFY BIT(FATTR4_TIME_DELEG_MODIFY - 64)
+#define FATTR4_WORD2_OPEN_ARGUMENTS BIT(FATTR4_OPEN_ARGUMENTS - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -667,6 +679,7 @@ enum {
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
+ NFSPROC4_CLNT_ZERO_RANGE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
@@ -681,6 +694,7 @@ enum {
NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR,
NFSPROC4_CLNT_READ_PLUS,
+ NFSPROC4_CLNT_OFFLOAD_STATUS,
};
/* nfs41 types */
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..a541c3a02887
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+__u32 nfs_localio_errno_to_nfs4_stat(int errno);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 039898d70954..c585939b6cd6 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -77,6 +77,23 @@ struct nfs_lock_context {
struct rcu_head rcu_head;
};
+struct nfs_file_localio {
+ struct nfsd_file __rcu *ro_file;
+ struct nfsd_file __rcu *rw_file;
+ struct list_head list;
+ void __rcu *nfs_uuid; /* opaque pointer to 'nfs_uuid_t' */
+};
+
+static inline void nfs_localio_file_init(struct nfs_file_localio *nfl)
+{
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ nfl->ro_file = NULL;
+ nfl->rw_file = NULL;
+ INIT_LIST_HEAD(&nfl->list);
+ nfl->nfs_uuid = NULL;
+#endif
+}
+
struct nfs4_state;
struct nfs_open_context {
struct nfs_lock_context lock_context;
@@ -87,15 +104,16 @@ struct nfs_open_context {
struct nfs4_state *state;
fmode_t mode;
+ int error;
unsigned long flags;
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
#define NFS_CONTEXT_FILE_OPEN (4)
- int error;
- struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct list_head list;
struct rcu_head rcu_head;
+ struct nfs_file_localio nfl;
};
struct nfs_open_dir_context {
@@ -143,6 +161,12 @@ struct nfs_inode {
unsigned long cache_validity; /* bit mask */
/*
+ * NFS Attributes not included in struct inode
+ */
+
+ struct timespec64 btime;
+
+ /*
* read_cache_jiffies is when we started read-caching this inode.
* attrtimeo is for how long the cached information is assumed
* to be valid. A successful attribute revalidation doubles
@@ -298,10 +322,12 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */
#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */
+#define NFS_INO_INVALID_BTIME BIT(18) /* cached btime is invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
| NFS_INO_INVALID_MTIME \
+ | NFS_INO_INVALID_BTIME \
| NFS_INO_INVALID_SIZE \
| NFS_INO_INVALID_NLINK \
| NFS_INO_INVALID_MODE \
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 92de074e63b9..d30c0245031c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -49,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -123,8 +125,16 @@ struct nfs_client {
*/
char cl_ipaddr[48];
struct net *cl_net;
+ netns_tracker cl_ns_tracker;
struct list_head pending_cb_stateids;
struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ struct work_struct cl_local_probe_work;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -140,6 +150,7 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
+ wait_queue_head_t write_congestion_wait; /* wait until write congestion eases */
atomic_long_t writeback; /* number of writeback pages */
unsigned int write_congested;/* flag set when writeback gets too high */
unsigned int flags; /* various flags */
@@ -157,13 +168,15 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
+#define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000
+#define NFS_MOUNT_NETUNREACH_FATAL 0x40000000
- unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
+ __u64 fattr_valid; /* Valid attributes */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
unsigned int wsize; /* write size */
- unsigned int wpages; /* write size (in pages) */
unsigned int wtmult; /* server disk block size */
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
@@ -189,7 +202,6 @@ struct nfs_server {
struct nfs_fsid fsid;
int s_sysfs_id; /* sysfs dentry index */
__u64 maxfilesize; /* maximum file size */
- struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
@@ -200,6 +212,15 @@ struct nfs_server {
char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
+ /* The following #defines numerically match the NFSv4 equivalents */
+#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1)
+#define NFS_FH_VOLATILE_ANY (0x2)
+#define NFS_FH_VOL_MIGRATION (0x4)
+#define NFS_FH_VOL_RENAME (0x8)
+#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME)
+ u32 fh_expire_type; /* V4 bitmask representing file
+ handle volatility type for
+ this filesystem */
u32 pnfs_blksize; /* layout_blksize attr */
#if IS_ENABLED(CONFIG_NFS_V4)
u32 attr_bitmask[3];/* V4 bitmask representing the set
@@ -223,23 +244,26 @@ struct nfs_server {
u32 acl_bitmask; /* V4 bitmask representing the ACEs
that are supported on this
filesystem */
- u32 fh_expire_type; /* V4 bitmask representing file
- handle volatility type for
- this filesystem */
struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
struct rpc_wait_queue roc_rpcwaitq;
- void *pnfs_ld_data; /* per mount point data */
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ atomic_long_t nr_active_delegations;
+ unsigned int delegation_hash_mask;
+ struct hlist_head *delegation_hash_table;
struct list_head ss_copies;
+ struct list_head ss_src_copies;
+ unsigned long delegation_flags;
+#define NFS4SERV_DELEGRETURN (1)
+#define NFS4SERV_DELEGATION_EXPIRED (2)
+#define NFS4SERV_DELEGRETURN_DELAYED (3)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
@@ -278,6 +302,11 @@ struct nfs_server {
#define NFS_CAP_LGOPEN (1U << 5)
#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
#define NFS_CAP_CASE_PRESERVING (1U << 7)
+#define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8)
+#define NFS_CAP_OFFLOAD_STATUS (1U << 9)
+#define NFS_CAP_ZERO_RANGE (1U << 10)
+#define NFS_CAP_OPEN_XOR (1U << 12)
+#define NFS_CAP_DELEGTIME (1U << 13)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 1c315f854ea8..afe1d8f09d89 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -122,8 +122,6 @@ struct nfs_pageio_descriptor {
/* arbitrarily selected limit to number of mirrors */
#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
-#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-
extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
struct page *page,
unsigned int pgbase,
@@ -152,17 +150,15 @@ extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev,
struct nfs_page *req);
-extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
-extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
extern void nfs_join_page_group(struct nfs_page *head,
struct nfs_commit_info *cinfo,
struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
@@ -208,8 +204,8 @@ static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
struct folio *folio = nfs_page_to_folio(req);
if (folio == NULL)
- return page_file_mapping(req->wb_page)->host;
- return folio_file_mapping(folio)->host;
+ return req->wb_page->mapping->host;
+ return folio->mapping->host;
}
/**
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d09b9773b20c..31463286402f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -45,7 +45,7 @@ struct nfs4_threshold {
};
struct nfs_fattr {
- unsigned int valid; /* which fields are valid */
+ __u64 valid; /* which fields are valid */
umode_t mode;
__u32 nlink;
kuid_t uid;
@@ -67,6 +67,7 @@ struct nfs_fattr {
struct timespec64 atime;
struct timespec64 mtime;
struct timespec64 ctime;
+ struct timespec64 btime;
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
__u64 pre_size; /* pre_op_attr.size */
@@ -80,32 +81,33 @@ struct nfs_fattr {
struct nfs4_label *label;
};
-#define NFS_ATTR_FATTR_TYPE (1U << 0)
-#define NFS_ATTR_FATTR_MODE (1U << 1)
-#define NFS_ATTR_FATTR_NLINK (1U << 2)
-#define NFS_ATTR_FATTR_OWNER (1U << 3)
-#define NFS_ATTR_FATTR_GROUP (1U << 4)
-#define NFS_ATTR_FATTR_RDEV (1U << 5)
-#define NFS_ATTR_FATTR_SIZE (1U << 6)
-#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
-#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
-#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
-#define NFS_ATTR_FATTR_FSID (1U << 10)
-#define NFS_ATTR_FATTR_FILEID (1U << 11)
-#define NFS_ATTR_FATTR_ATIME (1U << 12)
-#define NFS_ATTR_FATTR_MTIME (1U << 13)
-#define NFS_ATTR_FATTR_CTIME (1U << 14)
-#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
-#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
-#define NFS_ATTR_FATTR_CHANGE (1U << 17)
-#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
-#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19)
-#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20)
-#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21)
-#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22)
-#define NFS_ATTR_FATTR_OWNER_NAME (1U << 23)
-#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24)
-#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25)
+#define NFS_ATTR_FATTR_TYPE BIT_ULL(0)
+#define NFS_ATTR_FATTR_MODE BIT_ULL(1)
+#define NFS_ATTR_FATTR_NLINK BIT_ULL(2)
+#define NFS_ATTR_FATTR_OWNER BIT_ULL(3)
+#define NFS_ATTR_FATTR_GROUP BIT_ULL(4)
+#define NFS_ATTR_FATTR_RDEV BIT_ULL(5)
+#define NFS_ATTR_FATTR_SIZE BIT_ULL(6)
+#define NFS_ATTR_FATTR_PRESIZE BIT_ULL(7)
+#define NFS_ATTR_FATTR_BLOCKS_USED BIT_ULL(8)
+#define NFS_ATTR_FATTR_SPACE_USED BIT_ULL(9)
+#define NFS_ATTR_FATTR_FSID BIT_ULL(10)
+#define NFS_ATTR_FATTR_FILEID BIT_ULL(11)
+#define NFS_ATTR_FATTR_ATIME BIT_ULL(12)
+#define NFS_ATTR_FATTR_MTIME BIT_ULL(13)
+#define NFS_ATTR_FATTR_CTIME BIT_ULL(14)
+#define NFS_ATTR_FATTR_PREMTIME BIT_ULL(15)
+#define NFS_ATTR_FATTR_PRECTIME BIT_ULL(16)
+#define NFS_ATTR_FATTR_CHANGE BIT_ULL(17)
+#define NFS_ATTR_FATTR_PRECHANGE BIT_ULL(18)
+#define NFS_ATTR_FATTR_V4_LOCATIONS BIT_ULL(19)
+#define NFS_ATTR_FATTR_V4_REFERRAL BIT_ULL(20)
+#define NFS_ATTR_FATTR_MOUNTPOINT BIT_ULL(21)
+#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID BIT_ULL(22)
+#define NFS_ATTR_FATTR_OWNER_NAME BIT_ULL(23)
+#define NFS_ATTR_FATTR_GROUP_NAME BIT_ULL(24)
+#define NFS_ATTR_FATTR_V4_SECURITY_LABEL BIT_ULL(25)
+#define NFS_ATTR_FATTR_BTIME BIT_ULL(26)
#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
| NFS_ATTR_FATTR_MODE \
@@ -126,6 +128,7 @@ struct nfs_fattr {
| NFS_ATTR_FATTR_SPACE_USED)
#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_SPACE_USED \
+ | NFS_ATTR_FATTR_BTIME \
| NFS_ATTR_FATTR_V4_SECURITY_LABEL)
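
Since 'valid' is now 64 bits wide, tests against it must use the BIT_ULL()-based flags; a minimal sketch, assuming a hypothetical helper name:

/* Hypothetical helper: test the widened 64-bit validity mask. */
static inline bool fattr_has_btime(const struct nfs_fattr *fattr)
{
	return (fattr->valid & NFS_ATTR_FATTR_BTIME) != 0;
}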
/*
@@ -446,7 +449,23 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
+};
+
+struct nfs4_open_delegation {
+ __u32 open_delegation_type;
+ union {
+ struct {
+ fmode_t type;
+ __u32 do_recall;
+ nfs4_stateid stateid;
+ unsigned long pagemod_limit;
+ };
+ struct {
+ __u32 why_no_delegation;
+ __u32 will_notify;
+ };
+ };
};
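
The anonymous union is discriminated by open_delegation_type, mirroring the wire-format open_delegation4 switch. A consumer sketch, assuming 'res' is a struct nfs_openres pointer and the standard NFSv4 delegation constants:

/* Sketch: select the valid union arm from the wire discriminator. */
switch (res->delegation.open_delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
case NFS4_OPEN_DELEGATE_WRITE:
	/* first arm is valid: type, do_recall, stateid, pagemod_limit */
	break;
case NFS4_OPEN_DELEGATE_NONE_EXT:
	/* second arm is valid: why_no_delegation, will_notify */
	break;
}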
/*
@@ -468,7 +487,7 @@ struct nfs_openargs {
nfs4_verifier verifier; /* EXCLUSIVE */
};
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
- fmode_t delegation_type; /* CLAIM_PREVIOUS */
+ __u32 delegation_type; /* CLAIM_PREVIOUS */
} u;
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
@@ -490,13 +509,10 @@ struct nfs_openres {
struct nfs_fattr * f_attr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
- fmode_t delegation_type;
- nfs4_stateid delegation;
- unsigned long pagemod_limit;
- __u32 do_recall;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
+ struct nfs4_open_delegation delegation;
__u32 access_request;
__u32 access_supported;
__u32 access_result;
@@ -609,6 +625,13 @@ struct nfs_release_lockowner_res {
struct nfs4_sequence_res seq_res;
};
+struct nfs4_delegattr {
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ bool atime_set;
+ bool mtime_set;
+};
+
struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
@@ -616,6 +639,7 @@ struct nfs4_delegreturnargs {
const u32 *bitmask;
u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
+ struct nfs4_delegattr *sattr_args;
};
struct nfs4_delegreturnres {
@@ -624,6 +648,8 @@ struct nfs4_delegreturnres {
struct nfs_server *server;
struct nfs4_layoutreturn_res *lr_res;
int lr_ret;
+ bool sattr_res;
+ int sattr_ret;
};
/*
@@ -836,7 +862,7 @@ struct nfs_getaclres {
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
- struct page * acl_scratch;
+ struct folio * acl_scratch;
};
struct nfs_setattrres {
@@ -1190,6 +1216,14 @@ struct nfs4_statfs_res {
struct nfs_fsstat *fsstat;
};
+struct nfs4_open_caps {
+ u32 oa_share_access[1];
+ u32 oa_share_deny[1];
+ u32 oa_share_access_want[1];
+ u32 oa_open_claim[1];
+ u32 oa_createmode[1];
+};
+
struct nfs4_server_caps_arg {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fhandle;
@@ -1206,6 +1240,7 @@ struct nfs4_server_caps_res {
u32 fh_expire_type;
u32 case_insensitive;
u32 case_preserving;
+ struct nfs4_open_caps open_caps;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1283,11 +1318,6 @@ struct nfs4_fsid_present_res {
#endif /* CONFIG_NFS_V4 */
-struct nfstime4 {
- u64 seconds;
- u32 nseconds;
-};
-
#ifdef CONFIG_NFS_V4_1
struct pnfs_commit_bucket {
@@ -1304,7 +1334,7 @@ struct pnfs_commit_array {
struct rcu_head rcu;
refcount_t refcount;
unsigned int nbuckets;
- struct pnfs_commit_bucket buckets[];
+ struct pnfs_commit_bucket buckets[] __counted_by(nbuckets);
};
struct pnfs_ds_commit_info {
@@ -1406,7 +1436,7 @@ struct nfs41_secinfo_no_name_args {
struct nfs41_test_stateid_args {
struct nfs4_sequence_args seq_args;
- nfs4_stateid *stateid;
+ nfs4_stateid stateid;
};
struct nfs41_test_stateid_res {
@@ -1488,8 +1518,9 @@ struct nfs42_offload_status_args {
struct nfs42_offload_status_res {
struct nfs4_sequence_res osr_seq_res;
- uint64_t osr_count;
- int osr_status;
+ u64 osr_count;
+ int complete_count;
+ u32 osr_complete;
};
struct nfs42_copy_notify_args {
@@ -1565,7 +1596,7 @@ struct nfs42_listxattrsargs {
struct nfs42_listxattrsres {
struct nfs4_sequence_res seq_res;
- struct page *scratch;
+ struct folio *scratch;
void *xattr_buf;
size_t xattr_len;
u64 cookie;
@@ -1605,6 +1636,7 @@ enum {
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
NFS_IOHDR_UNSTABLE_WRITES,
+ NFS_IOHDR_ODIRECT,
};
struct nfs_io_completion;
@@ -1627,6 +1659,7 @@ struct nfs_pgio_header {
void *netfs;
#endif
+ unsigned short retrans;
int pnfs_error;
int error; /* merge with pnfs_error */
unsigned int good_bytes; /* boundary of good data */
@@ -1753,7 +1786,7 @@ struct nfs_rpc_ops {
struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, struct dentry *,
+ int (*lookup) (struct inode *, struct dentry *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
struct nfs_fattr *);
@@ -1774,7 +1807,7 @@ struct nfs_rpc_ops {
int (*link) (struct inode *, struct inode *, const struct qstr *);
int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
- int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
+ struct dentry *(*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
@@ -1807,7 +1840,8 @@ struct nfs_rpc_ops {
int open_flags,
struct iattr *iattr,
int *);
- int (*have_delegation)(struct inode *, fmode_t);
+ int (*have_delegation)(struct inode *, fmode_t, int);
+ int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
@@ -1821,6 +1855,24 @@ struct nfs_rpc_ops {
};
/*
+ * Helper functions used by NFS client and/or server
+ */
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
+
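
A usage sketch for the two helpers above, round-tripping a fixed-size opaque such as an nfs4_stateid ('xdr', 'stateid' and 'status' are assumed in scope):

/* Encode side: WARNs (once) if the stream has no room. */
encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);

/* Decode side: returns -EIO on a short or corrupt buffer. */
status = decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
if (status)
	return status;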
+/*
* Function vectors etc. for the NFS client
*/
extern const struct nfs_rpc_ops nfs_v2_clientops;
@@ -1833,4 +1885,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..3d91043254e6
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+struct nfs_client;
+struct nfs_file_localio;
+
+/*
+ * Useful to allow a client to negotiate if localio is
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ unsigned nfs3_localio_probe_count;
+ /* this struct is over a cacheline, avoid bouncing */
+ spinlock_t ____cacheline_aligned lock;
+ struct list_head list;
+ spinlock_t *list_lock; /* nn->local_clients_lock */
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+ /* Local files to close when net is shut down or exports change */
+ struct list_head files;
+} nfs_uuid_t;
+
+void nfs_uuid_init(nfs_uuid_t *);
+bool nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *, spinlock_t *,
+ struct net *, struct auth_domain *, struct module *);
+
+void nfs_localio_enable_client(struct nfs_client *clp);
+void nfs_localio_disable_client(struct nfs_client *clp);
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+void nfs_close_local_fh(struct nfs_file_localio *);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_net_try_get)(struct net *);
+ void (*nfsd_net_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t);
+ struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+ void (*nfsd_file_dio_alignment)(struct nfsd_file *,
+ u32 *, u32 *, u32 *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, struct nfs_file_localio *,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t);
+
+static inline void nfs_to_nfsd_net_put(struct net *net)
+{
+ /*
+ * Once reference to net (and associated nfsd_serv) is dropped, NFSD
+ * could be unloaded, so ensure safe return from nfsd_net_put() by
+ * always taking RCU.
+ */
+ rcu_read_lock();
+ nfs_to->nfsd_net_put(net);
+ rcu_read_unlock();
+}
+
+static inline void nfs_to_nfsd_file_put_local(struct nfsd_file __rcu **localio)
+{
+ /*
+ * Either *localio must be guaranteed to be non-NULL, or caller
+ * must prevent nfsd shutdown from completing as nfs_close_local_fh()
+ * does by blocking the nfs_uuid from being finally put.
+ */
+ struct net *net;
+
+ net = nfs_to->nfsd_file_put_local(localio);
+
+ if (net)
+ nfs_to_nfsd_net_put(net);
+}
+
+#else /* CONFIG_NFS_LOCALIO */
+
+struct nfs_file_localio;
+static inline void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+}
+static inline void nfsd_localio_ops_init(void)
+{
+}
+struct nfs_client;
+static inline void nfs_localio_disable_client(struct nfs_client *clp)
+{
+}
+
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a8dfb38c9bb6..cf3c6ab408aa 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -17,7 +17,6 @@
void lockup_detector_init(void);
void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
-void lockup_detector_cleanup(void);
extern int watchdog_user_enabled;
extern int watchdog_thresh;
@@ -37,7 +36,6 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
-static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
@@ -104,13 +102,13 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
-extern void hardlockup_detector_perf_cleanup(void);
extern void hardlockup_config_perf_event(const char *str);
+extern void hardlockup_detector_perf_adjust_period(u64 period);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
-static inline void hardlockup_detector_perf_cleanup(void) { }
static inline void hardlockup_config_perf_event(const char *str) { }
+static inline void hardlockup_detector_perf_adjust_period(u64 period) { }
#endif
void watchdog_hardlockup_stop(void);
diff --git a/include/linux/node.h b/include/linux/node.h
index dfc004e4bee7..0269b064ba65 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -16,7 +16,6 @@
#define _LINUX_NODE_H_
#include <linux/device.h>
-#include <linux/cpumask.h>
#include <linux/list.h>
/**
@@ -58,6 +57,11 @@ enum cache_write_policy {
NODE_CACHE_WRITE_OTHER,
};
+enum cache_mode {
+ NODE_CACHE_ADDR_MODE_RESERVED,
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR,
+};
+
/**
* struct node_cache_attrs - system memory caching attributes
*
@@ -66,6 +70,7 @@ enum cache_write_policy {
* @size: Total size of cache in bytes
* @line_size: Number of bytes fetched on a cache miss
* @level: The cache hierarchy level
+ * @address_mode: The address mode (see enum cache_mode)
*/
struct node_cache_attrs {
enum cache_indexing indexing;
@@ -73,12 +78,15 @@ struct node_cache_attrs {
u64 size;
u16 line_size;
u8 level;
+ u16 address_mode;
};
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
enum access_coordinate_class access);
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
#else
static inline void node_add_cache(unsigned int nid,
struct node_cache_attrs *cache_attrs)
@@ -90,6 +98,12 @@ static inline void node_set_perf_attrs(unsigned int nid,
enum access_coordinate_class access)
{
}
+
+static inline void node_update_perf_attrs(unsigned int nid,
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+}
#endif
struct node {
@@ -105,44 +119,63 @@ struct memory_block;
extern struct node *node_devices[];
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context);
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
#else
-static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+static inline void register_memory_blocks_under_node_hotplug(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
{
}
-#endif
-
-extern void unregister_node(struct node *node);
-#ifdef CONFIG_NUMA
-extern void node_dev_init(void);
-/* Core of the node registration - only memory hotplug should use this */
-extern int __register_one_node(int nid);
-
-/* Registers an online node */
-static inline int register_one_node(int nid)
+static inline void register_memory_blocks_under_nodes(void)
{
- int error = 0;
+}
+#endif
- if (node_online(nid)) {
- struct pglist_data *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+struct node_notify {
+ int nid;
+};
- error = __register_one_node(nid);
- if (error)
- return error;
- register_memory_blocks_under_node(nid, start_pfn, end_pfn,
- MEMINIT_EARLY);
- }
+#define NODE_ADDING_FIRST_MEMORY (1<<0)
+#define NODE_ADDED_FIRST_MEMORY (1<<1)
+#define NODE_CANCEL_ADDING_FIRST_MEMORY (1<<2)
+#define NODE_REMOVING_LAST_MEMORY (1<<3)
+#define NODE_REMOVED_LAST_MEMORY (1<<4)
+#define NODE_CANCEL_REMOVING_LAST_MEMORY (1<<5)
- return error;
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+extern int register_node_notifier(struct notifier_block *nb);
+extern void unregister_node_notifier(struct notifier_block *nb);
+extern int node_notify(unsigned long val, void *v);
+
+#define hotplug_node_notifier(fn, pri) ({ \
+ static __meminitdata struct notifier_block fn##_node_nb =\
+ { .notifier_call = fn, .priority = pri };\
+ register_node_notifier(&fn##_node_nb); \
+})
+#else
+static inline int register_node_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_node_notifier(struct notifier_block *nb)
+{
+}
+static inline int node_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+static inline int hotplug_node_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
}
+#endif
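
A minimal consumer sketch for the node notifier above; the callback name is hypothetical, and the void pointer is assumed to carry the struct node_notify shown earlier:

static int demo_node_cb(struct notifier_block *nb,
			unsigned long action, void *arg)
{
	struct node_notify *nn = arg;

	if (action == NODE_ADDED_FIRST_MEMORY)
		pr_info("node %d: first memory onlined\n", nn->nid);
	return NOTIFY_OK;
}
/* registration: hotplug_node_notifier(demo_node_cb, 0); */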
-extern void unregister_one_node(int nid);
+#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
+/* Core of the node registration - only memory hotplug should use this */
+int register_node(int nid);
+void unregister_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
@@ -154,15 +187,11 @@ extern int register_memory_node_under_compute_node(unsigned int mem_nid,
static inline void node_dev_init(void)
{
}
-static inline int __register_one_node(int nid)
-{
- return 0;
-}
-static inline int register_one_node(int nid)
+static inline int register_node(int nid)
{
return 0;
}
-static inline int unregister_one_node(int nid)
+static inline int unregister_node(int nid)
{
return 0;
}
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b61438313a73..bd38648c998d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -39,9 +39,6 @@
* int nodes_full(mask) Is mask full (all bits sets)?
* int nodes_weight(mask) Hamming weight - number of set bits
*
- * void nodes_shift_right(dst, src, n) Shift right
- * void nodes_shift_left(dst, src, n) Shift left
- *
* unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
* unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
@@ -94,7 +91,6 @@
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/nodemask_types.h>
-#include <linux/numa.h>
#include <linux/random.h>
extern nodemask_t _unused_nodemask_arg_;
@@ -107,11 +103,11 @@ extern nodemask_t _unused_nodemask_arg_;
*/
#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
__nodemask_pr_bits(maskp)
-static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? MAX_NUMNODES : 0;
}
-static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : NULL;
}
@@ -132,19 +128,19 @@ static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
}
#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -154,14 +150,14 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline bool __node_test_and_set(int node, nodemask_t *addr)
+static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -169,7 +165,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -177,7 +173,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -185,15 +181,22 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
+static __always_inline void __nodes_copy(nodemask_t *dstp,
+ const nodemask_t *srcp, unsigned int nbits)
+{
+ bitmap_copy(dstp->bits, srcp->bits, nbits);
+}
+
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
+static __always_inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
@@ -201,7 +204,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_equal(const nodemask_t *src1p,
+static __always_inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -209,7 +212,7 @@ static inline bool __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_intersects(const nodemask_t *src1p,
+static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -217,59 +220,43 @@ static inline bool __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline bool __nodes_subset(const nodemask_t *src1p,
+static __always_inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
-#define nodes_shift_right(dst, src, n) \
- __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
- const nodemask_t *srcp, int n, int nbits)
-{
- bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define nodes_shift_left(dst, src, n) \
- __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
- const nodemask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-
/* FIXME: better would be to fix all architectures to never return
- > MAX_NUMNODES, then the silly min_ts could be dropped. */
+ > MAX_NUMNODES, then the silly min()s could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline unsigned int __first_node(const nodemask_t *srcp)
+static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline unsigned int __next_node(int n, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -277,7 +264,7 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
{
unsigned int ret = __next_node(node, srcp);
@@ -286,7 +273,7 @@ static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
return ret;
}
-static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
@@ -304,10 +291,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline unsigned int __first_unset_node(const nodemask_t *maskp)
+static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(unsigned int, MAX_NUMNODES,
- find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
@@ -338,21 +324,21 @@ static inline unsigned int __first_unset_node(const nodemask_t *maskp)
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse_user(const char __user *buf, int len,
+static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
-static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
-static inline int __node_remap(int oldbit,
+static __always_inline int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
@@ -360,7 +346,7 @@ static inline int __node_remap(int oldbit,
#define nodes_remap(dst, src, old, new) \
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
-static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
@@ -368,7 +354,7 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#define nodes_onto(dst, orig, relmap) \
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
-static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
@@ -376,7 +362,7 @@ static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
#define nodes_fold(dst, orig, sz) \
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
-static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
@@ -418,22 +404,22 @@ enum node_states {
extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
@@ -443,11 +429,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline unsigned int next_online_node(int nid)
+static __always_inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline unsigned int next_memory_node(int nid)
+static __always_inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -455,13 +441,13 @@ static inline unsigned int next_memory_node(int nid)
extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;
-static inline void node_set_online(int nid)
+static __always_inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
-static inline void node_set_offline(int nid)
+static __always_inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
@@ -469,20 +455,20 @@ static inline void node_set_offline(int nid)
#else
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node == 0;
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return 1;
}
@@ -502,24 +488,12 @@ static inline int num_node_state(enum node_states state)
#endif
-static inline int node_random(const nodemask_t *maskp)
+static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
- int w, bit;
-
- w = nodes_weight(*maskp);
- switch (w) {
- case 0:
- bit = NUMA_NO_NODE;
- break;
- case 1:
- bit = first_node(*maskp);
- break;
- default:
- bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
- break;
- }
- return bit;
+ int node = find_random_bit(maskp->bits, MAX_NUMNODES);
+
+ return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
#else
return 0;
#endif
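
The rewrite delegates the weighted random choice to find_random_bit(); the empty-mask behaviour is preserved, as this sketch shows:

nodemask_t mask = NODE_MASK_NONE;

/* Empty mask: find_random_bit() reports >= MAX_NUMNODES,
 * which node_random() maps back to NUMA_NO_NODE. */
int nid = node_random(&mask);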
@@ -535,6 +509,7 @@ static inline int node_random(const nodemask_t *maskp)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+#define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU)
/*
* For nodemask scratch area.
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
index 6b28d97ea6ed..f850a48742f1 100644
--- a/include/linux/nodemask_types.h
+++ b/include/linux/nodemask_types.h
@@ -3,7 +3,16 @@
#define __LINUX_NODEMASK_TYPES_H
#include <linux/bitops.h>
-#include <linux/numa.h>
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 45702bdcbceb..01b6c9d9956f 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -109,7 +109,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.head = NULL, \
.srcuu = __SRCU_USAGE_INIT(name.srcuu), \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu, 0), \
}
#define ATOMIC_NOTIFIER_HEAD(name) \
@@ -237,7 +237,5 @@ static inline int notifier_to_errno(int ret)
#define KBD_KEYSYM 0x0004 /* Keyboard keysym */
#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */
-extern struct blocking_notifier_head reboot_notifier_list;
-
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
new file mode 100644
index 000000000000..b332b019b29c
--- /dev/null
+++ b/include/linux/ns/ns_common_types.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NS_COMMON_TYPES_H
+#define _LINUX_NS_COMMON_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ns/nstree_types.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+struct cgroup_namespace;
+struct dentry;
+struct ipc_namespace;
+struct mnt_namespace;
+struct net;
+struct pid_namespace;
+struct proc_ns_operations;
+struct time_namespace;
+struct user_namespace;
+struct uts_namespace;
+
+extern struct cgroup_namespace init_cgroup_ns;
+extern struct ipc_namespace init_ipc_ns;
+extern struct mnt_namespace init_mnt_ns;
+extern struct net init_net;
+extern struct pid_namespace init_pid_ns;
+extern struct time_namespace init_time_ns;
+extern struct user_namespace init_user_ns;
+extern struct uts_namespace init_uts_ns;
+
+extern const struct proc_ns_operations cgroupns_operations;
+extern const struct proc_ns_operations ipcns_operations;
+extern const struct proc_ns_operations mntns_operations;
+extern const struct proc_ns_operations netns_operations;
+extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations pidns_for_children_operations;
+extern const struct proc_ns_operations timens_operations;
+extern const struct proc_ns_operations timens_for_children_operations;
+extern const struct proc_ns_operations userns_operations;
+extern const struct proc_ns_operations utsns_operations;
+
+/*
+ * Namespace lifetimes are managed via a two-tier reference counting model:
+ *
+ * (1) __ns_ref (refcount_t): Main reference count tracking memory
+ * lifetime. Controls when the namespace structure itself is freed.
+ * It also pins the namespace on the namespace trees whereas (2)
+ * only regulates their visibility to userspace.
+ *
+ * (2) __ns_ref_active (atomic_t): Reference count tracking active users.
+ * Controls visibility of the namespace in the namespace trees.
+ * Any live task that uses the namespace (via nsproxy or cred) holds
+ * an active reference. Any open file descriptor or bind-mount of
+ * the namespace holds an active reference. Once all tasks have
+ * called exited their namespaces and all file descriptors and
+ * bind-mounts have been released the active reference count drops
+ * to zero and the namespace becomes inactive. IOW, the namespace
+ * cannot be listed or opened via file handles anymore.
+ *
+ * Note that it is valid to transition from active to inactive and
+ * back from inactive to active, e.g. when resurrecting an inactive
+ * namespace tree via the SIOCGSKNS ioctl().
+ *
+ * Relationship and lifecycle states:
+ *
+ * - Active (__ns_ref_active > 0):
+ * Namespace is actively used and visible to userspace. The namespace
+ * can be reopened via /proc/<pid>/ns/<ns_type>, via namespace file
+ * handles, or discovered via listns().
+ *
+ * - Inactive (__ns_ref_active == 0, __ns_ref > 0):
+ * No tasks are actively using the namespace and it isn't pinned by
+ * any bind-mounts or open file descriptors anymore. But the namespace
+ * is still kept alive by internal references. For example, the user
+ * namespace could be pinned by an open file through file->f_cred
+ * references when one of the now defunct tasks had opened a file and
+ * handed the file descriptor off to another process via a UNIX
+ * socket. Such references keep the namespace structure alive through
+ * __ns_ref but will not hold an active reference.
+ *
+ * - Destroyed (__ns_ref == 0):
+ * No references remain. The namespace is removed from the tree and freed.
+ *
+ * State transitions:
+ *
+ * Active -> Inactive:
+ * When the last task using the namespace exits it drops its active
+ * references to all namespaces. However, user and pid namespaces
+ * remain accessible until the task has been reaped.
+ *
+ * Inactive -> Active:
+ * An inactive namespace tree might be resurrected, e.g. by the
+ * SIOCGSKNS ioctl() on a socket.
+ *
+ * Inactive -> Destroyed:
+ * When __ns_ref drops to zero the namespace is removed from the
+ * namespaces trees and the memory is freed (after RCU grace period).
+ *
+ * Initial namespaces:
+ * Boot-time namespaces (init_net, init_pid_ns, etc.) start with
+ * __ns_ref_active = 1 and remain active forever.
+ *
+ * @ns_type: type of namespace (e.g., CLONE_NEWNET)
+ * @stashed: cached dentry to be used by the vfs
+ * @ops: namespace operations
+ * @inum: namespace inode number (quickly recycled for non-initial namespaces)
+ * @__ns_ref: main reference count (do not use directly)
+ * @ns_tree: namespace tree nodes and active reference count
+ */
+struct ns_common {
+ u32 ns_type;
+ struct dentry *stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+ refcount_t __ns_ref; /* do not use directly */
+ union {
+ struct ns_tree;
+ struct rcu_head ns_rcu;
+ };
+};
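
To make the three lifecycle states concrete, an illustrative predicate (not part of the patch) can be phrased directly in terms of the two counters:

/* Illustrative only: 'inactive' per the lifecycle comment above. */
static inline bool ns_is_inactive(const struct ns_common *ns)
{
	return atomic_read(&ns->__ns_ref_active) == 0 &&
	       refcount_read(&ns->__ns_ref) > 0;
}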
+
+#define to_ns_common(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &(__ns)->ns, \
+ const struct cgroup_namespace *: &(__ns)->ns, \
+ struct ipc_namespace *: &(__ns)->ns, \
+ const struct ipc_namespace *: &(__ns)->ns, \
+ struct mnt_namespace *: &(__ns)->ns, \
+ const struct mnt_namespace *: &(__ns)->ns, \
+ struct net *: &(__ns)->ns, \
+ const struct net *: &(__ns)->ns, \
+ struct pid_namespace *: &(__ns)->ns, \
+ const struct pid_namespace *: &(__ns)->ns, \
+ struct time_namespace *: &(__ns)->ns, \
+ const struct time_namespace *: &(__ns)->ns, \
+ struct user_namespace *: &(__ns)->ns, \
+ const struct user_namespace *: &(__ns)->ns, \
+ struct uts_namespace *: &(__ns)->ns, \
+ const struct uts_namespace *: &(__ns)->ns)
+
+#define ns_init_inum(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CGROUP_NS_INIT_INO, \
+ struct ipc_namespace *: IPC_NS_INIT_INO, \
+ struct mnt_namespace *: MNT_NS_INIT_INO, \
+ struct net *: NET_NS_INIT_INO, \
+ struct pid_namespace *: PID_NS_INIT_INO, \
+ struct time_namespace *: TIME_NS_INIT_INO, \
+ struct user_namespace *: USER_NS_INIT_INO, \
+ struct uts_namespace *: UTS_NS_INIT_INO)
+
+#define ns_init_ns(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &init_cgroup_ns, \
+ struct ipc_namespace *: &init_ipc_ns, \
+ struct mnt_namespace *: &init_mnt_ns, \
+ struct net *: &init_net, \
+ struct pid_namespace *: &init_pid_ns, \
+ struct time_namespace *: &init_time_ns, \
+ struct user_namespace *: &init_user_ns, \
+ struct uts_namespace *: &init_uts_ns)
+
+#define ns_init_id(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CGROUP_NS_INIT_ID, \
+ struct ipc_namespace *: IPC_NS_INIT_ID, \
+ struct mnt_namespace *: MNT_NS_INIT_ID, \
+ struct net *: NET_NS_INIT_ID, \
+ struct pid_namespace *: PID_NS_INIT_ID, \
+ struct time_namespace *: TIME_NS_INIT_ID, \
+ struct user_namespace *: USER_NS_INIT_ID, \
+ struct uts_namespace *: UTS_NS_INIT_ID)
+
+#define to_ns_operations(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: (IS_ENABLED(CONFIG_CGROUPS) ? &cgroupns_operations : NULL), \
+ struct ipc_namespace *: (IS_ENABLED(CONFIG_IPC_NS) ? &ipcns_operations : NULL), \
+ struct mnt_namespace *: &mntns_operations, \
+ struct net *: (IS_ENABLED(CONFIG_NET_NS) ? &netns_operations : NULL), \
+ struct pid_namespace *: (IS_ENABLED(CONFIG_PID_NS) ? &pidns_operations : NULL), \
+ struct time_namespace *: (IS_ENABLED(CONFIG_TIME_NS) ? &timens_operations : NULL), \
+ struct user_namespace *: (IS_ENABLED(CONFIG_USER_NS) ? &userns_operations : NULL), \
+ struct uts_namespace *: (IS_ENABLED(CONFIG_UTS_NS) ? &utsns_operations : NULL))
+
+#define ns_common_type(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CLONE_NEWCGROUP, \
+ struct ipc_namespace *: CLONE_NEWIPC, \
+ struct mnt_namespace *: CLONE_NEWNS, \
+ struct net *: CLONE_NEWNET, \
+ struct pid_namespace *: CLONE_NEWPID, \
+ struct time_namespace *: CLONE_NEWTIME, \
+ struct user_namespace *: CLONE_NEWUSER, \
+ struct uts_namespace *: CLONE_NEWUTS)
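
A compile-time dispatch sketch for the _Generic() helpers above; the function itself is hypothetical:

static u32 demo_type_of(struct net *net)
{
	/* both helpers resolve at compile time via _Generic() */
	struct ns_common *nsc = to_ns_common(net);	/* &net->ns */

	return nsc->ns_type;	/* == ns_common_type(net) == CLONE_NEWNET */
}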
+
+#endif /* _LINUX_NS_COMMON_TYPES_H */
diff --git a/include/linux/ns/nstree_types.h b/include/linux/ns/nstree_types.h
new file mode 100644
index 000000000000..2fb28ee31efb
--- /dev/null
+++ b/include/linux/ns/nstree_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+#ifndef _LINUX_NSTREE_TYPES_H
+#define _LINUX_NSTREE_TYPES_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+/**
+ * struct ns_tree_root - Root of a namespace tree
+ * @ns_rb: Red-black tree root for efficient lookups
+ * @ns_list_head: List head for sequential iteration
+ *
+ * Each namespace tree maintains both an rbtree (for O(log n) lookups)
+ * and a list (for efficient sequential iteration). The list is kept in
+ * the same sorted order as the rbtree.
+ */
+struct ns_tree_root {
+ struct rb_root ns_rb;
+ struct list_head ns_list_head;
+};
+
+/**
+ * struct ns_tree_node - Node in a namespace tree
+ * @ns_node: Red-black tree node
+ * @ns_list_entry: List entry for sequential iteration
+ *
+ * Represents a namespace's position in a tree. Each namespace has
+ * multiple tree nodes for different trees (unified, per-type, owner).
+ */
+struct ns_tree_node {
+ struct rb_node ns_node;
+ struct list_head ns_list_entry;
+};
+
+/**
+ * struct ns_tree - Namespace tree nodes and active reference count
+ * @ns_id: Unique namespace identifier
+ * @__ns_ref_active: Active reference count (do not use directly)
+ * @ns_unified_node: Node in the global namespace tree
+ * @ns_tree_node: Node in the per-type namespace tree
+ * @ns_owner_node: Node in the owner namespace's tree of owned namespaces
+ * @ns_owner_root: Root of the tree of namespaces owned by this namespace
+ * (only used when this namespace is an owner)
+ */
+struct ns_tree {
+ u64 ns_id;
+ atomic_t __ns_ref_active;
+ struct ns_tree_node ns_unified_node;
+ struct ns_tree_node ns_tree_node;
+ struct ns_tree_node ns_owner_node;
+ struct ns_tree_root ns_owner_root;
+};
+
+#endif /* _LINUX_NSTREE_TYPES_H */
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 7d22ea50b098..825f5865bfc5 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -2,15 +2,149 @@
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/ns/ns_common_types.h>
#include <linux/refcount.h>
+#include <linux/vfsdebug.h>
+#include <uapi/linux/sched.h>
+#include <uapi/linux/nsfs.h>
-struct proc_ns_operations;
+bool is_current_namespace(struct ns_common *ns);
+int __ns_common_init(struct ns_common *ns, u32 ns_type, const struct proc_ns_operations *ops, int inum);
+void __ns_common_free(struct ns_common *ns);
+struct ns_common *__must_check ns_owner(struct ns_common *ns);
-struct ns_common {
- struct dentry *stashed;
- const struct proc_ns_operations *ops;
- unsigned int inum;
- refcount_t count;
-};
+static __always_inline bool is_ns_init_inum(const struct ns_common *ns)
+{
+ VFS_WARN_ON_ONCE(ns->inum == 0);
+ return unlikely(in_range(ns->inum, MNT_NS_INIT_INO,
+ IPC_NS_INIT_INO - MNT_NS_INIT_INO + 1));
+}
+
+static __always_inline bool is_ns_init_id(const struct ns_common *ns)
+{
+ VFS_WARN_ON_ONCE(ns->ns_id == 0);
+ return ns->ns_id <= NS_LAST_INIT_ID;
+}
+
+#define NS_COMMON_INIT(nsname) \
+{ \
+ .ns_type = ns_common_type(&nsname), \
+ .ns_id = ns_init_id(&nsname), \
+ .inum = ns_init_inum(&nsname), \
+ .ops = to_ns_operations(&nsname), \
+ .stashed = NULL, \
+ .__ns_ref = REFCOUNT_INIT(1), \
+ .__ns_ref_active = ATOMIC_INIT(1), \
+ .ns_unified_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_unified_node.ns_list_entry), \
+ .ns_tree_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_tree_node.ns_list_entry), \
+ .ns_owner_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_owner_node.ns_list_entry), \
+ .ns_owner_root.ns_list_head = LIST_HEAD_INIT(nsname.ns.ns_owner_root.ns_list_head), \
+}
+
+#define ns_common_init(__ns) \
+ __ns_common_init(to_ns_common(__ns), \
+ ns_common_type(__ns), \
+ to_ns_operations(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_inum(__ns) : 0))
+
+#define ns_common_init_inum(__ns, __inum) \
+ __ns_common_init(to_ns_common(__ns), \
+ ns_common_type(__ns), \
+ to_ns_operations(__ns), \
+ __inum)
+
+#define ns_common_free(__ns) __ns_common_free(to_ns_common((__ns)))
+
+static __always_inline __must_check int __ns_ref_active_read(const struct ns_common *ns)
+{
+ return atomic_read(&ns->__ns_ref_active);
+}
+
+static __always_inline __must_check int __ns_ref_read(const struct ns_common *ns)
+{
+ return refcount_read(&ns->__ns_ref);
+}
+
+static __always_inline __must_check bool __ns_ref_put(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return false;
+ }
+ if (refcount_dec_and_test(&ns->__ns_ref)) {
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns));
+ return true;
+ }
+ return false;
+}
+
+static __always_inline __must_check bool __ns_ref_get(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return true;
+ }
+ if (refcount_inc_not_zero(&ns->__ns_ref))
+ return true;
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns));
+ return false;
+}
+
+static __always_inline void __ns_ref_inc(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return;
+ }
+ refcount_inc(&ns->__ns_ref);
+}
+
+static __always_inline __must_check bool __ns_ref_dec_and_lock(struct ns_common *ns,
+ spinlock_t *ns_lock)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return false;
+ }
+ return refcount_dec_and_lock(&ns->__ns_ref, ns_lock);
+}
+
+#define ns_ref_read(__ns) __ns_ref_read(to_ns_common((__ns)))
+#define ns_ref_inc(__ns) \
+ do { if (__ns) __ns_ref_inc(to_ns_common((__ns))); } while (0)
+#define ns_ref_get(__ns) \
+ ((__ns) ? __ns_ref_get(to_ns_common((__ns))) : false)
+#define ns_ref_put(__ns) \
+ ((__ns) ? __ns_ref_put(to_ns_common((__ns))) : false)
+#define ns_ref_put_and_lock(__ns, __ns_lock) \
+ ((__ns) ? __ns_ref_dec_and_lock(to_ns_common((__ns)), __ns_lock) : false)
+
+#define ns_ref_active_read(__ns) \
+ ((__ns) ? __ns_ref_active_read(to_ns_common(__ns)) : 0)
+
+void __ns_ref_active_put(struct ns_common *ns);
+
+#define ns_ref_active_put(__ns) \
+ do { if (__ns) __ns_ref_active_put(to_ns_common(__ns)); } while (0)
+
+static __always_inline struct ns_common *__must_check ns_get_unless_inactive(struct ns_common *ns)
+{
+ if (!__ns_ref_active_read(ns)) {
+ VFS_WARN_ON_ONCE(is_ns_init_id(ns));
+ return NULL;
+ }
+ if (!__ns_ref_get(ns))
+ return NULL;
+ return ns;
+}
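
A lookup sketch combining this with ns_tree_lookup_rcu() from this series (function name and error handling are illustrative):

static struct ns_common *demo_lookup(u64 ns_id)
{
	struct ns_common *ns;

	rcu_read_lock();
	ns = ns_tree_lookup_rcu(ns_id, CLONE_NEWNET);
	if (ns)
		ns = ns_get_unless_inactive(ns);
	rcu_read_unlock();
	return ns;	/* NULL if absent or inactive */
}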
+
+void __ns_ref_active_get(struct ns_common *ns);
+
+#define ns_ref_active_get(__ns) \
+ do { if (__ns) __ns_ref_active_get(to_ns_common(__ns)); } while (0)
#endif
diff --git a/include/linux/nsfs.h b/include/linux/nsfs.h
new file mode 100644
index 000000000000..731b67fc2fec
--- /dev/null
+++ b/include/linux/nsfs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+
+#ifndef _LINUX_NSFS_H
+#define _LINUX_NSFS_H
+
+#include <linux/ns_common.h>
+#include <linux/cred.h>
+#include <linux/pid_namespace.h>
+
+struct path;
+struct task_struct;
+struct proc_ns_operations;
+
+int ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+ void *private_data);
+
+bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
+
+int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+void nsfs_init(void);
+
+#define __current_namespace_from_type(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: current->nsproxy->cgroup_ns, \
+ struct ipc_namespace *: current->nsproxy->ipc_ns, \
+ struct net *: current->nsproxy->net_ns, \
+ struct pid_namespace *: task_active_pid_ns(current), \
+ struct mnt_namespace *: current->nsproxy->mnt_ns, \
+ struct time_namespace *: current->nsproxy->time_ns, \
+ struct user_namespace *: current_user_ns(), \
+ struct uts_namespace *: current->nsproxy->uts_ns)
+
+#define current_in_namespace(__ns) (__current_namespace_from_type(__ns) == __ns)
+
+void nsproxy_ns_active_get(struct nsproxy *ns);
+void nsproxy_ns_active_put(struct nsproxy *ns);
+
+#endif /* _LINUX_NSFS_H */
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 5601d14e2886..5a67648721c7 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -92,11 +92,14 @@ static inline struct cred *nsset_cred(struct nsset *set)
*
*/
-int copy_namespaces(unsigned long flags, struct task_struct *tsk);
-void exit_task_namespaces(struct task_struct *tsk);
+int copy_namespaces(u64 flags, struct task_struct *tsk);
+void switch_cred_namespaces(const struct cred *old, const struct cred *new);
+void exit_nsproxy_namespaces(struct task_struct *tsk);
+void get_cred_namespaces(struct task_struct *tsk);
+void exit_cred_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
int exec_task_namespaces(void);
-void free_nsproxy(struct nsproxy *ns);
+void deactivate_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
int __init nsproxy_cache_init(void);
@@ -104,7 +107,7 @@ int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
if (refcount_dec_and_test(&ns->count))
- free_nsproxy(ns);
+ deactivate_nsproxy(ns);
}
static inline void get_nsproxy(struct nsproxy *ns)
@@ -112,4 +115,6 @@ static inline void get_nsproxy(struct nsproxy *ns)
refcount_inc(&ns->count);
}
+DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T))
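
With the DEFINE_FREE() helper above, callers can attach a scope guard (requires linux/cleanup.h; the function is a sketch):

static void demo(struct nsproxy *p)
{
	struct nsproxy *ns __free(put_nsproxy) = NULL;

	get_nsproxy(p);
	ns = p;
	/* ... use ns; put_nsproxy(ns) runs automatically on return */
}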
+
#endif
diff --git a/include/linux/nstree.h b/include/linux/nstree.h
new file mode 100644
index 000000000000..175e4625bfa6
--- /dev/null
+++ b/include/linux/nstree.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+#ifndef _LINUX_NSTREE_H
+#define _LINUX_NSTREE_H
+
+#include <linux/ns/nstree_types.h>
+#include <linux/nsproxy.h>
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+#include <linux/rculist.h>
+#include <linux/cookie.h>
+#include <uapi/linux/nsfs.h>
+
+struct ns_common;
+
+extern struct ns_tree_root cgroup_ns_tree;
+extern struct ns_tree_root ipc_ns_tree;
+extern struct ns_tree_root mnt_ns_tree;
+extern struct ns_tree_root net_ns_tree;
+extern struct ns_tree_root pid_ns_tree;
+extern struct ns_tree_root time_ns_tree;
+extern struct ns_tree_root user_ns_tree;
+extern struct ns_tree_root uts_ns_tree;
+
+void ns_tree_node_init(struct ns_tree_node *node);
+void ns_tree_root_init(struct ns_tree_root *root);
+bool ns_tree_node_empty(const struct ns_tree_node *node);
+struct rb_node *ns_tree_node_add(struct ns_tree_node *node,
+ struct ns_tree_root *root,
+ int (*cmp)(struct rb_node *, const struct rb_node *));
+void ns_tree_node_del(struct ns_tree_node *node, struct ns_tree_root *root);
+
+#define to_ns_tree(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &(cgroup_ns_tree), \
+ struct ipc_namespace *: &(ipc_ns_tree), \
+ struct net *: &(net_ns_tree), \
+ struct pid_namespace *: &(pid_ns_tree), \
+ struct mnt_namespace *: &(mnt_ns_tree), \
+ struct time_namespace *: &(time_ns_tree), \
+ struct user_namespace *: &(user_ns_tree), \
+ struct uts_namespace *: &(uts_ns_tree))
+
+#define ns_tree_gen_id(__ns) \
+ __ns_tree_gen_id(to_ns_common(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_id(__ns) : 0))
+
+u64 __ns_tree_gen_id(struct ns_common *ns, u64 id);
+void __ns_tree_add_raw(struct ns_common *ns, struct ns_tree_root *ns_tree);
+void __ns_tree_remove(struct ns_common *ns, struct ns_tree_root *ns_tree);
+struct ns_common *ns_tree_lookup_rcu(u64 ns_id, int ns_type);
+struct ns_common *__ns_tree_adjoined_rcu(struct ns_common *ns,
+ struct ns_tree_root *ns_tree,
+ bool previous);
+
+static inline void __ns_tree_add(struct ns_common *ns, struct ns_tree_root *ns_tree, u64 id)
+{
+ __ns_tree_gen_id(ns, id);
+ __ns_tree_add_raw(ns, ns_tree);
+}
+
+/**
+ * ns_tree_add_raw - Add a namespace to a namespace tree
+ * @ns: Namespace to add
+ *
+ * This function adds a namespace to the appropriate namespace tree
+ * without assigning an id.
+ */
+#define ns_tree_add_raw(__ns) __ns_tree_add_raw(to_ns_common(__ns), to_ns_tree(__ns))
+
+/**
+ * ns_tree_add - Add a namespace to a namespace tree
+ * @ns: Namespace to add
+ *
+ * This function assigns a new id to the namespace and adds it to the
+ * appropriate namespace tree and list.
+ */
+#define ns_tree_add(__ns) \
+ __ns_tree_add(to_ns_common(__ns), to_ns_tree(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_id(__ns) : 0))
+
+/**
+ * ns_tree_remove - Remove a namespace from a namespace tree
+ * @ns: Namespace to remove
+ *
+ * This function removes a namespace from the appropriate namespace
+ * tree and list.
+ */
+#define ns_tree_remove(__ns) __ns_tree_remove(to_ns_common(__ns), to_ns_tree(__ns))
+
+#define ns_tree_adjoined_rcu(__ns, __previous) \
+ __ns_tree_adjoined_rcu(to_ns_common(__ns), to_ns_tree(__ns), __previous)
+
+#define ns_tree_active(__ns) (!RB_EMPTY_NODE(&to_ns_common(__ns)->ns_tree_node.ns_node))
+
+#endif /* _LINUX_NSTREE_H */
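As a hedged sketch of the intended call pattern (assuming the namespace type is one already wired into to_ns_tree(), here uts_namespace):

	/* on creation: assign an id and publish in uts_ns_tree */
	static void example_publish(struct uts_namespace *new_ns)
	{
		ns_tree_add(new_ns);
	}

	/* on teardown: unpublish only if it was ever inserted */
	static void example_unpublish(struct uts_namespace *ns)
	{
		if (ns_tree_active(ns))
			ns_tree_remove(ns);
	}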
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 1d43371fafd2..e6baaf6051bc 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -3,18 +3,15 @@
#define _LINUX_NUMA_H
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/nodemask.h>
-#ifdef CONFIG_NODES_SHIFT
-#define NODES_SHIFT CONFIG_NODES_SHIFT
-#else
-#define NODES_SHIFT 0
-#endif
-
-#define MAX_NUMNODES (1 << NODES_SHIFT)
-
-#define NUMA_NO_NODE (-1)
#define NUMA_NO_MEMBLK (-1)
+static inline bool numa_valid_node(int nid)
+{
+ return nid >= 0 && nid < MAX_NUMNODES;
+}
+
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
#define __initdata_or_meminfo
@@ -25,9 +22,17 @@
#ifdef CONFIG_NUMA
#include <asm/sparsemem.h>
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+void __init alloc_node_data(int nid);
+void __init alloc_offline_node_data(int nid);
+
/* Generic implementation available */
int numa_nearest_node(int node, unsigned int state);
+int nearest_node_nodemask(int node, nodemask_t *mask);
+
#ifndef memory_add_physaddr_to_nid
int memory_add_physaddr_to_nid(u64 start);
#endif
@@ -44,6 +49,11 @@ static inline int numa_nearest_node(int node, unsigned int state)
return NUMA_NO_NODE;
}
+static inline int nearest_node_nodemask(int node, nodemask_t *mask)
+{
+ return NUMA_NO_NODE;
+}
+
static inline int memory_add_physaddr_to_nid(u64 start)
{
return 0;
@@ -52,6 +62,8 @@ static inline int phys_to_target_node(u64 start)
{
return 0;
}
+
+static inline void alloc_offline_node_data(int nid) {}
#endif
#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
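A minimal sketch of the guard numa_valid_node() is meant to provide before indexing per-node state:

	static struct pglist_data *example_node_data(int nid)
	{
		/* rejects NUMA_NO_NODE and out-of-range ids before indexing */
		if (!numa_valid_node(nid))
			return NULL;
		return NODE_DATA(nid);
	}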
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
new file mode 100644
index 000000000000..991076cba7c5
--- /dev/null
+++ b/include/linux/numa_memblks.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NUMA_MEMBLKS_H
+#define __NUMA_MEMBLKS_H
+
+#ifdef CONFIG_NUMA_MEMBLKS
+#include <linux/types.h>
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_reset_distance(void);
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+int __init numa_add_reserved_memblk(int nid, u64 start, u64 end);
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down);
+
+extern int numa_distance_cnt;
+
+#ifdef CONFIG_NUMA_EMU
+extern int emu_nid_to_phys[MAX_NUMNODES];
+int numa_emu_cmdline(char *str);
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids);
+u64 __init numa_emu_dma_end(void);
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NUMA_EMU */
+
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(u64 start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
+
+#endif /* CONFIG_NUMA_MEMBLKS */
+
+#endif /* __NUMA_MEMBLKS_H */
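A hedged sketch of an architecture init path built on this API; the single range and SZ_1G (from linux/sizes.h) are illustrative values, not a real platform description:

	static int __init example_numa_init(void)
	{
		/* illustrative: node 0 covering [0, 1G) */
		return numa_add_memblk(0, 0, SZ_1G);
	}

	static void __init example_arch_numa_setup(void)
	{
		/* cleans up the meminfo and applies it, top-down allocation */
		if (numa_memblks_init(example_numa_init, true))
			pr_warn("NUMA init failed\n");
	}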
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index c1d0bc5d9624..60e069a6757f 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -40,5 +40,12 @@ int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
u8 *ctrl_key, size_t ctrl_key_len,
u8 *sess_key, size_t sess_key_len);
+int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *c1, u8 *c2, size_t hash_len,
+ u8 **ret_psk, size_t *ret_len);
+int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
+ char *subsysnqn, char *hostnqn, u8 **ret_digest);
+int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
+ u8 *psk_digest, u8 **ret_psk);
#endif /* _NVME_AUTH_H */
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 4109f1bd6128..9f6acadfe0c8 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -620,7 +620,7 @@ enum {
*
* Structure used between LLDD and nvmet-fc layer to represent the exchange
* context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
- * memory transfers, and its assocated cqe transfer).
+ * memory transfers, and its associated cqe transfer).
*
* The structure is allocated by the LLDD whenever a FCP CMD IU is received
* from the FC link. The address of the structure is passed to the nvmet-fc
@@ -920,6 +920,9 @@ struct nvmet_fc_target_port {
* further references to hosthandle.
* Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
*
+ * @host_traddr: called by the transport to retrieve the node name and
+ * port name of the host port address.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -975,6 +978,7 @@ struct nvmet_fc_target_template {
void (*ls_abort)(struct nvmet_fc_target_port *targetport,
void *hosthandle, struct nvmefc_ls_req *lsreq);
void (*host_release)(void *hosthandle);
+ int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn);
u32 max_hw_queues;
u16 max_sgl_segments;
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
index e10333d78dbb..ab8971afa973 100644
--- a/include/linux/nvme-keyring.h
+++ b/include/linux/nvme-keyring.h
@@ -6,15 +6,25 @@
#ifndef _NVME_KEYRING_H
#define _NVME_KEYRING_H
+#include <linux/key.h>
+
#if IS_ENABLED(CONFIG_NVME_KEYRING)
+struct key *nvme_tls_psk_refresh(struct key *keyring,
+ const char *hostnqn, const char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest);
key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn);
key_serial_t nvme_keyring_id(void);
-
+struct key *nvme_tls_key_lookup(key_serial_t key_id);
#else
-
+static inline struct key *nvme_tls_psk_refresh(struct key *keyring,
+		const char *hostnqn, const char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
const char *hostnqn, const char *subnqn)
{
@@ -24,5 +34,9 @@ static inline key_serial_t nvme_keyring_id(void)
{
return 0;
}
+static inline struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif /* !CONFIG_NVME_KEYRING */
#endif /* _NVME_KEYRING_H */
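A minimal sketch of the ERR_PTR contract callers are expected to honor, which also covers the !CONFIG_NVME_KEYRING stubs above:

	static struct key *example_get_tls_key(key_serial_t key_id)
	{
		struct key *key = nvme_tls_key_lookup(key_id);

		if (IS_ERR(key)) {
			pr_err("TLS key %08x unusable: %ld\n",
			       key_id, PTR_ERR(key));
			return NULL;
		}
		return key;
	}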
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index eb2f04d636c8..97c5f00b9aa3 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -25,6 +25,7 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_NO_RSC = 0x06,
NVME_RDMA_CM_INVALID_IRD = 0x07,
NVME_RDMA_CM_INVALID_ORD = 0x08,
+ NVME_RDMA_CM_INVALID_CNTLID = 0x09,
};
static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
@@ -46,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
return "invalid IRD";
case NVME_RDMA_CM_INVALID_ORD:
return "Invalid ORD";
+ case NVME_RDMA_CM_INVALID_CNTLID:
+ return "invalid controller ID";
default:
return "unrecognized reason";
}
@@ -64,7 +67,8 @@ struct nvme_rdma_cm_req {
__le16 qid;
__le16 hrqsize;
__le16 hsqsize;
- u8 rsvd[24];
+ __le16 cntlid;
+ u8 rsvd[22];
};
/**
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index e07e8978d691..e435250fcb4d 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -13,6 +13,8 @@
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
#define NVME_TCP_MIN_MAXH2CDATA 4096
+#define NVME_TCP_MIN_C2HTERM_PLEN 24
+#define NVME_TCP_MAX_C2HTERM_PLEN 152
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 425573202295..655d194f8e72 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -25,6 +25,9 @@
#define NVME_NSID_ALL 0xffffffff
+/* Special NSSR value, 'NVMe' */
+#define NVME_SUBSYS_RESET 0x4E564D65
+
enum nvme_subsys_type {
/* Referral to another discovery type target subsystem */
NVME_NQN_DISC = 1,
@@ -61,6 +64,7 @@ enum {
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
+ NVMF_TRTYPE_PCI = 0, /* PCI */
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
NVMF_TRTYPE_TCP = 3, /* TCP/IP */
@@ -85,10 +89,11 @@ enum {
enum {
NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_INVALID = 0xff,
};
-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
- * RDMA_QPTYPE field
+/* RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
*/
enum {
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
@@ -110,6 +115,7 @@ enum {
NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_INVALID = 0xff,
};
#define NVME_AQ_DEPTH 32
@@ -193,28 +199,54 @@ enum {
#define NVME_NVM_IOSQES 6
#define NVME_NVM_IOCQES 4
+/*
+ * Controller Configuration (CC) register (Offset 14h)
+ */
enum {
+ /* Enable (EN): bit 0 */
NVME_CC_ENABLE = 1 << 0,
NVME_CC_EN_SHIFT = 0,
+
+ /* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */
+
+ /* I/O Command Set Selected (CSS): bits 06:04 */
NVME_CC_CSS_SHIFT = 4,
- NVME_CC_MPS_SHIFT = 7,
- NVME_CC_AMS_SHIFT = 11,
- NVME_CC_SHN_SHIFT = 14,
- NVME_CC_IOSQES_SHIFT = 16,
- NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
- NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
+
+ /* Memory Page Size (MPS): bits 10:07 */
+ NVME_CC_MPS_SHIFT = 7,
+ NVME_CC_MPS_MASK = 0xf << NVME_CC_MPS_SHIFT,
+
+ /* Arbitration Mechanism Selected (AMS): bits 13:11 */
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_AMS_MASK = 7 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
+
+ /* Shutdown Notification (SHN): bits 15:14 */
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
- NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
+
+ /* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOSQES_MASK = 0xf << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
+
+ /* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_IOCQES_MASK = 0xf << NVME_CC_IOCQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
+
+ /* Controller Ready Independent of Media Enable (CRIME): bit 24 */
NVME_CC_CRIME = 1 << 24,
+
+	/* Bits 31:25 are reserved (NVMe Base Specification rev 2.1) */
};
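For illustration, a hedged sketch of composing a CC value from these fields, in the style of a controller driver's enable path; the page_shift parameter (12 for 4K pages, giving an MPS field of 0) is an assumption:

	static u32 example_build_cc(unsigned int page_shift)
	{
		u32 cc;

		cc  = NVME_CC_CSS_NVM;
		cc |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
		cc |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
		cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;
		/* write cc to the register, then set NVME_CC_ENABLE */
		return cc;
	}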
enum {
@@ -270,6 +302,8 @@ enum nvme_ctrl_attr {
NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
NVME_CTRL_ATTR_TBKAS = (1 << 6),
NVME_CTRL_ATTR_ELBAS = (1 << 15),
+ NVME_CTRL_ATTR_RHII = (1 << 18),
+ NVME_CTRL_ATTR_FDPS = (1 << 19),
};
struct nvme_id_ctrl {
@@ -322,7 +356,8 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[4];
+ __le16 nvmsetidmax;
+ __le16 endgidmax;
__u8 anatt;
__u8 anacap;
__le32 anagrpmax;
@@ -383,6 +418,11 @@ enum {
NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
+ NVME_CTRL_SGLS_BYTE_ALIGNED = 1,
+ NVME_CTRL_SGLS_DWORD_ALIGNED = 2,
+ NVME_CTRL_SGLS_KSDBDS = 1 << 2,
+ NVME_CTRL_SGLS_MSDS = 1 << 19,
+ NVME_CTRL_SGLS_SAOS = 1 << 20,
};
struct nvme_lbaf {
@@ -480,6 +520,9 @@ enum {
NVME_ID_NS_NVM_STS_MASK = 0x7f,
NVME_ID_NS_NVM_GUARD_SHIFT = 7,
NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+ NVME_ID_NS_NVM_QPIF_SHIFT = 9,
+ NVME_ID_NS_NVM_QPIF_MASK = 0xf,
+ NVME_ID_NS_NVM_QPIFS = 1 << 3,
};
static inline __u8 nvme_elbaf_sts(__u32 elbaf)
@@ -492,6 +535,11 @@ static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
}
+static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
+}
+
struct nvme_id_ctrl_nvm {
__u8 vsl;
__u8 wzsl;
@@ -509,6 +557,7 @@ enum {
NVME_ID_CNS_NS_DESC_LIST = 0x03,
NVME_ID_CNS_CS_NS = 0x05,
NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
@@ -517,6 +566,7 @@ enum {
NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
NVME_ID_CNS_NS_GRANULARITY = 0x16,
NVME_ID_CNS_UUID_LIST = 0x17,
+ NVME_ID_CNS_ENDGRP_LIST = 0x19,
};
enum {
@@ -547,6 +597,8 @@ enum {
NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
NVME_NS_NMIC_SHARED = 1 << 0,
+ NVME_NS_ROTATIONAL = 1 << 4,
+ NVME_NS_VWC_NOT_PRESENT = 1 << 5,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
@@ -571,6 +623,7 @@ enum {
NVME_NVM_NS_16B_GUARD = 0,
NVME_NVM_NS_32B_GUARD = 1,
NVME_NVM_NS_64B_GUARD = 2,
+ NVME_NVM_NS_QTYPE_GUARD = 3,
};
static inline __u8 nvme_lbaf_index(__u8 flbas)
@@ -603,6 +656,78 @@ enum {
NVME_NIDT_CSI = 0x04,
};
+struct nvme_endurance_group_log {
+ __u8 egcw;
+ __u8 egfeat;
+ __u8 rsvd2;
+ __u8 avsp;
+ __u8 avspt;
+ __u8 pused;
+ __le16 did;
+ __u8 rsvd8[24];
+ __u8 ee[16];
+ __u8 dur[16];
+ __u8 duw[16];
+ __u8 muw[16];
+ __u8 hrc[16];
+ __u8 hwc[16];
+ __u8 mdie[16];
+ __u8 neile[16];
+ __u8 tegcap[16];
+ __u8 uegcap[16];
+ __u8 rsvd192[320];
+};
+
+struct nvme_rotational_media_log {
+ __le16 endgid;
+ __le16 numa;
+ __le16 nrs;
+ __u8 rsvd6[2];
+ __le32 spinc;
+ __le32 fspinc;
+ __le32 ldc;
+ __le32 fldc;
+ __u8 rsvd24[488];
+};
+
+struct nvme_fdp_config {
+ __u8 flags;
+#define FDPCFG_FDPE (1U << 0)
+ __u8 fdpcidx;
+ __le16 reserved;
+};
+
+struct nvme_fdp_ruh_desc {
+ __u8 ruht;
+ __u8 reserved[3];
+};
+
+struct nvme_fdp_config_desc {
+ __le16 dsze;
+ __u8 fdpa;
+ __u8 vss;
+ __le32 nrg;
+ __le16 nruh;
+ __le16 maxpids;
+ __le32 nns;
+ __le64 runs;
+ __le32 erutl;
+ __u8 rsvd28[36];
+ struct nvme_fdp_ruh_desc ruhs[];
+};
+
+struct nvme_fdp_config_log {
+ __le16 numfdpc;
+ __u8 ver;
+ __u8 rsvd3;
+ __le32 sze;
+ __u8 rsvd8[8];
+ /*
+ * This is followed by variable number of nvme_fdp_config_desc
+ * structures, but sparse doesn't like nested variable sized arrays.
+ */
+};
+
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@@ -829,6 +954,7 @@ enum nvme_opcode {
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_io_mgmt_recv = 0x12,
nvme_cmd_resv_release = 0x15,
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
@@ -850,6 +976,7 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
nvme_opcode_name(nvme_cmd_resv_release), \
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
@@ -973,8 +1100,8 @@ struct nvme_rw_command {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum {
@@ -1001,6 +1128,7 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_RW_DTYPE_DPLCMT = 2 << 4,
NVME_WZ_DEAC = 1 << 9,
};
@@ -1043,8 +1171,8 @@ struct nvme_write_zeroes_cmd {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum nvme_zone_mgmt_action {
@@ -1088,6 +1216,38 @@ struct nvme_zone_mgmt_recv_cmd {
__le32 cdw14[2];
};
+struct nvme_io_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __u8 mo;
+ __u8 rsvd11;
+ __u16 mos;
+ __le32 numd;
+ __le32 cdw12[4];
+};
+
+enum {
+ NVME_IO_MGMT_RECV_MO_RUHS = 1,
+};
+
+struct nvme_fdp_ruh_status_desc {
+ __le16 pid;
+ __le16 ruhid;
+ __le32 earutr;
+ __le64 ruamw;
+ __u8 reserved[16];
+};
+
+struct nvme_fdp_ruh_status {
+ __u8 rsvd0[14];
+ __le16 nruhsd;
+ struct nvme_fdp_ruh_status_desc ruhsd[];
+};
+
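A hedged sketch of consuming the variable-length reclaim unit handle status returned by I/O Management Receive (MO = RUHS); the buffer is assumed to be large enough for the nruhsd descriptors it advertises:

	static void example_parse_ruh_status(const struct nvme_fdp_ruh_status *buf)
	{
		u16 i, nr = le16_to_cpu(buf->nruhsd);

		for (i = 0; i < nr; i++)
			pr_debug("placement id %u -> ruh %u\n",
				 le16_to_cpu(buf->ruhsd[i].pid),
				 le16_to_cpu(buf->ruhsd[i].ruhid));
	}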
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
@@ -1223,6 +1383,7 @@ enum {
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SANITIZE = 0x17,
+ NVME_FEAT_FDP = 0x1d,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1230,6 +1391,7 @@ enum {
NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_FEAT_VENDOR_START = 0xC0,
NVME_FEAT_VENDOR_END = 0xFF,
+ NVME_LOG_SUPPORTED = 0x00,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -1240,6 +1402,9 @@ enum {
NVME_LOG_TELEMETRY_CTRL = 0x08,
NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
+ NVME_LOG_FEATURES = 0x12,
+ NVME_LOG_RMI = 0x16,
+ NVME_LOG_FDP_CONFIGS = 0x20,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -1247,6 +1412,24 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+struct nvme_supported_log {
+ __le32 lids[256];
+};
+
+enum {
+ NVME_LIDS_LSUPP = 1 << 0,
+};
+
+struct nvme_supported_features_log {
+ __le32 fis[256];
+};
+
+enum {
+ NVME_FIS_FSUPP = 1 << 0,
+ NVME_FIS_NSCPE = 1 << 20,
+ NVME_FIS_CSCPE = 1 << 21,
+};
+
/* NVMe Namespace Write Protect State */
enum {
NVME_NS_NO_WRITE_PROTECT = 0,
@@ -1267,7 +1450,8 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u8 rsvd11[3];
+ __le16 cnssid;
+ __u8 rsvd11;
__u8 csi;
__u32 rsvd12[4];
};
@@ -1375,7 +1559,7 @@ struct nvme_get_log_page_command {
__u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
- __u16 rsvd11;
+ __le16 lsi;
union {
struct {
__le32 lpol;
@@ -1664,6 +1848,13 @@ enum {
NVME_AUTH_DHGROUP_INVALID = 0xff,
};
+enum {
+ NVME_AUTH_SECP_NOSC = 0x00,
+ NVME_AUTH_SECP_SC = 0x01,
+ NVME_AUTH_SECP_NEWTLSPSK = 0x02,
+ NVME_AUTH_SECP_REPLACETLSPSK = 0x03,
+};
+
union nvmf_auth_protocol {
struct nvmf_auth_dhchap_protocol_descriptor dhchap;
};
@@ -1808,6 +1999,7 @@ struct nvme_command {
struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
+ struct nvme_io_mgmt_recv_cmd imr;
};
};
@@ -1816,6 +2008,46 @@ static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
return cmd->common.opcode == nvme_fabrics_command;
}
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+ return "Fabrics Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
+{
+ return qid ? nvme_get_opcode_str(opcode) :
+ nvme_get_admin_opcode_str(opcode);
+}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
+
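The usual call site for these helpers is error logging; a minimal sketch, where qid 0 selects the admin opcode table and fabrics commands are decoded via their fctype:

	static void example_log_failed_cmd(int qid, const struct nvme_command *cmd,
					   u16 status)
	{
		pr_err("%s failed: %s\n",
		       nvme_fabrics_opcode_str(qid, cmd),
		       nvme_get_error_status_str(status));
	}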
struct nvme_error_slot {
__le64 error_count;
__le16 sqid;
@@ -1846,6 +2078,7 @@ enum {
/*
* Generic Command Status:
*/
+ NVME_SCT_GENERIC = 0x0,
NVME_SC_SUCCESS = 0x0,
NVME_SC_INVALID_OPCODE = 0x1,
NVME_SC_INVALID_FIELD = 0x2,
@@ -1893,6 +2126,7 @@ enum {
/*
* Command Specific Status:
*/
+ NVME_SCT_COMMAND_SPECIFIC = 0x100,
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -1921,7 +2155,7 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
- NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
+ NVME_SC_SELF_TEST_IN_PROGRESS = 0x11d,
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
NVME_SC_CTRL_ID_INVALID = 0x11f,
NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
@@ -1937,7 +2171,7 @@ enum {
NVME_SC_BAD_ATTRIBUTES = 0x180,
NVME_SC_INVALID_PI = 0x181,
NVME_SC_READ_ONLY = 0x182,
- NVME_SC_ONCS_NOT_SUPPORTED = 0x183,
+ NVME_SC_CMD_SIZE_LIM_EXCEEDED = 0x183,
/*
* I/O Command Set Specific - Fabrics commands:
@@ -1966,6 +2200,7 @@ enum {
/*
* Media and Data Integrity Errors:
*/
+ NVME_SCT_MEDIA_ERROR = 0x200,
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -1978,6 +2213,7 @@ enum {
/*
* Path-related Errors:
*/
+ NVME_SCT_PATH = 0x300,
NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
@@ -1986,11 +2222,17 @@ enum {
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
- NVME_SC_CRD = 0x1800,
- NVME_SC_MORE = 0x2000,
- NVME_SC_DNR = 0x4000,
+ NVME_SC_MASK = 0x00ff, /* Status Code */
+ NVME_SCT_MASK = 0x0700, /* Status Code Type */
+ NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
+
+ NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */
+ NVME_STATUS_MORE = 0x2000,
+ NVME_STATUS_DNR = 0x4000, /* Do Not Retry */
};
+#define NVME_SCT(status) ((status) >> 8 & 7)
+
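A minimal sketch of splitting a completion status with the new masks, as retry logic would:

	static bool example_is_retryable_path_error(u16 status)
	{
		if (status & NVME_STATUS_DNR)	/* Do Not Retry set */
			return false;
		return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
	}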
struct nvme_completion {
/*
* Used by Admin and Fabrics commands to return data:
@@ -2013,4 +2255,81 @@ struct nvme_completion {
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
+enum {
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00,
+};
+
+enum {
+ NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00,
+ NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01,
+ NVME_PR_LOG_RESERVATION_RELEASED = 0x02,
+ NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03,
+};
+
+enum {
+ NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1,
+ NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2,
+ NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3,
+};
+
+struct nvme_pr_log {
+ __le64 count;
+ __u8 type;
+ __u8 nr_pages;
+ __u8 rsvd1[2];
+ __le32 nsid;
+ __u8 rsvd2[48];
+};
+
+struct nvmet_pr_register_data {
+ __le64 crkey;
+ __le64 nrkey;
+};
+
+struct nvmet_pr_acquire_data {
+ __le64 crkey;
+ __le64 prkey;
+};
+
+struct nvmet_pr_release_data {
+ __le64 crkey;
+};
+
+enum nvme_pr_capabilities {
+ NVME_PR_SUPPORT_PTPL = 1,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6,
+ NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7,
+};
+
+enum nvme_pr_register_action {
+ NVME_PR_REGISTER_ACT_REG = 0,
+ NVME_PR_REGISTER_ACT_UNREG = 1,
+ NVME_PR_REGISTER_ACT_REPLACE = 1 << 1,
+};
+
+enum nvme_pr_acquire_action {
+ NVME_PR_ACQUIRE_ACT_ACQUIRE = 0,
+ NVME_PR_ACQUIRE_ACT_PREEMPT = 1,
+ NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1,
+};
+
+enum nvme_pr_release_action {
+ NVME_PR_RELEASE_ACT_RELEASE = 0,
+ NVME_PR_RELEASE_ACT_CLEAR = 1,
+};
+
+enum nvme_pr_change_ptpl {
+ NVME_PR_CPTPL_NO_CHANGE = 0,
+ NVME_PR_CPTPL_RESV = 1 << 30,
+ NVME_PR_CPTPL_CLEARED = 2 << 30,
+ NVME_PR_CPTPL_PERSIST = 3 << 30,
+};
+
+#define NVME_PR_IGNORE_KEY (1 << 3)
+
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 3ebeaa0ded00..f3b13da78aac 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -92,8 +92,8 @@ struct nvmem_cell_info {
* @read_only: Device is read-only.
* @root_only: Device is accessibly to root only.
* @of_node: If given, this will be used instead of the parent's of_node.
- * @reg_read: Callback to read data.
- * @reg_write: Callback to write data.
+ * @reg_read: Callback to read data; return zero if successful.
+ * @reg_write: Callback to write data; return zero if successful.
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
@@ -103,7 +103,7 @@ struct nvmem_cell_info {
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
- * generated with ida_simple_get() and provided id field is ignored.
+ * generated with ida_alloc() and the provided id field is ignored.
*
* Note: Specifying name and setting id to -1 implies a unique device
* whose name is provided as-is (kept unaltered).
@@ -138,25 +138,6 @@ struct nvmem_config {
};
/**
- * struct nvmem_cell_table - NVMEM cell definitions for given provider
- *
- * @nvmem_name: Provider name.
- * @cells: Array of cell definitions.
- * @ncells: Number of cell definitions in the array.
- * @node: List node.
- *
- * This structure together with related helper functions is provided for users
- * that don't can't access the nvmem provided structure but wish to register
- * cell definitions for it e.g. board files registering an EEPROM device.
- */
-struct nvmem_cell_table {
- const char *nvmem_name;
- const struct nvmem_cell_info *cells;
- size_t ncells;
- struct list_head node;
-};
-
-/**
* struct nvmem_layout - NVMEM layout definitions
*
* @dev: Device-model layout device.
@@ -190,9 +171,6 @@ void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
-void nvmem_add_cell_table(struct nvmem_cell_table *table);
-void nvmem_del_cell_table(struct nvmem_cell_table *table);
-
int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info);
@@ -223,8 +201,6 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
return nvmem_register(c);
}
-static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
-static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
static inline int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info)
{
diff --git a/include/linux/oa_tc6.h b/include/linux/oa_tc6.h
new file mode 100644
index 000000000000..15f58e3c56c7
--- /dev/null
+++ b/include/linux/oa_tc6.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface framework
+ *
+ * Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/spi/spi.h>
+
+struct oa_tc6;
+
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev);
+void oa_tc6_exit(struct oa_tc6 *tc6);
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value);
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value);
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb);
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
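A hedged sketch of a MAC-PHY driver probe built on this framework; register address 0x0000 (IDVER in the OPEN Alliance register map) and the minimal error handling are illustrative assumptions:

	static int example_macphy_probe(struct spi_device *spi)
	{
		struct net_device *netdev;
		struct oa_tc6 *tc6;
		u32 id;

		netdev = devm_alloc_etherdev(&spi->dev, 0);
		if (!netdev)
			return -ENOMEM;

		tc6 = oa_tc6_init(spi, netdev);
		if (!tc6)
			return -ENODEV;

		if (oa_tc6_read_register(tc6, 0x0000, &id))
			return -EIO;	/* unwinding of tc6 omitted for brevity */

		dev_info(&spi->dev, "MAC-PHY id: 0x%08x\n", id);
		return register_netdev(netdev);
	}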
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
index 78021777df46..6df5b887dc54 100644
--- a/include/linux/objagg.h
+++ b/include/linux/objagg.h
@@ -8,7 +8,6 @@ struct objagg_ops {
size_t obj_size;
bool (*delta_check)(void *priv, const void *parent_obj,
const void *obj);
- int (*hints_obj_cmp)(const void *obj1, const void *obj2);
void * (*delta_create)(void *priv, void *parent_obj, void *obj);
void (*delta_destroy)(void *priv, void *delta_priv);
void * (*root_create)(void *priv, void *obj, unsigned int root_id);
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
index cb1758eaa2d3..b713a1fe7521 100644
--- a/include/linux/objpool.h
+++ b/include/linux/objpool.h
@@ -170,17 +170,16 @@ static inline void *objpool_pop(struct objpool_head *pool)
{
void *obj = NULL;
unsigned long flags;
- int i, cpu;
+ int start, cpu;
/* disable local irq to avoid preemption & interruption */
raw_local_irq_save(flags);
- cpu = raw_smp_processor_id();
- for (i = 0; i < pool->nr_possible_cpus; i++) {
+ start = raw_smp_processor_id();
+ for_each_possible_cpu_wrap(cpu, start) {
obj = __objpool_try_get_slot(pool, cpu);
if (obj)
break;
- cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
}
raw_local_irq_restore(flags);
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index b3b8d3dab52d..9a00e701454c 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -3,16 +3,16 @@
#define _LINUX_OBJTOOL_H
#include <linux/objtool_types.h>
+#include <linux/annotate.h>
#ifdef CONFIG_OBJTOOL
-#include <asm/asm.h>
-
#ifndef __ASSEMBLY__
-#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
"987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \
+ ANNOTATE_DATA_SPECIAL "\n\t" \
/* struct unwind_hint */ \
".long 987b - .\n\t" \
".short " __stringify(sp_offset) "\n\t" \
@@ -45,35 +45,21 @@
#define STACK_FRAME_NON_STANDARD_FP(func)
#endif
-#define ANNOTATE_NOENDBR \
- "986: \n\t" \
- ".pushsection .discard.noendbr\n\t" \
- ".long 986b\n\t" \
- ".popsection\n\t"
-
#define ASM_REACHABLE \
"998:\n\t" \
".pushsection .discard.reachable\n\t" \
".long 998b\n\t" \
".popsection\n\t"
-#else /* __ASSEMBLY__ */
+#define __ASM_BREF(label) label ## b
-/*
- * This macro indicates that the following intra-function call is valid.
- * Any non-annotated intra-function call will cause objtool to issue a warning.
- */
-#define ANNOTATE_INTRA_FUNCTION_CALL \
- 999: \
- .pushsection .discard.intra_function_calls; \
- .long 999b; \
- .popsection;
+#else /* __ASSEMBLY__ */
/*
* In asm, there are two kinds of code: normal C-type callable functions and
* the rest. The normal callable functions can be called by other code, and
* don't do anything unusual with the stack. Such normal callable functions
- * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * are annotated with SYM_FUNC_{START,END}. Most asm code falls in this
* category. In this case, no special debugging annotations are needed because
* objtool can automatically generate the ORC data for the ORC unwinder to read
* at runtime.
@@ -93,6 +79,7 @@
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
.Lhere_\@:
.pushsection .discard.unwind_hints
+ ANNOTATE_DATA_SPECIAL
/* struct unwind_hint */
.long .Lhere_\@ - .
.short \sp_offset
@@ -105,7 +92,7 @@
.macro STACK_FRAME_NON_STANDARD func:req
.pushsection .discard.func_stack_frame_non_standard, "aw"
- .long \func - .
+ .quad \func
.popsection
.endm
@@ -115,37 +102,6 @@
#endif
.endm
-.macro ANNOTATE_NOENDBR
-.Lhere_\@:
- .pushsection .discard.noendbr
- .long .Lhere_\@
- .popsection
-.endm
-
-/*
- * Use objtool to validate the entry requirement that all code paths do
- * VALIDATE_UNRET_END before RET.
- *
- * NOTE: The macro must be used at the beginning of a global symbol, otherwise
- * it will be ignored.
- */
-.macro VALIDATE_UNRET_BEGIN
-#if defined(CONFIG_NOINSTR_VALIDATION) && \
- (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
-.Lhere_\@:
- .pushsection .discard.validate_unret
- .long .Lhere_\@ - .
- .popsection
-#endif
-.endm
-
-.macro REACHABLE
-.Lhere_\@:
- .pushsection .discard.reachable
- .long .Lhere_\@
- .popsection
-.endm
-
#endif /* __ASSEMBLY__ */
#else /* !CONFIG_OBJTOOL */
@@ -155,20 +111,20 @@
#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t"
#define STACK_FRAME_NON_STANDARD(func)
#define STACK_FRAME_NON_STANDARD_FP(func)
-#define ANNOTATE_NOENDBR
-#define ASM_REACHABLE
#else
-#define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
.endm
.macro STACK_FRAME_NON_STANDARD func:req
.endm
-.macro ANNOTATE_NOENDBR
-.endm
-.macro REACHABLE
-.endm
#endif
#endif /* CONFIG_OBJTOOL */
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
+#define VALIDATE_UNRET_BEGIN ANNOTATE_UNRET_BEGIN
+#else
+#define VALIDATE_UNRET_BEGIN
+#endif
+
#endif /* _LINUX_OBJTOOL_H */
diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h
index 453a4f4ef39d..c6def4049b1a 100644
--- a/include/linux/objtool_types.h
+++ b/include/linux/objtool_types.h
@@ -54,4 +54,19 @@ struct unwind_hint {
#define UNWIND_HINT_TYPE_SAVE 6
#define UNWIND_HINT_TYPE_RESTORE 7
+/*
+ * Annotate types
+ */
+#define ANNOTYPE_NOENDBR 1
+#define ANNOTYPE_RETPOLINE_SAFE 2
+#define ANNOTYPE_INSTR_BEGIN 3
+#define ANNOTYPE_INSTR_END 4
+#define ANNOTYPE_UNRET_BEGIN 5
+#define ANNOTYPE_IGNORE_ALTS 6
+#define ANNOTYPE_INTRA_FUNCTION_CALL 7
+#define ANNOTYPE_REACHABLE 8
+#define ANNOTYPE_NOCFI 9
+
+#define ANNOTYPE_DATA_SPECIAL 1
+
#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index a0bedd038a05..01bb3affcd49 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -67,7 +67,7 @@ struct device_node {
#endif
};
-#define MAX_PHANDLE_ARGS 16
+#define MAX_PHANDLE_ARGS NR_FWNODE_REFERENCE_ARGS
struct of_phandle_args {
struct device_node *np;
int args_count;
@@ -289,6 +289,9 @@ extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
+extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
+ struct device_node *prev,
+ const char *prefix);
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
extern struct device_node *of_get_next_reserved_child(
@@ -298,6 +301,8 @@ extern struct device_node *of_get_compatible_child(const struct device_node *par
const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
+extern struct device_node *of_get_available_child_by_name(const struct device_node *node,
+ const char *name);
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
@@ -308,8 +313,12 @@ extern struct device_node *of_find_node_with_property(
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
+extern bool of_property_read_bool(const struct device_node *np, const char *propname);
extern int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size);
+extern int of_property_read_u16_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u16 *out_value);
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
@@ -357,7 +366,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_cpu_device_node_get(int cpu);
extern int of_cpu_node_to_id(struct device_node *np);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
-extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node,
int index);
extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
@@ -394,11 +403,12 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
uint32_t *args,
int size);
-extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
-extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_id(const struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
bool of_machine_compatible_match(const char *const *compats);
+bool of_machine_device_match(const struct of_device_id *matches);
+const void *of_machine_get_match_data(const struct of_device_id *matches);
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
@@ -430,14 +440,12 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
/*
- * struct property *prop;
- * const __be32 *p;
* u32 u;
*
- * of_property_for_each_u32(np, "propname", prop, p, u)
+ * of_property_for_each_u32(np, "propname", u)
* printk("U32 value: %x\n", u);
*/
-const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur,
u32 *pu);
/*
* struct property *prop;
@@ -446,11 +454,11 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
* of_property_for_each_string(np, "propname", prop, s)
* printk("String value: %s\n", s);
*/
-const char *of_prop_next_string(struct property *prop, const char *cur);
+const char *of_prop_next_string(const struct property *prop, const char *cur);
-bool of_console_check(struct device_node *dn, char *name, int index);
+bool of_console_check(const struct device_node *dn, char *name, int index);
-int of_map_id(struct device_node *np, u32 id,
+int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
@@ -544,6 +552,13 @@ static inline struct device_node *of_get_next_child(
return NULL;
}
+static inline struct device_node *of_get_next_child_with_prefix(
+ const struct device_node *node, struct device_node *prev,
+ const char *prefix)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev)
{
@@ -577,6 +592,13 @@ static inline struct device_node *of_get_child_by_name(
return NULL;
}
+static inline struct device_node *of_get_available_child_by_name(
+ const struct device_node *node,
+ const char *name)
+{
+ return NULL;
+}
+
static inline int of_device_is_compatible(const struct device_node *device,
const char *name)
{
@@ -614,12 +636,24 @@ static inline struct device_node *of_find_compatible_node(
return NULL;
}
+static inline bool of_property_read_bool(const struct device_node *np,
+ const char *propname)
+{
+ return false;
+}
+
static inline int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size)
{
return -ENOSYS;
}
+static inline int of_property_read_u16_index(const struct device_node *np,
+ const char *propname, u32 index, u16 *out_value)
+{
+ return -ENOSYS;
+}
+
static inline int of_property_read_u32_index(const struct device_node *np,
const char *propname, u32 index, u32 *out_value)
{
@@ -823,18 +857,29 @@ static inline bool of_machine_compatible_match(const char *const *compats)
return false;
}
+static inline bool of_machine_device_match(const struct of_device_id *matches)
+{
+ return false;
+}
+
+static inline const void *
+of_machine_get_match_data(const struct of_device_id *matches)
+{
+ return NULL;
+}
+
static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
{
return false;
}
-static inline const __be32 *of_prop_next_u32(struct property *prop,
+static inline const __be32 *of_prop_next_u32(const struct property *prop,
const __be32 *cur, u32 *pu)
{
return NULL;
}
-static inline const char *of_prop_next_string(struct property *prop,
+static inline const char *of_prop_next_string(const struct property *prop,
const char *cur)
{
return NULL;
@@ -873,7 +918,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
-static inline int of_map_id(struct device_node *np, u32 id,
+static inline int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
@@ -901,12 +946,6 @@ static inline const void *of_device_get_match_data(const struct device *dev)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
-static inline int of_prop_val_eq(struct property *p1, struct property *p2)
-{
- return p1->length == p2->length &&
- !memcmp(p1->value, p2->value, (size_t)p1->length);
-}
-
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -1108,7 +1147,7 @@ static inline bool of_phandle_args_equal(const struct of_phandle_args *a1,
* Search for a property in a device node and count the number of u8 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -1127,7 +1166,7 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u16 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -1146,7 +1185,7 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u32 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -1165,7 +1204,7 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u64 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -1242,24 +1281,6 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
- * of_property_read_bool - Find a property
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- *
- * Search for a boolean property in a device node. Usage on non-boolean
- * property types is deprecated.
- *
- * Return: true if the property exists false otherwise.
- */
-static inline bool of_property_read_bool(const struct device_node *np,
- const char *propname)
-{
- struct property *prop = of_find_property(np, propname, NULL);
-
- return prop ? true : false;
-}
-
-/**
* of_property_present - Test if a property is present in a node
* @np: device node to search for the property.
* @propname: name of the property to be searched.
@@ -1270,7 +1291,9 @@ static inline bool of_property_read_bool(const struct device_node *np,
*/
static inline bool of_property_present(const struct device_node *np, const char *propname)
{
- return of_property_read_bool(np, propname);
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ return prop ? true : false;
}
/**
@@ -1431,11 +1454,12 @@ static inline int of_property_read_s32(const struct device_node *np,
err == 0; \
err = of_phandle_iterator_next(it))
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- for (prop = of_find_property(np, propname, NULL), \
- p = of_prop_next_u32(prop, NULL, &u); \
- p; \
- p = of_prop_next_u32(prop, p, &u))
+#define of_property_for_each_u32(np, propname, u) \
+ for (struct {const struct property *prop; const __be32 *item; } _it = \
+ {of_find_property(np, propname, NULL), \
+ of_prop_next_u32(_it.prop, NULL, &u)}; \
+ _it.item; \
+ _it.item = of_prop_next_u32(_it.prop, _it.item, &u))
#define of_property_for_each_string(np, propname, prop, s) \
for (prop = of_find_property(np, propname, NULL), \
@@ -1469,6 +1493,12 @@ static inline int of_property_read_s32(const struct device_node *np,
child != NULL; \
child = of_get_next_child(parent, child))
+#define for_each_child_of_node_with_prefix(parent, child, prefix) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_child_with_prefix(parent, NULL, prefix); \
+ child != NULL; \
+ child = of_get_next_child_with_prefix(parent, child, prefix))
+
#define for_each_available_child_of_node(parent, child) \
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
@@ -1639,7 +1669,7 @@ int of_changeset_add_prop_string(struct of_changeset *ocs,
int of_changeset_add_prop_string_array(struct of_changeset *ocs,
struct device_node *np,
const char *prop_name,
- const char **str_array, size_t sz);
+ const char * const *str_array, size_t sz);
int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
struct device_node *np,
const char *prop_name,
@@ -1652,6 +1682,13 @@ static inline int of_changeset_add_prop_u32(struct of_changeset *ocs,
return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1);
}
+int of_changeset_update_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+
+int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np,
+ const char *prop_name);
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1732,7 +1769,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id, struct device_node *target_base);
+ int *ovcs_id, const struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1742,7 +1779,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id, struct device_node *target_base)
+ int *ovcs_id, const struct device_node *target_base)
{
return -ENOTSUPP;
}
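For illustration, the new single-argument shape of of_property_for_each_u32(); the property name is an assumption:

	static void example_dump_u32s(const struct device_node *np)
	{
		u32 u;

		/* the property pointer and cursor now live inside the macro */
		of_property_for_each_u32(np, "example-values", u)
			pr_info("value: %u\n", u);
	}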
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 26a19daf0d09..0cff90365391 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -10,7 +10,7 @@ struct of_bus;
struct of_pci_range_parser {
struct device_node *node;
- struct of_bus *bus;
+ const struct of_bus *bus;
const __be32 *range;
const __be32 *end;
int na;
@@ -26,6 +26,7 @@ struct of_pci_range {
u64 bus_addr;
};
u64 cpu_addr;
+ u64 parent_bus_addr;
u64 size;
u32 flags;
};
@@ -83,8 +84,8 @@ extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range *range);
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
+extern int of_pci_range_to_resource(const struct of_pci_range *range,
+ const struct device_node *np,
struct resource *res);
extern int of_range_to_resource(struct device_node *np, int index,
struct resource *res);
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index d69ad5bb1eb1..51dadbaa3d63 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob,
extern int __initdata dt_root_addr_cells;
extern int __initdata dt_root_size_cells;
extern void *initial_boot_params;
+extern phys_addr_t initial_boot_params_pa;
extern char __dtb_start[];
extern char __dtb_end[];
@@ -54,6 +55,15 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node,
const char *uname);
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
+
+extern const __be32 *of_flat_dt_get_addr_size_prop(unsigned long node,
+ const char *name,
+ int *entries);
+extern bool of_flat_dt_get_addr_size(unsigned long node, const char *name,
+ u64 *addr, u64 *size);
+extern void of_flat_dt_read_addr_size(const __be32 *prop, int entry_index,
+ u64 *addr, u64 *size);
+
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern unsigned long of_get_flat_dt_root(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
@@ -70,8 +80,8 @@ extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(void);
-extern bool early_init_dt_scan(void *params);
-extern bool early_init_dt_verify(void *params);
+extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys);
+extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys);
extern void early_init_dt_scan_nodes(void);
extern const char *of_flat_dt_get_machine_name(void);
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index a4bea62bfa29..a692d9d979a6 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -37,14 +38,43 @@ struct of_endpoint {
for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
child = of_graph_get_next_endpoint(parent, child))
+/**
+ * for_each_of_graph_port - iterate over every port in a device or ports node
+ * @parent: parent device or ports node containing port
+ * @child: loop variable pointing to the current port node
+ *
+ * When breaking out of the loop while continuing to use @child, use
+ * return_ptr(@child) or no_free_ptr(@child) so that __free() is not called on it.
+ */
+#define for_each_of_graph_port(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port(parent, child))
+
+/**
+ * for_each_of_graph_port_endpoint - iterate over every endpoint in a port node
+ * @parent: parent port node
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop while continuing to use @child, use
+ * return_ptr(@child) or no_free_ptr(@child) so that __free() is not called on it.
+ */
+#define for_each_of_graph_port_endpoint(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port_endpoint(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port_endpoint(parent, child))
+
#ifdef CONFIG_OF
bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
unsigned int of_graph_get_endpoint_count(const struct device_node *np);
+unsigned int of_graph_get_port_count(struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
+struct device_node *of_graph_get_next_port(const struct device_node *parent,
+ struct device_node *port);
+struct device_node *of_graph_get_next_port_endpoint(const struct device_node *port,
+ struct device_node *prev);
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_endpoint(
@@ -73,6 +103,11 @@ static inline unsigned int of_graph_get_endpoint_count(const struct device_node
return 0;
}
+static inline unsigned int of_graph_get_port_count(struct device_node *np)
+{
+ return 0;
+}
+
static inline struct device_node *of_graph_get_port_by_id(
struct device_node *node, u32 id)
{
@@ -86,6 +121,20 @@ static inline struct device_node *of_graph_get_next_endpoint(
return NULL;
}
+static inline struct device_node *of_graph_get_next_port(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_next_port_endpoint(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d6d3eae2f145..1c2bc0281807 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -43,18 +43,20 @@ extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
+extern const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
- struct device_node *np,
+ const struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 id,
u32 bus_token);
-extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
+extern void of_msi_configure(struct device *dev, const struct device_node *np);
+extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in);
#else
static inline void of_irq_init(const struct of_device_id *matches)
{
@@ -76,6 +78,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index)
+{
+ return NULL;
+}
static inline int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs)
{
@@ -100,8 +107,7 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_id(struct device *dev,
- struct device_node *msi_np, u32 id_in)
+static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
{
return id_in;
}
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index a2ff1ad48f7f..17471ef8e092 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -47,8 +47,6 @@ struct of_dev_auxdata {
{ .compatible = _compat, .phys_addr = _phys, .name = _name, \
.platform_data = _pdata }
-extern const struct of_device_id of_default_bus_match_table[];
-
/* Platform drivers register/unregister */
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index e338282da652..f573423359f4 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -7,6 +7,7 @@
struct of_phandle_args;
struct reserved_mem_ops;
+struct resource;
struct reserved_mem {
const char *name;
@@ -39,6 +40,12 @@ int of_reserved_mem_device_init_by_name(struct device *dev,
void of_reserved_mem_device_release(struct device *dev);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
+int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx, struct resource *res);
+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name, struct resource *res);
+int of_reserved_mem_region_count(const struct device_node *np);
+
#else
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
@@ -63,6 +70,25 @@ static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np
{
return NULL;
}
+
+static inline int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_count(const struct device_node *np)
+{
+ return 0;
+}
#endif
/**
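A hedged sketch of a driver resolving its reserved-memory region into a struct resource with the new helper; index 0 is an assumption:

	static int example_get_carveout(struct device *dev, struct resource *res)
	{
		int ret;

		ret = of_reserved_mem_region_to_resource(dev->of_node, 0, res);
		if (ret)
			return ret;
		dev_info(dev, "carveout %pR\n", res);
		return 0;
	}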
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 6f9242259edc..6de479ebbe5d 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -151,6 +151,5 @@ enum OID {
extern enum OID look_up_OID(const void *data, size_t datasize);
extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
extern int sprint_oid(const void *, size_t, char *, size_t);
-extern int sprint_OID(enum OID, char *, size_t);
#endif /* _LINUX_OID_REGISTRY_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index 082841908fe7..263b915df1fb 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -66,10 +66,6 @@ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
struct device_node;
-extern int gpmc_get_client_irq(unsigned irq_config);
-
-extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
-
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
@@ -84,13 +80,3 @@ extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
-#else
-#define board_onenand_data NULL
-static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
-{
- return 0;
-}
-#endif
diff --git a/include/linux/once.h b/include/linux/once.h
index bc714d414448..449a0e34ad5a 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data.once") ___done = false; \
+ static bool __section(".data..once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE_SLEEPABLE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data.once") ___done = false; \
+ static bool __section(".data..once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
___ret = __do_once_sleepable_start(&___done); \
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
index b7bce4983638..236592c4eeb1 100644
--- a/include/linux/once_lite.h
+++ b/include/linux/once_lite.h
@@ -12,11 +12,11 @@
#define __ONCE_LITE_IF(condition) \
({ \
- static bool __section(".data.once") __already_done; \
+ static bool __section(".data..once") __already_done; \
bool __ret_cond = !!(condition); \
bool __ret_once = false; \
\
- if (unlikely(__ret_cond && !__already_done)) { \
+ if (unlikely(__ret_cond) && unlikely(!__already_done)) { \
__already_done = true; \
__ret_once = true; \
} \
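Both hunks leave the one-shot pattern itself unchanged; only the section name (and, in the lite variant, the branch hints) differ. A usage sketch through the stock wrappers — foo_hash_seed, foo_hash() and foo_check() are hypothetical, while get_random_once() and DO_ONCE_LITE_IF() are the existing APIs built on these macros:

/* Sketch: needs <linux/once.h>, <linux/once_lite.h>, <linux/printk.h>. */
static u32 foo_hash_seed;		/* hypothetical */

static u32 foo_hash(u32 val)
{
	/* Expands to DO_ONCE(get_random_bytes, ...): the static key is
	 * disabled after the first call, so later calls are near-free. */
	get_random_once(&foo_hash_seed, sizeof(foo_hash_seed));
	return val ^ foo_hash_seed;
}

static void foo_check(int err)
{
	/* Warn only on the first failure; __already_done lives in the
	 * renamed .data..once section. */
	DO_ONCE_LITE_IF(err < 0, pr_warn, "foo: first error: %d\n", err);
}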
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7d0c9c48a0c5..7b02bc1d0a7e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
-#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
@@ -92,7 +91,7 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
*/
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
- if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ if (unlikely(mm_flags_test(MMF_UNSTABLE, mm)))
return VM_FAULT_SIGBUS;
return 0;
}
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0c7e3dcfe867..736f633b2d5f 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -239,6 +239,76 @@ static inline bool __must_check __must_check_overflow(bool overflow)
__overflows_type(n, T))
/**
+ * range_overflows() - Check if a range is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * A strict check to determine if the range [@start, @start + @size) is
+ * invalid with respect to the allowable range [0, @max). Any range
+ * starting at or beyond @max is considered an overflow, even if @size is 0.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_overflows_t() - Check if a range is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows_t(type, start, size, max) \
+ range_overflows((type)(start), (type)(size), (type)(max))
+
+/**
+ * range_end_overflows() - Check if a range's endpoint is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Checks only if the endpoint of a range (@start + @size) exceeds @max.
+ * Unlike range_overflows(), a zero-sized range at the boundary (@start == @max)
+ * is not considered an overflow. Useful for iterator-style checks.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_end_overflows_t() - Check if a range's endpoint is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_end_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows_t(type, start, size, max) \
+ range_end_overflows((type)(start), (type)(size), (type)(max))
+
+/**
* castable_to_type - like __same_type(), but also allows for casted literals
*
* @n: variable or constant value
@@ -389,25 +459,50 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
struct_size((type *)NULL, member, count)
/**
- * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
- * Enables caller macro to pass (different) initializer.
+ * struct_offset() - Calculate the offset of a member within a struct
+ * @p: Pointer to the struct
+ * @member: Name of the member to get the offset of
+ *
+ * Calculates the offset of a particular @member of the structure pointed
+ * to by @p.
+ *
+ * Return: number of bytes to the location of @member.
+ */
+#define struct_offset(p, member) (offsetof(typeof(*(p)), member))
+
+/**
+ * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass arbitrary trailing expressions
*
* @type: structure type name, including "struct" keyword.
* @name: Name for a variable to define.
* @member: Name of the array member.
* @count: Number of elements in the array; must be compile-time const.
- * @initializer: initializer expression (could be empty for no init).
+ * @trailer: Trailing expressions for attributes and/or initializers.
*/
-#define _DEFINE_FLEX(type, name, member, count, initializer...) \
+#define __DEFINE_FLEX(type, name, member, count, trailer...) \
_Static_assert(__builtin_constant_p(count), \
"onstack flex array members require compile-time const count"); \
union { \
u8 bytes[struct_size_t(type, member, count)]; \
type obj; \
- } name##_u initializer; \
+ } name##_u trailer; \
type *name = (type *)&name##_u
/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer...) \
+ __DEFINE_FLEX(type, name, member, count, = { .obj initializer })
+
+/**
* DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
* flexible array member, when it does not have a __counted_by annotation.
*
@@ -419,9 +514,12 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @type structure with a trailing
* flexible array member.
* Use __struct_size(@name) to get compile-time size of it afterwards.
+ * Use __member_size(@name->member) to get compile-time size of @name members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
*/
#define DEFINE_RAW_FLEX(type, name, member, count) \
- _DEFINE_FLEX(type, name, member, count, = {})
+ __DEFINE_FLEX(type, name, member, count, = { })
/**
* DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
@@ -436,8 +534,22 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* Define a zeroed, on-stack, instance of @TYPE structure with a trailing
* flexible array member.
* Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ * Use __member_size(@NAME->member) to get compile-time size of @NAME members.
+ * Use STACK_FLEX_ARRAY_SIZE(@NAME, @MEMBER) to get compile-time number of
+ * elements in array @MEMBER.
*/
#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
- _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .obj.COUNTER = COUNT, })
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+
+/**
+ * STACK_FLEX_ARRAY_SIZE() - helper macro for DEFINE_FLEX() family.
+ * Returns the number of elements in @array.
+ *
+ * @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
+ * @array: Name of the array member.
+ */
+#define STACK_FLEX_ARRAY_SIZE(name, array) \
+ (__member_size((name)->array) / sizeof(*(name)->array) + \
+ __must_be_array((name)->array))
#endif /* __LINUX_OVERFLOW_H */
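To make the boundary semantics and the reworked flex helpers concrete, a sketch using a hypothetical struct foo_cmd. The numeric results follow directly from the macro bodies above; the element count via STACK_FLEX_ARRAY_SIZE() assumes FORTIFY_SOURCE/__counted_by support:

/* range_overflows(4096, 0, 4096)     -> true  (start at the limit)
 * range_end_overflows(4096, 0, 4096) -> false (empty range ending at max)
 * range_end_overflows(4092, 8, 4096) -> true  (endpoint past the limit)
 */
struct foo_cmd {			/* hypothetical */
	u8 count;
	u8 args[] __counted_by(count);
};

static void foo_fill(void)
{
	/* Stack-allocates room for 4 trailing elements, zeroed, with
	 * cmd->count pre-set to 4 by the initializer DEFINE_FLEX()
	 * builds via _DEFINE_FLEX()/__DEFINE_FLEX(). */
	DEFINE_FLEX(struct foo_cmd, cmd, args, count, 4);

	for (unsigned int i = 0; i < STACK_FLEX_ARRAY_SIZE(cmd, args); i++)
		cmd->args[i] = i;
}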
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 8d6571feb95d..20ae4d452c7b 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -5,8 +5,89 @@
#ifndef _LINUX_PACKING_H
#define _LINUX_PACKING_H
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define GEN_PACKED_FIELD_STRUCT(__type) \
+ struct packed_field_ ## __type { \
+ __type startbit; \
+ __type endbit; \
+ __type offset; \
+ __type size; \
+ }
+
+/* struct packed_field_u8. Use with bit offsets < 256, buffers < 32B and
+ * unpacked structures < 256B.
+ */
+GEN_PACKED_FIELD_STRUCT(u8);
+
+/* struct packed_field_u16. Use with bit offsets < 65536, buffers < 8KB and
+ * unpacked structures < 64KB.
+ */
+GEN_PACKED_FIELD_STRUCT(u16);
+
+#define PACKED_FIELD(start, end, struct_name, struct_field) \
+{ \
+ (start), \
+ (end), \
+ offsetof(struct_name, struct_field), \
+ sizeof_field(struct_name, struct_field), \
+}
+
+#define CHECK_PACKED_FIELD_OVERLAP(fields, index1, index2) ({ \
+ typeof(&(fields)[0]) __f = (fields); \
+ typeof(__f[0]) _f1 = __f[index1]; typeof(__f[0]) _f2 = __f[index2]; \
+ const bool _ascending = __f[0].startbit < __f[1].startbit; \
+ BUILD_BUG_ON_MSG(_ascending && _f1.startbit >= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks ascending order"); \
+ BUILD_BUG_ON_MSG(!_ascending && _f1.startbit <= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks descending order"); \
+ BUILD_BUG_ON_MSG(max(_f1.endbit, _f2.endbit) <= \
+ min(_f1.startbit, _f2.startbit), \
+ __stringify(fields) " field " __stringify(index2) \
+ " overlaps with previous field"); \
+})
+
+#define CHECK_PACKED_FIELD(fields, index) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(_f[0]) __f = _f[index]; \
+ BUILD_BUG_ON_MSG(__f.startbit < __f.endbit, \
+ __stringify(fields) " field " __stringify(index) \
+ " start bit must not be smaller than end bit"); \
+ BUILD_BUG_ON_MSG(__f.size != 1 && __f.size != 2 && \
+ __f.size != 4 && __f.size != 8, \
+ __stringify(fields) " field " __stringify(index) \
+ " has unsupported unpacked storage size"); \
+ BUILD_BUG_ON_MSG(__f.startbit - __f.endbit >= BITS_PER_BYTE * __f.size, \
+ __stringify(fields) " field " __stringify(index) \
+ " exceeds unpacked storage size"); \
+ __builtin_choose_expr(index != 0, \
+ CHECK_PACKED_FIELD_OVERLAP(fields, index - 1, index), \
+ 1); \
+})
+
+/* Note that the packed fields may be either in ascending or descending order.
+ * Thus, we must check that both the first and last fields fit within the
+ * packed buffer size.
+ */
+#define CHECK_PACKED_FIELDS_SIZE(fields, pbuflen) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(pbuflen) _len = (pbuflen); \
+ const size_t num_fields = ARRAY_SIZE(fields); \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_len), \
+ __stringify(fields) " pbuflen " __stringify(pbuflen) \
+ " must be a compile time constant"); \
+ BUILD_BUG_ON_MSG(_f[0].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " first field exceeds packed buffer size"); \
+ BUILD_BUG_ON_MSG(_f[num_fields - 1].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " last field exceeds packed buffer size"); \
+})
#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
#define QUIRK_LITTLE_ENDIAN BIT(1)
@@ -17,33 +98,361 @@ enum packing_op {
UNPACK,
};
-/**
- * packing - Convert numbers (currently u64) between a packed and an unpacked
- * format. Unpacked means laid out in memory in the CPU's native
- * understanding of integers, while packed means anything else that
- * requires translation.
- *
- * @pbuf: Pointer to a buffer holding the packed value.
- * @uval: Pointer to an u64 holding the unpacked value.
- * @startbit: The index (in logical notation, compensated for quirks) where
- * the packed value starts within pbuf. Must be larger than, or
- * equal to, endbit.
- * @endbit: The index (in logical notation, compensated for quirks) where
- * the packed value ends within pbuf. Must be smaller than, or equal
- * to, startbit.
- * @op: If PACK, then uval will be treated as const pointer and copied (packed)
- * into pbuf, between startbit and endbit.
- * If UNPACK, then pbuf will be treated as const pointer and the logical
- * value between startbit and endbit will be copied (unpacked) to uval.
- * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
- * QUIRK_MSB_ON_THE_RIGHT.
- *
- * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded.
- * If op is PACK, pbuf is modified.
- * If op is UNPACK, uval is modified.
- */
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
enum packing_op op, u8 quirks);
+int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
+ u8 quirks);
+
+int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks);
+
+void pack_fields_u8(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void pack_fields_u16(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u8(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u16(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+/* Do not hand-edit the following packed field check macros!
+ *
+ * They are generated using scripts/gen_packed_field_checks.c, which may be
+ * built via "make scripts_gen_packed_field_checks". If larger macro sizes are
+ * needed in the future, please use this program to re-generate the macros and
+ * insert them here.
+ */
+
+#define CHECK_PACKED_FIELDS_1(fields) \
+ CHECK_PACKED_FIELD(fields, 0)
+
+#define CHECK_PACKED_FIELDS_2(fields) do { \
+ CHECK_PACKED_FIELDS_1(fields); \
+ CHECK_PACKED_FIELD(fields, 1); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_3(fields) do { \
+ CHECK_PACKED_FIELDS_2(fields); \
+ CHECK_PACKED_FIELD(fields, 2); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_4(fields) do { \
+ CHECK_PACKED_FIELDS_3(fields); \
+ CHECK_PACKED_FIELD(fields, 3); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_5(fields) do { \
+ CHECK_PACKED_FIELDS_4(fields); \
+ CHECK_PACKED_FIELD(fields, 4); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_6(fields) do { \
+ CHECK_PACKED_FIELDS_5(fields); \
+ CHECK_PACKED_FIELD(fields, 5); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_7(fields) do { \
+ CHECK_PACKED_FIELDS_6(fields); \
+ CHECK_PACKED_FIELD(fields, 6); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_8(fields) do { \
+ CHECK_PACKED_FIELDS_7(fields); \
+ CHECK_PACKED_FIELD(fields, 7); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_9(fields) do { \
+ CHECK_PACKED_FIELDS_8(fields); \
+ CHECK_PACKED_FIELD(fields, 8); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_10(fields) do { \
+ CHECK_PACKED_FIELDS_9(fields); \
+ CHECK_PACKED_FIELD(fields, 9); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_11(fields) do { \
+ CHECK_PACKED_FIELDS_10(fields); \
+ CHECK_PACKED_FIELD(fields, 10); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_12(fields) do { \
+ CHECK_PACKED_FIELDS_11(fields); \
+ CHECK_PACKED_FIELD(fields, 11); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_13(fields) do { \
+ CHECK_PACKED_FIELDS_12(fields); \
+ CHECK_PACKED_FIELD(fields, 12); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_14(fields) do { \
+ CHECK_PACKED_FIELDS_13(fields); \
+ CHECK_PACKED_FIELD(fields, 13); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_15(fields) do { \
+ CHECK_PACKED_FIELDS_14(fields); \
+ CHECK_PACKED_FIELD(fields, 14); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_16(fields) do { \
+ CHECK_PACKED_FIELDS_15(fields); \
+ CHECK_PACKED_FIELD(fields, 15); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_17(fields) do { \
+ CHECK_PACKED_FIELDS_16(fields); \
+ CHECK_PACKED_FIELD(fields, 16); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_18(fields) do { \
+ CHECK_PACKED_FIELDS_17(fields); \
+ CHECK_PACKED_FIELD(fields, 17); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_19(fields) do { \
+ CHECK_PACKED_FIELDS_18(fields); \
+ CHECK_PACKED_FIELD(fields, 18); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_20(fields) do { \
+ CHECK_PACKED_FIELDS_19(fields); \
+ CHECK_PACKED_FIELD(fields, 19); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_21(fields) do { \
+ CHECK_PACKED_FIELDS_20(fields); \
+ CHECK_PACKED_FIELD(fields, 20); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_22(fields) do { \
+ CHECK_PACKED_FIELDS_21(fields); \
+ CHECK_PACKED_FIELD(fields, 21); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_23(fields) do { \
+ CHECK_PACKED_FIELDS_22(fields); \
+ CHECK_PACKED_FIELD(fields, 22); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_24(fields) do { \
+ CHECK_PACKED_FIELDS_23(fields); \
+ CHECK_PACKED_FIELD(fields, 23); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_25(fields) do { \
+ CHECK_PACKED_FIELDS_24(fields); \
+ CHECK_PACKED_FIELD(fields, 24); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_26(fields) do { \
+ CHECK_PACKED_FIELDS_25(fields); \
+ CHECK_PACKED_FIELD(fields, 25); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_27(fields) do { \
+ CHECK_PACKED_FIELDS_26(fields); \
+ CHECK_PACKED_FIELD(fields, 26); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_28(fields) do { \
+ CHECK_PACKED_FIELDS_27(fields); \
+ CHECK_PACKED_FIELD(fields, 27); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_29(fields) do { \
+ CHECK_PACKED_FIELDS_28(fields); \
+ CHECK_PACKED_FIELD(fields, 28); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_30(fields) do { \
+ CHECK_PACKED_FIELDS_29(fields); \
+ CHECK_PACKED_FIELD(fields, 29); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_31(fields) do { \
+ CHECK_PACKED_FIELDS_30(fields); \
+ CHECK_PACKED_FIELD(fields, 30); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_32(fields) do { \
+ CHECK_PACKED_FIELDS_31(fields); \
+ CHECK_PACKED_FIELD(fields, 31); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_33(fields) do { \
+ CHECK_PACKED_FIELDS_32(fields); \
+ CHECK_PACKED_FIELD(fields, 32); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_34(fields) do { \
+ CHECK_PACKED_FIELDS_33(fields); \
+ CHECK_PACKED_FIELD(fields, 33); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_35(fields) do { \
+ CHECK_PACKED_FIELDS_34(fields); \
+ CHECK_PACKED_FIELD(fields, 34); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_36(fields) do { \
+ CHECK_PACKED_FIELDS_35(fields); \
+ CHECK_PACKED_FIELD(fields, 35); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_37(fields) do { \
+ CHECK_PACKED_FIELDS_36(fields); \
+ CHECK_PACKED_FIELD(fields, 36); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_38(fields) do { \
+ CHECK_PACKED_FIELDS_37(fields); \
+ CHECK_PACKED_FIELD(fields, 37); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_39(fields) do { \
+ CHECK_PACKED_FIELDS_38(fields); \
+ CHECK_PACKED_FIELD(fields, 38); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_40(fields) do { \
+ CHECK_PACKED_FIELDS_39(fields); \
+ CHECK_PACKED_FIELD(fields, 39); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_41(fields) do { \
+ CHECK_PACKED_FIELDS_40(fields); \
+ CHECK_PACKED_FIELD(fields, 40); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_42(fields) do { \
+ CHECK_PACKED_FIELDS_41(fields); \
+ CHECK_PACKED_FIELD(fields, 41); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_43(fields) do { \
+ CHECK_PACKED_FIELDS_42(fields); \
+ CHECK_PACKED_FIELD(fields, 42); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_44(fields) do { \
+ CHECK_PACKED_FIELDS_43(fields); \
+ CHECK_PACKED_FIELD(fields, 43); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_45(fields) do { \
+ CHECK_PACKED_FIELDS_44(fields); \
+ CHECK_PACKED_FIELD(fields, 44); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_46(fields) do { \
+ CHECK_PACKED_FIELDS_45(fields); \
+ CHECK_PACKED_FIELD(fields, 45); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_47(fields) do { \
+ CHECK_PACKED_FIELDS_46(fields); \
+ CHECK_PACKED_FIELD(fields, 46); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_48(fields) do { \
+ CHECK_PACKED_FIELDS_47(fields); \
+ CHECK_PACKED_FIELD(fields, 47); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_49(fields) do { \
+ CHECK_PACKED_FIELDS_48(fields); \
+ CHECK_PACKED_FIELD(fields, 48); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_50(fields) do { \
+ CHECK_PACKED_FIELDS_49(fields); \
+ CHECK_PACKED_FIELD(fields, 49); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS(fields) \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 1, ({ CHECK_PACKED_FIELDS_1(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 2, ({ CHECK_PACKED_FIELDS_2(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 3, ({ CHECK_PACKED_FIELDS_3(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 4, ({ CHECK_PACKED_FIELDS_4(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 5, ({ CHECK_PACKED_FIELDS_5(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 6, ({ CHECK_PACKED_FIELDS_6(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 7, ({ CHECK_PACKED_FIELDS_7(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 8, ({ CHECK_PACKED_FIELDS_8(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 9, ({ CHECK_PACKED_FIELDS_9(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 10, ({ CHECK_PACKED_FIELDS_10(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 11, ({ CHECK_PACKED_FIELDS_11(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 12, ({ CHECK_PACKED_FIELDS_12(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 13, ({ CHECK_PACKED_FIELDS_13(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 14, ({ CHECK_PACKED_FIELDS_14(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 15, ({ CHECK_PACKED_FIELDS_15(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 16, ({ CHECK_PACKED_FIELDS_16(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 17, ({ CHECK_PACKED_FIELDS_17(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 18, ({ CHECK_PACKED_FIELDS_18(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 19, ({ CHECK_PACKED_FIELDS_19(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 20, ({ CHECK_PACKED_FIELDS_20(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 21, ({ CHECK_PACKED_FIELDS_21(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 22, ({ CHECK_PACKED_FIELDS_22(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 23, ({ CHECK_PACKED_FIELDS_23(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 24, ({ CHECK_PACKED_FIELDS_24(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 25, ({ CHECK_PACKED_FIELDS_25(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 26, ({ CHECK_PACKED_FIELDS_26(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 27, ({ CHECK_PACKED_FIELDS_27(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 28, ({ CHECK_PACKED_FIELDS_28(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 29, ({ CHECK_PACKED_FIELDS_29(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 30, ({ CHECK_PACKED_FIELDS_30(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 31, ({ CHECK_PACKED_FIELDS_31(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 32, ({ CHECK_PACKED_FIELDS_32(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 33, ({ CHECK_PACKED_FIELDS_33(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 34, ({ CHECK_PACKED_FIELDS_34(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 35, ({ CHECK_PACKED_FIELDS_35(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 36, ({ CHECK_PACKED_FIELDS_36(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 37, ({ CHECK_PACKED_FIELDS_37(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 38, ({ CHECK_PACKED_FIELDS_38(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 39, ({ CHECK_PACKED_FIELDS_39(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 40, ({ CHECK_PACKED_FIELDS_40(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 41, ({ CHECK_PACKED_FIELDS_41(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 42, ({ CHECK_PACKED_FIELDS_42(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 43, ({ CHECK_PACKED_FIELDS_43(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 44, ({ CHECK_PACKED_FIELDS_44(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 45, ({ CHECK_PACKED_FIELDS_45(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 46, ({ CHECK_PACKED_FIELDS_46(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 47, ({ CHECK_PACKED_FIELDS_47(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 48, ({ CHECK_PACKED_FIELDS_48(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 49, ({ CHECK_PACKED_FIELDS_49(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 50, ({ CHECK_PACKED_FIELDS_50(fields); }), \
+ ({ BUILD_BUG_ON_MSG(1, "CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than 50."); }) \
+))))))))))))))))))))))))))))))))))))))))))))))))))
+
+/* End of generated content */
+
+#define pack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : pack_fields_u8, \
+ const struct packed_field_u16 * : pack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
+#define unpack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : unpack_fields_u8, \
+ const struct packed_field_u16 * : unpack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
#endif
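A compile-checked usage sketch of the new field API. The register layout and all names are hypothetical; every constraint enforced by CHECK_PACKED_FIELDS() and CHECK_PACKED_FIELDS_SIZE() resolves at build time:

struct foo_regs {			/* hypothetical unpacked form */
	u8  enable;
	u8  prio;
	u16 vid;
};

/* Descending startbit order, non-overlapping, all within one 64-bit word. */
static const struct packed_field_u8 foo_fields[] = {
	PACKED_FIELD(63, 63, struct foo_regs, enable),
	PACKED_FIELD(62, 60, struct foo_regs, prio),
	PACKED_FIELD(59, 48, struct foo_regs, vid),
};

static void foo_regs_pack(void *pbuf, const struct foo_regs *regs)
{
	/* _Generic() dispatches to pack_fields_u8(); pbuflen (8) must be
	 * a compile-time constant for CHECK_PACKED_FIELDS_SIZE(). */
	pack_fields(pbuf, 8, regs, foo_fields, QUIRK_LITTLE_ENDIAN);
}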
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0146daf34430..765f2778e264 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -90,8 +90,6 @@ struct padata_cpumask {
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
- * @reorder_work: work struct for reordering.
- * @lock: Reorder lock.
*/
struct parallel_data {
struct padata_shell *ps;
@@ -102,8 +100,6 @@ struct parallel_data {
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
- struct work_struct reorder_work;
- spinlock_t ____cacheline_aligned lock;
};
/**
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 7d79818dc065..760006b1c480 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -72,8 +72,10 @@
#define NODE_NOT_IN_PAGE_FLAGS 1
#endif
-#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+#if defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_TAG_WIDTH 8
+#elif defined(CONFIG_KASAN_HW_TAGS)
+#define KASAN_TAG_WIDTH 4
#else
#define KASAN_TAG_WIDTH 0
#endif
@@ -111,5 +113,12 @@
ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
+#define NR_NON_PAGEFLAG_BITS (SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + \
+ LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH + \
+ LRU_GEN_WIDTH + LRU_REFS_WIDTH)
+
+#define NR_UNUSED_PAGEFLAG_BITS (BITS_PER_LONG - \
+ (NR_NON_PAGEFLAG_BITS + NR_PAGEFLAGS))
+
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
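A worked example of the new accounting, under one hypothetical 64-bit configuration: with SECTIONS_WIDTH = 0, NODES_WIDTH = 10, ZONES_WIDTH = 3 and the remaining widths 0, NR_NON_PAGEFLAG_BITS is 13; if NR_PAGEFLAGS is 24 on that config, NR_UNUSED_PAGEFLAG_BITS works out to 64 - (13 + 24) = 27 spare bits in page->flags.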
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 104078afe0b1..f7a0e4af0c73 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -30,16 +30,11 @@
* - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
* to read/write these pages might end badly. Don't touch!
* - The zero page(s)
- * - Pages not added to the page allocator when onlining a section because
- * they were excluded via the online_page_callback() or because they are
- * PG_hwpoison.
* - Pages allocated in the context of kexec/kdump (loaded kernel image,
* control pages, vmcoreinfo)
* - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
* not marked PG_reserved (as they might be in use by somebody else who does
* not respect the caching strategy).
- * - Pages part of an offline section (struct pages of offline sections should
- * not be trusted as they will be initialized when first onlined).
* - MCA pages on ia64
* - Pages holding CPU notes for POWER Firmware Assisted Dump
* - Device memory (e.g. PMEM, DAX, HMM)
@@ -71,8 +66,6 @@
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -108,22 +101,19 @@ enum pageflags {
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_error,
- PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
+ PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
+ PG_dropbehind, /* drop pages on IO completion */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- PG_uncached, /* Page has been mapped as uncached */
-#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
@@ -131,14 +121,21 @@ enum pageflags {
PG_young,
PG_idle,
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
PG_arch_3,
#endif
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
+ /* Anonymous memory (and shmem) */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /* Some filesystems */
+ PG_checked = PG_owner_priv_1,
+
/*
* Depending on the way an anonymous folio can be mapped into a page
* table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
@@ -146,13 +143,13 @@ enum pageflags {
* tail pages of an anonymous folio. For now, we only expect it to be
* set on tail pages for PTE-mapped THP.
*/
- PG_anon_exclusive = PG_mappedtodisk,
-
- /* Filesystems */
- PG_checked = PG_owner_priv_1,
+ PG_anon_exclusive = PG_owner_2,
- /* SwapBacked */
- PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /*
+ * Set if all buffer heads in the folio are mapped.
+ * Filesystems which do not use BHs can use it for their own purpose.
+ */
+ PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
@@ -170,8 +167,12 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
- /* non-lru isolated movable page */
- PG_isolated = PG_reclaim,
+#ifdef CONFIG_MIGRATION
+ /* movable_ops page that is isolated for migration */
+ PG_movable_ops_isolated = PG_reclaim,
+ /* this is a movable_ops page (for selected typed pages only) */
+ PG_movable_ops = PG_uptodate,
+#endif
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
@@ -188,8 +189,9 @@ enum pageflags {
*/
/* At least one page in this folio has the hwpoison flag set */
- PG_has_hwpoisoned = PG_error,
+ PG_has_hwpoisoned = PG_active,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -215,7 +217,7 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
* cold cacheline in some cases.
*/
if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags)) {
+ test_bit(PG_head, &page->flags.f)) {
/*
* We can safely access the field of the @page[1] with PG_head
* because the @page is a compound page composed with at least
@@ -228,11 +230,48 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
}
return page;
}
+
+static __always_inline bool page_count_writable(const struct page *page, int u)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return true;
+
+ /*
+ * The refcount check is ordered before the fake-head check to prevent
+ * the following race:
+ * CPU 1 (HVO) CPU 2 (speculative PFN walker)
+ *
+ * page_ref_freeze()
+ * synchronize_rcu()
+ * rcu_read_lock()
+ * page_is_fake_head() is false
+ * vmemmap_remap_pte()
+ * XXX: struct page[] becomes r/o
+ *
+ * page_ref_unfreeze()
+ * page_ref_count() is not zero
+ *
+ * atomic_add_unless(&page->_refcount)
+ * XXX: try to modify r/o struct page[]
+ *
+ * The refcount check also prevents modification attempts to other (r/o)
+ * tail pages that are not fake heads.
+ */
+ if (atomic_read_acquire(&page->_refcount) == u)
+ return false;
+
+ return page_fixed_fake_head(page) == page;
+}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}
+
+static inline bool page_count_writable(const struct page *page, int u)
+{
+ return true;
+}
#endif
static __always_inline int page_is_fake_head(const struct page *page)
@@ -240,7 +279,7 @@ static __always_inline int page_is_fake_head(const struct page *page)
return page_fixed_fake_head(page) != page;
}
-static inline unsigned long _compound_head(const struct page *page)
+static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
@@ -277,7 +316,7 @@ static inline unsigned long _compound_head(const struct page *page)
* check that the page number lies within @folio; the caller is presumed
* to have a reference to the page.
*/
-#define folio_page(folio, n) nth_page(&(folio)->page, n)
+#define folio_page(folio, n) (&(folio)->page + (n))
static __always_inline int PageTail(const struct page *page)
{
@@ -286,14 +325,14 @@ static __always_inline int PageTail(const struct page *page)
static __always_inline int PageCompound(const struct page *page)
{
- return test_bit(PG_head, &page->flags) ||
+ return test_bit(PG_head, &page->flags.f) ||
READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -309,18 +348,18 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
{
const struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
}
static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
struct page *page = &folio->page;
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
}
/*
@@ -410,37 +449,37 @@ FOLIO_CLEAR_FLAG(name, page)
#define TESTPAGEFLAG(uname, lname, policy) \
FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
static __always_inline int Page##uname(const struct page *page) \
-{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
-{ set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
-{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
-{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
-{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
-{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
-{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -511,7 +550,6 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
-PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
@@ -519,8 +557,9 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
-PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
- TESTCLEARFLAG(Active, active, PF_HEAD)
+FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
@@ -536,9 +575,9 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
-PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
+FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
@@ -546,9 +585,10 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
* - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
-PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
-PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
- TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)
+
+/* owner_2 can be set on tail pages for anon memory */
+FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
@@ -556,13 +596,17 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
-PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
+FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)
+FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
+
+FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
@@ -574,6 +618,14 @@ PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
+#define PhysHighMem(__p) (PageHighMem(phys_to_page(__p)))
+
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f) true
+#else
+#define folio_test_partial_kmap(f) folio_test_highmem(f)
+#endif
#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(const struct folio *folio)
@@ -582,45 +634,32 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio)
test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
-static __always_inline bool PageSwapCache(const struct page *page)
-{
- return folio_test_swapcache(page_folio(page));
-}
-
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(SwapCache, swapcache)
+FOLIO_FLAG_FALSE(swapcache)
#endif
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
- __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
- TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
-#else
-PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
- TESTSCFLAG_FALSE(Mlocked, mlocked)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
+FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(Uncached, uncached)
+FOLIO_FLAG_FALSE(mlocked)
+ __FOLIO_CLEAR_FLAG_NOOP(mlocked)
+ FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
+ FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
-#define MAGIC_HWPOISON 0x48575053U /* HWPS */
-extern void SetPageHWPoisonTakenOff(struct page *page);
-extern void ClearPageHWPoisonTakenOff(struct page *page);
-extern bool take_page_off_buddy(struct page *page);
-extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
@@ -655,71 +694,47 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
#endif
/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ * On an anonymous folio mapped into a user virtual memory area,
+ * folio->mapping points to its anon_vma, not to a struct address_space;
+ * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
- * bit; and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
+ * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
+ * bit; and then folio->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged folio. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points to a struct movable_operations.
- *
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
+ * Please note that, confusingly, "folio_mapping" refers to the inode
+ * address_space which maps the folio from disk; whereas "folio_mapped"
+ * refers to user virtual address space into which the folio is mapped.
*
* For slab pages, since slab reuses the bits in struct page to store its
- * internal states, the page->mapping does not exist as such, nor do these
- * flags below. So in order to avoid testing non-existent bits, please
- * make sure that PageSlab(page) actually evaluates to false before calling
- * the following functions (e.g., PageAnon). See mm/slab.h.
+ * internal states, the folio->mapping does not exist as such, nor do
+ * these flags below. So in order to avoid testing non-existent bits,
+ * please make sure that folio_test_slab(folio) actually evaluates to
+ * false before calling the following functions (e.g., folio_test_anon).
+ * See mm/slab.h.
*/
-#define PAGE_MAPPING_ANON 0x1
-#define PAGE_MAPPING_MOVABLE 0x2
-#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define FOLIO_MAPPING_ANON 0x1
+#define FOLIO_MAPPING_ANON_KSM 0x2
+#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
-/*
- * Different with flags above, this flag is used only for fsdax mode. It
- * indicates that this page->mapping is now under reflink case.
- */
-#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)
-
-static __always_inline bool folio_mapping_flags(const struct folio *folio)
+static __always_inline bool folio_test_anon(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
-static __always_inline bool PageMappingFlags(const struct page *page)
+static __always_inline bool PageAnonNotKsm(const struct page *page)
{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
+ unsigned long flags = (unsigned long)page_folio(page)->mapping;
-static __always_inline bool folio_test_anon(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+ return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}
static __always_inline bool PageAnon(const struct page *page)
{
return folio_test_anon(page_folio(page));
}
-
-static __always_inline bool __folio_test_movable(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
-static __always_inline bool __PageMovable(const struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -729,16 +744,11 @@ static __always_inline bool __PageMovable(const struct page *page)
*/
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_KSM;
-}
-
-static __always_inline bool PageKsm(const struct page *page)
-{
- return folio_test_ksm(page_folio(page));
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
+ FOLIO_MAPPING_KSM;
}
#else
-TESTPAGEFLAG_FALSE(Ksm, ksm)
+FOLIO_TEST_FLAG_FALSE(ksm)
#endif
u64 stable_page_flags(const struct page *page);
@@ -828,8 +838,6 @@ void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
-#define folio_start_writeback_keepwrite(folio) \
- __folio_start_writeback(folio, true)
static __always_inline bool folio_test_head(const struct folio *folio)
{
@@ -839,7 +847,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
}
__SETPAGEFLAG(Head, head, PF_ANY)
@@ -874,28 +882,16 @@ static inline void ClearPageCompound(struct page *page)
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
+FOLIO_FLAG_FALSE(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
- * PageHuge() only returns true for hugetlbfs pages, but not for
- * normal or transparent huge pages.
- *
- * PageTransHuge() returns true for both transparent huge and
- * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
- * called only in the core VM paths where hugetlbfs pages can't exist.
- */
-static inline int PageTransHuge(const struct page *page)
-{
- VM_BUG_ON_PAGE(PageTail(page), page);
- return PageHead(page);
-}
-
-/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
@@ -904,21 +900,8 @@ static inline int PageTransCompound(const struct page *page)
{
return PageCompound(page);
}
-
-/*
- * PageTransTail returns true for both transparent huge pages
- * and hugetlbfs pages, so it should only be called when it's known
- * that hugetlbfs pages aren't involved.
- */
-static inline int PageTransTail(const struct page *page)
-{
- return PageTail(page);
-}
#else
-TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
-TESTPAGEFLAG_FALSE(TransTail, transtail)
#endif
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -928,78 +911,89 @@ TESTPAGEFLAG_FALSE(TransTail, transtail)
*
* This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
-PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
- TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
+FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
-PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
- TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
+FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif
/*
- * For pages that are never mapped to userspace,
- * page_type may be used. Because it is initialised to -1, we invert the
- * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
- * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of _mapcount won't be
- * mistaken for a page type value.
+ * For pages that do not use mapcount, page_type may be used.
+ * The low 24 bits of pagetype may be used for your own purposes, as long
+ * as you are careful to not affect the top 8 bits. The low bits of
+ * pagetype will be overwritten when you clear the page_type from the page.
*/
+enum pagetype {
+ /* 0x00-0x7f are positive numbers, ie mapcount */
+ /* Reserve 0x80-0xef for mapcount overflow. */
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+ PGTY_large_kmalloc = 0xf8,
+
+ PGTY_mapcount_underflow = 0xff
+};
-#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of _mapcount */
-#define PAGE_MAPCOUNT_RESERVE -128
-#define PG_buddy 0x00000080
-#define PG_offline 0x00000100
-#define PG_table 0x00000200
-#define PG_guard 0x00000400
-#define PG_hugetlb 0x00000800
-#define PG_slab 0x00001000
-
-#define PageType(page, flag) \
- ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
-#define folio_test_type(folio, flag) \
- ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
-
-static inline int page_type_has_type(unsigned int page_type)
+static inline bool page_type_has_type(int page_type)
{
- return (int)page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type < (PGTY_mapcount_underflow << 24);
}
-static inline int page_has_type(const struct page *page)
+/* This takes a mapcount which is one more than page->_mapcount */
+static inline bool page_mapcount_is_type(unsigned int mapcount)
{
- return page_type_has_type(page->page_type);
+ return page_type_has_type(mapcount - 1);
+}
+
+static inline bool page_has_type(const struct page *page)
+{
+ return page_type_has_type(data_race(page->page_type));
}
#define FOLIO_TYPE_OPS(lname, fname) \
-static __always_inline bool folio_test_##fname(const struct folio *folio)\
+static __always_inline bool folio_test_##fname(const struct folio *folio) \
{ \
- return folio_test_type(folio, PG_##lname); \
+ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __folio_set_##fname(struct folio *folio) \
{ \
- VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
- folio->page.page_type &= ~PG_##lname; \
+ if (folio_test_##fname(folio)) \
+ return; \
+ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
+ folio); \
+ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __folio_clear_##fname(struct folio *folio) \
{ \
+ if (folio->page.page_type == UINT_MAX) \
+ return; \
VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
- folio->page.page_type |= PG_##lname; \
+ folio->page.page_type = UINT_MAX; \
}
#define PAGE_TYPE_OPS(uname, lname, fname) \
FOLIO_TYPE_OPS(lname, fname) \
static __always_inline int Page##uname(const struct page *page) \
{ \
- return PageType(page, PG_##lname); \
+ return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(!PageType(page, 0), page); \
- page->page_type &= ~PG_##lname; \
+ if (Page##uname(page)) \
+ return; \
+ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
+ page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
+ if (page->page_type == UINT_MAX) \
+ return; \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- page->page_type |= PG_##lname; \
+ page->page_type = UINT_MAX; \
}
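As an aside, the encoding above is easy to model outside the kernel: the type
tag lives in the top 8 bits, UINT_MAX means "untyped, mapcount in use", and
clearing discards whatever the owner stored in the low 24 bits. A minimal
userspace sketch (fake_page and the pt_* helpers are illustrative, not kernel
API):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

enum pagetype { PGTY_buddy = 0xf0, PGTY_offline = 0xf1 };

struct fake_page { unsigned int page_type; };

/* Simplified page_type_has_type(): top byte 0xf0-0xfe is a type tag.
 * (The kernel's signed compare also catches the 0x80-0xef mapcount
 * overflow reserve.) */
static bool pt_has_type(unsigned int page_type)
{
	return page_type >= 0xf0000000u && page_type < 0xff000000u;
}

static void pt_set(struct fake_page *p, enum pagetype ty)
{
	assert(p->page_type == UINT_MAX);      /* must be untyped first */
	p->page_type = (unsigned int)ty << 24; /* low 24 bits now owner's */
}

static void pt_clear(struct fake_page *p)
{
	p->page_type = UINT_MAX; /* owner's low bits are lost, as documented */
}

int main(void)
{
	struct fake_page p = { .page_type = UINT_MAX };

	pt_set(&p, PGTY_buddy);
	p.page_type |= 42; /* owner-private data in the low 24 bits */
	assert(p.page_type >> 24 == PGTY_buddy && pt_has_type(p.page_type));
	pt_clear(&p);
	assert(!pt_has_type(p.page_type));
	puts("page_type encoding model OK");
	return 0;
}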
/*
@@ -1015,15 +1009,22 @@ PAGE_TYPE_OPS(Buddy, buddy, buddy)
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
*
+ * When a memory block gets onlined, all pages are initialized with a
+ * refcount of 1 and PageOffline(). generic_online_page() will
+ * take care of clearing PageOffline().
+ *
* If a driver wants to allow to offline unmovable PageOffline() pages without
* putting them back to the buddy, it can do so via the memory notifier by
* decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
* reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
- * pages (now with a reference count of zero) are treated like free pages,
- * allowing the containing memory block to get offlined. A driver that
+ * pages (now with a reference count of zero) are treated like free (unmanaged)
+ * pages, allowing the containing memory block to get offlined. A driver that
* relies on this feature is aware that re-onlining the memory block will
- * require to re-set the pages PageOffline() and not giving them to the
- * buddy via online_page_callback_t.
+ * require not giving them to the buddy via generic_online_page().
+ *
+ * Memory offlining code will not adjust the managed page count for any
+ * PageOffline() pages, treating them like they were never exposed to the
+ * buddy using generic_online_page().
*
* There are drivers that mark a page PageOffline() and expect there won't be
* any further access to page content. PFN walkers that read content of random
@@ -1047,19 +1048,7 @@ PAGE_TYPE_OPS(Table, table, pgtable)
*/
PAGE_TYPE_OPS(Guard, guard, guard)
-FOLIO_TYPE_OPS(slab, slab)
-
-/**
- * PageSlab - Determine if the page belongs to the slab allocator
- * @page: The page to test.
- *
- * Context: Any context.
- * Return: True for slab pages, false for any other kind of page.
- */
-static inline bool PageSlab(const struct page *page)
-{
- return folio_test_slab(page_folio(page));
-}
+PAGE_TYPE_OPS(Slab, slab, slab)
#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
@@ -1067,6 +1056,16 @@ FOLIO_TYPE_OPS(hugetlb, hugetlb)
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif
+PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+
+/*
+ * Mark pages that have to be accepted before being touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
+
/**
* PageHuge - Determine if the page belongs to hugetlbfs
* @page: The page to test.
@@ -1095,9 +1094,61 @@ static inline bool is_page_hwpoison(const struct page *page)
return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}
+static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
+{
+ return folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
+}
+
bool is_free_buddy_page(const struct page *page);
-PAGEFLAG(Isolated, isolated, PF_ANY);
+#ifdef CONFIG_MIGRATION
+/*
+ * This page is migratable through movable_ops (for selected typed pages
+ * only).
+ *
+ * Page migration of such pages might fail, for example, if the page is
+ * already isolated by somebody else, or if the page is about to get freed.
+ *
+ * While a subsystem might set selected typed pages that support page migration
+ * as being movable through movable_ops, it must never clear this flag.
+ *
+ * This flag is only cleared when the page is freed back to the buddy.
+ *
+ * Only selected page types support this flag (see page_movable_ops()) and
+ * the flag might be used in other contexts for other pages. Always use
+ * page_has_movable_ops() instead.
+ */
+TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+/*
+ * A movable_ops page has this flag set while it is isolated for migration.
+ * This flag primarily protects against concurrent migration attempts.
+ *
+ * Once migration ended (success or failure), the flag is cleared. The
+ * flag is managed by the migration core.
+ */
+PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
+#else /* !CONFIG_MIGRATION */
+TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
+SETPAGEFLAG_NOOP(MovableOps, movable_ops);
+PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * page_has_movable_ops - test for a movable_ops page
+ * @page: The page to test.
+ *
+ * Test whether this is a movable_ops page. Such pages will stay that
+ * way until freed.
+ *
+ * Returns true if this is a movable_ops page, otherwise false.
+ */
+static inline bool page_has_movable_ops(const struct page *page)
+{
+ return PageMovableOps(page) &&
+ (PageOffline(page) || PageZsmalloc(page));
+}
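The two flags above implement a small protocol: movable_ops is set once by the
owning subsystem and only goes away when the page is freed, while
movable_ops_isolated is owned by the migration core and doubles as a lock bit
that makes concurrent isolation attempts fail. A userspace sketch of that
protocol (fake_page and try_isolate are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool movable_ops; bool isolated; };

static bool try_isolate(struct fake_page *p)
{
	if (!p->movable_ops || p->isolated)
		return false; /* not migratable, or someone else holds it */
	p->isolated = true;
	return true;
}

int main(void)
{
	struct fake_page p = { .movable_ops = true };

	assert(try_isolate(&p));
	assert(!try_isolate(&p)); /* concurrent attempt is rejected */
	p.isolated = false;       /* migration ended; the core clears it */
	assert(try_isolate(&p));
	puts("movable_ops isolation model OK");
	return 0;
}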
static __always_inline int PageAnonExclusive(const struct page *page)
{
@@ -1108,28 +1159,28 @@ static __always_inline int PageAnonExclusive(const struct page *page)
*/
if (PageHuge(page))
page = compound_head(page);
- return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void SetPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
+ VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
#ifdef CONFIG_MMU
@@ -1166,25 +1217,20 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
#define PAGE_FLAGS_SECOND \
(0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
- 1UL << PG_large_rmappable)
+ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
- * page_has_private - Determine if page has private stuff
- * @page: The page to be checked
+ * folio_has_private - Determine if folio has private stuff
+ * @folio: The folio to be checked
*
- * Determine if a page has private stuff, indicating that release routines
+ * Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(const struct page *page)
-{
- return !!(page->flags & PAGE_FLAGS_PRIVATE);
-}
-
-static inline bool folio_has_private(const struct folio *folio)
+static inline int folio_has_private(const struct folio *folio)
{
- return page_has_private(&folio->page);
+ return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index c16db0067090..3e2f960e166c 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,10 +3,6 @@
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
-static inline bool has_isolate_pageblock(struct zone *zone)
-{
- return zone->nr_isolate_pageblock;
-}
static inline bool is_migrate_isolate_page(struct page *page)
{
return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -15,35 +11,60 @@ static inline bool is_migrate_isolate(int migratetype)
{
return migratetype == MIGRATE_ISOLATE;
}
+#define get_pageblock_isolate(page) \
+ get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define clear_pageblock_isolate(page) \
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define set_pageblock_isolate(page) \
+ set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
-static inline bool has_isolate_pageblock(struct zone *zone)
+static inline bool is_migrate_isolate_page(struct page *page)
{
return false;
}
-static inline bool is_migrate_isolate_page(struct page *page)
+static inline bool is_migrate_isolate(int migratetype)
{
return false;
}
-static inline bool is_migrate_isolate(int migratetype)
+static inline bool get_pageblock_isolate(struct page *page)
{
return false;
}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+}
#endif
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
+/*
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
+ */
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
-void set_pageblock_migratetype(struct page *page, int migratetype);
+void __meminit init_pageblock_migratetype(struct page *page,
+ enum migratetype migratetype,
+ bool isolate);
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags);
+ enum pb_isolate_mode mode);
-void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+ enum pb_isolate_mode mode);
#endif
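A hedged sketch of how a caller such as a contiguous-range allocator might use
the reworked interface, based only on the prototypes above (example_alloc_range
is illustrative; real callers also migrate pages away and handle partial
failure):

static int example_alloc_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* The old flag words are gone: intent is stated via the mode enum. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       PB_ISOLATE_MODE_CMA_ALLOC);
	if (ret)
		return ret;

	/* ... migrate everything movable out of [start_pfn, end_pfn) ... */

	ret = test_pages_isolated(start_pfn, end_pfn,
				  PB_ISOLATE_MODE_CMA_ALLOC);

	undo_isolate_page_range(start_pfn, end_pfn);
	return ret;
}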
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 8cd858d912c4..d649b6bbbc87 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -9,10 +9,12 @@
struct page_counter {
/*
- * Make sure 'usage' does not share cacheline with any other field. The
- * memcg->memory.usage is a hot member of struct mem_cgroup.
+ * Make sure 'usage' does not share cacheline with any other field in
+ * v2. The memcg->memory.usage is a hot member of struct mem_cgroup.
*/
atomic_long_t usage;
+ unsigned long failcnt; /* v1-only field */
+
CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
@@ -26,11 +28,14 @@ struct page_counter {
atomic_long_t children_low_usage;
unsigned long watermark;
- unsigned long failcnt;
+ /* Latest cg2 reset watermark */
+ unsigned long local_watermark;
 /* Keep all the read-mostly fields in a separate cacheline. */
CACHELINE_PADDING(_pad2_);
+ bool protection_support;
+ bool track_failcnt;
unsigned long min;
unsigned long low;
unsigned long high;
@@ -44,12 +49,18 @@ struct page_counter {
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
static inline void page_counter_init(struct page_counter *counter,
- struct page_counter *parent)
+ struct page_counter *parent,
+ bool protection_support)
{
- atomic_long_set(&counter->usage, 0);
+ counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
+ counter->protection_support = protection_support;
+ counter->track_failcnt = false;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -78,7 +89,24 @@ int page_counter_memparse(const char *buf, const char *max,
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
- counter->watermark = page_counter_read(counter);
+ unsigned long usage = page_counter_read(counter);
+
+ /*
+ * Update local_watermark first, so it's always <= watermark
+ * (modulo CPU/compiler re-ordering)
+ */
+ counter->local_watermark = usage;
+ counter->watermark = usage;
}
+#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
+void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection);
+#else
+static inline void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection) {}
+#endif
+
#endif /* _LINUX_PAGE_COUNTER_H */
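The reset helper now maintains two peaks: 'watermark' for the lifetime high and
'local_watermark' for the high since the last cgroup-v2 reset, with the write
ordering preserving local_watermark <= watermark for racing readers. A
single-threaded userspace model of the bookkeeping (fake_counter and its
helpers are illustrative):

#include <assert.h>
#include <stdio.h>

struct fake_counter { long usage, watermark, local_watermark; };

static void charge(struct fake_counter *c, long nr)
{
	c->usage += nr;
	if (c->usage > c->watermark)
		c->watermark = c->usage;
	if (c->usage > c->local_watermark)
		c->local_watermark = c->usage;
}

static void reset_watermark(struct fake_counter *c)
{
	/* local first, so local_watermark <= watermark always holds */
	c->local_watermark = c->usage;
	c->watermark = c->usage;
}

int main(void)
{
	struct fake_counter c = { 0, 0, 0 };

	charge(&c, 100);
	charge(&c, -40);
	reset_watermark(&c); /* both peaks collapse to current usage */
	assert(c.watermark == 60 && c.local_watermark == 60);
	charge(&c, 10);
	assert(c.local_watermark == 70);
	puts("watermark model OK");
	return 0;
}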
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index e4b48a0dda24..76c817162d2f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,6 +3,7 @@
#define __LINUX_PAGE_EXT_H
#include <linux/types.h>
+#include <linux/mmzone.h>
#include <linux/stacktrace.h>
struct pglist_data;
@@ -69,16 +70,31 @@ extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ /*
+ * page_ext is allocated per memory section. Once we cross a
+ * memory section, we have to fetch the new pointer.
+ */
+ return next_pfn % PAGES_PER_SECTION;
+}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ return true;
+}
#endif
extern struct page_ext *page_ext_get(const struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
+extern struct page_ext *page_ext_lookup(unsigned long pfn);
static inline void *page_ext_data(struct page_ext *page_ext,
struct page_ext_operations *ops)
@@ -93,6 +109,83 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
return next;
}
+struct page_ext_iter {
+ unsigned long index;
+ unsigned long start_pfn;
+ struct page_ext *page_ext;
+};
+
+/**
+ * page_ext_iter_begin() - Prepare for iterating through page extensions.
+ * @iter: page extension iterator.
+ * @pfn: PFN of the page we're interested in.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ */
+static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
+ unsigned long pfn)
+{
+ iter->index = 0;
+ iter->start_pfn = pfn;
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_next() - Get next page extension
+ * @iter: page extension iterator.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no next page_ext exists.
+ */
+static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
+{
+ unsigned long pfn;
+
+ if (WARN_ON_ONCE(!iter->page_ext))
+ return NULL;
+
+ iter->index++;
+ pfn = iter->start_pfn + iter->index;
+
+ if (page_ext_iter_next_fast_possible(pfn))
+ iter->page_ext = page_ext_next(iter->page_ext);
+ else
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_get() - Get current page extension
+ * @iter: page extension iterator.
+ *
+ * Return: NULL if no page_ext exists for this iterator.
+ */
+static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
+{
+ return iter->page_ext;
+}
+
+/**
+ * for_each_page_ext(): iterate through page_ext objects.
+ * @__page: the page we're interested in
+ * @__pgcount: how many pages to iterate through
+ * @__page_ext: struct page_ext pointer where the current page_ext
+ * object is returned
+ * @__iter: struct page_ext_iter object (defined on the stack)
+ *
+ * IMPORTANT: must be called with RCU read lock taken.
+ */
+#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \
+ for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
+ __page_ext && __iter.index < __pgcount; \
+ __page_ext = page_ext_iter_next(&__iter))
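A hedged usage sketch of the iterator above: visit the page_ext of every page
in a range, letting the iterator refetch across section boundaries
(example_walk_ext is illustrative, not part of this patch):

static void example_walk_ext(struct page *page, unsigned long nr_pages)
{
	struct page_ext_iter iter;
	struct page_ext *page_ext;

	rcu_read_lock(); /* required by page_ext_iter_begin()/_next() */
	for_each_page_ext(page, nr_pages, page_ext, iter) {
		/* e.g. read per-page metadata via page_ext_data() */
	}
	rcu_read_unlock();
}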
+
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
new file mode 100644
index 000000000000..41a91df82631
--- /dev/null
+++ b/include/linux/page_frag_cache.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PAGE_FRAG_CACHE_H
+#define _LINUX_PAGE_FRAG_CACHE_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm_types_task.h>
+#include <linux/types.h>
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization, as the shift
+ * operation usually expects a byte.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK GENMASK(7, 0)
+#else
+/* The compiler should be able to figure out we don't read things, as any
+ * value ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK 0
+#endif
+
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT (PAGE_FRAG_CACHE_ORDER_MASK + 1)
+
+static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page)
+{
+ return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+ nc->encoded_page = 0;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+ return encoded_page_decode_pfmemalloc(nc->encoded_page);
+}
+
+void page_frag_cache_drain(struct page_frag_cache *nc);
+void __page_frag_cache_drain(struct page *page, unsigned int count);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
+void page_frag_free(void *addr);
+
+#endif
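The -align trick above deserves a note: for a power-of-two align, two's
complement -align is a mask with the low log2(align) bits clear, so ANDing
rounds an offset down, while ~0u (used by page_frag_alloc()) leaves it
untouched. A runnable userspace check:

#include <assert.h>
#include <stdio.h>

static unsigned int align_down(unsigned int offset, unsigned int align_mask)
{
	return offset & align_mask;
}

int main(void)
{
	assert(align_down(4093, -64u) == 4032); /* 64-byte aligned fragment */
	assert(align_down(4093, ~0u) == 4093);  /* page_frag_alloc(): no-op */
	puts("align mask trick OK");
	return 0;
}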
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index debdc25f08b9..3328357f6dba 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, int old_order,
int new_order);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
-extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __folio_set_owner_migrate_reason(struct folio *folio, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -43,10 +43,10 @@ static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
if (static_branch_unlikely(&page_owner_inited))
__folio_copy_owner(newfolio, old);
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
- __set_page_owner_migrate_reason(page, reason);
+ __folio_set_owner_migrate_reason(folio, reason);
}
static inline void dump_page_owner(const struct page *page)
{
@@ -68,7 +68,7 @@ static inline void split_page_owner(struct page *page, int old_order,
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
}
static inline void dump_page_owner(const struct page *page)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 1acf5bac7f50..544150d1d5fd 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -230,7 +230,13 @@ static inline int folio_ref_dec_return(struct folio *folio)
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- bool ret = atomic_add_unless(&page->_refcount, nr, u);
+ bool ret = false;
+
+ rcu_read_lock();
+ /* avoid writing to the vmemmap area being remapped */
+ if (page_count_writable(page, u))
+ ret = atomic_add_unless(&page->_refcount, nr, u);
+ rcu_read_unlock();
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
@@ -258,54 +264,9 @@ static inline bool folio_try_get(struct folio *folio)
return folio_ref_add_unless(folio, 1, 0);
}
-static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
-{
-#ifdef CONFIG_TINY_RCU
- /*
- * The caller guarantees the folio will not be freed from interrupt
- * context, so (on !SMP) we only need preemption to be disabled
- * and TINY_RCU does that for us.
- */
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
- folio_ref_add(folio, count);
-#else
- if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
- /* Either the folio has been freed, or will be freed. */
- return false;
- }
-#endif
- return true;
-}
-
-/**
- * folio_try_get_rcu - Attempt to increase the refcount on a folio.
- * @folio: The folio.
- *
- * This is a version of folio_try_get() optimised for non-SMP kernels.
- * If you are still holding the rcu_read_lock() after looking up the
- * page and know that the page cannot have its refcount decreased to
- * zero in interrupt context, you can use this instead of folio_try_get().
- *
- * Example users include get_user_pages_fast() (as pages are not unmapped
- * from interrupt context) and the page cache lookups (as pages are not
- * truncated from interrupt context). We also know that pages are not
- * frozen in interrupt context for the purposes of splitting or migration.
- *
- * You can also use this function if you're holding a lock that prevents
- * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_lock or page table lock for page tables. In this case,
- * it will always succeed, and you could have used a plain folio_get(),
- * but it's sometimes more convenient to have a common function called
- * from both locked and RCU-protected contexts.
- *
- * Return: True if the reference count was successfully incremented.
- */
-static inline bool folio_try_get_rcu(struct folio *folio)
+static inline bool folio_ref_try_add(struct folio *folio, int count)
{
- return folio_ref_try_add_rcu(folio, 1);
+ return folio_ref_add_unless(folio, count, 0);
}
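folio_try_get() (via folio_ref_add_unless() above) exists for the classic
RCU-protected lookup pattern: take a speculative reference, then re-check. A
hedged sketch (example_lookup is illustrative; shadow-entry and re-check
details are trimmed):

static struct folio *example_lookup(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	rcu_read_lock();
	folio = xa_load(&mapping->i_pages, index);
	if (folio && xa_is_value(folio))
		folio = NULL; /* shadow entry, not a folio */
	/* The refcount may already be zero if the folio is being freed. */
	if (folio && !folio_try_get(folio))
		folio = NULL;
	rcu_read_unlock();
	/* Real callers re-check folio->mapping and index after the get. */
	return folio;
}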
static inline int page_ref_freeze(struct page *page, int count)
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 6722941c7cb8..289620d4aad3 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -19,8 +19,10 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
unsigned int nr);
-void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
-void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
+void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
+ unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
+ unsigned int nr);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -74,22 +76,22 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
__page_table_check_ptes_set(mm, ptep, pte, nr);
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
- pmd_t pmd)
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_set(mm, pmdp, pmd);
+ __page_table_check_pmds_set(mm, pmdp, pmd, nr);
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
- pud_t pud)
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_set(mm, pudp, pud);
+ __page_table_check_puds_set(mm, pudp, pud, nr);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -129,13 +131,13 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
{
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
- pmd_t pmd)
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
}
-static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
- pud_t pud)
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
{
}
@@ -146,4 +148,8 @@ static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
}
#endif /* CONFIG_PAGE_TABLE_CHECK */
+
+#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
+#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 547e82cdc89a..e046278a01fa 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -13,21 +13,37 @@
#include <linux/types.h>
-#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
- PB_migrate,
- PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
- /* 3 bits required for migrate types */
- PB_migrate_skip,/* If set the block is skipped by compaction */
+ PB_migrate_0,
+ PB_migrate_1,
+ PB_migrate_2,
+ PB_compact_skip,/* If set the block is skipped by compaction */
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * Pageblock isolation is represented with a separate bit, so that
+ * the migratetype of a block is not overwritten by isolation.
+ */
+ PB_migrate_isolate, /* If set the block is isolated */
+#endif
/*
* Assume the bits will always align on a word. If this assumption
* changes then get/set pageblock needs updating.
*/
- NR_PAGEBLOCK_BITS
+ __NR_PAGEBLOCK_BITS
};
+#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS))
+
+#define MIGRATETYPE_MASK (BIT(PB_migrate_0)|BIT(PB_migrate_1)|BIT(PB_migrate_2))
+
+#ifdef CONFIG_MEMORY_ISOLATION
+#define MIGRATETYPE_AND_ISO_MASK (MIGRATETYPE_MASK | BIT(PB_migrate_isolate))
+#else
+#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK
+#endif
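With isolation split out, five bits are in use under CONFIG_MEMORY_ISOLATION
and NR_PAGEBLOCK_BITS rounds up to eight per block; the point of the separate
bit is that toggling isolation no longer clobbers the stored migratetype. A
runnable userspace model of the layout (MT_MASK and ISO_BIT mirror
MIGRATETYPE_MASK and BIT(PB_migrate_isolate)):

#include <assert.h>
#include <stdio.h>

#define MT_MASK 0x7u       /* PB_migrate_0..2 */
#define ISO_BIT (1u << 4)  /* PB_migrate_isolate */

int main(void)
{
	unsigned int block = 2;         /* migratetype 2, not isolated */

	block |= ISO_BIT;               /* set_pageblock_isolate() */
	assert((block & MT_MASK) == 2); /* migratetype survives isolation */
	block &= ~ISO_BIT;              /* clear_pageblock_isolate() */
	assert(block == 2);
	puts("pageblock bit model OK");
	return 0;
}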
+
#if defined(CONFIG_HUGETLB_PAGE)
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -41,18 +57,18 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#define pageblock_order min_t(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER)
+#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_MAX_ORDER)
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order MAX_PAGE_ORDER
+/* If huge pages are not used, group by PAGE_BLOCK_MAX_ORDER */
+#define pageblock_order PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
@@ -65,27 +81,23 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(const struct page *page,
- unsigned long pfn,
- unsigned long mask);
-
-void set_pfnblock_flags_mask(struct page *page,
- unsigned long flags,
- unsigned long pfn,
- unsigned long mask);
+enum migratetype get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn);
+bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void set_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- (1 << (PB_migrate_skip)))
+ get_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define clear_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define set_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
- page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ set_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3d69589c00a4..31a848485ad9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,10 +32,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait);
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
+int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
@@ -51,14 +54,10 @@ static inline int filemap_fdatawait(struct address_space *mapping)
bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
-int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
-int filemap_fdatawrite_wbc(struct address_space *mapping,
- struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
static inline int filemap_write_and_wait(struct address_space *mapping)
@@ -138,7 +137,7 @@ static inline int inode_drain_writes(struct inode *inode)
return filemap_write_and_wait(inode->i_mapping);
}
-static inline bool mapping_empty(struct address_space *mapping)
+static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
@@ -164,7 +163,7 @@ static inline bool mapping_empty(struct address_space *mapping)
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
-static inline bool mapping_shrinkable(struct address_space *mapping)
+static inline bool mapping_shrinkable(const struct address_space *mapping)
{
void *head;
@@ -204,13 +203,24 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_LARGE_FOLIO_SUPPORT = 6,
- AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
- AS_STABLE_WRITES, /* must wait for writeback before modifying
+ AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
folio contents */
- AS_UNMOVABLE, /* The mapping cannot be moved, ever */
+ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
+ AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
+ AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
+ account usage to user cgroups */
+ /* Bits 16-25 are used for FOLIO_ORDER */
+ AS_FOLIO_ORDER_BITS = 5,
+ AS_FOLIO_ORDER_MIN = 16,
+ AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
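The masks above pack two 5-bit folio orders into bits 16-25 of mapping->flags.
A runnable userspace model of the round trip (the ORDER_* names are
illustrative stand-ins for the AS_FOLIO_ORDER_* constants):

#include <assert.h>
#include <stdio.h>

#define ORDER_BITS   5
#define ORDER_MIN_SH 16
#define ORDER_MAX_SH (ORDER_MIN_SH + ORDER_BITS)
#define ORDER_FIELD  ((1ul << ORDER_BITS) - 1)
#define ORDER_MASK   ((ORDER_FIELD << ORDER_MIN_SH) | (ORDER_FIELD << ORDER_MAX_SH))

static unsigned long set_range(unsigned long flags, unsigned int min,
			       unsigned int max)
{
	return (flags & ~ORDER_MASK) |
	       ((unsigned long)min << ORDER_MIN_SH) |
	       ((unsigned long)max << ORDER_MAX_SH);
}

int main(void)
{
	unsigned long flags = set_range(0x2a /* unrelated bits */, 2, 9);

	assert(((flags >> ORDER_MIN_SH) & ORDER_FIELD) == 2);
	assert(((flags >> ORDER_MAX_SH) & ORDER_FIELD) == 9);
	assert((flags & ~ORDER_MASK) == 0x2a); /* other flags untouched */
	puts("folio order range encoding OK");
	return 0;
}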
+
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
@@ -254,7 +264,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
@@ -264,7 +274,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
set_bit(AS_EXITING, &mapping->flags);
}
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
@@ -274,7 +284,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
@@ -309,29 +319,39 @@ static inline void mapping_clear_stable_writes(struct address_space *mapping)
clear_bit(AS_STABLE_WRITES, &mapping->flags);
}
-static inline void mapping_set_unmovable(struct address_space *mapping)
+static inline void mapping_set_inaccessible(struct address_space *mapping)
{
/*
- * It's expected unmovable mappings are also unevictable. Compaction
+ * It's expected that inaccessible mappings are also unevictable. Compaction
* migrate scanner (isolate_migratepages_block()) relies on this to
* reduce page locking.
*/
set_bit(AS_UNEVICTABLE, &mapping->flags);
- set_bit(AS_UNMOVABLE, &mapping->flags);
+ set_bit(AS_INACCESSIBLE, &mapping->flags);
+}
+
+static inline bool mapping_inaccessible(const struct address_space *mapping)
+{
+ return test_bit(AS_INACCESSIBLE, &mapping->flags);
+}
+
+static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+{
+ set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline bool mapping_unmovable(struct address_space *mapping)
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
- return test_bit(AS_UNMOVABLE, &mapping->flags);
+ return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
@@ -346,9 +366,84 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about the maximum order if THP is disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages).
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define PREFERRED_MAX_PAGECACHE_ORDER 8
+#endif
+
+/*
+ * xas_split_alloc() does not support arbitrary orders. This implies no
+ * 512MB THP on ARM64 with 64KB base page size.
+ */
+#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
+#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+
+/*
+ * mapping_max_folio_size_supported() - Check the max folio size supported
+ *
+ * The filesystem should call this function at mount time if there is a
+ * requirement on the folio mapping size in the page cache.
+ */
+static inline size_t mapping_max_folio_size_supported(void)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ return PAGE_SIZE;
+}
+
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (ie there is
+ * something the core cannot know).
+ * Do not tune it based on, eg, i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+ unsigned int min,
+ unsigned int max)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return;
+
+ if (min > MAX_PAGECACHE_ORDER)
+ min = MAX_PAGECACHE_ORDER;
+
+ if (max > MAX_PAGECACHE_ORDER)
+ max = MAX_PAGECACHE_ORDER;
+
+ if (max < min)
+ max = min;
+
+ mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+ (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+ unsigned int min)
+{
+ mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
- * @mapping: The file.
+ * @mapping: The address space of the file.
*
* The filesystem should call this function in its inode constructor to
* indicate that the VFS can use large folios to cache the contents of
@@ -359,20 +454,72 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
*/
static inline void mapping_set_large_folios(struct address_space *mapping)
{
- __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
+}
+
+static inline unsigned long
+mapping_min_folio_nrpages(const struct address_space *mapping)
+{
+ return 1UL << mapping_min_folio_order(mapping);
+}
+
+static inline unsigned long
+mapping_min_folio_nrbytes(const struct address_space *mapping)
+{
+ return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ * @index: The page index.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(const struct address_space *mapping,
+ pgoff_t index)
+{
+ return round_down(index, mapping_min_folio_nrpages(mapping));
}
/*
* Large folio support currently depends on THP. These dependencies are
* being worked on but are not yet fixed.
*/
-static inline bool mapping_large_folio_support(struct address_space *mapping)
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
- return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
- test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+ /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
+ VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
+ "Anonymous mapping always supports large folio");
+
+ return mapping_max_folio_order(mapping) > 0;
}
-static inline int filemap_nr_thps(struct address_space *mapping)
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
+{
+ return PAGE_SIZE << mapping_max_folio_order(mapping);
+}
+
+static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
@@ -401,29 +548,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
#endif
}
-struct address_space *page_mapping(struct page *);
-struct address_space *folio_mapping(struct folio *);
-struct address_space *swapcache_mapping(struct folio *);
-
-/**
- * folio_file_mapping - Find the mapping this folio belongs to.
- * @folio: The folio.
- *
- * For folios which are in the page cache, return the mapping that this
- * page belongs to. Folios in the swap cache return the mapping of the
- * swap file or swap device where the data is stored. This is different
- * from the mapping returned by folio_mapping(). The only reason to
- * use it is if, like NFS, you return 0 from ->activate_swapfile.
- *
- * Do not call this for folios which aren't in the page cache or swap cache.
- */
-static inline struct address_space *folio_file_mapping(struct folio *folio)
-{
- if (unlikely(folio_test_swapcache(folio)))
- return swapcache_mapping(folio);
-
- return folio->mapping;
-}
+struct address_space *folio_mapping(const struct folio *folio);
/**
* folio_flush_mapping - Find the file mapping this folio belongs to.
@@ -445,11 +570,6 @@ static inline struct address_space *folio_flush_mapping(struct folio *folio)
return folio_mapping(folio);
}
-static inline struct address_space *page_file_mapping(struct page *page)
-{
- return folio_file_mapping(page_folio(page));
-}
-
/**
* folio_inode - Get the host inode for this folio.
* @folio: The folio.
@@ -530,23 +650,12 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
#ifdef CONFIG_NUMA
-struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *policy);
#else
-static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *policy)
{
return folio_alloc_noprof(gfp, order);
}
@@ -557,7 +666,7 @@ static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int o
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
- return &filemap_alloc_folio(gfp, 0)->page;
+ return &filemap_alloc_folio(gfp, 0, NULL)->page;
}
static inline gfp_t readahead_gfp_mask(struct address_space *x)
@@ -593,6 +702,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't block on the folio lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_DONTCACHE - Uncached buffered IO
* * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
* implementation.
*/
@@ -606,10 +716,21 @@ typedef unsigned int __bitwise fgf_t;
#define FGP_NOWAIT ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGP_DONTCACHE ((__force fgf_t)0x00000100)
#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+static inline unsigned int filemap_get_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+
+ return shift - PAGE_SHIFT;
+}
+
/**
* fgf_set_order - Encode a length in the fgf_t flags.
* @size: The suggested size of the folio to create.
@@ -623,19 +744,52 @@ typedef unsigned int __bitwise fgf_t;
*/
static inline fgf_t fgf_set_order(size_t size)
{
- unsigned int shift = ilog2(size);
+ unsigned int order = filemap_get_order(size);
- if (shift <= PAGE_SHIFT)
+ if (!order)
return 0;
- return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+ return (__force fgf_t)(order << 26);
}
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
-struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
- fgf_t fgp_flags, gfp_t gfp);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+ pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
fgf_t fgp_flags, gfp_t gfp);
+static inline struct folio *__filemap_get_folio(struct address_space *mapping,
+ pgoff_t index, fgf_t fgf_flags, gfp_t gfp)
+{
+ return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
+}
+
+/**
+ * write_begin_get_folio - Get folio for write_begin with flags.
+ * @iocb: The kiocb passed from write_begin (may be NULL).
+ * @mapping: The address space to search.
+ * @index: The page cache index.
+ * @len: Length of data being written.
+ *
+ * This is a helper for filesystem write_begin() implementations.
+ * It wraps __filemap_get_folio(), setting appropriate flags in
+ * the write begin context.
+ *
+ * Return: A folio or an ERR_PTR.
+ */
+static inline struct folio *write_begin_get_folio(const struct kiocb *iocb,
+ struct address_space *mapping, pgoff_t index, size_t len)
+{
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
+
+ fgp_flags |= fgf_set_order(len);
+
+ if (iocb && iocb->ki_flags & IOCB_DONTCACHE)
+ fgp_flags |= FGP_DONTCACHE;
+
+ return __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+}
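A hedged sketch of a filesystem-side caller (example_write_begin is
illustrative and its parameter list is abbreviated to what's used here, not the
exact ->write_begin() prototype):

static int example_write_begin(const struct kiocb *iocb,
		struct address_space *mapping, loff_t pos, unsigned int len,
		struct folio **foliop)
{
	struct folio *folio;

	folio = write_begin_get_folio(iocb, mapping, pos >> PAGE_SHIFT, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* Locked folio, sized via fgf_set_order(len) where possible, with
	 * FGP_DONTCACHE applied when the iocb asked for uncached I/O. */
	*foliop = folio;
	return 0;
}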
+
/**
* filemap_get_folio - Find and get a folio.
* @mapping: The address_space to search.
@@ -764,7 +918,8 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
* @mapping: target address_space
* @index: the page index
*
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * Returns the locked page at the given index in the given cache, creating it
+ * if needed, but does not wait if the page is locked, nor block to reclaim memory.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
@@ -780,35 +935,26 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-#define swapcache_index(folio) __page_file_index(&(folio)->page)
-
/**
- * folio_index - File index of a folio.
- * @folio: The folio.
- *
- * For a folio which is either in the page cache or the swap cache,
- * return its index within the address_space it belongs to. If you know
- * the page is definitely in the page cache, you can look at the folio's
- * index directly.
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
*
- * Return: The index (offset in units of pages) of a folio in its file.
+ * Return: The index of the folio which follows this folio in the file.
*/
-static inline pgoff_t folio_index(struct folio *folio)
+static inline pgoff_t folio_next_index(const struct folio *folio)
{
- if (unlikely(folio_test_swapcache(folio)))
- return swapcache_index(folio);
- return folio->index;
+ return folio->index + folio_nr_pages(folio);
}
/**
- * folio_next_index - Get the index of the next folio.
+ * folio_next_pos - Get the file position of the next folio.
* @folio: The current folio.
*
- * Return: The index of the folio which follows this folio in the file.
+ * Return: The position of the folio which follows this folio in the file.
*/
-static inline pgoff_t folio_next_index(struct folio *folio)
+static inline loff_t folio_next_pos(const struct folio *folio)
{
- return folio->index + folio_nr_pages(folio);
+ return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
}
/**
@@ -831,27 +977,14 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
* @folio: The folio.
* @index: The page index within the file.
*
- * Context: The caller should have the page locked in order to prevent
- * (eg) shmem from moving the page between the page cache and swap cache
- * and changing its index in the middle of the operation.
+ * Context: The caller should have the folio locked and ensure that,
+ * e.g., shmem has not moved this folio to the swap cache.
* Return: true or false.
*/
-static inline bool folio_contains(struct folio *folio, pgoff_t index)
+static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
- return index - folio_index(folio) < folio_nr_pages(folio);
-}
-
-/*
- * Given the page we found in the page cache, return the page corresponding
- * to this index in the file
- */
-static inline struct page *find_subpage(struct page *head, pgoff_t index)
-{
- /* HugeTLBfs wants the head page regardless */
- if (PageHuge(head))
- return head;
-
- return head + (index & (thp_nr_pages(head) - 1));
+ VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
+ return index - folio->index < folio_nr_pages(folio);
}
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
@@ -860,18 +993,8 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
-
-struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index);
-
-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-static inline struct page *grab_cache_page(struct address_space *mapping,
- pgoff_t index)
-{
- return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
-}
+unsigned filemap_get_folios_dirty(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
@@ -894,68 +1017,56 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
return read_cache_folio(mapping, index, NULL, file);
}
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
- struct page *head;
-
- if (likely(!PageTransTail(page)))
- return page->index;
-
- head = compound_head(page);
- /*
- * We don't initialize ->index for tail pages: calculate based on
- * head page
- */
- return head->index + page - head;
-}
-
-/*
- * Return byte-offset into filesystem object for page.
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE. For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE. This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
*/
-static inline loff_t page_offset(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+ const struct page *page)
{
- return ((loff_t)page->index) << PAGE_SHIFT;
-}
-
-static inline loff_t page_file_offset(struct page *page)
-{
- return ((loff_t)page_index(page)) << PAGE_SHIFT;
+ return folio->index + folio_page_idx(folio, page);
}
/**
* folio_pos - Returns the byte position of this folio in its file.
* @folio: The folio.
*/
-static inline loff_t folio_pos(struct folio *folio)
+static inline loff_t folio_pos(const struct folio *folio)
{
- return page_offset(&folio->page);
+ return ((loff_t)folio->index) * PAGE_SIZE;
}
-/**
- * folio_file_pos - Returns the byte position of this folio in its file.
- * @folio: The folio.
- *
- * This differs from folio_pos() for folios which belong to a swap file.
- * NFS is the only filesystem today which needs to use folio_file_pos().
+/*
+ * Return byte-offset into filesystem object for page.
*/
-static inline loff_t folio_file_pos(struct folio *folio)
+static inline loff_t page_offset(struct page *page)
{
- return page_file_offset(&folio->page);
+ struct folio *folio = page_folio(page);
+
+ return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
*/
-static inline pgoff_t folio_pgoff(struct folio *folio)
+static inline pgoff_t folio_pgoff(const struct folio *folio)
{
return folio->index;
}
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
- unsigned long address)
+static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
+ const unsigned long address)
{
pgoff_t pgoff;
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
@@ -1130,18 +1241,14 @@ static inline int folio_wait_locked_killable(struct folio *folio)
return folio_wait_bit_killable(folio, PG_locked);
}
-static inline void wait_on_page_locked(struct page *page)
-{
- folio_wait_locked(page_folio(page));
-}
-
void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
-void wait_for_stable_page(struct page *page);
+void folio_end_writeback_no_dropbehind(struct folio *folio);
+void folio_end_dropbehind(struct folio *folio);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
@@ -1168,11 +1275,6 @@ void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);
/*
- * Add an arbitrary waiter to a page's wait queue
- */
-void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
-
-/*
* Fault in userspace address range.
*/
size_t fault_in_writeable(char __user *uaddr, size_t size);
@@ -1230,9 +1332,9 @@ static inline bool filemap_range_needs_writeback(struct address_space *mapping,
* struct readahead_control - Describes a readahead request.
*
* A readahead request is for consecutive pages. Filesystems which
- * implement the ->readahead method should call readahead_page() or
- * readahead_page_batch() in a loop and attempt to start I/O against
- * each page in the request.
+ * implement the ->readahead method should call readahead_folio() or
+ * __readahead_batch() in a loop and attempt to start reads into each
+ * folio in the request.
*
* Most of the fields in this struct are private and should be accessed
* by the functions below.
@@ -1250,6 +1352,7 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ bool dropbehind;
bool _workingset;
unsigned long _pflags;
};
@@ -1299,8 +1402,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @file: Used by the filesystem for authentication.
- * @folio: The folio at @index which triggered the readahead call.
- * @index: Index of first page to be read.
+ * @folio: The folio which triggered the readahead call.
* @req_count: Total number of pages being read by the caller.
*
* page_cache_async_readahead() should be called when a page is used which
@@ -1311,9 +1413,9 @@ void page_cache_sync_readahead(struct address_space *mapping,
static inline
void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
- struct folio *folio, pgoff_t index, unsigned long req_count)
+ struct folio *folio, unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, ra, mapping, index);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
page_cache_async_ra(&ractl, folio, req_count);
}
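
A sketch of a call site updated for the new signature (illustrative only): the separate index argument is gone because the folio's own index now seeds the readahead control.

static void demo_on_folio_hit(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *file,
			      struct folio *folio, unsigned long req_count)
{
	/* kick async readahead when the marker folio is reached */
	if (folio_test_readahead(folio))
		page_cache_async_readahead(mapping, ra, file, folio,
					   req_count);
}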
@@ -1338,22 +1440,6 @@ static inline struct folio *__readahead_folio(struct readahead_control *ractl)
}
/**
- * readahead_page - Get the next page to read.
- * @ractl: The current readahead request.
- *
- * Context: The page is locked and has an elevated refcount. The caller
- * should decreases the refcount once the page has been submitted for I/O
- * and unlock the page once all I/O to that page has completed.
- * Return: A pointer to the next page, or %NULL if we are done.
- */
-static inline struct page *readahead_page(struct readahead_control *ractl)
-{
- struct folio *folio = __readahead_folio(ractl);
-
- return &folio->page;
-}
-
-/**
* readahead_folio - Get the next folio to read.
* @ractl: The current readahead request.
*
@@ -1375,7 +1461,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
{
unsigned int i = 0;
XA_STATE(xas, &rac->mapping->i_pages, 0);
- struct page *page;
+ struct folio *folio;
BUG_ON(rac->_batch_count > rac->_nr_pages);
rac->_nr_pages -= rac->_batch_count;
@@ -1384,13 +1470,12 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
xas_set(&xas, rac->_index);
rcu_read_lock();
- xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, folio))
continue;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageTail(page), page);
- array[i++] = page;
- rac->_batch_count += thp_nr_pages(page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ array[i++] = folio_page(folio, 0);
+ rac->_batch_count += folio_nr_pages(folio);
if (i == array_sz)
break;
}
@@ -1400,24 +1485,10 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
}
/**
- * readahead_page_batch - Get a batch of pages to read.
- * @rac: The current readahead request.
- * @array: An array of pointers to struct page.
- *
- * Context: The pages are locked and have an elevated refcount. The caller
- * should decreases the refcount once the page has been submitted for I/O
- * and unlock the page once all I/O to that page has completed.
- * Return: The number of pages placed in the array. 0 indicates the request
- * is complete.
- */
-#define readahead_page_batch(rac, array) \
- __readahead_batch(rac, array, ARRAY_SIZE(array))
-
-/**
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
@@ -1426,7 +1497,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
@@ -1435,7 +1506,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
@@ -1444,7 +1515,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
@@ -1453,12 +1524,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
@@ -1472,8 +1543,8 @@ static inline unsigned long dir_pages(struct inode *inode)
* Return: the number of bytes in the folio up to EOF,
* or -EFAULT if the folio was truncated.
*/
-static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
- struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
+ const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
@@ -1493,34 +1564,6 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
}
/**
- * page_mkwrite_check_truncate - check if page was truncated
- * @page: the page to check
- * @inode: the inode to check the page against
- *
- * Returns the number of bytes in the page up to EOF,
- * or -EFAULT if the page was truncated.
- */
-static inline int page_mkwrite_check_truncate(struct page *page,
- struct inode *inode)
-{
- loff_t size = i_size_read(inode);
- pgoff_t index = size >> PAGE_SHIFT;
- int offset = offset_in_page(size);
-
- if (page->mapping != inode->i_mapping)
- return -EFAULT;
-
- /* page is wholly inside EOF */
- if (page->index < index)
- return PAGE_SIZE;
- /* page is wholly past EOF */
- if (page->index > index || !offset)
- return -EFAULT;
- /* page is partially inside EOF */
- return offset;
-}
-
-/**
* i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
* @folio: The folio.
@@ -1532,14 +1575,9 @@ static inline int page_mkwrite_check_truncate(struct page *page,
* Return: The number of filesystem blocks covered by this folio.
*/
static inline
-unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+unsigned int i_blocks_per_folio(const struct inode *inode,
+ const struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
-
-static inline
-unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
-{
- return i_blocks_per_folio(inode, page_folio(page));
-}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5d3a0cccc6bf..63be5a451627 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -51,12 +51,12 @@ static inline void folio_batch_reinit(struct folio_batch *fbatch)
fbatch->i = 0;
}
-static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
{
return fbatch->nr;
}
-static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
return PAGEVEC_SIZE - fbatch->nr;
}
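
A small usage sketch for the now const-qualified batch helpers, assuming the caller already holds a reference on each folio it adds; folio_batch_release() drops the accumulated references and resets the batch.

static void demo_collect(struct folio_batch *fbatch, struct folio *folio)
{
	/* folio_batch_add() returns the number of slots left */
	if (!folio_batch_add(fbatch, folio))
		folio_batch_release(fbatch);	/* batch full: put refs */
}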
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27cd1e59ccf7..88e18615dd72 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -14,6 +14,8 @@ enum page_walk_lock {
PGWALK_WRLOCK = 1,
/* vma is expected to be already write-locked during the walk */
PGWALK_WRLOCK_VERIFY = 2,
+ /* vma is expected to be already read-locked during the walk */
+ PGWALK_VMA_RDLOCK_VERIFY = 3,
};
/**
@@ -25,12 +27,15 @@ enum page_walk_lock {
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each PTE (lowest-level) entry,
- * including empty ones
+ * @pte_entry: if set, called for each PTE (lowest-level) entry
+ * including empty ones, except if @install_pte is set.
+ * If @install_pte is set, @pte_entry is called only for
+ * existing PTEs.
* @pte_hole: if set, called for each hole at all levels,
* depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
* Any folded depths (where PTRS_PER_P?D is equal to 1)
- * are skipped.
+ * are skipped. If @install_pte is specified, this will
+ * not trigger for any populated ranges.
* @hugetlb_entry: if set, called for each hugetlb entry. This hook
* function is called with the vma lock held, in order to
* protect against a concurrent freeing of the pte_t* or
@@ -51,6 +56,13 @@ enum page_walk_lock {
* @pre_vma: if set, called before starting walk on a non-null vma.
* @post_vma: if set, called after a walk on a non-null vma, provided
* that @pre_vma and the vma walk succeeded.
+ * @install_pte: if set, missing page table entries are installed and
+ * thus all levels are always walked in the specified
+ * range. This callback is then invoked at the PTE level
+ * (having split any THP pages prior), providing the PTE to
+ * install. If allocations fail, the walk is aborted. This
+ * operation is only available for userland memory. Not
+ * usable for hugetlb ranges.
*
* p?d_entry callbacks are called even if those levels are folded on a
* particular architecture/configuration.
@@ -76,6 +88,8 @@ struct mm_walk_ops {
int (*pre_vma)(unsigned long start, unsigned long end,
struct mm_walk *walk);
void (*post_vma)(struct mm_walk *walk);
+ int (*install_pte)(unsigned long addr, unsigned long next,
+ pte_t *ptep, struct mm_walk *walk);
enum page_walk_lock walk_lock;
};
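
A hedged sketch of an @install_pte handler to make the new callback concrete; all demo_ names are illustrative, and the reference counting and rmap/memcg accounting a real handler needs are reduced to a comment.

static int demo_install_pte(unsigned long addr, unsigned long next,
			    pte_t *ptep, struct mm_walk *walk)
{
	struct page *page = walk->private;	/* pre-allocated page */

	/* a real handler must take its own page reference and do
	 * rmap/memcg accounting before installing the entry */
	set_pte_at(walk->mm, addr, ptep,
		   mk_pte(page, walk->vma->vm_page_prot));
	return 0;
}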
@@ -117,10 +131,12 @@ struct mm_walk {
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
-int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
- unsigned long end, const struct mm_walk_ops *ops,
- pgd_t *pgd,
- void *private);
+int walk_kernel_page_table_range(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
+int walk_kernel_page_table_range_lockless(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
@@ -130,4 +146,62 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private);
+typedef int __bitwise folio_walk_flags_t;
+
+/*
+ * Walk migration entries as well. Careful: a large folio might get split
+ * concurrently.
+ */
+#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
+
+/* Walk shared zeropages (small + huge) as well. */
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+
+enum folio_walk_level {
+ FW_LEVEL_PTE,
+ FW_LEVEL_PMD,
+ FW_LEVEL_PUD,
+};
+
+/**
+ * struct folio_walk - folio_walk_start() / folio_walk_end() data
+ * @page: exact folio page referenced (if applicable)
+ * @level: page table level identifying the entry type
+ * @pte: pointer to the page table entry (FW_LEVEL_PTE).
+ * @pmd: pointer to the page table entry (FW_LEVEL_PMD).
+ * @pud: pointer to the page table entry (FW_LEVEL_PUD).
+ * @ptl: pointer to the page table lock.
+ *
+ * (see folio_walk_start() documentation for more details)
+ */
+struct folio_walk {
+ /* public */
+ struct page *page;
+ enum folio_walk_level level;
+ union {
+ pte_t *ptep;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ };
+ union {
+ pte_t pte;
+ pud_t pud;
+ pmd_t pmd;
+ };
+ /* private */
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+};
+
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags);
+
+#define folio_walk_end(__fw, __vma) do { \
+ spin_unlock((__fw)->ptl); \
+ if (likely((__fw)->level == FW_LEVEL_PTE)) \
+ pte_unmap((__fw)->ptep); \
+ vma_pgtable_walk_end(__vma); \
+} while (0)
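
A minimal folio_walk usage sketch, assuming the caller holds the mmap lock for the VMA; between folio_walk_start() and folio_walk_end() the page table lock in fw.ptl is held and fw.page/fw.level are valid.

static void demo_inspect(struct vm_area_struct *vma, unsigned long addr)
{
	struct folio_walk fw;
	struct folio *folio = folio_walk_start(&fw, vma, addr, 0);

	if (!folio)
		return;		/* nothing mapped at addr */
	pr_info("folio mapped at level %d\n", fw.level);
	folio_walk_end(&fw, vma);
}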
+
#endif /* _LINUX_PAGEWALK_H */
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 6717b15e798c..a00bc0937698 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -3,6 +3,7 @@
#define _LINUX_PANIC_H
#include <linux/compiler_attributes.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
struct pt_regs;
@@ -10,24 +11,23 @@ struct pt_regs;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
+__printf(1, 0)
+void vpanic(const char *fmt, va_list args) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
void check_panic_on_warn(const char *origin);
extern void oops_enter(void);
extern void oops_exit(void);
extern bool oops_may_print(void);
+extern bool panic_triggering_all_cpu_backtrace;
extern int panic_timeout;
extern unsigned long panic_print;
extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
extern int panic_on_warn;
extern unsigned long panic_on_taint;
extern bool panic_on_taint_nousertaint;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_max_rcu_stall_to_panic;
extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
@@ -43,6 +43,12 @@ void abort(void);
extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1
+bool panic_try_start(void);
+void panic_reset(void);
+bool panic_in_progress(void);
+bool panic_on_this_cpu(void);
+bool panic_on_other_cpu(void);
+
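
A hedged sketch of how an arch fatal-error path might use the new helpers, on the assumption that panic_try_start() returns true only for the CPU that wins the race to own the panic.

static void demo_fatal_error(void)
{
	if (panic_try_start())
		panic("demo: unrecoverable error");

	/* another CPU owns the panic: spin rather than racing it */
	if (panic_on_other_cpu())
		for (;;)
			cpu_relax();
}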
/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
@@ -73,13 +79,14 @@ static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
#define TAINT_TEST 18
-#define TAINT_FLAGS_COUNT 19
+#define TAINT_FWCTL 19
+#define TAINT_FLAGS_COUNT 20
#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ const char *desc; /* verbose description of the set taint flag */
};
extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
@@ -90,6 +97,7 @@ enum lockdep_ok {
};
extern const char *print_tainted(void);
+extern const char *print_tainted_verbose(void);
extern void add_taint(unsigned flag, enum lockdep_ok);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
diff --git a/include/linux/parport.h b/include/linux/parport.h
index fff39bc30629..464c2ad28039 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -252,13 +252,10 @@ struct parport {
struct parport_driver {
const char *name;
- void (*attach) (struct parport *);
void (*detach) (struct parport *);
void (*match_port)(struct parport *);
int (*probe)(struct pardevice *);
struct device_driver driver;
- bool devmodel;
- struct list_head list;
};
#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
@@ -301,9 +298,6 @@ int __must_check __parport_register_driver(struct parport_driver *,
* to receive notifications about ports being found in the
* system, as well as ports no longer available.
*
- * If devmodel is true then the new device model is used
- * for registration.
- *
* The @driver structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index ac8c44dd8237..729415e91215 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -17,8 +17,8 @@ struct disk_stats {
/*
* Macros to operate on percpu disk statistics:
*
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
- * be called between disk_stat_lock() and disk_stat_unlock().
+ * part_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between part_stat_lock() and part_stat_unlock().
*
* part_stat_read() can be called at any time.
*/
@@ -33,7 +33,7 @@ struct disk_stats {
#define part_stat_read(part, field) \
({ \
- typeof((part)->bd_stats->field) res = 0; \
+ TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
@@ -79,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_local_read_cpu(part, field, cpu) \
local_read(&(part_stat_get_cpu(part, field, cpu)))
+unsigned int bdev_count_inflight(struct block_device *part);
+
#endif /* _LINUX_PART_STAT_H */
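
A short sketch combining the per-CPU stat reader with the new in-flight helper; STAT_READ is the existing stat-group index from the block layer.

static void demo_stats(struct block_device *bdev)
{
	unsigned long sectors = part_stat_read(bdev, sectors[STAT_READ]);
	unsigned int inflight = bdev_count_inflight(bdev);

	pr_info("read sectors %lu, inflight %u\n", sectors, inflight);
}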
diff --git a/include/linux/path.h b/include/linux/path.h
index 475225a03d0d..7ea389dc764b 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -18,10 +18,13 @@ static inline int path_equal(const struct path *path1, const struct path *path2)
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
-static inline void path_put_init(struct path *path)
-{
- path_put(path);
- *path = (struct path) { };
-}
+/*
+ * Cleanup macro for use with __free(path_put). Unlike DEFINE_FREE(), it
+ * avoids dereferencing and copying @path. path_put() handles the empty
+ * path correctly; just ensure @path is initialized:
+ *
+ * struct path path __free(path_put) = {};
+ */
+#define __free_path_put path_put
#endif /* _LINUX_PATH_H */
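
A usage sketch of the cleanup pattern the comment above shows, assuming a simple kern_path() lookup: path_put() runs automatically on every return once the variable goes out of scope.

static int demo_lookup(const char *name)
{
	struct path path __free(path_put) = {};
	int err = kern_path(name, LOOKUP_FOLLOW, &path);

	if (err)
		return err;
	/* use path.mnt / path.dentry here */
	return 0;
}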
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index df54cd5b15db..75c6c86cf09d 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -8,6 +8,7 @@
/* Address Translation Service */
bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
+int pci_prepare_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
@@ -16,6 +17,8 @@ static inline bool pci_ats_supported(struct pci_dev *d)
{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
+static inline int pci_prepare_ats(struct pci_dev *dev, int ps)
+{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d)
{ return -ENODEV; }
@@ -39,6 +42,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
+int pci_pasid_status(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
{ return -EINVAL; }
@@ -47,6 +51,8 @@ static inline int pci_pasid_features(struct pci_dev *pdev)
{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
{ return -EINVAL; }
+static inline int pci_pasid_status(struct pci_dev *pdev)
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
#endif /* LINUX_PCI_ATS_H */
diff --git a/include/linux/pci-bwctrl.h b/include/linux/pci-bwctrl.h
new file mode 100644
index 000000000000..cee07127455b
--- /dev/null
+++ b/include/linux/pci-bwctrl.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PCIe bandwidth controller
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+
+#ifndef LINUX_PCI_BWCTRL_H
+#define LINUX_PCI_BWCTRL_H
+
+#include <linux/pci.h>
+
+struct thermal_cooling_device;
+
+#ifdef CONFIG_PCIE_THERMAL
+struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port);
+void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev);
+#else
+static inline struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port)
+{
+ return NULL;
+}
+static inline void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index 1f14aed4354b..bd4346a7c4e7 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -15,6 +15,10 @@
struct pci_doe_mb;
+#define PCI_DOE_FEATURE_DISCOVERY 0
+#define PCI_DOE_FEATURE_CMA 1
+#define PCI_DOE_FEATURE_SSESSION 2
+
struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
u8 type);
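
A sketch of locating the architected CMA/SPDM mailbox with the new feature constants; PCI_VENDOR_ID_PCI_SIG is the vendor ID used for the architected DOE features.

static struct pci_doe_mb *demo_find_cma(struct pci_dev *pdev)
{
	return pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
				    PCI_DOE_FEATURE_CMA);
}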
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 3a4860bd2758..d930651473b4 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -45,6 +45,10 @@ struct pci_ecam_ops {
unsigned int bus_shift;
struct pci_ops pci_ops;
int (*init)(struct pci_config_window *);
+ int (*enable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
+ void (*disable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
};
/*
@@ -89,10 +93,4 @@ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
-
-#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
-/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev);
-void pci_host_common_remove(struct platform_device *pdev);
-#endif
#endif
diff --git a/include/linux/pci-ep-msi.h b/include/linux/pci-ep-msi.h
new file mode 100644
index 000000000000..7c5db90f9620
--- /dev/null
+++ b/include/linux/pci-ep-msi.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Endpoint *Function* side MSI header file
+ *
+ * Copyright (C) 2024 NXP
+ * Author: Frank Li <Frank.Li@nxp.com>
+ */
+
+#ifndef __PCI_EP_MSI__
+#define __PCI_EP_MSI__
+
+struct pci_epf;
+
+#ifdef CONFIG_PCI_ENDPOINT_MSI_DOORBELL
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums);
+void pci_epf_free_doorbell(struct pci_epf *epf);
+#else
+static inline int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums)
+{
+ return -ENODATA;
+}
+
+static inline void pci_epf_free_doorbell(struct pci_epf *epf)
+{
+}
+#endif /* CONFIG_PCI_ENDPOINT_MSI_DOORBELL */
+
+#endif /* __PCI_EP_MSI__ */
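
A hedged sketch of an EPF driver wiring up one of the allocated doorbells; the handler and the choice of four doorbells are illustrative.

static irqreturn_t demo_db_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* host rang the doorbell */
}

static int demo_epf_setup_db(struct pci_epf *epf)
{
	int ret = pci_epf_alloc_doorbell(epf, 4);

	if (ret)
		return ret;	/* -ENODATA when the feature is compiled out */
	return request_irq(epf->db_msg[0].virq, demo_db_handler, 0,
			   "demo-db", epf);
}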
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index acc5f96161fe..4286bfdbfdfa 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -33,10 +33,42 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
}
/**
+ * struct pci_epc_map - information about EPC memory for mapping a RC PCI
+ * address range
+ * @pci_addr: start address of the RC PCI address range to map
+ * @pci_size: size of the RC PCI address range mapped from @pci_addr
+ * @map_pci_addr: RC PCI address used as the first address mapped (may be lower
+ * than @pci_addr)
+ * @map_size: size of the controller memory needed for mapping the RC PCI address
+ * range @map_pci_addr..@pci_addr+@pci_size
+ * @phys_base: base physical address of the allocated EPC memory for mapping the
+ * RC PCI address range
+ * @phys_addr: physical address at which @pci_addr is mapped
+ * @virt_base: base virtual address of the allocated EPC memory for mapping the
+ * RC PCI address range
+ * @virt_addr: virtual address at which @pci_addr is mapped
+ */
+struct pci_epc_map {
+ u64 pci_addr;
+ size_t pci_size;
+
+ u64 map_pci_addr;
+ size_t map_size;
+
+ phys_addr_t phys_base;
+ phys_addr_t phys_addr;
+ void __iomem *virt_base;
+ void __iomem *virt_addr;
+};
+
+/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
* @set_bar: ops to configure the BAR
* @clear_bar: ops to reset the BAR
+ * @align_addr: operation to get the mapping address, mapping size and offset
+ * into a controller memory window needed to map an RC PCI address
+ * region
* @map_addr: ops to map CPU address to PCI address
* @unmap_addr: ops to unmap CPU address and PCI address
* @set_msi: ops to set the requested number of MSI interrupts in the MSI
@@ -61,15 +93,17 @@ struct pci_epc_ops {
struct pci_epf_bar *epf_bar);
void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
+ u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size,
+ size_t *offset);
int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts);
+ u8 nr_irqs);
int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno, u32 offset);
+ u16 nr_irqs, enum pci_barno, u32 offset);
int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
unsigned int type, u16 interrupt_num);
@@ -128,6 +162,7 @@ struct pci_epc_mem {
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
+ * @domain_nr: PCI domain number of the endpoint controller
* @init_complete: flag to indicate whether the EPC initialization is complete
* or not
*/
@@ -145,17 +180,23 @@ struct pci_epc {
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
+ int domain_nr;
bool init_complete;
};
/**
+ * enum pci_epc_bar_type - configurability of endpoint BAR
* @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
* @BAR_FIXED: The BAR mask is fixed by the hardware.
+ * @BAR_RESIZABLE: The BAR implements the PCI-SIG Resizable BAR Capability.
+ * NOTE: An EPC driver can currently only set a single supported
+ * size.
* @BAR_RESERVED: The BAR should not be touched by an EPF driver.
*/
enum pci_epc_bar_type {
BAR_PROGRAMMABLE = 0,
BAR_FIXED,
+ BAR_RESIZABLE,
BAR_RESERVED,
};
@@ -184,6 +225,7 @@ struct pci_epc_bar_desc {
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
+ * @intx_capable: indicate if the endpoint can raise INTx interrupts
* @bar: array specifying the hardware description for each BAR
* @align: alignment size required for BAR buffer allocation
*/
@@ -191,12 +233,15 @@ struct pci_epc_features {
unsigned int linkup_notifier : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
+ unsigned int intx_capable : 1;
struct pci_epc_bar_desc bar[PCI_STD_NUM_BARS];
size_t align;
};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+#ifdef CONFIG_PCI_ENDPOINT
+
#define pci_epc_create(dev, ops) \
__pci_epc_create((dev), (ops), THIS_MODULE)
#define devm_pci_epc_create(dev, ops) \
@@ -218,7 +263,6 @@ __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
@@ -226,11 +270,13 @@ void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf);
-void pci_epc_bme_notify(struct pci_epc *epc);
+void pci_epc_deinit_notify(struct pci_epc *epc);
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
@@ -240,11 +286,10 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
u64 pci_addr, size_t size);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts);
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno, u32 offset);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
+ enum pci_barno, u32 offset);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
@@ -272,4 +317,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
void __iomem *virt_addr, size_t size);
+int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u64 pci_addr, size_t pci_size, struct pci_epc_map *map);
+void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_map *map);
+
+#else
+static inline void pci_epc_init_notify(struct pci_epc *epc)
+{
+}
+
+static inline void pci_epc_deinit_notify(struct pci_epc *epc)
+{
+}
+#endif /* CONFIG_PCI_ENDPOINT */
#endif /* __LINUX_PCI_EPC_H */
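
A sketch of the pci_epc_mem_map()/pci_epc_mem_unmap() pair described above; note that map.pci_size may be smaller than the requested length, so a real caller would loop over the remainder (omitted here).

static int demo_epc_read(struct pci_epc *epc, u8 fn, u64 pci_addr,
			 void *buf, size_t len)
{
	struct pci_epc_map map;
	int ret = pci_epc_mem_map(epc, fn, 0, pci_addr, len, &map);

	if (ret)
		return ret;
	memcpy_fromio(buf, map.virt_addr, map.pci_size);
	pci_epc_mem_unmap(epc, fn, 0, &map);
	return 0;
}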
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index adee6a1b35db..48f68c4dcfa5 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -12,6 +12,7 @@
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/msi.h>
#include <linux/pci.h>
struct pci_epf;
@@ -38,7 +39,7 @@ enum pci_barno {
* @baseclass_code: broadly classifies the type of function the device performs
* @cache_line_size: specifies the system cacheline size in units of DWORDs
* @subsys_vendor_id: vendor of the add-in card or subsystem
- * @subsys_id: id specific to vendor
+ * @subsys_id: ID specific to vendor
* @interrupt_pin: interrupt pin the device (or device function) uses
*/
struct pci_epf_header {
@@ -59,7 +60,7 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
- * @add_cfs: ops to initialize function specific configfs attributes
+ * @add_cfs: ops to initialize function-specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
@@ -70,16 +71,18 @@ struct pci_epf_ops {
/**
* struct pci_epc_event_ops - Callbacks for capturing the EPC events
- * @core_init: Callback for the EPC initialization complete event
+ * @epc_init: Callback for the EPC initialization complete event
+ * @epc_deinit: Callback for the EPC deinitialization event
* @link_up: Callback for the EPC link up event
* @link_down: Callback for the EPC link down event
- * @bme: Callback for the EPC BME (Bus Master Enable) event
+ * @bus_master_enable: Callback for the EPC Bus Master Enable event
*/
struct pci_epc_event_ops {
- int (*core_init)(struct pci_epf *epf);
+ int (*epc_init)(struct pci_epf *epf);
+ void (*epc_deinit)(struct pci_epf *epf);
int (*link_up)(struct pci_epf *epf);
int (*link_down)(struct pci_epf *epf);
- int (*bme)(struct pci_epf *epf);
+ int (*bus_master_enable)(struct pci_epf *epf);
};
/**
@@ -105,14 +108,15 @@ struct pci_epf_driver {
const struct pci_epf_device_id *id_table;
};
-#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
- driver))
+#define to_pci_epf_driver(drv) container_of_const((drv), struct pci_epf_driver, driver)
/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @mem_size: the size actually allocated to accommodate the iATU alignment
+ * requirement
* @barno: BAR number
* @flags: flags that are set for the BAR
*/
@@ -120,11 +124,22 @@ struct pci_epf_bar {
dma_addr_t phys_addr;
void *addr;
size_t size;
+ size_t mem_size;
enum pci_barno barno;
int flags;
};
/**
+ * struct pci_epf_doorbell_msg - represents doorbell message
+ * @msg: MSI message
+ * @virq: IRQ number of this doorbell MSI message
+ */
+struct pci_epf_doorbell_msg {
+ struct msi_msg msg;
+ int virq;
+};
+
+/**
* struct pci_epf - represents the PCI EPF device
* @dev: the PCI EPF device
* @name: the name of the PCI EPF device
@@ -137,7 +152,7 @@ struct pci_epf_bar {
* @epc: the EPC device to which this EPF device is bound
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
- * @id: Pointer to the EPF device ID
+ * @id: pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
@@ -150,13 +165,15 @@ struct pci_epf_bar {
* @is_vf: true - virtual function, false - physical function
* @vfunction_num_map: bitmap to manage virtual function number
* @pci_vepf: list of virtual endpoint functions associated with this function
- * @event_ops: Callbacks for capturing the EPC events
+ * @event_ops: callbacks for capturing the EPC events
+ * @db_msg: data for MSI from RC side
+ * @num_db: number of doorbells
*/
struct pci_epf {
struct device dev;
const char *name;
struct pci_epf_header *header;
- struct pci_epf_bar bar[6];
+ struct pci_epf_bar bar[PCI_STD_NUM_BARS];
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
@@ -173,7 +190,7 @@ struct pci_epf {
/* Below members are to attach secondary EPC to an endpoint function */
struct pci_epc *sec_epc;
struct list_head sec_epc_list;
- struct pci_epf_bar sec_epc_bar[6];
+ struct pci_epf_bar sec_epc_bar[PCI_STD_NUM_BARS];
u8 sec_epc_func_no;
struct config_group *group;
unsigned int is_bound;
@@ -181,14 +198,17 @@ struct pci_epf {
unsigned long vfunction_num_map;
struct list_head pci_vepf;
const struct pci_epc_event_ops *event_ops;
+ struct pci_epf_doorbell_msg *db_msg;
+ u16 num_db;
};
/**
- * struct pci_epf_msix_tbl - represents the MSIX table entry structure
- * @msg_addr: Writes to this address will trigger MSIX interrupt in host
- * @msg_data: Data that should be written to @msg_addr to trigger MSIX interrupt
+ * struct pci_epf_msix_tbl - represents the MSI-X table entry structure
+ * @msg_addr: Writes to this address will trigger MSI-X interrupt in host
+ * @msg_data: Data that should be written to @msg_addr to trigger MSI-X
+ * interrupt
* @vector_ctrl: Identifies if the function is prohibited from sending a message
- * using this MSIX table entry
+ * using this MSI-X table entry
*/
struct pci_epf_msix_tbl {
u64 msg_addr;
@@ -221,6 +241,15 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
enum pci_epc_interface_type type);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type);
+
+int pci_epf_assign_bar_space(struct pci_epf *epf, size_t size,
+ enum pci_barno bar,
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type,
+ dma_addr_t bar_addr);
+
+int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar,
+ u64 addr, dma_addr_t *base, size_t *off);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h
new file mode 100644
index 000000000000..37a1ad9501b0
--- /dev/null
+++ b/include/linux/pci-ide.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common helpers for drivers (e.g. low-level PCI/TSM drivers) implementing the
+ * IDE key management protocol (IDE_KM) as defined by:
+ * PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __PCI_IDE_H__
+#define __PCI_IDE_H__
+
+enum pci_ide_partner_select {
+ PCI_IDE_EP,
+ PCI_IDE_RP,
+ PCI_IDE_PARTNER_MAX,
+ /*
+	 * In addition to the resources in each partner port, the
+	 * platform / host-bridge has a Stream ID pool that it shares
+	 * across root ports. Let pci_ide_stream_alloc() use the
+	 * alloc_stream_index() helper for the host bridge just as it
+	 * does for endpoints and root ports.
+ */
+ PCI_IDE_HB = PCI_IDE_PARTNER_MAX,
+};
+
+/**
+ * struct pci_ide_partner - Per port pair Selective IDE Stream settings
+ * @rid_start: Partner Port Requester ID range start
+ * @rid_end: Partner Port Requester ID range end
+ * @stream_index: Selective IDE Stream Register Block selection
+ * @mem_assoc: PCI bus memory address association for targeting peer partner
+ * @pref_assoc: PCI bus prefetchable memory address association for
+ * targeting peer partner
+ * @default_stream: Endpoint uses this stream for all upstream TLPs regardless of
+ * address and RID association registers
+ * @setup: flag to track whether to run pci_ide_stream_teardown() for this
+ * partner slot
+ * @enable: flag to track whether to run pci_ide_stream_disable() for this
+ *	partner slot
+ *
+ * By default, pci_ide_stream_alloc() initializes @mem_assoc and @pref_assoc
+ * with the immediate ancestor downstream port memory ranges (i.e. Type 1
+ * Configuration Space Header values). The caller may zero-size a range
+ * ({0, -1}) to drop it from consideration at pci_ide_stream_setup() time.
+ */
+struct pci_ide_partner {
+ u16 rid_start;
+ u16 rid_end;
+ u8 stream_index;
+ struct pci_bus_region mem_assoc;
+ struct pci_bus_region pref_assoc;
+ unsigned int default_stream:1;
+ unsigned int setup:1;
+ unsigned int enable:1;
+};
+
+/**
+ * struct pci_ide_regs - Hardware register association settings for Selective
+ * IDE Streams
+ * @rid1: IDE RID Association Register 1
+ * @rid2: IDE RID Association Register 2
+ * @addr: Up to two address association blocks (IDE Address Association Register
+ * 1 through 3) for MMIO and prefetchable MMIO
+ * @nr_addr: Number of address association blocks initialized
+ *
+ * See pci_ide_stream_to_regs()
+ */
+struct pci_ide_regs {
+ u32 rid1;
+ u32 rid2;
+ struct {
+ u32 assoc1;
+ u32 assoc2;
+ u32 assoc3;
+ } addr[2];
+ int nr_addr;
+};
+
+/**
+ * struct pci_ide - PCIe Selective IDE Stream descriptor
+ * @pdev: PCIe Endpoint in the pci_ide_partner pair
+ * @partner: per-partner settings
+ * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool
+ * @stream_id: unique Stream ID (within Partner Port pairing)
+ * @name: name of the established Selective IDE Stream in sysfs
+ * @tsm_dev: For TSM established IDE, the TSM device context
+ *
+ * Negative @stream_id values indicate "uninitialized" on the
+ * expectation that, with TSM-established IDE, the TSM owns the stream_id
+ * allocation.
+ */
+struct pci_ide {
+ struct pci_dev *pdev;
+ struct pci_ide_partner partner[PCI_IDE_PARTNER_MAX];
+ u8 host_bridge_stream;
+ int stream_id;
+ const char *name;
+ struct tsm_dev *tsm_dev;
+};
+
+/*
+ * Some devices need help with aliased stream-ids even for idle streams. Use
+ * this ID as the "never enabled" placeholder.
+ */
+#define PCI_IDE_RESERVED_STREAM_ID 255
+
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr);
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev,
+ struct pci_ide *ide);
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev);
+void pci_ide_stream_free(struct pci_ide *ide);
+int pci_ide_stream_register(struct pci_ide *ide);
+void pci_ide_stream_unregister(struct pci_ide *ide);
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide);
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_release(struct pci_ide *ide);
+DEFINE_FREE(pci_ide_stream_release, struct pci_ide *, if (_T) pci_ide_stream_release(_T))
+#endif /* __PCI_IDE_H__ */
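
A hedged sketch of the Selective IDE Stream lifecycle this API suggests; the error-path ordering is inferred from pci_ide_stream_release() and the per-partner setup/enable flags documented above.

static struct pci_ide *demo_ide_bring_up(struct pci_dev *pdev)
{
	struct pci_ide *ide = pci_ide_stream_alloc(pdev);

	if (!ide)
		return NULL;
	if (pci_ide_stream_register(ide))
		goto err;
	pci_ide_stream_setup(pdev, ide);
	if (pci_ide_stream_enable(pdev, ide))
		goto err;
	return ide;
err:
	pci_ide_stream_release(ide);	/* undoes whatever was set up */
	return NULL;
}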
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 2c07aa6b7665..517e121d2598 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -16,12 +16,62 @@
struct block_device;
struct scatterlist;
+/**
+ * struct p2pdma_provider
+ *
+ * A p2pdma provider is a range of MMIO address space available to the CPU.
+ */
+struct p2pdma_provider {
+ struct device *owner;
+ u64 bus_offset;
+};
+
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before
+ * the mapping type has been calculated. Exported routines for the API
+ * will never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * Not a PCI P2PDMA transfer.
+ */
+ PCI_P2PDMA_MAP_NONE,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
#ifdef CONFIG_PCI_P2PDMA
+int pcim_p2pdma_init(struct pci_dev *pdev);
+struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar);
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset);
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
int num_clients, bool verbose);
-bool pci_has_p2pmem(struct pci_dev *pdev);
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
@@ -34,7 +84,18 @@ int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
bool use_p2pdma);
+enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider,
+ struct device *dev);
#else /* CONFIG_PCI_P2PDMA */
+static inline int pcim_p2pdma_init(struct pci_dev *pdev)
+{
+ return -EOPNOTSUPP;
+}
+static inline struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev,
+ int bar)
+{
+ return NULL;
+}
static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
size_t size, u64 offset)
{
@@ -45,10 +106,6 @@ static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
{
return -1;
}
-static inline bool pci_has_p2pmem(struct pci_dev *pdev)
-{
- return false;
-}
static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
int num_clients)
{
@@ -90,6 +147,11 @@ static inline ssize_t pci_p2pdma_enable_show(char *page,
{
return sprintf(page, "none\n");
}
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_type(struct p2pdma_provider *provider, struct device *dev)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
#endif /* CONFIG_PCI_P2PDMA */
@@ -104,4 +166,48 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
+struct pci_p2pdma_map_state {
+ struct p2pdma_provider *mem;
+ enum pci_p2pdma_map_type map;
+};
+
+
+/* helper for pci_p2pdma_state(), do not use directly */
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page);
+
+/**
+ * pci_p2pdma_state - check the P2P transfer state of a page
+ * @state: P2P state structure
+ * @dev: device to transfer to/from
+ * @page: page to map
+ *
+ * Check if @page is a PCI P2PDMA page and, if so, of what kind. Returns the
+ * map type, and updates @state with all information needed for a P2P transfer.
+ */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
+ __pci_p2pdma_update_state(state, dev, page);
+ return state->map;
+ }
+ return PCI_P2PDMA_MAP_NONE;
+}
+
+/**
+ * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address
+ * for a PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ * @provider: P2P provider structure
+ * @paddr: physical address to map
+ *
+ * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ */
+static inline dma_addr_t
+pci_p2pdma_bus_addr_map(struct p2pdma_provider *provider, phys_addr_t paddr)
+{
+ return paddr + provider->bus_offset;
+}
+
#endif /* _LINUX_PCI_P2P_H */
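
A sketch of a single-page mapping helper built on the state API: bus-address transfers translate directly, the NONE and THRU_HOST_BRIDGE cases fall back to the normal DMA path, and unsupported topologies fail (unmap bookkeeping omitted).

static dma_addr_t demo_map_one(struct device *dev,
			       struct pci_p2pdma_map_state *state,
			       struct page *page)
{
	switch (pci_p2pdma_state(state, dev, page)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return pci_p2pdma_bus_addr_map(state->mem,
					       page_to_phys(page));
	case PCI_P2PDMA_MAP_NONE:
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_page(dev, page, 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);
	default:
		return DMA_MAPPING_ERROR;	/* not supported */
	}
}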
diff --git a/include/linux/pci-pwrctrl.h b/include/linux/pci-pwrctrl.h
new file mode 100644
index 000000000000..4aefc7901cd1
--- /dev/null
+++ b/include/linux/pci-pwrctrl.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __PCI_PWRCTRL_H__
+#define __PCI_PWRCTRL_H__
+
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct device_link;
+
+/*
+ * This is a simple framework for solving the issue of PCI devices that require
+ * certain resources (regulators, GPIOs, clocks) to be enabled before the
+ * device can actually be detected on the PCI bus.
+ *
+ * The idea is to reuse the platform bus to populate OF nodes describing the
+ * PCI device and its resources, let these platform devices probe and enable
+ * relevant resources and then trigger a rescan of the PCI bus allowing for the
+ * same device (with a second associated struct device) to be registered with
+ * the PCI subsystem.
+ *
+ * To preserve a correct hierarchy for PCI power management and device reset,
+ * we create a device link between the power control platform device (parent)
+ * and the supplied PCI device (child).
+ */
+
+/**
+ * struct pci_pwrctrl - PCI device power control context.
+ * @dev: Address of the power controlling device.
+ *
+ * An object of this type must be allocated by the PCI power control device and
+ * passed to the pwrctrl subsystem to trigger a bus rescan and set up a device
+ * link with the device once it's up.
+ */
+struct pci_pwrctrl {
+ struct device *dev;
+
+ /* private: internal use only */
+ struct notifier_block nb;
+ struct device_link *link;
+ struct work_struct work;
+};
+
+void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev);
+int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl);
+void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl);
+int devm_pci_pwrctrl_device_set_ready(struct device *dev,
+ struct pci_pwrctrl *pwrctrl);
+
+#endif /* __PCI_PWRCTRL_H__ */
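
A hedged sketch of a pwrctrl platform-driver probe following the flow the comment above lays out: enable the supplies first, then hand the context to the PCI core so it can rescan the bus.

static int demo_pwrctrl_probe(struct platform_device *pdev)
{
	struct pci_pwrctrl *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* enable the device's regulators/clocks/GPIOs here ... */

	pci_pwrctrl_init(ctx, &pdev->dev);
	return devm_pci_pwrctrl_device_set_ready(&pdev->dev, ctx);
}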
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
new file mode 100644
index 000000000000..ba28140ce670
--- /dev/null
+++ b/include/linux/pci-tph.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TPH (TLP Processing Hints)
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ * Eric Van Tassell <Eric.VanTassell@amd.com>
+ * Wei Huang <wei.huang2@amd.com>
+ */
+#ifndef LINUX_PCI_TPH_H
+#define LINUX_PCI_TPH_H
+
+/*
+ * According to the ECN for the PCI Firmware Spec, the Steering Tag can differ
+ * depending on the memory type: Volatile Memory or Persistent Memory. When a
+ * caller queries a target's Steering Tag, it must provide the target's
+ * tph_mem_type. ECN link: https://members.pcisig.com/wg/PCI-SIG/document/15470.
+ */
+enum tph_mem_type {
+ TPH_MEM_TYPE_VM, /* volatile memory */
+ TPH_MEM_TYPE_PM /* persistent memory */
+};
+
+#ifdef CONFIG_PCIE_TPH
+int pcie_tph_set_st_entry(struct pci_dev *pdev,
+ unsigned int index, u16 tag);
+int pcie_tph_get_cpu_st(struct pci_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag);
+void pcie_disable_tph(struct pci_dev *pdev);
+int pcie_enable_tph(struct pci_dev *pdev, int mode);
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev);
+#else
+static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
+ unsigned int index, u16 tag)
+{ return -EINVAL; }
+static inline int pcie_tph_get_cpu_st(struct pci_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag)
+{ return -EINVAL; }
+static inline void pcie_disable_tph(struct pci_dev *pdev) { }
+static inline int pcie_enable_tph(struct pci_dev *pdev, int mode)
+{ return -EINVAL; }
+#endif
+
+#endif /* LINUX_PCI_TPH_H */
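
A sketch of steering a device's writes toward one CPU's cache hierarchy, assuming TPH has already been enabled on the device in a supported mode.

static int demo_tph_steer(struct pci_dev *pdev, unsigned int cpu_uid)
{
	u16 tag;
	int ret = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);

	if (ret)
		return ret;
	return pcie_tph_set_st_entry(pdev, 0, tag);	/* ST entry 0 */
}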
diff --git a/include/linux/pci-tsm.h b/include/linux/pci-tsm.h
new file mode 100644
index 000000000000..a6435aba03f9
--- /dev/null
+++ b/include/linux/pci-tsm.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PCI_TSM_H
+#define __PCI_TSM_H
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sockptr.h>
+
+struct pci_tsm;
+struct tsm_dev;
+struct kvm;
+enum pci_tsm_req_scope;
+
+/*
+ * struct pci_tsm_ops - manage confidential links and security state
+ * @link_ops: Coordinate PCIe SPDM and IDE establishment via a platform TSM.
+ * Provide a secure session transport for TDISP state management
+ * (typically bare metal physical function operations).
+ * @devsec_ops: Lock, unlock, and interrogate the security state of the
+ * function via the platform TSM (typically virtual function
+ * operations).
+ *
+ * This operations are mutually exclusive either a tsm_dev instance
+ * manages physical link properties or it manages function security
+ * states like TDISP lock/unlock.
+ */
+struct pci_tsm_ops {
+ /*
+ * struct pci_tsm_link_ops - Manage physical link and the TSM/DSM session
+ * @probe: establish context with the TSM (allocate / wrap 'struct
+ * pci_tsm') for follow-on link operations
+ * @remove: destroy link operations context
+ * @connect: establish / validate a secure connection (e.g. IDE)
+ * with the device
+ * @disconnect: teardown the secure link
+ * @bind: bind a TDI in preparation for it to be accepted by a TVM
+ * @unbind: remove a TDI from secure operation with a TVM
+ * @guest_req: marshal TVM information and state change requests
+ *
+ * Context: @probe, @remove, @connect, and @disconnect run under
+ * pci_tsm_rwsem held for write to sync with TSM unregistration and
+ * mutual exclusion of @connect and @disconnect. @connect and
+ * @disconnect additionally run under the DSM lock (struct
+ * pci_tsm_pf0::lock) as well as @probe and @remove of the subfunctions.
+ * @bind, @unbind, and @guest_req run under pci_tsm_rwsem held for read
+ * and the DSM lock.
+ */
+ struct_group_tagged(pci_tsm_link_ops, link_ops,
+ struct pci_tsm *(*probe)(struct tsm_dev *tsm_dev,
+ struct pci_dev *pdev);
+ void (*remove)(struct pci_tsm *tsm);
+ int (*connect)(struct pci_dev *pdev);
+ void (*disconnect)(struct pci_dev *pdev);
+ struct pci_tdi *(*bind)(struct pci_dev *pdev,
+ struct kvm *kvm, u32 tdi_id);
+ void (*unbind)(struct pci_tdi *tdi);
+ ssize_t (*guest_req)(struct pci_tdi *tdi,
+ enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len,
+ sockptr_t req_out, size_t out_len,
+ u64 *tsm_code);
+ );
+
+ /*
+ * struct pci_tsm_devsec_ops - Manage the security state of the function
+ * @lock: establish context with the TSM (allocate / wrap 'struct
+ * pci_tsm') for follow-on security state transitions from the
+ * LOCKED state
+ * @unlock: destroy TSM context and return device to UNLOCKED state
+ *
+ * Context: @lock and @unlock run under pci_tsm_rwsem held for write to
+ * sync with TSM unregistration and each other
+ */
+ struct_group_tagged(pci_tsm_devsec_ops, devsec_ops,
+ struct pci_tsm *(*lock)(struct tsm_dev *tsm_dev,
+ struct pci_dev *pdev);
+ void (*unlock)(struct pci_tsm *tsm);
+ );
+};
+
+/**
+ * struct pci_tdi - Core TEE I/O Device Interface (TDI) context
+ * @pdev: host side representation of guest-side TDI
+ * @kvm: TEE VM context of bound TDI
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+struct pci_tdi {
+ struct pci_dev *pdev;
+ struct kvm *kvm;
+ u32 tdi_id;
+};
+
+/**
+ * struct pci_tsm - Core TSM context for a given PCIe endpoint
+ * @pdev: Back ref to device function, distinguishes type of pci_tsm context
+ * @dsm_dev: PCI Device Security Manager for link operations on @pdev
+ * @tsm_dev: PCI TEE Security Manager device for Link Confidentiality or Device
+ * Function Security operations
+ * @tdi: TDI context established by the @bind link operation
+ *
+ * This structure is wrapped by low-level TSM driver data and returned by
+ * probe()/lock(); it is freed by the corresponding remove()/unlock().
+ *
+ * For link operations it serves to cache the association between a Device
+ * Security Manager (DSM) and the functions that manager can assign to a TVM.
+ * That can be "self", for assigning function0 of a TEE I/O device, a
+ * sub-function (SR-IOV virtual function, or non-function0
+ * multifunction-device), or a downstream endpoint (PCIe upstream switch-port as
+ * DSM).
+ */
+struct pci_tsm {
+ struct pci_dev *pdev;
+ struct pci_dev *dsm_dev;
+ struct tsm_dev *tsm_dev;
+ struct pci_tdi *tdi;
+};
+
+/**
+ * struct pci_tsm_pf0 - Physical Function 0 TDISP link context
+ * @base_tsm: generic core "tsm" context
+ * @lock: mutual exclusion for pci_tsm_ops invocation
+ * @doe_mb: PCIe Data Object Exchange mailbox
+ */
+struct pci_tsm_pf0 {
+ struct pci_tsm base_tsm;
+ struct mutex lock;
+ struct pci_doe_mb *doe_mb;
+};
+
+/* physical function0 and capable of 'connect' */
+static inline bool is_pci_tsm_pf0(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (pdev->is_virtfn)
+ return false;
+
+ /*
+ * Allow for a Device Security Manager (DSM) associated with function0
+ * of an Endpoint to coordinate TDISP requests for other functions
+ * (physical or virtual) of the device, or allow for an Upstream Port
+ * DSM to accept TDISP requests for the Endpoints downstream of the
+ * switch.
+ */
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_RC_END:
+ if (pdev->ide_cap || (pdev->devcap & PCI_EXP_DEVCAP_TEE))
+ break;
+ fallthrough;
+ default:
+ return false;
+ }
+
+ return PCI_FUNC(pdev->devfn) == 0;
+}
+
+/**
+ * enum pci_tsm_req_scope - Scope of guest requests to be validated by TSM
+ *
+ * Guest requests are a transport for a TVM to communicate with a TSM + DSM for
+ * a given TDI. A TSM driver is responsible for maintaining the kernel
+ * security model and for limiting commands that may affect the host or are
+ * otherwise outside the typical TDISP operational model.
+ */
+enum pci_tsm_req_scope {
+ /**
+ * @PCI_TSM_REQ_INFO: Read-only, without side effects, request for
+ * typical TDISP collateral information like Device Interface Reports.
+ * No device secrets are permitted, and no device state is changed.
+ */
+ PCI_TSM_REQ_INFO = 0,
+ /**
+ * @PCI_TSM_REQ_STATE_CHANGE: Request to change the TDISP state from
+ * UNLOCKED->LOCKED, LOCKED->RUN, or other architecture specific state
+ * changes to support those transitions for a TDI. No other (unrelated
+ * to TDISP) device / host state, configuration, or data change is
+ * permitted.
+ */
+ PCI_TSM_REQ_STATE_CHANGE = 1,
+ /**
+ * @PCI_TSM_REQ_DEBUG_READ: Read-only request for debug information
+ *
+ * A method to facilitate TVM information retrieval outside of typical
+ * TDISP operational requirements. No device secrets are permitted.
+ */
+ PCI_TSM_REQ_DEBUG_READ = 2,
+ /**
+ * @PCI_TSM_REQ_DEBUG_WRITE: Device state changes for debug purposes
+ *
+ * The request may affect the operational state of the device outside of
+ * the TDISP operational model. If allowed, requires CAP_SYS_RAW_IO, and
+ * will taint the kernel.
+ */
+ PCI_TSM_REQ_DEBUG_WRITE = 3,
+};
+
+#ifdef CONFIG_PCI_TSM
+int pci_tsm_register(struct tsm_dev *tsm_dev);
+void pci_tsm_unregister(struct tsm_dev *tsm_dev);
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev);
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev);
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *tsm);
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz);
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id);
+void pci_tsm_unbind(struct pci_dev *pdev);
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id);
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code);
+#else
+static inline int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ return 0;
+}
+static inline void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+}
+static inline int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ return -ENXIO;
+}
+static inline void pci_tsm_unbind(struct pci_dev *pdev)
+{
+}
+static inline ssize_t pci_tsm_guest_req(struct pci_dev *pdev,
+ enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len,
+ sockptr_t req_out, size_t out_len,
+ u64 *tsm_code)
+{
+ return -ENXIO;
+}
+#endif
+#endif /*__PCI_TSM_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb004fd4e889..864775651c6f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -119,7 +119,8 @@ enum {
#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
/* Total number of bridge resources for P2P and CardBus */
-#define PCI_BRIDGE_RESOURCE_NUM 4
+#define PCI_P2P_BRIDGE_RESOURCE_NUM 3
+#define PCI_BRIDGE_RESOURCE_NUM 4
/* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
@@ -245,6 +246,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
/* Device does honor MSI masking despite saying otherwise */
PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
+ /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
+ PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
};
enum pci_irq_reroute_variant {
@@ -313,12 +316,25 @@ struct pci_vpd {
};
struct irq_affinity;
+struct pcie_bwctrl_data;
struct pcie_link_state;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;
-/* The pci_dev structure describes PCI devices */
+/* struct pci_dev - describes a PCI device
+ *
+ * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at
+ * LSB). 0 when the supported speeds cannot be
+ * determined (e.g., for Root Complex Integrated
+ * Endpoints without the relevant Capability
+ * Registers).
+ * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
+ * Conventional PCI Hot-Plug, ACPI slot).
+ * Such bridges are allocated additional MMIO and bus
+ * number resources to allow for hierarchy expansion.
+ * @is_pciehp: PCIe Hot-Plug Capable bridge.
+ */
struct pci_dev {
struct list_head bus_list; /* Node in per-bus list */
struct pci_bus *bus; /* Bus this device is on */
@@ -338,13 +354,14 @@ struct pci_dev {
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
u16 aer_cap; /* AER capability offset */
- struct aer_stats *aer_stats; /* AER stats for this device */
+ struct aer_info *aer_info; /* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
struct pci_dev *rcec; /* Associated RCEC device */
#endif
u32 devcap; /* PCIe Device Capabilities */
+ u16 rebar_cap; /* Resizable BAR capability offset */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -367,10 +384,12 @@ struct pci_dev {
this is D0-D3, D0 being fully
functional, and D3 being off. */
u8 pm_cap; /* PM capability offset */
- unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
+ unsigned int pinned:1; /* Whether this dev is pinned */
+ unsigned int config_rrs_sv:1; /* Config RRS software visibility */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
@@ -393,11 +412,13 @@ struct pci_dev {
u16 l1ss; /* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ unsigned int aspm_l0s_support:1; /* ASPM L0s support */
+ unsigned int aspm_l1_support:1; /* ASPM L1 support */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
#endif
unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
- unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
+ unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
pci_channel_state_t error_state; /* Current connectivity state */
struct device dev; /* Generic device interface */
@@ -412,10 +433,6 @@ struct pci_dev {
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
- bool match_driver; /* Skip attaching driver */
- struct lock_class_key cfg_access_key;
- struct lockdep_map cfg_access_lock;
-
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int io_window:1; /* Bridge has I/O window */
unsigned int pref_window:1; /* Bridge has pref mem window */
@@ -434,6 +451,8 @@ struct pci_dev {
unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
+ unsigned int tph_enabled:1; /* TLP Processing Hints */
+ unsigned int fm_enabled:1; /* Flit Mode (segment captured) */
unsigned int is_managed:1; /* Managed via devres */
unsigned int is_msi_managed:1; /* MSI release via devres installed */
unsigned int needs_freset:1; /* Requires fundamental reset */
@@ -441,6 +460,7 @@ struct pci_dev {
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int is_pciehp:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
/*
@@ -467,6 +487,7 @@ struct pci_dev {
unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
+ unsigned int non_mappable_bars:1; /* BARs can't be mapped to user-space */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -482,6 +503,8 @@ struct pci_dev {
#ifdef CONFIG_PCIE_PTM
u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
+ unsigned int ptm_responder:1;
+ unsigned int ptm_requester:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
#endif
@@ -495,6 +518,7 @@ struct pci_dev {
unsigned int dpc_rp_extensions:1;
u8 dpc_rp_log_size;
#endif
+ struct pcie_bwctrl_data *link_bwctrl;
#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* PF: SR-IOV info */
@@ -518,7 +542,23 @@ struct pci_dev {
#ifdef CONFIG_PCI_DOE
struct xarray doe_mbs; /* Data Object Exchange mailboxes */
#endif
+#ifdef CONFIG_PCI_NPEM
+ struct npem *npem; /* Native PCIe Enclosure Management */
+#endif
+#ifdef CONFIG_PCI_IDE
+ u16 ide_cap; /* Link Integrity & Data Encryption */
+ u8 nr_ide_mem; /* Address association resources for streams */
+ u8 nr_link_ide; /* Link Stream count (Selective Stream offset) */
+ u16 nr_sel_ide; /* Selective Stream count (register block allocator) */
+ struct ida ide_stream_ida;
+ unsigned int ide_cfg:1; /* Config cycles over IDE */
+ unsigned int ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
+#endif
+#ifdef CONFIG_PCI_TSM
+ struct pci_tsm *tsm; /* TSM operation state */
+#endif
u16 acs_cap; /* ACS Capability offset */
+ u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
/*
@@ -531,6 +571,12 @@ struct pci_dev {
/* These methods index pci_reset_fn_methods[] */
u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
+
+#ifdef CONFIG_PCIE_TPH
+ u16 tph_cap; /* TPH capability offset */
+ u8 tph_mode; /* TPH mode */
+ u8 tph_req_type; /* TPH requester type */
+#endif
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -546,6 +592,8 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
+#define for_each_pci_dev_reverse(d) \
+ while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
static inline int pci_channel_offline(struct pci_dev *pdev)
{
@@ -572,9 +620,16 @@ struct pci_host_bridge {
int domain_nr;
struct list_head windows; /* resource_entry */
struct list_head dma_ranges; /* dma ranges resource list */
+#ifdef CONFIG_PCI_IDE
+ u16 nr_ide_streams; /* Max streams possibly active in @ide_stream_ida */
+ struct ida ide_stream_ida;
+ struct ida ide_stream_ids_ida; /* track unique ids per domain */
+#endif
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
+ int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
+ void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
void *release_data;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
@@ -615,6 +670,7 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
+struct device *pci_get_host_bridge_device(struct pci_dev *dev);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -623,27 +679,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
-/*
- * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
- * to P2P or CardBus bridge windows) go in a table. Additional ones (for
- * buses below host bridges or subtractive decode bridges) go in the list.
- * Use pci_bus_for_each_resource() to iterate through all the resources.
- */
-
-/*
- * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
- * and there's no way to program the bridge with the details of the window.
- * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
- * decode bit set, because they are explicit and can be programmed with _SRS.
- */
-#define PCI_SUBTRACTIVE_DECODE 0x1
-
-struct pci_bus_resource {
- struct list_head list;
- struct resource *res;
- unsigned int flags;
-};
-
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
@@ -680,6 +715,7 @@ struct pci_bus {
struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
unsigned int unsafe_warn:1; /* warned about RW1C config write */
+ unsigned int flit_mode:1; /* Link in Flit mode */
};
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
@@ -740,6 +776,21 @@ static inline bool pci_is_vga(struct pci_dev *pdev)
return false;
}
+/**
+ * pci_is_display - check if the PCI device is a display controller
+ * @pdev: PCI device
+ *
+ * Determine whether the given PCI device corresponds to a display
+ * controller. Display controllers are typically used for graphical output
+ * and are identified based on their class code.
+ *
+ * Return: true if the PCI device is a display controller, false otherwise.
+ */
+static inline bool pci_is_display(struct pci_dev *pdev)
+{
+ return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -803,6 +854,7 @@ struct pci_ops {
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+ int (*assert_perst)(struct pci_bus *bus, bool assert);
};
/*
@@ -825,6 +877,11 @@ struct pci_bus_region {
pci_bus_addr_t end;
};
+static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
+{
+ return region->end - region->start + 1;
+}
+
struct pci_dynids {
spinlock_t lock; /* Protects list, index */
struct list_head list; /* For IDs added at runtime */
@@ -960,10 +1017,8 @@ struct pci_driver {
bool driver_managed_dma;
};
-static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct pci_driver, driver) : NULL;
-}
+#define to_pci_driver(__drv) \
+	(__drv ? container_of_const(__drv, struct pci_driver, driver) : NULL)
/**
* PCI_DEVICE - macro used to describe a specific PCI device
@@ -1050,6 +1105,20 @@ static inline struct pci_driver *to_pci_driver(struct device_driver *drv)
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
/**
+ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
+ * @vend: the vendor name
+ * @dev: the 16 bit PCI Device ID
+ * @subvend: the 16 bit PCI Subvendor ID
+ * @subdev: the 16 bit PCI Subdevice ID
+ *
+ * Generate the pci_device_id struct layout for the specific PCI
+ * device/subdevice. Private data may follow the output.
+ */
+#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
+ .subvendor = (subvend), .subdevice = (subdev), 0, 0
+
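+/*
+ * Example (illustrative sketch; the device/subsystem IDs below are
+ * hypothetical):
+ *
+ *	static const struct pci_device_id my_ids[] = {
+ *		{ PCI_VDEVICE_SUB(REDHAT, 0x0010, 0x1af4, 0x1100) },
+ *		{ }
+ *	};
+ */
+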
+/**
* PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
* @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
* @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
@@ -1101,7 +1170,7 @@ enum pcie_bus_config_types {
extern enum pcie_bus_config_types pcie_bus_config;
-extern struct bus_type pci_bus_type;
+extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
@@ -1123,9 +1192,6 @@ resource_size_t pcibios_align_resource(void *, const struct resource *,
resource_size_t,
resource_size_t);
-/* Weak but can be overridden by arch */
-void pci_fixup_cardbus(struct pci_bus *);
-
/* Generic PCI functions used internally */
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
@@ -1200,6 +1266,8 @@ u64 pci_get_dsn(struct pci_dev *dev);
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
+struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
+ struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from);
@@ -1273,6 +1341,7 @@ static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
{
switch (pos) {
case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL2:
case PCI_EXP_RTCTL:
return pcie_capability_clear_and_set_word_locked(dev, pos,
clear, set);
@@ -1382,18 +1451,17 @@ void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
-int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
-void pci_release_resource(struct pci_dev *dev, int resno);
-static inline int pci_rebar_bytes_to_size(u64 bytes)
-{
- bytes = roundup_pow_of_two(bytes);
+int pci_release_resource(struct pci_dev *dev, int resno);
- /* Return BAR size as defined in the resizable BAR specification */
- return max(ilog2(bytes), 20) - 20;
-}
+/* Resizable BAR related routines */
+int pci_rebar_bytes_to_size(u64 bytes);
+resource_size_t pci_rebar_size_to_bytes(int size);
+u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size);
+int pci_rebar_get_max_size(struct pci_dev *pdev, int bar);
+int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size,
+ int exclude_bars);
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
-int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
@@ -1441,7 +1509,6 @@ void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
-unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);
@@ -1463,7 +1530,6 @@ void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
@@ -1497,8 +1563,7 @@ void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
- unsigned int flags);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
@@ -1551,14 +1616,11 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
struct resource *res, resource_size_t size,
resource_size_t align, resource_size_t min,
unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data);
-int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
@@ -1625,11 +1687,10 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
-void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata);
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
-void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
@@ -1662,7 +1723,7 @@ void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
-int pci_msi_enabled(void);
+bool pci_msi_enabled(void);
int pci_enable_msi(struct pci_dev *dev);
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
@@ -1695,7 +1756,7 @@ static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
-static inline int pci_msi_enabled(void) { return 0; }
+static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
@@ -1788,9 +1849,19 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_native;
+
+int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
+ bool use_lt);
#else
#define pcie_ports_disabled true
#define pcie_ports_native false
+
+static inline int pcie_set_target_speed(struct pci_dev *port,
+ enum pci_bus_speed speed_req,
+ bool use_lt)
+{
+ return -EOPNOTSUPP;
+}
#endif
#define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
@@ -1831,6 +1902,14 @@ static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
+#ifdef CONFIG_HOTPLUG_PCI
+void pci_hp_ignore_link_change(struct pci_dev *pdev);
+void pci_hp_unignore_link_change(struct pci_dev *pdev);
+#else
+static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
+static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
+#endif
+
#ifdef CONFIG_PCIEAER
bool pci_aer_available(void);
#else
@@ -1839,6 +1918,39 @@ static inline bool pci_aer_available(void) { return false; }
bool pci_ats_disabled(void);
+#define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
+#define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
+
+struct pcie_ptm_ops {
+ int (*check_capability)(void *drvdata);
+ int (*context_update_write)(void *drvdata, u8 mode);
+ int (*context_update_read)(void *drvdata, u8 *mode);
+ int (*context_valid_write)(void *drvdata, bool valid);
+ int (*context_valid_read)(void *drvdata, bool *valid);
+ int (*local_clock_read)(void *drvdata, u64 *clock);
+ int (*master_clock_read)(void *drvdata, u64 *clock);
+ int (*t1_read)(void *drvdata, u64 *clock);
+ int (*t2_read)(void *drvdata, u64 *clock);
+ int (*t3_read)(void *drvdata, u64 *clock);
+ int (*t4_read)(void *drvdata, u64 *clock);
+
+ bool (*context_update_visible)(void *drvdata);
+ bool (*context_valid_visible)(void *drvdata);
+ bool (*local_clock_visible)(void *drvdata);
+ bool (*master_clock_visible)(void *drvdata);
+ bool (*t1_visible)(void *drvdata);
+ bool (*t2_visible)(void *drvdata);
+ bool (*t3_visible)(void *drvdata);
+ bool (*t4_visible)(void *drvdata);
+};
+
+struct pci_ptm_debugfs {
+ struct dentry *debugfs;
+ const struct pcie_ptm_ops *ops;
+ struct mutex lock;
+ void *pdata;
+};
+
#ifdef CONFIG_PCIE_PTM
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
void pci_disable_ptm(struct pci_dev *dev);
@@ -1851,6 +1963,18 @@ static inline bool pcie_ptm_enabled(struct pci_dev *dev)
{ return false; }
#endif
+#if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops);
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
+#else
+static inline struct pci_ptm_debugfs
+*pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops) { return NULL; }
+static inline void
+pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
+#endif
+
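+/*
+ * Example (illustrative sketch): a controller driver exposing PTM
+ * context through the debugfs helpers. my_local_clock_read() and the
+ * hardware accessor it wraps are hypothetical; unimplemented ops are
+ * simply left NULL.
+ *
+ *	static int my_local_clock_read(void *drvdata, u64 *clock)
+ *	{
+ *		*clock = my_hw_read_ptm_local_clock(drvdata);
+ *		return 0;
+ *	}
+ *
+ *	static const struct pcie_ptm_ops my_ptm_ops = {
+ *		.local_clock_read = my_local_clock_read,
+ *	};
+ *
+ *	ptm_debugfs = pcie_ptm_create_debugfs(dev, drvdata, &my_ptm_ops);
+ */
+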
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1867,10 +1991,17 @@ DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
+int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max);
+void pci_bus_release_emul_domain_nr(int domain_nr);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
+{
+ return 0;
+}
+static inline void pci_bus_release_emul_domain_nr(int domain_nr) { }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1890,7 +2021,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -1953,6 +2084,11 @@ static inline struct pci_dev *pci_get_device(unsigned int vendor,
struct pci_dev *from)
{ return NULL; }
+static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{ return NULL; }
+
static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
unsigned int device,
unsigned int ss_vendor,
@@ -2021,7 +2157,7 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
-static inline int pci_register_io_range(struct fwnode_handle *fwnode,
+static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
phys_addr_t addr, resource_size_t size)
{ return -EINVAL; }
@@ -2295,13 +2431,18 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
#endif
+int pcim_intx(struct pci_dev *pdev, int enabled);
+int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name);
+void pcim_iounmap_region(struct pci_dev *pdev, int bar);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name);
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long len);
extern int pci_pci_problems;
#define PCIPCI_FAIL 1 /* No PCI PCI DMA */
@@ -2312,8 +2453,6 @@ extern int pci_pci_problems;
#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
-extern unsigned long pci_cardbus_io_size;
-extern unsigned long pci_cardbus_mem_size;
extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
@@ -2368,6 +2507,8 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
+int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
+u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
/* Arch may override these (weak) */
@@ -2420,14 +2561,13 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
+static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
+{ return -ENODEV; }
+static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
+{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
#endif
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-void pci_hp_create_module_link(struct pci_slot *pci_slot);
-void pci_hp_remove_module_link(struct pci_slot *pci_slot);
-#endif
-
/**
* pci_pcie_cap - get the saved PCIe capability offset
* @dev: PCI device
@@ -2606,6 +2746,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
+#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
+bool arch_pci_dev_is_removable(struct pci_dev *pdev);
+#else
+static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
+#endif
+
#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
@@ -2666,15 +2812,12 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
#include <linux/dma-mapping.h>
-#define pci_printk(level, pdev, fmt, arg...) \
- dev_printk(level, &(pdev)->dev, fmt, ##arg)
-
#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 3a10d6ec3ee7..ddf79641917f 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -50,7 +50,6 @@ struct hotplug_slot_ops {
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @slot_list: internal list used to track hotplug PCI slots
* @pci_slot: represents a physical slot
* @owner: The module owner of this structure
* @mod_name: The module name (KBUILD_MODNAME) of this structure
@@ -59,7 +58,6 @@ struct hotplug_slot {
const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
- struct list_head slot_list;
struct pci_slot *pci_slot;
struct module *owner;
const char *mod_name;
@@ -106,6 +104,7 @@ static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
static inline bool hotplug_is_native(struct pci_dev *bridge)
{
- return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+ return (bridge->is_pciehp && pciehp_is_native(bridge)) ||
+ shpchp_is_native(bridge);
}
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 942a587bb97e..a9a089566b7c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -121,6 +121,7 @@
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
+#define PCI_CLASS_SERIAL_USB_CDNS 0x0c0380
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
@@ -517,6 +518,7 @@
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+#define PCI_DEVICE_ID_IBM_ISM 0x04ed
#define PCI_SUBVENDOR_ID_IBM 0x1014
#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4
@@ -568,6 +570,7 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M40H_DF_F3 0x13f3
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
@@ -580,6 +583,7 @@
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
@@ -2126,6 +2130,8 @@
#define PCI_VENDOR_ID_CHELSIO 0x1425
+#define PCI_VENDOR_ID_EDIMAX 0x1432
+
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d
@@ -2418,6 +2424,9 @@
#define PCI_VENDOR_ID_QCOM 0x17cb
#define PCI_VENDOR_ID_CDNS 0x17cd
+#define PCI_DEVICE_ID_CDNS_USBSS 0x0100
+#define PCI_DEVICE_ID_CDNS_USB 0x0120
+#define PCI_DEVICE_ID_CDNS_USBSSP 0x0200
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_DEVICE_ID_ARECA_1110 0x1110
@@ -2586,6 +2595,11 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_VENDOR_ID_WCHIC 0x1c00
+#define PCI_DEVICE_ID_WCHIC_CH382_0S1P 0x3050
+#define PCI_DEVICE_ID_WCHIC_CH382_2S1P 0x3250
+#define PCI_DEVICE_ID_WCHIC_CH382_2S 0x3253
+
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
@@ -2597,8 +2611,12 @@
#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
+
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_META 0x1d9b
+
#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
#define PCI_VENDOR_ID_HXT 0x1dbf
@@ -2606,6 +2624,9 @@
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_RPI 0x1de4
+#define PCI_DEVICE_ID_RPI_RP1_C0 0x0001
+
#define PCI_VENDOR_ID_ALIBABA 0x1ded
#define PCI_VENDOR_ID_CXL 0x1e98
@@ -2638,6 +2659,12 @@
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
+#define PCI_VENDOR_ID_WCHCN 0x4348
+#define PCI_DEVICE_ID_WCHCN_CH353_4S 0x3453
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF 0x5046
+#define PCI_DEVICE_ID_WCHCN_CH353_1S1P 0x5053
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1P 0x7053
+
#define PCI_VENDOR_ID_ACCESSIO 0x494f
#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0
@@ -2657,6 +2684,8 @@
#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+#define PCI_VENDOR_ID_GLENFLY 0x6766
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
@@ -3023,6 +3052,7 @@
#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_WCL 0x4d28
#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
@@ -3044,6 +3074,8 @@
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_FCL 0x67a8
+#define PCI_DEVICE_ID_INTEL_HDA_NVL_S 0x6e50
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
@@ -3112,6 +3144,8 @@
#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7
+#define PCI_DEVICE_ID_INTEL_HDA_PTL_H 0xe328
+#define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428
#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
diff --git a/include/linux/pcie-dwc.h b/include/linux/pcie-dwc.h
new file mode 100644
index 000000000000..8ff778e7aec0
--- /dev/null
+++ b/include/linux/pcie-dwc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021-2023 Alibaba Inc.
+ * Copyright (C) 2025 Linaro Ltd.
+ *
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef LINUX_PCIE_DWC_H
+#define LINUX_PCIE_DWC_H
+
+#include <linux/pci_ids.h>
+
+struct dwc_pcie_vsec_id {
+ u16 vendor_id;
+ u16 vsec_id;
+ u8 vsec_rev;
+};
+
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_vsec_id dwc_pcie_rasdes_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_ALIBABA,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_AMPERE,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_ROCKCHIP,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_SAMSUNG,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ {}
+};
+
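+/*
+ * Example (illustrative sketch): locating a RAS DES VSEC by walking the
+ * table above. pci_find_vsec_capability() and PCI_VNDR_HEADER_REV() are
+ * the regular PCI core helpers; pdev is the device being probed.
+ *
+ *	const struct dwc_pcie_vsec_id *id;
+ *	u32 hdr;
+ *	u16 vsec;
+ *
+ *	for (id = dwc_pcie_rasdes_vsec_ids; id->vendor_id; id++) {
+ *		vsec = pci_find_vsec_capability(pdev, id->vendor_id,
+ *						id->vsec_id);
+ *		if (!vsec)
+ *			continue;
+ *		pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &hdr);
+ *		if (PCI_VNDR_HEADER_REV(hdr) == id->vsec_rev)
+ *			break;
+ *	}
+ */
+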
+#endif /* LINUX_PCIE_DWC_H */
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index da3a6c30f6d2..36073f7b6bb4 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -7,11 +7,12 @@
#ifndef __LINUX_PCS_XPCS_H
#define __LINUX_PCS_XPCS_H
+#include <linux/clk.h>
+#include <linux/fwnode.h>
+#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-
-#define NXP_SJA1105_XPCS_ID 0x00000010
-#define NXP_SJA1110_XPCS_ID 0x00000020
+#include <linux/types.h>
/* AN mode */
#define DW_AN_C73 1
@@ -20,32 +21,43 @@
#define DW_AN_C37_1000BASEX 4
#define DW_10GBASER 5
-/* device vendor OUI */
-#define DW_OUI_WX 0x0018fc80
-
-/* dev_flag */
-#define DW_DEV_TXGBE BIT(0)
+enum dw_xpcs_pcs_id {
+ DW_XPCS_ID_NATIVE = 0,
+ NXP_SJA1105_XPCS_ID = 0x00000010,
+ NXP_SJA1110_XPCS_ID = 0x00000020,
+ DW_XPCS_ID = 0x7996ced0,
+ DW_XPCS_ID_MASK = 0xffffffff,
+};
-struct xpcs_id;
+enum dw_xpcs_pma_id {
+ DW_XPCS_PMA_ID_NATIVE = 0,
+ DW_XPCS_PMA_GEN1_3G_ID,
+ DW_XPCS_PMA_GEN2_3G_ID,
+ DW_XPCS_PMA_GEN2_6G_ID,
+ DW_XPCS_PMA_GEN4_3G_ID,
+ DW_XPCS_PMA_GEN4_6G_ID,
+ DW_XPCS_PMA_GEN5_10G_ID,
+ DW_XPCS_PMA_GEN5_12G_ID,
+ WX_TXGBE_XPCS_PMA_10G_ID = 0xfc806000,
+ /* Meta Platforms OUI 88:25:08, model 0, revision 0 */
+ MP_FBNIC_XPCS_PMA_100G_ID = 0x46904000,
+};
-struct dw_xpcs {
- struct mdio_device *mdiodev;
- const struct xpcs_id *id;
- struct phylink_pcs pcs;
- phy_interface_t interface;
- int dev_flag;
+struct dw_xpcs_info {
+ u32 pcs;
+ u32 pma;
};
+struct dw_xpcs;
+
+struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs);
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
-void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
- phy_interface_t interface, int speed, int duplex);
-int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
- const unsigned long *advertising, unsigned int neg_mode);
-void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
-int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
- int enable);
-struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
- phy_interface_t interface);
+void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact);
+struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode);
void xpcs_destroy(struct dw_xpcs *xpcs);
+struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr);
+void xpcs_destroy_pcs(struct phylink_pcs *pcs);
+
#endif /* __LINUX_PCS_XPCS_H */
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index 4b4e9a98b37b..40ff0ec2b879 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -4,7 +4,7 @@
#ifndef _PDS_CORE_ADMINQ_H_
#define _PDS_CORE_ADMINQ_H_
-#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256
+#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256000 /* usecs */
enum pds_core_adminq_flags {
PDS_AQ_FLAG_FASTPOLL = BIT(1), /* completion poll at 1ms */
@@ -463,7 +463,6 @@ struct pds_core_lif_getattr_cmd {
* @rsvd: Word boundary padding
* @comp_index: Index in the descriptor ring for which this is the completion
* @state: LIF state (enum pds_core_lif_state)
- * @name: LIF name string, 0 terminated
* @features: Features (enum pds_core_hw_features)
* @rsvd2: Word boundary padding
* @color: Color bit
@@ -1179,6 +1178,274 @@ struct pds_lm_host_vf_status_cmd {
u8 status;
};
+enum pds_fwctl_cmd_opcode {
+ PDS_FWCTL_CMD_IDENT = 70,
+ PDS_FWCTL_CMD_RPC = 71,
+ PDS_FWCTL_CMD_QUERY = 72,
+};
+
+/**
+ * struct pds_fwctl_cmd - Firmware control command structure
+ * @opcode: Opcode
+ * @rsvd: Reserved
+ * @ep: Endpoint identifier
+ * @op: Operation identifier
+ */
+struct pds_fwctl_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 ep;
+ __le32 op;
+} __packed;
+
+/**
+ * struct pds_fwctl_comp - Firmware control completion structure
+ * @status: Status of the firmware control operation
+ * @rsvd: Reserved
+ * @comp_index: Completion index in little-endian format
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+} __packed;
+
+/**
+ * struct pds_fwctl_ident_cmd - Firmware control identification command structure
+ * @opcode: Operation code for the command
+ * @rsvd: Reserved
+ * @version: Interface version
+ * @rsvd2: Reserved
+ * @len: Length of the identification data
+ * @ident_pa: Physical address of the identification data
+ */
+struct pds_fwctl_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ u8 version;
+ u8 rsvd2;
+ __le32 len;
+ __le64 ident_pa;
+} __packed;
+
+/* future feature bits here
+ * enum pds_fwctl_features {
+ * };
+ * (compilers don't like empty enums)
+ */
+
+/**
+ * struct pds_fwctl_ident - Firmware control identification structure
+ * @features: Supported features (enum pds_fwctl_features)
+ * @version: Interface version
+ * @rsvd: Reserved
+ * @max_req_sz: Maximum request size
+ * @max_resp_sz: Maximum response size
+ * @max_req_sg_elems: Maximum number of request SGs
+ * @max_resp_sg_elems: Maximum number of response SGs
+ */
+struct pds_fwctl_ident {
+ __le64 features;
+ u8 version;
+ u8 rsvd[3];
+ __le32 max_req_sz;
+ __le32 max_resp_sz;
+ u8 max_req_sg_elems;
+ u8 max_resp_sg_elems;
+} __packed;
+
+enum pds_fwctl_query_entity {
+ PDS_FWCTL_RPC_ROOT = 0,
+ PDS_FWCTL_RPC_ENDPOINT = 1,
+ PDS_FWCTL_RPC_OPERATION = 2,
+};
+
+#define PDS_FWCTL_RPC_OPCODE_CMD_SHIFT 0
+#define PDS_FWCTL_RPC_OPCODE_CMD_MASK GENMASK(15, PDS_FWCTL_RPC_OPCODE_CMD_SHIFT)
+#define PDS_FWCTL_RPC_OPCODE_VER_SHIFT 16
+#define PDS_FWCTL_RPC_OPCODE_VER_MASK GENMASK(23, PDS_FWCTL_RPC_OPCODE_VER_SHIFT)
+
+#define PDS_FWCTL_RPC_OPCODE_GET_CMD(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_CMD_MASK, op)
+#define PDS_FWCTL_RPC_OPCODE_GET_VER(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_VER_MASK, op)
+
+#define PDS_FWCTL_RPC_OPCODE_CMP(op1, op2) \
+ (PDS_FWCTL_RPC_OPCODE_GET_CMD(op1) == PDS_FWCTL_RPC_OPCODE_GET_CMD(op2) && \
+ PDS_FWCTL_RPC_OPCODE_GET_VER(op1) <= PDS_FWCTL_RPC_OPCODE_GET_VER(op2))
+
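+/*
+ * Example (illustrative sketch; the operand roles are an assumption):
+ * PDS_FWCTL_RPC_OPCODE_CMP(op1, op2) matches when the command fields are
+ * equal and op1's version does not exceed op2's, e.g. checking a
+ * requested opcode against one advertised by firmware:
+ *
+ *	bool usable = PDS_FWCTL_RPC_OPCODE_CMP(drv_op, fw_op);
+ */
+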
+/*
+ * FW command attributes that map to the FWCTL scope values
+ */
+#define PDSFC_FW_CMD_ATTR_READ 0x00
+#define PDSFC_FW_CMD_ATTR_DEBUG_READ 0x02
+#define PDSFC_FW_CMD_ATTR_WRITE 0x04
+#define PDSFC_FW_CMD_ATTR_DEBUG_WRITE 0x08
+#define PDSFC_FW_CMD_ATTR_SYNC 0x10
+
+/**
+ * struct pds_fwctl_query_cmd - Firmware control query command structure
+ * @opcode: Operation code for the command
+ * @entity: Entity type to query (enum pds_fwctl_query_entity)
+ * @version: Version of the query data structure supported by the driver
+ * @rsvd: Reserved
+ * @query_data_buf_len: Length of the query data buffer
+ * @query_data_buf_pa: Physical address of the query data buffer
+ * @ep: Endpoint identifier to query (when entity is PDS_FWCTL_RPC_ENDPOINT)
+ * @op: Operation identifier to query (when entity is PDS_FWCTL_RPC_OPERATION)
+ *
+ * This structure is used to send a query command to the firmware control
+ * interface. The structure is packed to ensure there is no padding between
+ * the fields.
+ */
+struct pds_fwctl_query_cmd {
+ u8 opcode;
+ u8 entity;
+ u8 version;
+ u8 rsvd;
+ __le32 query_data_buf_len;
+ __le64 query_data_buf_pa;
+ union {
+ __le32 ep;
+ __le32 op;
+ };
+} __packed;
+
+/**
+ * struct pds_fwctl_query_comp - Firmware control query completion structure
+ * @status: Status of the query command
+ * @rsvd: Reserved
+ * @comp_index: Completion index in little-endian format
+ * @version: Version of the query data structure returned by firmware. This
+ * should be less than or equal to the version supported by the driver
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_query_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 version;
+ u8 rsvd2[2];
+ u8 color;
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data_endpoint - query data for entity PDS_FWCTL_RPC_ROOT
+ * @id: The identifier for the data endpoint
+ */
+struct pds_fwctl_query_data_endpoint {
+ __le32 id;
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data_operation - query data for entity PDS_FWCTL_RPC_ENDPOINT
+ * @id: Operation identifier
+ * @scope: Scope of the operation (enum fwctl_rpc_scope)
+ * @rsvd: Reserved
+ */
+struct pds_fwctl_query_data_operation {
+ __le32 id;
+ u8 scope;
+ u8 rsvd[3];
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data - query data structure
+ * @version: Version of the query data structure
+ * @rsvd: Reserved
+ * @num_entries: Number of entries in the union
+ * @entries: Array of query data entries, depending on the entity type
+ */
+struct pds_fwctl_query_data {
+ u8 version;
+ u8 rsvd[3];
+ __le32 num_entries;
+ u8 entries[] __counted_by_le(num_entries);
+} __packed;
+
+/**
+ * struct pds_fwctl_rpc_cmd - Firmware control RPC command
+ * @opcode: opcode PDS_FWCTL_CMD_RPC
+ * @rsvd: Reserved
+ * @flags: Indicates indirect request and/or response handling
+ * @ep: Endpoint identifier
+ * @op: Operation identifier
+ * @inline_req0: Buffer for inline request
+ * @inline_req1: Buffer for inline request
+ * @req_pa: Physical address of request data
+ * @req_sz: Size of the request
+ * @req_sg_elems: Number of request SGs
+ * @req_rsvd: Reserved
+ * @inline_req2: Buffer for inline request
+ * @resp_pa: Physical address of response data
+ * @resp_sz: Size of the response
+ * @resp_sg_elems: Number of response SGs
+ * @resp_rsvd: Reserved
+ */
+struct pds_fwctl_rpc_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 flags;
+#define PDS_FWCTL_RPC_IND_REQ 0x1
+#define PDS_FWCTL_RPC_IND_RESP 0x2
+ __le32 ep;
+ __le32 op;
+ u8 inline_req0[16];
+ union {
+ u8 inline_req1[16];
+ struct {
+ __le64 req_pa;
+ __le32 req_sz;
+ u8 req_sg_elems;
+ u8 req_rsvd[3];
+ };
+ };
+ union {
+ u8 inline_req2[16];
+ struct {
+ __le64 resp_pa;
+ __le32 resp_sz;
+ u8 resp_sg_elems;
+ u8 resp_rsvd[3];
+ };
+ };
+} __packed;
+
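+/*
+ * Example (illustrative sketch): building an RPC command with an
+ * indirect request buffer. ep_id, op_id, req_dma and req_len are
+ * hypothetical; DMA mapping of the request buffer is the caller's
+ * responsibility.
+ *
+ *	struct pds_fwctl_rpc_cmd cmd = {
+ *		.opcode = PDS_FWCTL_CMD_RPC,
+ *		.flags = cpu_to_le16(PDS_FWCTL_RPC_IND_REQ),
+ *		.ep = cpu_to_le32(ep_id),
+ *		.op = cpu_to_le32(op_id),
+ *		.req_pa = cpu_to_le64(req_dma),
+ *		.req_sz = cpu_to_le32(req_len),
+ *	};
+ */
+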
+/**
+ * struct pds_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Reserved
+ */
+struct pds_sg_elem {
+ __le64 addr;
+ __le32 len;
+ u8 rsvd[4];
+} __packed;
+
+/**
+ * struct pds_fwctl_rpc_comp - Completion of a firmware control RPC
+ * @status: Status of the command
+ * @rsvd: Reserved
+ * @comp_index: Completion index of the command
+ * @err: Error code, if any, from the RPC
+ * @resp_sz: Size of the response
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_rpc_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 err;
+ __le32 resp_sz;
+ u8 rsvd2[3];
+ u8 color;
+} __packed;
+
union pds_core_adminq_cmd {
u8 opcode;
u8 bytes[64];
@@ -1216,6 +1483,11 @@ union pds_core_adminq_cmd {
struct pds_lm_dirty_enable_cmd lm_dirty_enable;
struct pds_lm_dirty_disable_cmd lm_dirty_disable;
struct pds_lm_dirty_seq_ack_cmd lm_dirty_seq_ack;
+
+ struct pds_fwctl_cmd fwctl;
+ struct pds_fwctl_ident_cmd fwctl_ident;
+ struct pds_fwctl_rpc_cmd fwctl_rpc;
+ struct pds_fwctl_query_cmd fwctl_query;
};
union pds_core_adminq_comp {
@@ -1243,6 +1515,10 @@ union pds_core_adminq_comp {
struct pds_lm_state_size_comp lm_state_size;
struct pds_lm_dirty_status_comp lm_dirty_status;
+
+ struct pds_fwctl_comp fwctl;
+ struct pds_fwctl_rpc_comp fwctl_rpc;
+ struct pds_fwctl_query_comp fwctl_query;
};
#ifndef __CHECKER__
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
index 5802e1deef24..b193adbe7cc3 100644
--- a/include/linux/pds/pds_common.h
+++ b/include/linux/pds/pds_common.h
@@ -29,6 +29,7 @@ enum pds_core_vif_types {
PDS_DEV_TYPE_ETH = 3,
PDS_DEV_TYPE_RDMA = 4,
PDS_DEV_TYPE_LM = 5,
+ PDS_DEV_TYPE_FWCTL = 6,
/* new ones added before this line */
PDS_DEV_TYPE_MAX = 16 /* don't change - used in struct size */
@@ -40,6 +41,7 @@ enum pds_core_vif_types {
#define PDS_DEV_TYPE_ETH_STR "Eth"
#define PDS_DEV_TYPE_RDMA_STR "RDMA"
#define PDS_DEV_TYPE_LM_STR "LM"
+#define PDS_DEV_TYPE_FWCTL_STR "fwctl"
#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
#define PDS_VFIO_LM_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_LM_STR "." PDS_DEV_TYPE_VFIO_STR
diff --git a/include/linux/pe.h b/include/linux/pe.h
index fdf9c95709ba..cd2b7275385f 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -39,113 +39,160 @@
*/
#define LINUX_PE_MAGIC 0x818223cd
-#define MZ_MAGIC 0x5a4d /* "MZ" */
+#define IMAGE_DOS_SIGNATURE 0x5a4d /* "MZ" */
-#define PE_MAGIC 0x00004550 /* "PE\0\0" */
-#define PE_OPT_MAGIC_PE32 0x010b
-#define PE_OPT_MAGIC_PE32_ROM 0x0107
-#define PE_OPT_MAGIC_PE32PLUS 0x020b
+#define IMAGE_NT_SIGNATURE 0x00004550 /* "PE\0\0" */
+
+#define IMAGE_ROM_OPTIONAL_HDR_MAGIC 0x0107 /* ROM image (for R3000/R4000/R10000/ALPHA), without MZ and PE\0\0 signatures */
+#define IMAGE_NT_OPTIONAL_HDR32_MAGIC 0x010b /* PE32 executable image */
+#define IMAGE_NT_OPTIONAL_HDR64_MAGIC 0x020b /* PE32+ executable image */
/* machine type */
-#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000
-#define IMAGE_FILE_MACHINE_AM33 0x01d3
-#define IMAGE_FILE_MACHINE_AMD64 0x8664
-#define IMAGE_FILE_MACHINE_ARM 0x01c0
-#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
-#define IMAGE_FILE_MACHINE_ARM64 0xaa64
-#define IMAGE_FILE_MACHINE_EBC 0x0ebc
-#define IMAGE_FILE_MACHINE_I386 0x014c
-#define IMAGE_FILE_MACHINE_IA64 0x0200
-#define IMAGE_FILE_MACHINE_M32R 0x9041
-#define IMAGE_FILE_MACHINE_MIPS16 0x0266
-#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366
-#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466
-#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
-#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
-#define IMAGE_FILE_MACHINE_R4000 0x0166
-#define IMAGE_FILE_MACHINE_RISCV32 0x5032
-#define IMAGE_FILE_MACHINE_RISCV64 0x5064
-#define IMAGE_FILE_MACHINE_RISCV128 0x5128
-#define IMAGE_FILE_MACHINE_SH3 0x01a2
-#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
-#define IMAGE_FILE_MACHINE_SH3E 0x01a4
-#define IMAGE_FILE_MACHINE_SH4 0x01a6
-#define IMAGE_FILE_MACHINE_SH5 0x01a8
-#define IMAGE_FILE_MACHINE_THUMB 0x01c2
-#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
-#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232
-#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264
+#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000 /* Unknown architecture */
+#define IMAGE_FILE_MACHINE_TARGET_HOST 0x0001 /* Interacts with the host and not a WOW64 guest (not for file image) */
+#define IMAGE_FILE_MACHINE_ALPHA_OLD 0x0183 /* DEC Alpha AXP 32-bit (old images) */
+#define IMAGE_FILE_MACHINE_ALPHA 0x0184 /* DEC Alpha AXP 32-bit */
+#define IMAGE_FILE_MACHINE_ALPHA64 0x0284 /* DEC Alpha AXP 64-bit (with 8kB page size) */
+#define IMAGE_FILE_MACHINE_AXP64 IMAGE_FILE_MACHINE_ALPHA64
+#define IMAGE_FILE_MACHINE_AM33 0x01d3 /* Matsushita AM33, now Panasonic MN103 */
+#define IMAGE_FILE_MACHINE_AMD64 0x8664 /* AMD64 (x64) */
+#define IMAGE_FILE_MACHINE_ARM 0x01c0 /* ARM Little-Endian (ARMv4) */
+#define IMAGE_FILE_MACHINE_THUMB 0x01c2 /* ARM Thumb Little-Endian (ARMv4T) */
+#define IMAGE_FILE_MACHINE_ARMNT 0x01c4 /* ARM Thumb-2 Little-Endian (ARMv7) */
+#define IMAGE_FILE_MACHINE_ARMV7 IMAGE_FILE_MACHINE_ARMNT
+#define IMAGE_FILE_MACHINE_ARM64 0xaa64 /* ARM64 Little-Endian (Classic ABI) */
+#define IMAGE_FILE_MACHINE_ARM64EC 0xa641 /* ARM64 Little-Endian (Emulation Compatible ABI for AMD64) */
+#define IMAGE_FILE_MACHINE_ARM64X 0xa64e /* ARM64 Little-Endian (fat binary with both Classic ABI and EC ABI code) */
+#define IMAGE_FILE_MACHINE_CEE 0xc0ee /* COM+ Execution Engine (CLR pure MSIL object files) */
+#define IMAGE_FILE_MACHINE_CEF 0x0cef /* Windows CE 3.0 Common Executable Format (CEF bytecode) */
+#define IMAGE_FILE_MACHINE_CHPE_X86 0x3a64 /* ARM64 Little-Endian (Compiled Hybrid PE ABI for I386) */
+#define IMAGE_FILE_MACHINE_HYBRID_X86 IMAGE_FILE_MACHINE_CHPE_X86
+#define IMAGE_FILE_MACHINE_EBC 0x0ebc /* EFI/UEFI Byte Code */
+#define IMAGE_FILE_MACHINE_I386 0x014c /* Intel 386 (x86) */
+#define IMAGE_FILE_MACHINE_I860 0x014d /* Intel 860 (N10) */
+#define IMAGE_FILE_MACHINE_IA64 0x0200 /* Intel IA-64 (with 8kB page size) */
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232 /* LoongArch 32-bit processor family */
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264 /* LoongArch 64-bit processor family */
+#define IMAGE_FILE_MACHINE_M32R 0x9041 /* Mitsubishi M32R 32-bit Little-Endian */
+#define IMAGE_FILE_MACHINE_M68K 0x0268 /* Motorola 68000 series */
+#define IMAGE_FILE_MACHINE_MIPS16 0x0266 /* MIPS III with MIPS16 ASE Little-Endian */
+#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 /* MIPS III with FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 /* MIPS III with MIPS16 ASE and FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_MPPC_601 0x0601 /* PowerPC 32-bit Big-Endian */
+#define IMAGE_FILE_MACHINE_OMNI 0xace1 /* Microsoft OMNI VM (omniprox.dll) */
+#define IMAGE_FILE_MACHINE_PARISC 0x0290 /* HP PA-RISC */
+#define IMAGE_FILE_MACHINE_POWERPC 0x01f0 /* PowerPC 32-bit Little-Endian */
+#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 /* PowerPC 32-bit with FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_POWERPCBE 0x01f2 /* PowerPC 64-bit Big-Endian */
+#define IMAGE_FILE_MACHINE_R3000 0x0162 /* MIPS I Little-Endian */
+#define IMAGE_FILE_MACHINE_R3000_BE 0x0160 /* MIPS I Big-Endian */
+#define IMAGE_FILE_MACHINE_R4000 0x0166 /* MIPS III Little-Endian (with 1kB or 4kB page size) */
+#define IMAGE_FILE_MACHINE_R10000 0x0168 /* MIPS IV Little-Endian */
+#define IMAGE_FILE_MACHINE_RISCV32 0x5032 /* RISC-V 32-bit address space */
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064 /* RISC-V 64-bit address space */
+#define IMAGE_FILE_MACHINE_RISCV128 0x5128 /* RISC-V 128-bit address space */
+#define IMAGE_FILE_MACHINE_SH3 0x01a2 /* Hitachi SH-3 32-bit Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 /* Hitachi SH-3 DSP 32-bit (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH3E 0x01a4 /* Hitachi SH-3E Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH4 0x01a6 /* Hitachi SH-4 32-bit Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH5 0x01a8 /* Hitachi SH-5 64-bit */
+#define IMAGE_FILE_MACHINE_TAHOE 0x07cc /* Intel EM machine */
+#define IMAGE_FILE_MACHINE_TRICORE 0x0520 /* Infineon AUDO 32-bit */
+#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 /* MIPS Windows CE v2 Little-Endian */
/* flags */
-#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
-#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002
-#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004
-#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008
-#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010
-#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020
-#define IMAGE_FILE_16BIT_MACHINE 0x0040
-#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080
-#define IMAGE_FILE_32BIT_MACHINE 0x0100
-#define IMAGE_FILE_DEBUG_STRIPPED 0x0200
-#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400
-#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800
-#define IMAGE_FILE_SYSTEM 0x1000
-#define IMAGE_FILE_DLL 0x2000
-#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
-#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
-
-#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
-#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
-#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
-
-#define IMAGE_SUBSYSTEM_UNKNOWN 0
-#define IMAGE_SUBSYSTEM_NATIVE 1
-#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2
-#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3
-#define IMAGE_SUBSYSTEM_POSIX_CUI 7
-#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9
-#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10
-#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11
-#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12
-#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13
-#define IMAGE_SUBSYSTEM_XBOX 14
-
-#define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040
-#define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080
-#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100
-#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200
-#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400
-#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800
-#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
-#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
-
-#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT 0x0001
-#define IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT 0x0040
-
-/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
-#define IMAGE_SCN_RESERVED_0 0x00000001
-#define IMAGE_SCN_RESERVED_1 0x00000002
-#define IMAGE_SCN_RESERVED_2 0x00000004
-#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
-#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 /* Relocation info stripped from file */
+#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 /* File is executable (i.e. no unresolved external references) */
+#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 /* Line numbers stripped from file */
+#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 /* Local symbols stripped from file */
+#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010 /* Aggressively trim working set */
+#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 /* App can handle >2gb addresses (image can be loaded at address above 2GB) */
+#define IMAGE_FILE_16BIT_MACHINE 0x0040 /* 16 bit word machine */
+#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 /* Bytes of machine word are reversed (should be set together with IMAGE_FILE_BYTES_REVERSED_HI) */
+#define IMAGE_FILE_32BIT_MACHINE 0x0100 /* 32 bit word machine */
+#define IMAGE_FILE_DEBUG_STRIPPED 0x0200 /* Debugging info stripped from file in .DBG file */
+#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 /* If Image is on removable media, copy and run from the swap file */
+#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 /* If Image is on Net, copy and run from the swap file */
+#define IMAGE_FILE_SYSTEM 0x1000 /* System kernel-mode file (can't be loaded in user-mode) */
+#define IMAGE_FILE_DLL 0x2000 /* File is a DLL */
+#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 /* File should only be run on a UP (uniprocessor) machine */
+#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 /* Bytes of machine word are reversed (should be set together with IMAGE_FILE_BYTES_REVERSED_LO) */
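/*
 * Illustration (editor's sketch, not part of this patch): the flags above
 * live in the COFF file header "characteristics" field and are tested as
 * plain bitmasks. A hypothetical validity check for a kernel-mode image
 * (function name invented for this example):
 */
static inline bool pe_is_kernel_mode_image(uint16_t characteristics)
{
	/* must be a fully linked executable, not a DLL */
	if (!(characteristics & IMAGE_FILE_EXECUTABLE_IMAGE) ||
	    (characteristics & IMAGE_FILE_DLL))
		return false;
	return characteristics & IMAGE_FILE_SYSTEM;
}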
+
+/* subsys */
+#define IMAGE_SUBSYSTEM_UNKNOWN 0 /* Unknown subsystem */
+#define IMAGE_SUBSYSTEM_NATIVE 1 /* No subsystem required (NT device drivers and NT native system processes) */
+#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2 /* Windows graphical user interface (GUI) subsystem */
+#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3 /* Windows character-mode user interface (CUI) subsystem */
+#define IMAGE_SUBSYSTEM_WINDOWS_OLD_CE_GUI 4 /* Old Windows CE subsystem */
+#define IMAGE_SUBSYSTEM_OS2_CUI 5 /* OS/2 CUI subsystem */
+#define IMAGE_SUBSYSTEM_RESERVED_6 6
+#define IMAGE_SUBSYSTEM_POSIX_CUI 7 /* POSIX CUI subsystem */
+#define IMAGE_SUBSYSTEM_MMOSA 8 /* MMOSA/Native Win32E */
+#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9 /* Windows CE subsystem */
+#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10 /* Extensible Firmware Interface (EFI) application */
+#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11 /* EFI driver with boot services */
+#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12 /* EFI driver with run-time services */
+#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13 /* EFI ROM image */
+#define IMAGE_SUBSYSTEM_XBOX 14 /* Xbox system */
+#define IMAGE_SUBSYSTEM_RESERVED_15 15
+#define IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION 16 /* Windows Boot application */
+#define IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG 17 /* Xbox Code Catalog */
+
+/* dll_flags */
+#define IMAGE_LIBRARY_PROCESS_INIT 0x0001 /* DLL initialization function called just after process initialization */
+#define IMAGE_LIBRARY_PROCESS_TERM 0x0002 /* DLL initialization function called just before process termination */
+#define IMAGE_LIBRARY_THREAD_INIT 0x0004 /* DLL initialization function called just after thread initialization */
+#define IMAGE_LIBRARY_THREAD_TERM 0x0008 /* DLL initialization function called just before thread termination */
+#define IMAGE_DLLCHARACTERISTICS_RESERVED_4 0x0010
+#define IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA 0x0020 /* ASLR with 64 bit address space (image can be loaded at address above 4GB) */
+#define IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE 0x0040 /* The DLL can be relocated at load time */
+#define IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY 0x0080 /* Code integrity checks are forced */
+#define IMAGE_DLLCHARACTERISTICS_NX_COMPAT 0x0100 /* Image is compatible with data execution prevention */
+#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200 /* Image is isolation aware, but should not be isolated (prevents loading of manifest file) */
+#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 /* Image does not use SEH, no SE handler may reside in this image */
+#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 /* Do not bind the image */
+#define IMAGE_DLLCHARACTERISTICS_X86_THUNK 0x1000 /* Image is a Wx86 Thunk DLL (for non-x86/risc DLL files) */
+#define IMAGE_DLLCHARACTERISTICS_APPCONTAINER 0x1000 /* Image should execute in an AppContainer (for EXE Metro Apps in Windows 8) */
+#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 /* A WDM driver */
+#define IMAGE_DLLCHARACTERISTICS_GUARD_CF 0x4000 /* Image supports Control Flow Guard */
+#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 /* The image is terminal server (Remote Desktop Services) aware */
+
+/* IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS flags */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT 0x0001 /* Image is Control-flow Enforcement Technology Shadow Stack compatible */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT_STRICT_MODE 0x0002 /* CET is enforced in strict mode */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_SET_CONTEXT_IP_VALIDATION_RELAXED_MODE 0x0004 /* Relaxed mode for Context IP Validation under CET is allowed */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_DYNAMIC_APIS_ALLOW_IN_PROC 0x0008 /* Use of dynamic APIs is restricted to processes only */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_RESERVED_1 0x0010
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_RESERVED_2 0x0020
+#define IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT 0x0040 /* All branch targets in all image code sections are annotated with forward-edge control flow integrity guard instructions */
+#define IMAGE_DLLCHARACTERISTICS_EX_HOTPATCH_COMPATIBLE 0x0080 /* Image can be modified while in use, hotpatch-compatible */
+
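/*
 * Illustration (editor's sketch, not part of this patch): the EX flags above
 * are carried in the 32-bit payload of an IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS
 * debug directory entry. Given that payload, shadow-stack support could be
 * probed like this (helper name hypothetical):
 */
static inline bool pe_image_is_cet_compatible(uint32_t ex_dllcharacteristics)
{
	return ex_dllcharacteristics & IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT;
}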
+/* section_header flags */
+#define IMAGE_SCN_SCALE_INDEX 0x00000001 /* address of tls index is scaled = multiplied by 4 (for .tls section on MIPS only) */
+#define IMAGE_SCN_TYPE_NO_LOAD 0x00000002 /* reserved */
+#define IMAGE_SCN_TYPE_GROUPED 0x00000004 /* obsolete (used for 16-bit offset code) */
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* .o only - don't pad - obsolete (same as IMAGE_SCN_ALIGN_1BYTES) */
+#define IMAGE_SCN_TYPE_COPY 0x00000010 /* reserved */
#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
-#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
-#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
-#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* .o only - other type than code, data or info */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .o only - .drectve comments */
+#define IMAGE_SCN_LNK_OVERLAY 0x00000400 /* section contains overlay */
#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
-#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
-#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
-#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
-/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
-#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
-#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
-#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
-#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+#define IMAGE_SCN_RESERVED_13 0x00002000 /* spec omits this */
+#define IMAGE_SCN_MEM_PROTECTED 0x00004000 /* section is memory protected (for M68K) */
+#define IMAGE_SCN_NO_DEFER_SPEC_EXC 0x00004000 /* reset speculative exceptions handling bits in the TLB entries (for non-M68K) */
+#define IMAGE_SCN_MEM_FARDATA 0x00008000 /* section uses FAR_EXTERNAL relocations (for M68K) */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data (for non-M68K) */
+#define IMAGE_SCN_MEM_SYSHEAP 0x00010000 /* use system heap (for M68K) */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00020000 /* section can be released from RAM (for M68K) */
+#define IMAGE_SCN_MEM_16BIT 0x00020000 /* section is 16-bit (for non-M68K where it makes sense: I386, THUMB, MIPS16, MIPSFPU16, ...) */
+#define IMAGE_SCN_MEM_LOCKED 0x00040000 /* prevent the section from being moved (for M68K and .o I386) */
+#define IMAGE_SCN_MEM_PRELOAD 0x00080000 /* section is preload to RAM (for M68K and .o I386) */
/* and here they just stuck a 1-byte integer in the middle of a bitfield */
-#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* .o only - it does what it says on the box */
#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
@@ -159,7 +206,9 @@
#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
-#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_ALIGN_RESERVED 0x00f00000
+#define IMAGE_SCN_ALIGN_MASK 0x00f00000
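/*
 * Illustration (editor's sketch, not part of this patch): the ALIGN bits are
 * a 4-bit encoded exponent rather than independent flags, so the alignment
 * is recovered arithmetically; a zero field means "default alignment"
 * (helper name hypothetical):
 */
static inline uint32_t pe_scn_alignment(uint32_t scn_flags)
{
	uint32_t field = (scn_flags & IMAGE_SCN_ALIGN_MASK) >> 20;

	return field ? 1U << (field - 1) : 0;	/* 1 -> 1 byte ... 14 -> 8192 bytes */
}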
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* .o only - extended relocations */
#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
@@ -168,8 +217,28 @@
#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
-#define IMAGE_DEBUG_TYPE_CODEVIEW 2
-#define IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS 20
+#define IMAGE_DEBUG_TYPE_UNKNOWN 0 /* Unknown value, ignored by all tools */
+#define IMAGE_DEBUG_TYPE_COFF 1 /* COFF debugging information */
+#define IMAGE_DEBUG_TYPE_CODEVIEW 2 /* CodeView debugging information or Visual C++ Program Database debugging information */
+#define IMAGE_DEBUG_TYPE_FPO 3 /* Frame pointer omission (FPO) information */
+#define IMAGE_DEBUG_TYPE_MISC 4 /* Location of DBG file with CodeView debugging information */
+#define IMAGE_DEBUG_TYPE_EXCEPTION 5 /* Exception information, copy of .pdata section */
+#define IMAGE_DEBUG_TYPE_FIXUP 6 /* Fixup information */
+#define IMAGE_DEBUG_TYPE_OMAP_TO_SRC 7 /* The mapping from an RVA in image to an RVA in source image */
+#define IMAGE_DEBUG_TYPE_OMAP_FROM_SRC 8 /* The mapping from an RVA in source image to an RVA in image */
+#define IMAGE_DEBUG_TYPE_BORLAND 9 /* Borland debugging information */
+#define IMAGE_DEBUG_TYPE_RESERVED10 10 /* Coldpath / Hotpatch debug information */
+#define IMAGE_DEBUG_TYPE_CLSID 11 /* CLSID */
+#define IMAGE_DEBUG_TYPE_VC_FEATURE 12 /* Visual C++ counts / statistics */
+#define IMAGE_DEBUG_TYPE_POGO 13 /* COFF group information, data for profile-guided optimization */
+#define IMAGE_DEBUG_TYPE_ILTCG 14 /* Incremental link-time code generation */
+#define IMAGE_DEBUG_TYPE_MPX 15 /* Intel Memory Protection Extensions */
+#define IMAGE_DEBUG_TYPE_REPRO 16 /* PE determinism or reproducibility */
+#define IMAGE_DEBUG_TYPE_EMBEDDED_PORTABLE_PDB 17 /* Embedded Portable PDB debugging information */
+#define IMAGE_DEBUG_TYPE_SPGO 18 /* Sample profile-guided optimization */
+#define IMAGE_DEBUG_TYPE_PDBCHECKSUM 19 /* PDB Checksum */
+#define IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS 20 /* Extended DLL characteristics bits */
+#define IMAGE_DEBUG_TYPE_PERFMAP 21 /* Location of associated Ready To Run PerfMap file */
#ifndef __ASSEMBLY__
@@ -235,7 +304,7 @@ struct pe32_opt_hdr {
uint16_t image_minor; /* minor image version */
uint16_t subsys_major; /* major subsystem version */
uint16_t subsys_minor; /* minor subsystem version */
- uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t win32_version; /* win32 version reported at runtime */
uint32_t image_size; /* image size */
uint32_t header_size; /* header size rounded up to
file_align */
@@ -246,7 +315,7 @@ struct pe32_opt_hdr {
uint32_t stack_size; /* amt of stack required */
uint32_t heap_size_req; /* amt of heap requested */
uint32_t heap_size; /* amt of heap required */
- uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t loader_flags; /* loader flags */
uint32_t data_dirs; /* number of data dir entries */
};
@@ -269,7 +338,7 @@ struct pe32plus_opt_hdr {
uint16_t image_minor; /* minor image version */
uint16_t subsys_major; /* major subsystem version */
uint16_t subsys_minor; /* minor subsystem version */
- uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t win32_version; /* win32 version reported at runtime */
uint32_t image_size; /* image size */
uint32_t header_size; /* header size rounded up to
file_align */
@@ -280,7 +349,7 @@ struct pe32plus_opt_hdr {
uint64_t stack_size; /* amt of stack required */
uint64_t heap_size_req; /* amt of heap requested */
uint64_t heap_size; /* amt of heap required */
- uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t loader_flags; /* loader flags */
uint32_t data_dirs; /* number of data dir entries */
};
@@ -301,10 +370,10 @@ struct data_directory {
struct data_dirent global_ptr; /* global pointer reg. Size=0 */
struct data_dirent tls; /* .tls */
struct data_dirent load_config; /* load configuration structure */
- struct data_dirent bound_imports; /* no idea */
+ struct data_dirent bound_imports; /* bound import table */
struct data_dirent import_addrs; /* import address table */
struct data_dirent delay_imports; /* delay-load import table */
- struct data_dirent clr_runtime_hdr; /* .cor (object only) */
+ struct data_dirent clr_runtime_hdr; /* .cor (clr/.net executables) */
struct data_dirent reserved;
};
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
index ff8ae9c26c80..601cdd086bf6 100644
--- a/include/linux/peci-cpu.h
+++ b/include/linux/peci-cpu.h
@@ -6,6 +6,30 @@
#include <linux/types.h>
+/* Copied from x86 <asm/processor.h> */
+#define X86_VENDOR_INTEL 0
+
+/* Copied from x86 <asm/cpu_device_id.h> */
+#define VFM_MODEL_BIT 0
+#define VFM_FAMILY_BIT 8
+#define VFM_VENDOR_BIT 16
+#define VFM_RSVD_BIT 24
+
+#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
+#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
+#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
+
+#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
+#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
+#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
+
+#define VFM_MAKE(_vendor, _family, _model) ( \
+ ((_model) << VFM_MODEL_BIT) | \
+ ((_family) << VFM_FAMILY_BIT) | \
+ ((_vendor) << VFM_VENDOR_BIT) \
+)
+/* End of copied code */
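/*
 * Illustration (editor's sketch, not part of this patch): with the copied
 * VFM_* helpers, a PECI driver can pack and unpack the vendor-family-model
 * value stored in peci_device.info.x86_vfm, e.g.:
 *
 *	u32 vfm = VFM_MAKE(X86_VENDOR_INTEL, 6, 0x8f);	(family 6, model 0x8f)
 *	u8 model = VFM_MODEL(vfm);			(yields 0x8f again)
 */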
+
#include "../../arch/x86/include/asm/intel-family.h"
#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 90e241458ef6..3e0bc37591d6 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -59,8 +59,7 @@ static inline struct peci_controller *to_peci_controller(void *d)
* struct peci_device - PECI device
* @dev: device object to register PECI device to the device model
* @info: PECI device characteristics
- * @info.family: device family
- * @info.model: device model
+ * @info.x86_vfm: device vendor-family-model
* @info.peci_revision: PECI revision supported by the PECI device
* @info.socket_id: the socket ID represented by the PECI device
* @addr: address used on the PECI bus connected to the parent controller
@@ -73,8 +72,7 @@ static inline struct peci_controller *to_peci_controller(void *d)
struct peci_device {
struct device dev;
struct {
- u16 family;
- u8 model;
+ u32 x86_vfm;
u8 peci_revision;
u8 socket_id;
} info;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index ec3573119923..43c854a273c3 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -26,13 +26,11 @@
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
-#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
#endif
@@ -54,7 +52,7 @@
__section(".discard") __attribute__((unused))
/*
- * s390 and alpha modules require percpu variables to be defined as
+ * alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
* will be located outside of the usually addressable area.
@@ -65,14 +63,15 @@
* 1. The symbol must be globally unique, even the static ones.
* 2. Static percpu variables cannot be defined inside a function.
*
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
*
* To ensure that the generic code observes the above two
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
* definition is used for all cases.
*/
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+ defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
* __pcpu_scope_* dummy variable is used to enforce scope. It
* receives the static modifier when it's used in front of
@@ -115,14 +114,17 @@
DEFINE_PER_CPU_SECTION(type, name, "")
/*
- * Declaration/definition used for per-CPU variables that must come first in
- * the set of variables.
+ * Declaration/definition used for per-CPU variables that are frequently
+ * accessed and should be in a single cacheline.
+ *
+ * For use only by architecture and core code. Only use scalar or pointer
+ * types to maximize density.
*/
-#define DECLARE_PER_CPU_FIRST(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DECLARE_PER_CPU_CACHE_HOT(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)
-#define DEFINE_PER_CPU_FIRST(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DEFINE_PER_CPU_CACHE_HOT(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)
/*
* Declaration/definition used for per-CPU variables that must be cacheline
@@ -220,15 +222,17 @@ do { \
(void)__vpp_verify; \
} while (0)
+#define PERCPU_PTR(__p) \
+ (TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p))
+
#ifdef CONFIG_SMP
/*
- * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
- * to prevent the compiler from making incorrect assumptions about the
- * pointer value. The weird cast keeps both GCC and sparse happy.
+ * Add an offset to a pointer. Use RELOC_HIDE() to prevent the compiler
+ * from making incorrect assumptions about the pointer value.
*/
#define SHIFT_PERCPU_PTR(__p, __offset) \
- RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+ RELOC_HIDE(PERCPU_PTR(__p), (__offset))
#define per_cpu_ptr(ptr, cpu) \
({ \
@@ -254,13 +258,13 @@ do { \
#else /* CONFIG_SMP */
-#define VERIFY_PERCPU_PTR(__p) \
+#define per_cpu_ptr(ptr, cpu) \
({ \
- __verify_pcpu_ptr(__p); \
- (typeof(*(__p)) __kernel __force *)(__p); \
+ (void)(cpu); \
+ __verify_pcpu_ptr(ptr); \
+ PERCPU_PTR(ptr); \
})
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
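/*
 * Illustration (editor's sketch, not part of this patch): with or without
 * CONFIG_SMP, callers use the same idiom; on UP the cpu argument is
 * evaluated and discarded (variable and function names hypothetical):
 *
 *	DEFINE_PER_CPU(int, hits);
 *
 *	int snapshot_hits(int cpu)
 *	{
 *		return *per_cpu_ptr(&hits, cpu);
 *	}
 */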
@@ -315,7 +319,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return(stem, variable) \
({ \
- typeof(variable) pscr_ret__; \
+ TYPEOF_UNQUAL(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable); break; \
@@ -330,7 +334,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return2(stem, variable, ...) \
({ \
- typeof(variable) pscr2_ret__; \
+ TYPEOF_UNQUAL(variable) pscr2_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
@@ -372,7 +376,7 @@ do { \
} while (0)
/*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@gentwo.org>
*
* Optimized manipulation for memory allocated through the per cpu
* allocator or for addresses of per cpu variables.
@@ -475,6 +479,12 @@ do { \
raw_cpu_cmpxchg(pcp, oval, nval); \
})
+#define __this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+({ \
+ __this_cpu_preempt_check("try_cmpxchg"); \
+ raw_cpu_try_cmpxchg(pcp, ovalp, nval); \
+})
+
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
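/*
 * Illustration (editor's sketch, not part of this patch): the new
 * __this_cpu_try_cmpxchg() follows the try_cmpxchg() convention - on failure
 * the current value is written back through ovalp, so an update loop avoids
 * re-reading the variable. Must run with preemption disabled, like all
 * __this_cpu ops (names hypothetical):
 *
 *	DEFINE_PER_CPU(unsigned long, budget);
 *
 *	unsigned long old = __this_cpu_read(budget);
 *	do {
 *		if (!old)
 *			break;
 *	} while (!__this_cpu_try_cmpxchg(budget, &old, old - 1));
 */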
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 36b942b67b7d..288f5235649a 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
@@ -42,9 +43,10 @@ is_static struct percpu_rw_semaphore name = { \
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool);
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem,
+ bool freezable)
{
might_sleep();
@@ -62,7 +64,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ __percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
* bleeding the critical section out.
@@ -70,6 +72,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_internal(sem, false);
+}
+
+static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem,
+ bool freeze)
+{
+ percpu_down_read_internal(sem, freeze);
+}
+
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
bool ret = true;
@@ -81,7 +94,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
@@ -125,6 +138,13 @@ extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
+ percpu_down_read(_T), percpu_up_read(_T))
+DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))
+
+DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
+ percpu_down_write(_T), percpu_up_write(_T))
+
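/*
 * Illustration (editor's sketch, not part of this patch): with the guards
 * above, <linux/cleanup.h> scoping can replace explicit up calls (function
 * names hypothetical):
 *
 *	static int update_state(struct percpu_rw_semaphore *sem)
 *	{
 *		guard(percpu_write)(sem);
 *		return do_update();	(percpu_up_write() runs on return)
 *	}
 */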
static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
{
return atomic_read(&sem->block);
@@ -145,7 +165,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
- bool read, unsigned long ip)
+ unsigned long ip)
{
lock_release(&sem->dep_map, ip);
}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 03053de557cf..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -6,7 +6,6 @@
#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
-#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
#include <linux/cleanup.h>
@@ -16,11 +15,7 @@
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
-#ifdef CONFIG_MEM_ALLOC_PROFILING
-#define PERCPU_MODULE_RESERVE (8 << 13)
-#else
#define PERCPU_MODULE_RESERVE (8 << 10)
-#endif
#else
#define PERCPU_MODULE_RESERVE 0
#endif
@@ -42,7 +37,11 @@
PCPU_MIN_ALLOC_SHIFT)
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
-#define PERCPU_DYNAMIC_SIZE_SHIFT 12
+# if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PAGE_SIZE_4KB)
+# define PERCPU_DYNAMIC_SIZE_SHIFT 13
+# else
+# define PERCPU_DYNAMIC_SIZE_SHIFT 12
+# endif /* LOCKDEP and PAGE_SIZE > 4KiB */
#else
#define PERCPU_DYNAMIC_SIZE_SHIFT 10
#endif
@@ -136,7 +135,6 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
gfp_t gfp) __alloc_size(1);
-extern size_t pcpu_alloc_size(void __percpu *__pdata);
#define __alloc_percpu_gfp(_size, _align, _gfp) \
alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index b3b34f6670cf..52b37f7bdbf9 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,10 +17,14 @@
#ifdef CONFIG_ARM_PMU
/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
+ * The Armv7 and Armv8.8-or-earlier CPU PMUs support up to 32 event counters.
+ * The Armv8.9/9.4 and later CPU PMUs support up to 33 event counters.
*/
+#ifdef CONFIG_ARM
#define ARMPMU_MAX_HWEVENTS 32
-
+#else
+#define ARMPMU_MAX_HWEVENTS 33
+#endif
/*
* ARM PMU hw_event flags
*/
@@ -66,6 +70,11 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
int irq;
+
+ struct perf_branch_stack *branch_stack;
+
+ /* Active events requesting branch records */
+ unsigned int branch_users;
};
enum armpmu_attr_groups {
@@ -80,7 +89,6 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
- int pmuver;
irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
@@ -96,20 +104,28 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int num_events;
+ /*
+ * Called by KVM to map the PMUv3 event space onto non-PMUv3 hardware.
+ */
+ int (*map_pmuv3_event)(unsigned int eventsel);
+ DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS);
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
- DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
-#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
- DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
- /* store the PMMIR_EL1 to expose slots */
+
+ /* PMUv3 only */
+ int pmuver;
+ bool has_smt;
u64 reg_pmmir;
+ u64 reg_brbidr;
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+ DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* Only to be used by ACPI probing code */
unsigned long acpi_cpuid;
@@ -173,8 +189,8 @@ bool arm_pmu_irq_is_nmi(void);
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 46377e134d67..d698efba28a2 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -6,8 +6,9 @@
#ifndef __PERF_ARM_PMUV3_H
#define __PERF_ARM_PMUV3_H
-#define ARMV8_PMU_MAX_COUNTERS 32
-#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31
+#define ARMV8_PMU_CYCLE_IDX 31
+#define ARMV8_PMU_INSTR_IDX 32 /* Not accessible from AArch32 */
/*
* Common architectural and microarchitectural event numbers.
@@ -227,8 +228,10 @@
*/
#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
#define ARMV8_PMU_OVSR_C BIT(31)
+#define ARMV8_PMU_OVSR_F BIT_ULL(32) /* arm64 only */
/* Mask for writable bits is both P and C fields */
-#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \
+ ARMV8_PMU_OVSR_F)
/*
* PMXEVTYPER: Event selection reg
@@ -254,6 +257,7 @@
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN (1 << 4) /* Fine grained per counter access at EL0 */
/* Mask for writable bits */
#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
@@ -309,4 +313,6 @@
} \
} while (0)
+#include <asm/arm_pmuv3.h>
+
#endif
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 701974639ff2..f82a28040594 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -89,6 +89,7 @@ static inline void riscv_pmu_legacy_skip_init(void) {};
struct riscv_pmu *riscv_pmu_alloc(void);
#ifdef CONFIG_RISCV_PMU_SBI
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
+int riscv_pmu_get_event_info(u32 type, u64 config, u64 *econfig);
#endif
#endif /* CONFIG_RISCV_PMU */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a5304ae8c654..9870d768db4c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,18 +26,9 @@
# include <asm/local64.h>
#endif
-#define PERF_GUEST_ACTIVE 0x01
-#define PERF_GUEST_USER 0x02
-
-struct perf_guest_info_callbacks {
- unsigned int (*state)(void);
- unsigned long (*get_ip)(void);
- unsigned int (*handle_intel_pt_intr)(void);
-};
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <linux/rhashtable-types.h>
-#include <asm/hw_breakpoint.h>
+# include <linux/rhashtable-types.h>
+# include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
@@ -62,19 +53,20 @@ struct perf_guest_info_callbacks {
#include <linux/security.h>
#include <linux/static_call.h>
#include <linux/lockdep.h>
+
#include <asm/local.h>
struct perf_callchain_entry {
- __u64 nr;
- __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+ u64 nr;
+ u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
- struct perf_callchain_entry *entry;
- u32 max_stack;
- u32 nr;
- short contexts;
- bool contexts_maxed;
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -121,8 +113,8 @@ static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
- __u64 nr;
- __u64 hw_idx;
+ u64 nr;
+ u64 hw_idx;
struct perf_branch_entry entries[];
};
@@ -132,10 +124,10 @@ struct task_struct;
* extra PMU register associated with an event
*/
struct hw_perf_event_extra {
- u64 config; /* register value */
- unsigned int reg; /* register address or index */
- int alloc; /* extra register already allocated */
- int idx; /* index in shared_regs->regs[] */
+ u64 config; /* register value */
+ unsigned int reg; /* register address or index */
+ int alloc; /* extra register already allocated */
+ int idx; /* index in shared_regs->regs[] */
};
/**
@@ -144,8 +136,8 @@ struct hw_perf_event_extra {
* PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
* usage.
*/
-#define PERF_EVENT_FLAG_ARCH 0x000fffff
-#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+#define PERF_EVENT_FLAG_ARCH 0x0fffffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
@@ -157,7 +149,9 @@ struct hw_perf_event {
union {
struct { /* hardware */
u64 config;
+ u64 config1;
u64 last_tag;
+ u64 dyn_constraint;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
@@ -168,6 +162,15 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
+ struct { /* aux / Intel-PT */
+ u64 aux_config;
+ /*
+ * For AUX area events, aux_paused cannot be a state
+ * flag because it can be updated asynchronously to
+ * state.
+ */
+ unsigned int aux_paused;
+ };
struct { /* software */
struct hrtimer hrtimer;
};
@@ -216,9 +219,14 @@ struct hw_perf_event {
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
-#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH 0x04
+
+/* the counter is stopped */
+#define PERF_HES_STOPPED 0x01
+
+/* event->count up-to-date */
+#define PERF_HES_UPTODATE 0x02
+
+#define PERF_HES_ARCH 0x04
int state;
@@ -267,7 +275,7 @@ struct hw_perf_event {
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
-#endif
+#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event;
@@ -276,21 +284,40 @@ struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
-#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
-#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
+
+/* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_ADD 0x1
+
+/* txn to read event group from PMU */
+#define PERF_PMU_TXN_READ 0x2
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
-#define PERF_PMU_CAP_NO_NMI 0x0002
-#define PERF_PMU_CAP_AUX_NO_SG 0x0004
-#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
-#define PERF_PMU_CAP_EXCLUSIVE 0x0010
-#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+
+/**
+ * pmu::scope
+ */
+enum perf_pmu_scope {
+ PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_CORE,
+ PERF_PMU_SCOPE_DIE,
+ PERF_PMU_SCOPE_CLUSTER,
+ PERF_PMU_SCOPE_PKG,
+ PERF_PMU_SCOPE_SYS_WIDE,
+ PERF_PMU_MAX_SCOPE,
+};
struct perf_output_handle;
@@ -302,6 +329,9 @@ struct perf_output_handle;
struct pmu {
struct list_head entry;
+ spinlock_t events_lock;
+ struct list_head events;
+
struct module *module;
struct device *dev;
struct device *parent;
@@ -315,8 +345,12 @@ struct pmu {
*/
int capabilities;
- int __percpu *pmu_disable_count;
- struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
+ /*
+ * PMU scope
+ */
+ unsigned int scope;
+
+ struct perf_cpu_pmu_context * __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -360,9 +394,21 @@ struct pmu {
* Flags for ->add()/->del()/ ->start()/->stop(). There are
* matching hw_perf_event::state flags.
*/
-#define PERF_EF_START 0x01 /* start the counter when adding */
-#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
-#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+
+/* start the counter when adding */
+#define PERF_EF_START 0x01
+
+/* reload the counter when starting */
+#define PERF_EF_RELOAD 0x02
+
+/* update the counter when stopping */
+#define PERF_EF_UPDATE 0x04
+
+/* AUX area event, pause tracing */
+#define PERF_EF_PAUSE 0x08
+
+/* AUX area event, resume tracing */
+#define PERF_EF_RESUME 0x10
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -402,6 +448,18 @@ struct pmu {
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
+ *
+ * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
+ * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
+ * PERF_EF_RESUME.
+ *
+ * ->start() with PERF_EF_RESUME will start as simply as possible but
+ * only if the counter is not otherwise stopped. Will not overlap
+ * another ->start() with PERF_EF_RESUME nor ->stop() with
+ * PERF_EF_PAUSE.
+ *
+ * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
+ * ->stop()/->start() invocations, just not itself.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
@@ -453,7 +511,7 @@ struct pmu {
* context-switches callback
*/
void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
- bool sched_in);
+ struct task_struct *task, bool sched_in);
/*
* Kmem cache of PMU specific data
@@ -461,16 +519,6 @@ struct pmu {
struct kmem_cache *task_ctx_cache;
/*
- * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
- * can be synchronized using this function. See Intel LBR callstack support
- * implementation and Perf core context switch handling callbacks for usage
- * examples.
- */
- void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc);
- /* optional */
-
- /*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (struct perf_event *event, void **pages,
@@ -559,10 +607,10 @@ enum perf_addr_filter_action_t {
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
- struct list_head entry;
- struct path path;
- unsigned long offset;
- unsigned long size;
+ struct list_head entry;
+ struct path path;
+ unsigned long offset;
+ unsigned long size;
enum perf_addr_filter_action_t action;
};
@@ -577,23 +625,62 @@ struct perf_addr_filter {
* bundled together; see perf_event_addr_filters().
*/
struct perf_addr_filters_head {
- struct list_head list;
- raw_spinlock_t lock;
- unsigned int nr_file_filters;
+ struct list_head list;
+ raw_spinlock_t lock;
+ unsigned int nr_file_filters;
};
struct perf_addr_filter_range {
- unsigned long start;
- unsigned long size;
+ unsigned long start;
+ unsigned long size;
};
-/**
- * enum perf_event_state - the states of an event:
+/*
+ * The normal states are:
+ *
+ * ACTIVE --.
+ * ^ |
+ * | |
+ * sched_{in,out}() |
+ * | |
+ * v |
+ * ,---> INACTIVE --+ <-.
+ * | | |
+ * | {dis,en}able()
+ * sched_in() | |
+ * | OFF <--' --+
+ * | |
+ * `---> ERROR ------'
+ *
+ * That is:
+ *
+ * sched_in: INACTIVE -> {ACTIVE,ERROR}
+ * sched_out: ACTIVE -> INACTIVE
+ * disable: {ACTIVE,INACTIVE} -> OFF
+ * enable: {OFF,ERROR} -> INACTIVE
+ *
+ * Where {OFF,ERROR} are disabled states.
+ *
+ * Then we have the {EXIT,REVOKED,DEAD} states which are various shades of
+ * defunct events:
+ *
+ * - EXIT means the task that the event was assigned to died, but child events
+ * still live, and further children can still be created. But the event
+ * itself will never be active again. It can only transition to
+ * {REVOKED,DEAD};
+ *
+ * - REVOKED means the PMU the event was associated with is gone; all
+ * functionality is stopped but the event is still alive. Can only
+ * transition to DEAD;
+ *
+ * - DEAD means the event really is dying: tearing down state and freeing bits.
+ *
*/
enum perf_event_state {
- PERF_EVENT_STATE_DEAD = -4,
- PERF_EVENT_STATE_EXIT = -3,
- PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_DEAD = -5,
+ PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */
+ PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */
+ PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
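/*
 * Illustration (editor's sketch, not part of this patch): per the diagram
 * above, only OFF and ERROR are plain "disabled" states that enable() can
 * leave, while REVOKED/EXIT/DEAD are terminal; a hypothetical predicate:
 *
 *	static bool perf_event_can_enable(struct perf_event *event)
 *	{
 *		return event->state == PERF_EVENT_STATE_OFF ||
 *		       event->state == PERF_EVENT_STATE_ERROR;
 *	}
 */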
@@ -615,10 +702,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
+#define PERF_EV_CAP_READ_SCOPE BIT(3)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -628,21 +718,24 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
-#define PERF_ATTACH_CONTEXT 0x01
-#define PERF_ATTACH_GROUP 0x02
-#define PERF_ATTACH_TASK 0x04
-#define PERF_ATTACH_TASK_DATA 0x08
-#define PERF_ATTACH_ITRACE 0x10
-#define PERF_ATTACH_SCHED_CB 0x20
-#define PERF_ATTACH_CHILD 0x40
+#define PERF_ATTACH_CONTEXT 0x0001
+#define PERF_ATTACH_GROUP 0x0002
+#define PERF_ATTACH_TASK 0x0004
+#define PERF_ATTACH_TASK_DATA 0x0008
+#define PERF_ATTACH_GLOBAL_DATA 0x0010
+#define PERF_ATTACH_SCHED_CB 0x0020
+#define PERF_ATTACH_CHILD 0x0040
+#define PERF_ATTACH_EXCLUSIVE 0x0080
+#define PERF_ATTACH_CALLCHAIN 0x0100
+#define PERF_ATTACH_ITRACE 0x0200
struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
struct pmu_event_list {
- raw_spinlock_t lock;
- struct list_head list;
+ raw_spinlock_t lock;
+ struct list_head list;
};
/*
@@ -652,12 +745,12 @@ struct pmu_event_list {
* disabled is sufficient since it will hold-off the IPIs.
*/
#ifdef CONFIG_PROVE_LOCKING
-#define lockdep_assert_event_ctx(event) \
+# define lockdep_assert_event_ctx(event) \
WARN_ON_ONCE(__lockdep_enabled && \
(this_cpu_read(hardirqs_enabled) && \
lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
#else
-#define lockdep_assert_event_ctx(event)
+# define lockdep_assert_event_ctx(event)
#endif
#define for_each_sibling_event(sibling, event) \
@@ -766,7 +859,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
- atomic_t mmap_count;
+ refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
@@ -781,9 +874,9 @@ struct perf_event {
unsigned int pending_wakeup;
unsigned int pending_kill;
unsigned int pending_disable;
- unsigned int pending_sigtrap;
unsigned long pending_addr; /* SIGTRAP */
struct irq_work pending_irq;
+ struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
@@ -815,9 +908,9 @@ struct perf_event {
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
struct event_filter *filter;
-#ifdef CONFIG_FUNCTION_TRACER
+# ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
-#endif
+# endif
#endif
#ifdef CONFIG_CGROUP_PERF
@@ -828,6 +921,7 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+ struct list_head pmu_list;
/*
* Certain events gets forwarded to another pmu internally by over-
@@ -835,7 +929,7 @@ struct perf_event {
* of it. event->orig_type contains original 'type' requested by
* user.
*/
- __u32 orig_type;
+ u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -875,7 +969,7 @@ struct perf_event_pmu_context {
struct list_head pinned_active;
struct list_head flexible_active;
- /* Used to avoid freeing per-cpu perf_event_pmu_context */
+ /* Used to identify the per-cpu perf_event_pmu_context */
unsigned int embedded : 1;
unsigned int nr_events;
@@ -885,7 +979,6 @@ struct perf_event_pmu_context {
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
- void *task_ctx_data; /* pmu specific data */
/*
* Set when one or more (plausibly active) event can't be scheduled
* due to pmu overcommit or pmu constraints, except tolerant to
@@ -901,8 +994,8 @@ static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
}
struct perf_event_groups {
- struct rb_root tree;
- u64 index;
+ struct rb_root tree;
+ u64 index;
};
@@ -933,7 +1026,6 @@ struct perf_event_context {
int nr_user;
int is_active;
- int nr_task_data;
int nr_stat;
int nr_freq;
int rotate_disable;
@@ -962,19 +1054,52 @@ struct perf_event_context {
struct rcu_head rcu_head;
/*
- * Sum (event->pending_sigtrap + event->pending_work)
+ * The count of events for which using the switch-out fast path
+ * should be avoided.
+ *
+ * Sum (event->pending_work + events with
+ * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
*
* The SIGTRAP is targeted at ctx->task, as such it won't do changing
* that until the signal is delivered.
*/
- local_t nr_pending;
+ local_t nr_no_switch_fast;
};
-/*
- * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
+/**
+ * struct perf_ctx_data - PMU specific data for a task
+ * @rcu_head:  To avoid races when freeing the PMU specific data
+ * @refcount: To track users
+ * @global: To track system-wide users
+ * @ctx_cache: Kmem cache of PMU specific data
+ * @data: PMU specific data
+ *
+ * Currently, the struct is only used in Intel LBR call stack mode to
+ * save/restore the call stack of a task on context switches.
+ *
+ * The rcu_head is used to prevent races when freeing the data.
+ * The data is only allocated when Intel LBR call stack mode is enabled.
+ * The data will be freed when the mode is disabled.
+ * The content of the data will only be accessed on context switch, which
+ * should be protected by rcu_read_lock().
+ *
+ * Because of the alignment requirement of Intel Arch LBR, the Kmem cache
+ * is used to allocate the PMU specific data. The ctx_cache is to track
+ * the Kmem cache.
+ *
+ * Careful: Struct perf_ctx_data is added as a pointer in struct task_struct.
+ * When system-wide Intel LBR call stack mode is enabled, a buffer with
+ * constant size will be allocated for each task.
+ * Also, system memory consumption can further grow when the size of
+ * struct perf_ctx_data enlarges.
*/
-#define PERF_NR_CONTEXTS 4
+struct perf_ctx_data {
+ struct rcu_head rcu_head;
+ refcount_t refcount;
+ int global;
+ struct kmem_cache *ctx_cache;
+ void *data;
+};
struct perf_cpu_pmu_context {
struct perf_event_pmu_context epc;
@@ -985,6 +1110,7 @@ struct perf_cpu_pmu_context {
int active_oncpu;
int exclusive;
+ int pmu_disable_count;
raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
@@ -1018,7 +1144,13 @@ struct perf_output_handle {
struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
- u64 aux_flags;
+ union {
+ u64 flags; /* perf_output*() */
+ u64 aux_flags; /* perf_aux_output*() */
+ struct {
+ u64 skip_read : 1;
+ };
+ };
union {
void *addr;
unsigned long head;
@@ -1080,7 +1212,7 @@ extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
-extern void perf_pmu_unregister(struct pmu *pmu);
+extern int perf_pmu_unregister(struct pmu *pmu);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
@@ -1106,16 +1238,18 @@ extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
+
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback,
- void *context);
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback,
+ void *context);
+
extern void perf_pmu_migrate_context(struct pmu *pmu,
- int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value,
- u64 *enabled, u64 *running);
+ int src_cpu, int dst_cpu);
+extern int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -1235,6 +1369,11 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
{
int size = 1;
+ if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN))
+ return;
+
data->callchain = perf_callchain(event, regs);
size += data->callchain->nr;
@@ -1243,12 +1382,18 @@ static inline void perf_sample_save_callchain(struct perf_sample_data *data,
}
static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
+ struct perf_event *event,
struct perf_raw_record *raw)
{
struct perf_raw_frag *frag = &raw->frag;
u32 sum = 0;
int size;
+ if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
+ return;
+
do {
sum += frag->size;
if (perf_raw_frag_last(frag))
@@ -1265,6 +1410,11 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
data->sample_flags |= PERF_SAMPLE_RAW;
}
+static inline bool has_branch_stack(struct perf_event *event)
+{
+ return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
struct perf_branch_stack *brs,
@@ -1272,8 +1422,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
{
int size = sizeof(u64); /* nr */
+ if (!has_branch_stack(event))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK))
+ return;
+
if (branch_sample_hw_index(event))
size += sizeof(u64);
+
+ brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr);
+
size += brs->nr * sizeof(struct perf_branch_entry);
/*
@@ -1308,14 +1466,14 @@ static inline u32 perf_sample_data_size(struct perf_sample_data *data,
*/
static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
{
- br->mispred = 0;
- br->predicted = 0;
- br->in_tx = 0;
- br->abort = 0;
- br->cycles = 0;
- br->type = 0;
- br->spec = PERF_BR_SPEC_NA;
- br->reserved = 0;
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
@@ -1504,7 +1662,17 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
+struct perf_guest_info_callbacks {
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
+};
+
#ifdef CONFIG_GUEST_PERF_EVENTS
+
extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
@@ -1515,21 +1683,27 @@ static inline unsigned int perf_guest_state(void)
{
return static_call(__perf_guest_state)();
}
+
static inline unsigned long perf_guest_get_ip(void)
{
return static_call(__perf_guest_get_ip)();
}
+
static inline unsigned int perf_guest_handle_intel_pt_intr(void)
{
return static_call(__perf_guest_handle_intel_pt_intr)();
}
+
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
-#else
+
+#else /* !CONFIG_GUEST_PERF_EVENTS: */
+
static inline unsigned int perf_guest_state(void) { return 0; }
static inline unsigned long perf_guest_get_ip(void) { return 0; }
static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
-#endif /* CONFIG_GUEST_PERF_EVENTS */
+
+#endif /* !CONFIG_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
@@ -1545,8 +1719,8 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark);
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
+ u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
@@ -1559,6 +1733,7 @@ static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
@@ -1572,6 +1747,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
@@ -1581,19 +1757,10 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
}
extern int sysctl_perf_event_paranoid;
-extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
-extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-
/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN 0
@@ -1607,30 +1774,26 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
- return -EACCES;
-
- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+extern int perf_allow_kernel(void);
-static inline int perf_allow_cpu(struct perf_event_attr *attr)
+static inline int perf_allow_cpu(void)
{
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
- return security_perf_event_open(attr, PERF_SECURITY_CPU);
+ return security_perf_event_open(PERF_SECURITY_CPU);
}
-static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+static inline int perf_allow_tracepoint(void)
{
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
- return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
+ return security_perf_event_open(PERF_SECURITY_TRACEPOINT);
}
+extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs);
+
extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
@@ -1638,19 +1801,34 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
-#ifndef perf_misc_flags
-# define perf_misc_flags(regs) \
+extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct perf_event *event,
+ struct pt_regs *regs);
+
+#ifndef perf_arch_misc_flags
+# define perf_arch_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
-# define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_arch_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif
-static inline bool has_branch_stack(struct perf_event *event)
+#ifndef perf_arch_guest_misc_flags
+static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
{
- return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+ unsigned long guest_state = perf_guest_state();
+
+ if (!(guest_state & PERF_GUEST_ACTIVE))
+ return 0;
+
+ if (guest_state & PERF_GUEST_USER)
+ return PERF_RECORD_MISC_GUEST_USER;
+ else
+ return PERF_RECORD_MISC_GUEST_KERNEL;
}
+# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)
+#endif
static inline bool needs_branch_stack(struct perf_event *event)
{
@@ -1659,7 +1837,14 @@ static inline bool needs_branch_stack(struct perf_event *event)
static inline bool has_aux(struct perf_event *event)
{
- return event->pmu->setup_aux;
+ return event->pmu && event->pmu->setup_aux;
+}
+
+static inline bool has_aux_action(struct perf_event *event)
+{
+ return event->attr.aux_sample_size ||
+ event->attr.aux_pause ||
+ event->attr.aux_resume;
}
static inline bool is_write_backward(struct perf_event *event)
@@ -1711,7 +1896,7 @@ extern int perf_output_begin_backward(struct perf_output_handle *handle,
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len);
+ const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
@@ -1728,7 +1913,9 @@ extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+
#else /* !CONFIG_PERF_EVENTS: */
+
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
@@ -1806,15 +1993,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
-static inline int perf_event_period(struct perf_event *event, u64 value)
-{
- return -EINVAL;
-}
-static inline u64 perf_event_pause(struct perf_event *event, bool reset)
-{
- return 0;
-}
-#endif
+static inline int
+perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; }
+static inline u64
+perf_event_pause(struct perf_event *event, bool reset) { return 0; }
+static inline int
+perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; }
+
+#endif /* !CONFIG_PERF_EVENTS */
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
@@ -1822,31 +2008,31 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
};
struct perf_pmu_events_ht_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str_ht;
- const char *event_str_noht;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
};
struct perf_pmu_events_hybrid_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
};
struct perf_pmu_format_hybrid_attr {
- struct device_attribute attr;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 pmu_type;
};
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
@@ -1888,11 +2074,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
-int perf_event_init_cpu(unsigned int cpu);
-int perf_event_exit_cpu(unsigned int cpu);
+extern int perf_event_init_cpu(unsigned int cpu);
+extern int perf_event_exit_cpu(unsigned int cpu);
#else
-#define perf_event_init_cpu NULL
-#define perf_event_exit_cpu NULL
+# define perf_event_init_cpu NULL
+# define perf_event_exit_cpu NULL
#endif
extern void arch_perf_update_userpage(struct perf_event *event,
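Dropping the unused attr argument also simplifies the security_perf_event_open() hook, and perf_allow_kernel() moves out of line so only its declaration remains here. A minimal sketch of a caller, assuming a hypothetical PMU init path that gates CPU-wide events behind the new checks:

	/* Hypothetical gatekeeper; pmu_event_init() is illustrative only. */
	static int pmu_event_init(struct perf_event *event)
	{
		if (event->cpu >= 0) {
			int ret = perf_allow_cpu();	/* paranoid > 0 requires perfmon */

			if (ret)
				return ret;	/* -EACCES or an LSM denial */
		}
		return 0;
	}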
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 14bc053c53d8..b90ca0b6c331 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -4,15 +4,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
- u64 val;
-} pfn_t;
#endif
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
deleted file mode 100644
index 2d9148221e9a..000000000000
--- a/include/linux/pfn_t.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry
- * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
- * PFN_DEV - pfn is not covered by system memmap by default
- * PFN_MAP - pfn has a dynamic page mapping established by a device driver
- * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
- * get_user_pages
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
-#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
-#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
-#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
-
-#define PFN_FLAGS_TRACE \
- { PFN_SPECIAL, "SPECIAL" }, \
- { PFN_SG_CHAIN, "SG_CHAIN" }, \
- { PFN_SG_LAST, "SG_LAST" }, \
- { PFN_DEV, "DEV" }, \
- { PFN_MAP, "MAP" }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
- pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
- return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
- return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
- return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
- return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
- return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn))
- return pfn_to_page(pfn_t_to_pfn(pfn));
- return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
- return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
- return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
- return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- const u64 flags = PFN_DEV|PFN_MAP;
-
- return (pfn.val & flags) == flags;
-}
-#else
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- return false;
-}
-pte_t pte_mkdevmap(pte_t pte);
-pmd_t pmd_mkdevmap(pmd_t pmd);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-pud_t pud_mkdevmap(pud_t pud);
-#endif
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
-
-#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
-}
-#else
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return false;
-}
-#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-#endif /* _LINUX_PFN_T_H_ */
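With the type gone, former pfn_t consumers pass a bare unsigned long PFN instead. A minimal before/after sketch, assuming a vmf_insert_mixed()-style fault handler (the handler context is illustrative only):

	/* Before: the PFN was wrapped, carrying PFN_DEV/PFN_MAP in its high bits. */
	return vmf_insert_mixed(vmf->vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV | PFN_MAP));

	/* After: the wrapper and its flag bits are gone; the raw pfn is passed. */
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);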
diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
new file mode 100644
index 000000000000..9174fa59bbc5
--- /dev/null
+++ b/include/linux/pgalloc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGALLOC_H
+#define _LINUX_PGALLOC_H
+
+#include <linux/pgtable.h>
+#include <asm/pgalloc.h>
+
+/*
+ * {pgd,p4d}_populate_kernel() are defined as macros to allow
+ * compile-time optimization based on the configured page table levels.
+ * Without this, linking may fail because callers (e.g., KASAN) may rely
+ * on calls to these functions being optimized away when passing symbols
+ * that exist only for certain page table levels.
+ */
+#define pgd_populate_kernel(addr, pgd, p4d) \
+ do { \
+ pgd_populate(&init_mm, pgd, p4d); \
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED) \
+ arch_sync_kernel_mappings(addr, addr); \
+ } while (0)
+
+#define p4d_populate_kernel(addr, p4d, pud) \
+ do { \
+ p4d_populate(&init_mm, p4d, pud); \
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED) \
+ arch_sync_kernel_mappings(addr, addr); \
+ } while (0)
+
+#endif /* _LINUX_PGALLOC_H */
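A minimal sketch of a caller, assuming a KASAN-style early-init helper that wires a fresh table page into init_mm (the function name and memblock allocation strategy are illustrative, not part of this patch):

	#include <linux/pgalloc.h>
	#include <linux/memblock.h>

	static void __init populate_kernel_pgd(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);

		if (pgd_none(*pgd)) {
			p4d_t *p4d = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

			/*
			 * Populates the entry and, when the architecture sets
			 * PGTBL_PGD_MODIFIED in ARCH_PAGE_TABLE_SYNC_MASK,
			 * syncs the change into the other page tables.
			 */
			pgd_populate_kernel(addr, pgd, p4d);
		}
	}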
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 86ba5d33e43b..38a82d65e58e 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -12,120 +12,202 @@
#include <linux/page_ext.h>
extern struct page_ext_operations page_alloc_tagging_ops;
+extern unsigned long alloc_tag_ref_mask;
+extern int alloc_tag_ref_offs;
+extern struct alloc_tag_kernel_section kernel_tags;
-static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext)
+DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed);
+
+typedef u16 pgalloc_tag_idx;
+
+union pgtag_ref_handle {
+ union codetag_ref *ref; /* reference in page extension */
+ struct page *page; /* reference in page flags */
+};
+
+/* Reserved indexes */
+#define CODETAG_ID_NULL 0
+#define CODETAG_ID_EMPTY 1
+#define CODETAG_ID_FIRST 2
+
+#ifdef CONFIG_MODULES
+
+extern struct alloc_tag_module_section module_tags;
+
+static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
{
- return (void *)page_ext + page_alloc_tagging_ops.offset;
+ return &module_tags.first_tag[idx - kernel_tags.count];
}
-static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref)
+static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
- return (void *)ref - page_alloc_tagging_ops.offset;
+ return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
}
-/* Should be called only if mem_alloc_profiling_enabled() */
-static inline union codetag_ref *get_page_tag_ref(struct page *page)
-{
- if (page) {
- struct page_ext *page_ext = page_ext_get(page);
+#else /* CONFIG_MODULES */
- if (page_ext)
- return codetag_ref_from_page_ext(page_ext);
- }
+static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
+{
+ pr_warn("invalid page tag reference %lu\n", (unsigned long)idx);
return NULL;
}
-static inline void put_page_tag_ref(union codetag_ref *ref)
+static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
{
- page_ext_put(page_ext_from_codetag_ref(ref));
+ pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
+ return CODETAG_ID_NULL;
}
-static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
- unsigned int nr)
-{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
+#endif /* CONFIG_MODULES */
- if (ref) {
- alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr);
- put_page_tag_ref(ref);
- }
+static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref)
+{
+ switch (idx) {
+ case (CODETAG_ID_NULL):
+ ref->ct = NULL;
+ break;
+ case (CODETAG_ID_EMPTY):
+ set_codetag_empty(ref);
+ break;
+ default:
+ idx -= CODETAG_ID_FIRST;
+ ref->ct = idx < kernel_tags.count ?
+ &kernel_tags.first_tag[idx].ct :
+ &module_idx_to_tag(idx)->ct;
+ break;
}
}
-static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
+static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref)
{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
+ struct alloc_tag *tag;
- if (ref) {
- alloc_tag_sub(ref, PAGE_SIZE * nr);
- put_page_tag_ref(ref);
- }
- }
+ if (!ref->ct)
+ return CODETAG_ID_NULL;
+
+ if (is_codetag_empty(ref))
+ return CODETAG_ID_EMPTY;
+
+ tag = ct_to_alloc_tag(ref->ct);
+ if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count)
+ return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);
+
+ return module_tag_to_idx(tag);
}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr)
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
+ union pgtag_ref_handle *handle)
{
- int i;
- struct page_ext *page_ext;
- union codetag_ref *ref;
- struct alloc_tag *tag;
+ if (!page)
+ return false;
+
+ if (static_key_enabled(&mem_profiling_compressed)) {
+ pgalloc_tag_idx idx;
+
+ idx = (page->flags.f >> alloc_tag_ref_offs) &
+ alloc_tag_ref_mask;
+ idx_to_ref(idx, ref);
+ handle->page = page;
+ } else {
+ struct page_ext *page_ext;
+ union codetag_ref *tmp;
+
+ page_ext = page_ext_get(page);
+ if (!page_ext)
+ return false;
+
+ tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
+ ref->ct = tmp->ct;
+ handle->ref = tmp;
+ }
- if (!mem_alloc_profiling_enabled())
- return;
+ return true;
+}
- page_ext = page_ext_get(page);
- if (unlikely(!page_ext))
+static inline void put_page_tag_ref(union pgtag_ref_handle handle)
+{
+ if (WARN_ON(!handle.ref))
return;
- ref = codetag_ref_from_page_ext(page_ext);
- if (!ref->ct)
- goto out;
+ if (!static_key_enabled(&mem_profiling_compressed))
+ page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
+}
- tag = ct_to_alloc_tag(ref->ct);
- page_ext = page_ext_next(page_ext);
- for (i = 1; i < nr; i++) {
- /* Set new reference to point to the original tag */
- alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag);
- page_ext = page_ext_next(page_ext);
+static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref)
+{
+ if (static_key_enabled(&mem_profiling_compressed)) {
+ struct page *page = handle.page;
+ unsigned long old_flags;
+ unsigned long flags;
+ unsigned long idx;
+
+ if (WARN_ON(!page || !ref))
+ return;
+
+ idx = (unsigned long)ref_to_idx(ref);
+ idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
+ do {
+ old_flags = READ_ONCE(page->flags.f);
+ flags = old_flags;
+ flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
+ flags |= idx;
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
+ } else {
+ if (WARN_ON(!handle.ref || !ref))
+ return;
+
+ handle.ref->ct = ref->ct;
}
-out:
- page_ext_put(page_ext);
}
-static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
-{
- struct alloc_tag *tag = NULL;
+/* Should be called only if mem_alloc_profiling_enabled() */
+void __clear_page_tag_ref(struct page *page);
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
+static inline void clear_page_tag_ref(struct page *page)
+{
+ if (mem_alloc_profiling_enabled())
+ __clear_page_tag_ref(page);
+}
- alloc_tag_sub_check(ref);
- if (ref && ref->ct)
- tag = ct_to_alloc_tag(ref->ct);
- put_page_tag_ref(ref);
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
+{
+ struct alloc_tag *tag = NULL;
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ alloc_tag_sub_check(&ref);
+ if (ref.ct)
+ tag = ct_to_alloc_tag(ref.ct);
+ put_page_tag_ref(handle);
}
return tag;
}
-static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
{
- if (mem_alloc_profiling_enabled() && tag)
- this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
+ if (mem_alloc_profiling_enabled())
+ return __pgalloc_tag_get(page);
+ return NULL;
}
+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);
+
+void __init alloc_tag_sec_init(void);
+
#else /* CONFIG_MEM_ALLOC_PROFILING */
-static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; }
-static inline void put_page_tag_ref(union codetag_ref *ref) {}
-static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
- unsigned int nr) {}
-static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {}
+static inline void clear_page_tag_ref(struct page *page) {}
+static inline void alloc_tag_sec_init(void) {}
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
-static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
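Consumers are insulated from the compressed-vs-page_ext distinction: pgalloc_tag_get() returns the tag (or NULL) either way. A minimal sketch of a debug consumer, assuming a valid page pointer (the helper name is hypothetical):

	static void report_page_alloc_site(struct page *page)
	{
		struct alloc_tag *tag = pgalloc_tag_get(page);

		if (tag)
			pr_info("page allocated at %s:%u\n",
				tag->ct.filename, tag->ct.lineno);
	}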
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 18019f037bae..652f287c1ef6 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -90,6 +90,27 @@ static inline unsigned long pud_index(unsigned long address)
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif
+#ifndef kernel_pte_init
+static inline void kernel_pte_init(void *addr)
+{
+}
+#define kernel_pte_init kernel_pte_init
+#endif
+
+#ifndef pmd_init
+static inline void pmd_init(void *addr)
+{
+}
+#define pmd_init pmd_init
+#endif
+
+#ifndef pud_init
+static inline void pud_init(void *addr)
+{
+}
+#define pud_init pud_init
+#endif
+
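These stubs exist so that architectures whose invalid table entries are not all-zero can pre-initialize freshly allocated tables. A generic sketch of such an override (the invalid-entry constant and placement are illustrative, not from this patch):

	/* Hypothetical arch override in <asm/pgtable.h>. */
	static inline void kernel_pte_init(void *addr)
	{
		pte_t *pte = addr;
		int i;

		/* Fill the new table with the arch's non-zero invalid entry. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			pte[i] = __pte(ARCH_INVALID_PTE_VAL);
	}
	#define kernel_pte_init kernel_pte_init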
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
@@ -201,15 +222,19 @@ static inline int pmd_dirty(pmd_t pmd)
* hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
* a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
+ * up to date.
+ *
+ * In the general case, no lock is guaranteed to be held between entry and exit
+ * of the lazy mode. So the implementation must assume preemption may be enabled
+ * and cpu migration is possible; it must take steps to be robust against this.
+ * (In practice, for user PTE updates, the appropriate page table lock(s) are
+ * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
+ * and the mode cannot be used in interrupt context.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
+static inline void arch_enter_lazy_mmu_mode(void) {}
+static inline void arch_leave_lazy_mmu_mode(void) {}
+static inline void arch_flush_lazy_mmu_mode(void) {}
#endif
#ifndef pte_batch_hint
@@ -266,7 +291,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
{
page_table_check_ptes_set(mm, ptep, pte, nr);
- arch_enter_lazy_mmu_mode();
for (;;) {
set_pte(ptep, pte);
if (--nr == 0)
@@ -274,7 +298,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
ptep++;
pte = pte_next_pfn(pte);
}
- arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
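With the enter/leave pair gone from set_ptes(), batching the underlying set_pte() calls becomes the caller's responsibility. A minimal sketch, assuming a caller that already holds the page table lock for a folio-sized batch:

	static void map_folio_batch(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr)
	{
		arch_enter_lazy_mmu_mode();	/* bracket the span once, not per call */
		set_ptes(mm, addr, ptep, pte, nr);
		arch_leave_lazy_mmu_mode();
	}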
@@ -433,6 +456,17 @@ static inline bool arch_has_hw_pte_young(void)
}
#endif
+#ifndef exec_folio_order
+/*
+ * Returns preferred minimum folio order for executable file-backed memory. Must
+ * be in range [0, PMD_ORDER). Defaults to order-0.
+ */
+static inline unsigned int exec_folio_order(void)
+{
+ return 0;
+}
+#endif
+
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
pte_t pte)
@@ -447,6 +481,12 @@ static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
}
#endif
+#ifndef arch_check_zapped_pud
+static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -506,7 +546,14 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- ptep_get_and_clear(mm, addr, ptep);
+ pte_t pte = ptep_get(ptep);
+
+ pte_clear(mm, addr, ptep);
+ /*
+ * No need for ptep_get_and_clear(): page table check doesn't care about
+ * any bits that could have been set by HW concurrently.
+ */
+ page_table_check_pte_clear(mm, pte);
}
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
@@ -689,6 +736,29 @@ static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
}
#endif
+/**
+ * get_and_clear_ptes - Clear present PTEs that map consecutive pages of
+ * the same folio, collecting dirty/accessed bits.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ *
+ * Use this instead of get_and_clear_full_ptes() if it is known that we don't
+ * need to clear the full mm, which is mostly the case.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
+}
+
#ifndef clear_full_ptes
/**
* clear_full_ptes - Clear present PTEs that map consecutive pages of the same
@@ -721,6 +791,28 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
}
#endif
+/**
+ * clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ *
+ * Use this instead of clear_full_ptes() if it is known that we don't need to
+ * clear the full mm, which is mostly the case.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ clear_full_ptes(mm, addr, ptep, nr, 0);
+}
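A minimal sketch of the intended call site, assuming an unmap path that has already batched nr PTEs of one folio under the PTL (the folio handling is illustrative):

	static void zap_folio_batch(struct mm_struct *mm, struct folio *folio,
				    unsigned long addr, pte_t *ptep, unsigned int nr)
	{
		/* The returned pte aggregates dirty/accessed bits across the batch. */
		pte_t pte = get_and_clear_ptes(mm, addr, ptep, nr);

		if (pte_dirty(pte))
			folio_mark_dirty(folio);
		if (pte_young(pte))
			folio_mark_accessed(folio);
	}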
+
/*
* If two threads concurrently fault at the same page, the thread that
* won the race updates the PTE and its local TLB/Cache. The other thread
@@ -729,13 +821,18 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
 * fault. This function updates the TLB only; it does nothing with the cache
 * or anything else, which is what distinguishes it from update_mmu_cache().
*/
-#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+#ifndef update_mmu_tlb_range
+static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr)
+{
+}
+#endif
+
static inline void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
+ update_mmu_tlb_range(vma, address, ptep, 1);
}
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#endif
/*
* Some architectures may be able to avoid expensive synchronization
@@ -1045,45 +1142,16 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
}
#endif
-/*
- * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
- * TLB flush will be required as a result of the "set". For example, use
- * in scenarios where it is known ahead of time that the routine is
- * setting non-present entries, or re-setting an existing entry to the
- * same value. Otherwise, use the typical "set" helpers and flush the
- * TLB.
- */
-#define set_pte_safe(ptep, pte) \
-({ \
- WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
- set_pte(ptep, pte); \
-})
-
-#define set_pmd_safe(pmdp, pmd) \
-({ \
- WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
- set_pmd(pmdp, pmd); \
-})
-
-#define set_pud_safe(pudp, pud) \
-({ \
- WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
- set_pud(pudp, pud); \
-})
-
-#define set_p4d_safe(p4dp, p4d) \
-({ \
- WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
- set_p4d(p4dp, p4d); \
-})
-
-#define set_pgd_safe(pgdp, pgd) \
-({ \
- WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
- set_pgd(pgdp, pgd); \
-})
-
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+static inline void arch_do_swap_page_nr(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte,
+ int nr)
+{
+}
+#else
/*
* Some architectures support metadata associated with a page. When a
* page is being swapped out, this metadata must be saved so it can be
@@ -1092,12 +1160,17 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
* page as metadata for the page. arch_do_swap_page() can restore this
* metadata when a page is swapped back in.
*/
-static inline void arch_do_swap_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t pte, pte_t oldpte)
-{
-
+static inline void arch_do_swap_page_nr(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte,
+ int nr)
+{
+ for (int i = 0; i < nr; i++) {
+ arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
+ pte_advance_pfn(pte, i),
+ pte_advance_pfn(oldpte, i));
+ }
}
#endif
@@ -1147,10 +1220,6 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
}
#endif
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, old_addr, new_addr) (pte)
#endif
@@ -1163,6 +1232,10 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif
+#ifndef flush_tlb_fix_spurious_fault_pmd
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0)
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
@@ -1307,7 +1380,9 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
+ * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
+ * additionally have young and/or dirty bits set where previously they were not,
+ * so the updated pte may have these additional changes.
*/
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
@@ -1316,6 +1391,102 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
+/**
+ * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
+ * over a batch of ptes, which protects against asynchronous hardware
+ * modifications to the ptes. The intention is not to prevent the hardware from
+ * making pte updates, but to prevent any updates it may make from being lost.
+ * Please see the comment above ptep_modify_prot_start() for full description.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
+ * in the batch.
+ *
+ * Note that PTE bits in the PTE batch besides the PFN can differ.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. All other PTE bits must be identical for
+ * all PTEs in the batch except for young and dirty bits. The PTEs are all in
+ * the same PMD.
+ */
+#ifndef modify_prot_start_ptes
+static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_modify_prot_start(vma, addr, ptep);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
+ return pte;
+}
+#endif
+
+/**
+ * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
+ * hardware-controlled bits in the PTE unmodified.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @old_pte: Old page table entry (for the first entry) which is now cleared.
+ * @pte: New page table entry to be set.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_commit().
+ *
+ * Context: The caller holds the page table lock. The PTEs are all in the same
+ * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
+ * ptep_modify_prot_start() may additionally have young and/or dirty bits set
+ * where previously they were not, so the updated ptes may have these
+ * additional changes.
+ */
+#ifndef modify_prot_commit_ptes
+static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
+ ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
+
+ /* Advance PFN only, set same prot */
+ old_pte = pte_next_pfn(old_pte);
+ pte = pte_next_pfn(pte);
+ }
+}
+#endif
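A minimal sketch of the full transaction, assuming an mprotect-style path that has already sized the batch nr for one folio:

	static void wrprotect_folio_batch(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr)
	{
		/* start collects HW A/D bits; commit writes the result back */
		pte_t oldpte = modify_prot_start_ptes(vma, addr, ptep, nr);
		pte_t newpte = pte_wrprotect(oldpte);	/* example modification */

		modify_prot_commit_ptes(vma, addr, ptep, oldpte, newpte, nr);
	}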
+
+/*
+ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+ * and let generic vmalloc, ioremap and page table update code know when
+ * arch_sync_kernel_mappings() needs to be called.
+ */
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+#define ARCH_PAGE_TABLE_SYNC_MASK 0
+#endif
+
+/*
+ * There is no default implementation for arch_sync_kernel_mappings(). The
+ * compiler is relied upon to optimize the calls away when
+ * ARCH_PAGE_TABLE_SYNC_MASK is 0.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
+
#endif /* CONFIG_MMU */
/*
@@ -1386,6 +1557,18 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define arch_start_context_switch(prev) do {} while (0)
#endif
+/*
+ * Some platforms can customize the PTE soft-dirty bit making it unavailable
+ * even if the architecture provides the resource.
+ * Adding this API allows architectures to add their own checks for the
+ * devices on which the kernel is running.
+ * Note: When overriding it, please make sure the CONFIG_MEM_SOFT_DIRTY
+ * is part of this macro.
+ */
+#ifndef pgtable_supports_soft_dirty
+#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
+#endif
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
@@ -1472,64 +1655,92 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
* vmf_insert_pfn.
*/
-/*
- * track_pfn_remap is called when a _new_ pfn mapping is being established
- * by remap_pfn_range() for physical range indicated by pfn and size.
- */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size)
+static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
+ pgprot_t *prot)
{
return 0;
}
-/*
- * track_pfn_insert is called when a _new_ single pfn is established
- * by vmf_insert_pfn().
- */
-static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+static inline int pfnmap_track(unsigned long pfn, unsigned long size,
+ pgprot_t *prot)
{
+ return 0;
}
-/*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
+static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
- return 0;
}
+#else
+/**
+ * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ * @prot: the pgprot to modify
+ *
+ * Lookup the cachemode for the pfn range starting at @pfn with the size
+ * @size and store it in @prot, leaving other data in @prot unchanged.
+ *
+ * This allows for a hardware implementation to have fine-grained control of
+ * memory cache behavior at page level granularity. Without a hardware
+ * implementation, this function does nothing.
+ *
+ * Currently there is only one implementation for this - x86 Page Attribute
+ * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
+ *
+ * This function can fail if the pfn range spans pfns that require differing
+ * cachemodes. If the pfn range was previously verified to have a single
+ * cachemode, it is sufficient to query only a single pfn. The assumption is
+ * that this is the case for drivers using the vmf_insert_pfn*() interface.
+ *
+ * Returns 0 on success and -EINVAL on error.
+ */
+int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
+ pgprot_t *prot);
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
+/**
+ * pfnmap_track - track a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ * @prot: the pgprot to track
+ *
+ * Request that the pfn range be 'tracked' by a hardware implementation and
+ * set up the cachemode in @prot, similar to pfnmap_setup_cachemode().
+ *
+ * This allows for fine-grained control of memory cache behavior at page
+ * level granularity. Tracking memory this way is persisted across VMA splits
+ * (VMA merging does not apply for VM_PFNMAP).
+ *
+ * Currently, there is only one implementation for this - x86 Page Attribute
+ * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
+ *
+ * Returns 0 on success and -EINVAL on error.
*/
-static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size,
- bool mm_wr_locked)
-{
-}
+int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
-/*
- * untrack_pfn_clear is called while mremapping a pfnmap for a new region
- * or fails to copy pgtable during duplicate vm area.
+/**
+ * pfnmap_untrack - untrack a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ *
+ * Untrack a pfn range previously tracked through pfnmap_track().
+ */
+void pfnmap_untrack(unsigned long pfn, unsigned long size);
+#endif
+
+/**
+ * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
+ * @pfn: the pfn
+ * @prot: the pgprot to modify
+ *
+ * Lookup the cachemode for @pfn and store it in @prot, leaving other
+ * data in @prot unchanged.
+ *
+ * See pfnmap_setup_cachemode() for details.
*/
-static inline void untrack_pfn_clear(struct vm_area_struct *vma)
+static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
{
+ pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
-#else
-extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size);
-extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
-extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size, bool mm_wr_locked);
-extern void untrack_pfn_clear(struct vm_area_struct *vma);
-#endif
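remap_pfn_range() performs the tracking internally, so few drivers call these directly. A minimal sketch of explicit use, assuming a driver that manages its own mapping via the _notrack variant (the function name is illustrative):

	static int illustrative_mmap(struct vm_area_struct *vma,
				     unsigned long pfn, unsigned long size)
	{
		pgprot_t prot = vma->vm_page_prot;
		int err;

		err = pfnmap_track(pfn, size, &prot);	/* may adjust the cachemode */
		if (err)
			return err;

		err = remap_pfn_range_notrack(vma, vma->vm_start, pfn, size, prot);
		if (err)
			pfnmap_untrack(pfn, size);
		return err;
	}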
#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
@@ -1591,21 +1802,6 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
@@ -1620,7 +1816,7 @@ static inline int pud_trans_unstable(pud_t *pud)
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
if (unlikely(pud_bad(pudval))) {
pud_clear_bad(pud);
@@ -1774,10 +1970,11 @@ static inline bool arch_has_pfn_modify_check(void)
/*
* Page Table Modification bits for pgtbl_mod_mask.
*
- * These are used by the p?d_alloc_track*() set of functions an in the generic
- * vmalloc/ioremap code to track at which page-table levels entries have been
- * modified. Based on that the code can better decide when vmalloc and ioremap
- * mapping changes need to be synchronized to other page-tables in the system.
+ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
+ * functions in the generic vmalloc, ioremap and page table update code
+ * to track at which page-table levels entries have been modified.
+ * Based on that the code can better decide when page table changes need
+ * to be synchronized to other page-tables in the system.
*/
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
@@ -1794,6 +1991,32 @@ static inline bool arch_has_pfn_modify_check(void)
/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
+enum pgtable_level {
+ PGTABLE_LEVEL_PTE = 0,
+ PGTABLE_LEVEL_PMD,
+ PGTABLE_LEVEL_PUD,
+ PGTABLE_LEVEL_P4D,
+ PGTABLE_LEVEL_PGD,
+};
+
+static inline const char *pgtable_level_to_str(enum pgtable_level level)
+{
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ return "pte";
+ case PGTABLE_LEVEL_PMD:
+ return "pmd";
+ case PGTABLE_LEVEL_PUD:
+ return "pud";
+ case PGTABLE_LEVEL_P4D:
+ return "p4d";
+ case PGTABLE_LEVEL_PGD:
+ return "pgd";
+ default:
+ return "unknown";
+ }
+}
+
#endif /* !__ASSEMBLY__ */
#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
@@ -1860,8 +2083,8 @@ typedef unsigned int pgtbl_mod_mask;
* - It should contain a huge PFN, which points to a huge page larger than
* PAGE_SIZE of the platform. The PFN format isn't important here.
*
- * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
- * pXd_devmap(), or hugetlb mappings).
+ * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
+ * or hugetlb mappings).
*/
#ifndef pgd_leaf
#define pgd_leaf(x) false
@@ -1888,9 +2111,12 @@ typedef unsigned int pgtbl_mod_mask;
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
+#ifndef __pte_leaf_size
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif
+#define __pte_leaf_size(x,y) pte_leaf_size(y)
+#endif
/*
* We always define pmd_pfn for all archs as it's used in lots of generic
@@ -1928,6 +2154,18 @@ typedef unsigned int pgtbl_mod_mask;
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
+#ifndef pte_pgprot
+#define pte_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pmd_pgprot
+#define pmd_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pud_pgprot
+#define pud_pgprot(x) ((pgprot_t) {0})
+#endif
+
/* description of effects of mapping type and prot in current implementation.
* this is due to the limited x86 page protection hardware. The expected
* behavior is in parens:
@@ -1949,7 +2187,7 @@ typedef unsigned int pgtbl_mod_mask;
* x: (yes) yes
*/
#define DECLARE_VM_GET_PAGE_PROT \
-pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
{ \
return protection_map[vm_flags & \
(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
diff --git a/include/linux/phy.h b/include/linux/phy.h
index e6e83304558e..fbbe028cc4b7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -32,28 +32,12 @@
#include <linux/atomic.h>
#include <net/eee.h>
-#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
- SUPPORTED_TP | \
- SUPPORTED_MII)
-
-#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \
- SUPPORTED_10baseT_Full)
-
-#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \
- SUPPORTED_100baseT_Full)
-
-#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
- SUPPORTED_1000baseT_Full)
-
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
@@ -62,21 +46,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
-#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
-#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
-#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
-extern const int phy_fibre_port_array[1];
-extern const int phy_all_ports_features_array[7];
-extern const int phy_10_100_features_array[4];
-extern const int phy_basic_t1_features_array[3];
-extern const int phy_basic_t1s_p2mp_features_array[2];
-extern const int phy_gbit_features_array[2];
-extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -107,7 +81,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
* @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
* @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
- * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
* @PHY_INTERFACE_MODE_RTBI: Reduced TBI
* @PHY_INTERFACE_MODE_SMII: Serial MII
* @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
@@ -128,6 +102,11 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
* @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
* @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+ * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_MIILITE: MII-Lite - MII without RXER TXER CRS COL
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -168,6 +147,11 @@ typedef enum {
PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_QUSGMII,
PHY_INTERFACE_MODE_1000BASEKX,
+ PHY_INTERFACE_MODE_10G_QXGMII,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
+ PHY_INTERFACE_MODE_100GBASEP,
+ PHY_INTERFACE_MODE_MIILITE,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -185,6 +169,16 @@ static inline bool phy_interface_empty(const unsigned long *intf)
return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
}
+static inline void phy_interface_copy(unsigned long *d, const unsigned long *s)
+{
+ bitmap_copy(d, s, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline unsigned int phy_interface_weight(const unsigned long *intf)
+{
+ return bitmap_weight(intf, PHY_INTERFACE_MODE_MAX);
+}
+
static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
const unsigned long *b)
{
@@ -205,13 +199,6 @@ static inline void phy_interface_set_rgmii(unsigned long *intf)
__set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
}
-/*
- * phy_supported_speeds - return all speeds currently supported by a PHY device
- */
-unsigned int phy_supported_speeds(struct phy_device *phy,
- unsigned int *speeds,
- unsigned int size);
-
/**
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
* @interface: enum phy_interface_t value
@@ -289,18 +276,51 @@ static inline const char *phy_modes(phy_interface_t interface)
return "100base-x";
case PHY_INTERFACE_MODE_QUSGMII:
return "qusgmii";
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return "10g-qxgmii";
+ case PHY_INTERFACE_MODE_50GBASER:
+ return "50gbase-r";
+ case PHY_INTERFACE_MODE_LAUI:
+ return "laui";
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return "100gbase-p";
+ case PHY_INTERFACE_MODE_MIILITE:
+ return "mii-lite";
default:
return "unknown";
}
}
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_FORCE_TIMEOUT 10
+/**
+ * rgmii_clock - map link speed to the clock rate
+ * @speed: link speed value
+ *
+ * Description: maps RGMII supported link speeds into the clock rates.
+ * This can also be used for MII, GMII, and RMII interface modes as the
+ * clock rates are identical, but the caller must be aware that errors
+ * for unsupported clock rates will not be signalled.
+ *
+ * Returns: clock rate or negative errno
+ */
+static inline long rgmii_clock(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return 2500000;
+ case SPEED_100:
+ return 25000000;
+ case SPEED_1000:
+ return 125000000;
+ default:
+ return -EINVAL;
+ }
+}
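A typical consumer is a MAC driver retuning its transmit clock when the link comes up; a minimal sketch (the clk handle and driver context are assumed):

	static void example_link_up(struct phy_device *phydev, struct clk *tx_clk)
	{
		long rate = rgmii_clock(phydev->speed);

		/* A negative value means no defined clock rate for this speed. */
		if (rate > 0)
			clk_set_rate(tx_clk, rate);
	}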
#define PHY_MAX_ADDR 32
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
+#define PHY_ID_SIZE (MII_BUS_ID_SIZE + 3)
#define MII_BUS_ID_SIZE 61
@@ -329,41 +349,6 @@ struct mdio_bus_stats {
};
/**
- * struct phy_package_shared - Shared information in PHY packages
- * @base_addr: Base PHY address of PHY package used to combine PHYs
- * in one package and for offset calculation of phy_package_read/write
- * @np: Pointer to the Device Node if PHY package defined in DT
- * @refcnt: Number of PHYs connected to this shared data
- * @flags: Initialization of PHY package
- * @priv_size: Size of the shared private data @priv
- * @priv: Driver private data shared across a PHY package
- *
- * Represents a shared structure between different phydev's in the same
- * package, for example a quad PHY. See phy_package_join() and
- * phy_package_leave().
- */
-struct phy_package_shared {
- u8 base_addr;
- /* With PHY package defined in DT this points to the PHY package node */
- struct device_node *np;
- refcount_t refcnt;
- unsigned long flags;
- size_t priv_size;
-
- /* private data pointer */
- /* note that this pointer is shared between different phydevs and
- * the user has to take care of appropriate locking. It is allocated
- * and freed automatically by phy_package_join() and
- * phy_package_leave().
- */
- void *priv;
-};
-
-/* used as bit number in atomic bitops */
-#define PHY_SHARED_F_INIT_DONE 0
-#define PHY_SHARED_F_PROBE_DONE 1
-
-/**
* struct mii_bus - Represents an MDIO bus
*
* @owner: Who owns this device
@@ -438,8 +423,10 @@ struct mii_bus {
/** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
+#endif
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -532,7 +519,7 @@ enum phy_state {
* struct phy_c45_device_ids - 802.3-c45 Device Identifiers
* @devices_in_package: IEEE 802.3 devices in package register value.
* @mmds_present: bit vector of MMDs present.
- * @device_ids: The device identifer for each present device.
+ * @device_ids: The device identifier for each present device.
*/
struct phy_c45_device_ids {
u32 devices_in_package;
@@ -544,12 +531,39 @@ struct macsec_context;
struct macsec_ops;
/**
+ * struct phy_oatc14_sqi_capability - SQI capability information for OATC14
+ * 10Base-T1S PHY
+ * @updated: Indicates whether the SQI capability fields have been updated.
+ * @sqi_max: Maximum supported Signal Quality Indicator (SQI) level reported by
+ * the PHY.
+ * @sqiplus_bits: Bits for SQI+ levels supported by the PHY.
+ * 0 - SQI+ is not supported
+ * 3 - SQI+ is supported, using 3 bits (8 levels)
+ * 4 - SQI+ is supported, using 4 bits (16 levels)
+ * 5 - SQI+ is supported, using 5 bits (32 levels)
+ * 6 - SQI+ is supported, using 6 bits (64 levels)
+ * 7 - SQI+ is supported, using 7 bits (128 levels)
+ * 8 - SQI+ is supported, using 8 bits (256 levels)
+ *
+ * This structure is used by the OATC14 10Base-T1S PHY driver to store the SQI
+ * and SQI+ capability information retrieved from the PHY.
+ */
+struct phy_oatc14_sqi_capability {
+ bool updated;
+ int sqi_max;
+ u8 sqiplus_bits;
+};
+
+/**
* struct phy_device - An instance of a PHY
*
* @mdio: MDIO bus this PHY is on
* @drv: Pointer to the driver for this PHY instance
* @devlink: Create a link between phy dev and mac dev, if the external phy
* used by current mac interface is managed by another mac interface.
+ * @phyindex: Unique id across the phy's parent tree of phys to address the PHY
+ * from userspace, similar to ifindex. A zero index means the PHY
+ * wasn't assigned an id yet.
* @phy_id: UID for this device found during discovery
* @c45_ids: 802.3-c45 Device Identifiers if is_c45.
* @is_c45: Set to true if this PHY uses clause 45 addressing.
@@ -566,6 +580,7 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if the MAC driver takes care of suspending/resuming PHY
 * @wol_enabled: Set to true if the PHY or the attached MAC has Wake-on-LAN
* enabled.
+ * @is_genphy_driven: PHY is driven by one of the generic PHY drivers
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
*
@@ -594,12 +609,12 @@ struct macsec_ops;
* @adv_old: Saved advertised while power saving for WoL
* @supported_eee: supported PHY EEE linkmodes
* @advertising_eee: Currently advertised EEE linkmodes
- * @eee_enabled: Flag indicating whether the EEE feature is enabled
* @enable_tx_lpi: When True, MAC should transmit LPI to PHY
+ * @eee_active: phylib private state, indicating that EEE has been negotiated
* @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
* @host_interfaces: PHY interface modes supported by host
- * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @eee_disabled_modes: Energy efficient ethernet modes not to be advertised
* @autoneg: Flag autoneg being used
* @rate_matching: Current rate matching mode
* @link: Current link state
@@ -612,6 +627,8 @@ struct macsec_ops;
* handling shall be postponed until PHY has resumed
* @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
* requiring a rerun of the interrupt handler after resume
+ * @default_timestamp: Flag indicating whether we are using the phy
+ * timestamp as the default one
* @interface: enum phy_interface_t value
 * @possible_interfaces: bitmap of interface modes that the attached PHY
* will switch between depending on media speed.
@@ -633,6 +650,7 @@ struct macsec_ops;
* @link_down_events: Number of times link was lost
* @shared: Pointer to private data shared by phys in one package
* @priv: Pointer to driver private data
+ * @oatc14_sqi_capability: SQI capability information for OATC14 10Base-T1S PHY
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -650,6 +668,7 @@ struct phy_device {
struct device_link *devlink;
+ u32 phyindex;
u32 phy_id;
struct phy_c45_device_ids c45_ids;
@@ -666,17 +685,22 @@ struct phy_device {
unsigned is_on_sfp_module:1;
unsigned mac_managed_pm:1;
unsigned wol_enabled:1;
+ unsigned is_genphy_driven:1;
unsigned autoneg:1;
/* The most recently read link state */
unsigned link:1;
unsigned autoneg_complete:1;
+ bool pause:1;
+ bool asym_pause:1;
/* Interrupts are enabled */
unsigned interrupts:1;
unsigned irq_suspended:1;
unsigned irq_rerun:1;
+ unsigned default_timestamp:1;
+
int rate_matching;
enum phy_state state;
@@ -693,8 +717,6 @@ struct phy_device {
int speed;
int duplex;
int port;
- int pause;
- int asym_pause;
u8 master_slave_get;
u8 master_slave_set;
u8 master_slave_state;
@@ -709,16 +731,15 @@ struct phy_device {
	/* Used for EEE validation and configuration */
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
- bool eee_enabled;
-
- /* Host supported PHY interface types. Should be ignored if empty. */
- DECLARE_PHY_INTERFACE_MASK(host_interfaces);
-
/* Energy efficient ethernet modes which should be prohibited */
- u32 eee_broken_modes;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_disabled_modes);
bool enable_tx_lpi;
+ bool eee_active;
struct eee_config eee_cfg;
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
+
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
unsigned int phy_num_led_triggers;
@@ -738,9 +759,11 @@ struct phy_device {
/* For use by PHYs to maintain extra state */
void *priv;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/* shared data pointer */
/* For use by PHYs inside the same package that need a shared state. */
struct phy_package_shared *shared;
+#endif
/* Reporting cable test results */
struct sk_buff *skb;
@@ -774,16 +797,15 @@ struct phy_device {
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
+
+ struct phy_oatc14_sqi_capability oatc14_sqi_capability;
};
/* Generic phy_device::dev_flags */
#define PHY_F_NO_IRQ 0x80000000
#define PHY_F_RXC_ALWAYS_ON 0x40000000
-static inline struct phy_device *to_phy_device(const struct device *dev)
-{
- return container_of(to_mdio_device(dev), struct phy_device, mdio);
-}
+#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
/**
* struct phy_tdr_config - Configuration of a TDR raw test
@@ -807,6 +829,24 @@ struct phy_tdr_config {
#define PHY_PAIR_ALL -1
/**
+ * enum link_inband_signalling - in-band signalling modes that are supported
+ *
+ * @LINK_INBAND_DISABLE: in-band signalling can be disabled
+ * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass
+ * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass
+ *
+ * These values form a bitmask describing which in-band signalling modes a
+ * PHY supports; if none of the bits are set, in-band signalling cannot be
+ * used.
+ */
+enum link_inband_signalling {
+ LINK_INBAND_DISABLE = BIT(0),
+ LINK_INBAND_ENABLE = BIT(1),
+ LINK_INBAND_BYPASS = BIT(2),
+};
+
+/**
* struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
* Avoidance) Reconciliation Sublayer.
*
@@ -865,8 +905,9 @@ struct phy_plca_status {
/* Modes for PHY LED configuration */
enum phy_led_modes {
- PHY_LED_ACTIVE_LOW = 0,
- PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1,
+ PHY_LED_ACTIVE_HIGH = 0,
+ PHY_LED_ACTIVE_LOW = 1,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2,
/* keep it last */
__PHY_LED_MODES_NUM,
@@ -889,6 +930,165 @@ struct phy_led {
#define to_phy_led(d) container_of(d, struct phy_led, led_cdev)
+/*
+ * PHY_MSE_CAP_* - Bitmask flags for Mean Square Error (MSE) capabilities
+ *
+ * These flags describe which MSE metrics and selectors are implemented
+ * by the PHY for the current link mode. They are used in
+ * struct phy_mse_capability.supported_caps.
+ *
+ * Standardization:
+ * The OPEN Alliance (OA) defines the presence of MSE/SQI/pMSE but not their
+ * numeric scaling, update intervals, or aggregation windows. See:
+ * OA 100BASE-T1 TC1 v1.0, sections 6.1.1-6.1.3
+ * OA 1000BASE-T1 TC12 v2.2, sections 6.1.1-6.1.2
+ *
+ * Description of flags:
+ *
+ * PHY_MSE_CAP_CHANNEL_A
+ * Per-pair diagnostics for Channel A are supported. Mapping to the
+ * physical wire pair may depend on MDI/MDI-X polarity.
+ *
+ * PHY_MSE_CAP_CHANNEL_B, _C, _D
+ * Same as above for channels B-D.
+ *
+ * PHY_MSE_CAP_WORST_CHANNEL
+ * The PHY or driver can identify and report the single worst-performing
+ * channel without querying each one individually.
+ *
+ * PHY_MSE_CAP_LINK
+ * The PHY provides only a link-wide aggregate measurement or cannot map
+ * results to a specific pair (for example 100BASE-TX with unknown
+ * MDI/MDI-X).
+ *
+ * PHY_MSE_CAP_AVG
+ * Average MSE (mean DCQ metric) is supported. For 100/1000BASE-T1 the OA
+ * recommends 2^16 symbols, scaled 0..511, but the exact scaling is
+ * vendor-specific.
+ *
+ * PHY_MSE_CAP_PEAK
+ * Peak MSE (current peak within the measurement window) is supported.
+ * Defined as pMSE for 100BASE-T1; vendor-specific for others.
+ *
+ * PHY_MSE_CAP_WORST_PEAK
+ * Latched worst-case peak MSE since the last read (read-to-clear if
+ * implemented). Optional in OA 100BASE-T1 TC1 6.1.3.
+ */
+#define PHY_MSE_CAP_CHANNEL_A BIT(0)
+#define PHY_MSE_CAP_CHANNEL_B BIT(1)
+#define PHY_MSE_CAP_CHANNEL_C BIT(2)
+#define PHY_MSE_CAP_CHANNEL_D BIT(3)
+#define PHY_MSE_CAP_WORST_CHANNEL BIT(4)
+#define PHY_MSE_CAP_LINK BIT(5)
+#define PHY_MSE_CAP_AVG BIT(6)
+#define PHY_MSE_CAP_PEAK BIT(7)
+#define PHY_MSE_CAP_WORST_PEAK BIT(8)
+
+/*
+ * enum phy_mse_channel - Identifiers for selecting MSE measurement channels
+ *
+ * PHY_MSE_CHANNEL_A - PHY_MSE_CHANNEL_D
+ * Select per-pair measurement for the corresponding channel.
+ *
+ * PHY_MSE_CHANNEL_WORST
+ * Select the single worst-performing channel reported by hardware.
+ *
+ * PHY_MSE_CHANNEL_LINK
+ * Select link-wide aggregate data (used when per-pair results are
+ * unavailable).
+ */
+enum phy_mse_channel {
+ PHY_MSE_CHANNEL_A,
+ PHY_MSE_CHANNEL_B,
+ PHY_MSE_CHANNEL_C,
+ PHY_MSE_CHANNEL_D,
+ PHY_MSE_CHANNEL_WORST,
+ PHY_MSE_CHANNEL_LINK,
+};
+
+/**
+ * struct phy_mse_capability - Capabilities of Mean Square Error (MSE)
+ * measurement interface
+ *
+ * Standardization notes:
+ *
+ * - Presence of MSE/SQI/pMSE is defined by OPEN Alliance specs, but numeric
+ * scaling, refresh/update rate and aggregation windows are not fixed and
+ * are vendor-/product-specific. (OA 100BASE-T1 TC1 v1.0 6.1.*;
+ * OA 1000BASE-T1 TC12 v2.2 6.1.*)
+ *
+ * - Typical recommendations: 2^16 symbols and 0..511 scaling for MSE; pMSE only
+ * defined for 100BASE-T1 (sliding window example), others are vendor
+ * extensions. Drivers must report actual scale/limits here.
+ *
+ * Describes the MSE measurement capabilities for the current link mode. These
+ * properties are dynamic and may change when link settings are modified.
+ * Callers should re-query this capability after any link state change to
+ * ensure they have the most up-to-date information.
+ *
+ * Callers should only request measurements for channels and types that are
+ * indicated as supported by the @supported_caps bitmask. If @supported_caps
+ * is 0, the device provides no MSE diagnostics, and driver operations should
+ * typically return -EOPNOTSUPP.
+ *
+ * Snapshot values for average and peak MSE can be normalized to a 0..1 ratio
+ * by dividing the raw snapshot by the corresponding @max_average_mse or
+ * @max_peak_mse value.
+ *
+ * @max_average_mse: The maximum value for an average MSE snapshot. This
+ * defines the scale for the measurement. If the PHY_MSE_CAP_AVG capability is
+ * supported, this value MUST be greater than 0. (vendor-specific units).
+ * @max_peak_mse: The maximum value for a peak MSE snapshot. If either
+ * PHY_MSE_CAP_PEAK or PHY_MSE_CAP_WORST_PEAK is supported, this value MUST
+ * be greater than 0. (vendor-specific units).
+ * @refresh_rate_ps: The typical interval, in picoseconds, between hardware
+ * updates of the MSE values. This is an estimate, and callers should not
+ * assume synchronous sampling (vendor-specific value).
+ * @num_symbols: The number of symbols aggregated per hardware sample to
+ * calculate the MSE (vendor-specific value).
+ * @supported_caps: A bitmask of PHY_MSE_CAP_* values indicating which
+ * measurement types (e.g., average, peak) and channels
+ * (e.g., per-pair or link-wide) are supported.
+ */
+struct phy_mse_capability {
+ u64 max_average_mse;
+ u64 max_peak_mse;
+ u64 refresh_rate_ps;
+ u64 num_symbols;
+ u32 supported_caps;
+};
+
+/**
+ * struct phy_mse_snapshot - A snapshot of Mean Square Error (MSE) diagnostics
+ *
+ * Holds a set of MSE diagnostic values that were all captured from a single
+ * measurement window.
+ *
+ * Values are raw, device-scaled and not normalized. Use struct
+ * phy_mse_capability to interpret the scale and sampling window.
+ *
+ * @average_mse: The average MSE value over the measurement window.
+ * OPEN Alliance references MSE as a DCQ metric; recommends 2^16 symbols and
+ * 0..511 scaling. Exact scale and refresh are vendor-specific.
+ * (100BASE-T1 TC1 v1.0 6.1.1; 1000BASE-T1 TC12 v2.2 6.1.1).
+ *
+ * @peak_mse: The peak MSE value observed within the measurement window.
+ * For 100BASE-T1, "pMSE" is optional and may be implemented via a sliding
+ * 128-symbol window with periodic capture; not standardized for 1000BASE-T1.
+ * (100BASE-T1 TC1 v1.0 6.1.3, Table "DCQ.peakMSE").
+ *
+ * @worst_peak_mse: A latched high-water mark of the peak MSE since last read
+ * (read-to-clear if implemented). OPEN Alliance shows a latched "worst case
+ * peak MSE" for 100BASE-T1 pMSE; availability/semantics outside that are
+ * vendor-specific. (100BASE-T1 TC1 v1.0 6.1.3, DCQ.peakMSE high byte;
+ * 1000BASE-T1 TC12 v2.2 treats DCQ details as vendor-specific.)
+ */
+struct phy_mse_snapshot {
+ u64 average_mse;
+ u64 peak_mse;
+ u64 worst_peak_mse;
+};
+
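Putting the two structures together, a hedged consumer-side sketch (not part of this patch; it assumes a driver implementing the get_mse_capability()/get_mse_snapshot() methods declared below in struct phy_driver, and needs <linux/math64.h> for div64_u64()):

/* Illustrative sketch; example_report_mse() is a made-up name. */
static int example_report_mse(struct phy_device *phydev)
{
	const struct phy_driver *drv = phydev->drv;
	struct phy_mse_capability cap;
	struct phy_mse_snapshot snap;
	int err;

	err = drv->get_mse_capability(phydev, &cap);
	if (err)
		return err;	/* e.g. -EOPNOTSUPP or -EAGAIN */

	/* Only request selectors the PHY advertises */
	if (!(cap.supported_caps & PHY_MSE_CAP_LINK) ||
	    !(cap.supported_caps & PHY_MSE_CAP_AVG))
		return -EOPNOTSUPP;

	err = drv->get_mse_snapshot(phydev, PHY_MSE_CHANNEL_LINK, &snap);
	if (err)
		return err;

	/* max_average_mse is documented to be > 0 when PHY_MSE_CAP_AVG is
	 * supported, so normalize to parts-per-million of full scale.
	 */
	phydev_info(phydev, "average MSE: %llu ppm of full scale\n",
		    div64_u64(snap.average_mse * 1000000ULL,
			      cap.max_average_mse));
	return 0;
}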
/**
* struct phy_driver - Driver structure for a particular PHY type
*
@@ -945,6 +1145,19 @@ struct phy_driver {
int (*get_features)(struct phy_device *phydev);
/**
+ * @inband_caps: query whether in-band is supported for the given PHY
+ * interface mode. Returns a bitmask of bits defined by enum
+ * link_inband_signalling.
+ */
+ unsigned int (*inband_caps)(struct phy_device *phydev,
+ phy_interface_t interface);
+
+ /**
+ * @config_inband: configure in-band mode for the PHY
+ */
+ int (*config_inband)(struct phy_device *phydev, unsigned int modes);
+
+ /**
* @get_rate_matching: Get the supported type of rate matching for a
* particular phy interface. This is used by phy consumers to determine
* whether to advertise lower-speed modes for that interface. It is
@@ -994,7 +1207,8 @@ struct phy_driver {
* driver for the given phydev. If NULL, matching is based on
* phy_id and phy_id_mask.
*/
- int (*match_phy_device)(struct phy_device *phydev);
+ int (*match_phy_device)(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
/**
* @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
@@ -1078,6 +1292,53 @@ struct phy_driver {
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
/* Get statistics from the PHY using ethtool */
+ /**
+ * @get_phy_stats: Retrieve PHY statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @eth_stats: structure where Ethernet PHY stats will be stored.
+ * @stats: structure where additional PHY-specific stats will be stored.
+ *
+ * Retrieves the supported PHY statistics and populates the provided
+ * structures. The input structures are pre-initialized with
+ * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members
+ * corresponding to supported statistics. Unmodified members will remain
+ * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_phy_stats)(struct phy_device *dev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats);
+
+ /**
+ * @get_link_stats: Retrieve link statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @link_stats: structure where link-specific stats will be stored.
+ *
+ * Retrieves link-related statistics for the given PHY device. The input
+ * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the
+ * driver must only modify members corresponding to supported
+ * statistics. Unmodified members will remain set to
+ * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_link_stats)(struct phy_device *dev,
+ struct ethtool_link_ext_stats *link_stats);
+
+ /**
+ * @update_stats: Trigger periodic statistics updates.
+ * @dev: The PHY device for which statistics updates are triggered.
+ *
+ * Periodically gathers statistics from the PHY device to update locally
+ * maintained 64-bit counters. This is necessary for PHYs that implement
+ * reduced-width counters (e.g., 16-bit or 32-bit) which can overflow
+ * more frequently compared to 64-bit counters. By invoking this
+ * callback, drivers can fetch the current counter values, handle
+ * overflow detection, and accumulate the results into local 64-bit
+ * counters for accurate reporting through the `get_phy_stats` and
+ * `get_link_stats` interfaces.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int (*update_stats)(struct phy_device *dev);
+
/** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
/** @get_strings: Names of the statistic counters */
@@ -1094,13 +1355,68 @@ struct phy_driver {
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
- /** @set_loopback: Set the loopback mood of the PHY */
- int (*set_loopback)(struct phy_device *dev, bool enable);
+ /**
+	 * @set_loopback: Set the loopback mode of the PHY
+	 * enable selects whether loopback mode is enabled or disabled. When
+	 * enabling loopback, the speed of the loopback mode can be requested
+	 * with the speed argument. If speed is zero, any speed may be
+	 * selected. If speed is greater than zero, that speed shall be used
+	 * for the loopback mode, or -EOPNOTSUPP shall be returned if speed
+	 * selection is not supported.
+	 */
+ int (*set_loopback)(struct phy_device *dev, bool enable, int speed);
/** @get_sqi: Get the signal quality indication */
int (*get_sqi)(struct phy_device *dev);
/** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
+ /**
+ * @get_mse_capability: Get capabilities and scale of MSE measurement
+ * @dev: PHY device
+ * @cap: Output (filled on success)
+ *
+ * Fill @cap with the PHY's MSE capability for the current
+ * link mode: scale limits (max_average_mse, max_peak_mse), update
+ * interval (refresh_rate_ps), sample length (num_symbols) and the
+ * capability bitmask (supported_caps).
+ *
+	 * Implementations may defer capability reporting until the hardware has
+ * converged; in that case they should return -EAGAIN and allow the
+ * caller to retry later.
+ *
+ * Return: 0 on success. On failure, returns a negative errno code, such
+ * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in
+ * the current link mode, or -EAGAIN if the capability information is
+ * not yet available.
+ */
+ int (*get_mse_capability)(struct phy_device *dev,
+ struct phy_mse_capability *cap);
+
+ /**
+ * @get_mse_snapshot: Retrieve a snapshot of MSE diagnostic values
+ * @dev: PHY device
+ * @channel: Channel identifier (PHY_MSE_CHANNEL_*)
+ * @snapshot: Output (filled on success)
+ *
+ * Fill @snapshot with a correlated set of MSE values from the most
+ * recent measurement window.
+ *
+ * Callers must validate @channel against supported_caps returned by
+ * get_mse_capability(). Drivers must not coerce @channel; if the
+ * requested selector is not implemented by the device or current link
+ * mode, the operation must fail.
+ *
+ * worst_peak_mse is latched and must be treated as read-to-clear.
+ *
+ * Return: 0 on success. On failure, returns a negative errno code, such
+ * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in
+ * the current link mode, or -EAGAIN if measurements are not yet
+ * available.
+ */
+ int (*get_mse_snapshot)(struct phy_device *dev,
+ enum phy_mse_channel channel,
+ struct phy_mse_snapshot *snapshot);
+
/* PLCA RS interface */
/** @get_plca_cfg: Return the current PLCA configuration */
int (*get_plca_cfg)(struct phy_device *dev,
@@ -1122,7 +1438,7 @@ struct phy_driver {
u8 index, enum led_brightness value);
/**
- * @led_blink_set: Set a PHY LED brightness. Index indicates
+ * @led_blink_set: Set a PHY LED blinking. Index indicates
* which of the PHYs led should be configured to blink. Delays
* are in milliseconds and if both are zero then a sensible
* default should be chosen. The call should adjust the
@@ -1178,16 +1494,30 @@ struct phy_driver {
*/
int (*led_polarity_set)(struct phy_device *dev, int index,
unsigned long modes);
+
+ /**
+ * @get_next_update_time: Get the time until the next update event
+ * @dev: PHY device
+ *
+ * Callback to determine the time (in jiffies) until the next
+ * update event for the PHY state machine. Allows PHY drivers to
+ * dynamically adjust polling intervals based on link state or other
+ * conditions.
+ *
+	 * Return: the time in jiffies until the next update event.
+ */
+ unsigned int (*get_next_update_time)(struct phy_device *dev);
};
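As a concrete illustration of the @update_stats pattern documented above, a sketch of a driver accumulating a narrow hardware counter into a 64-bit counter; EXAMPLE_RX_ERR_CNT and the priv layout are hypothetical:

/* Hypothetical driver sketch; EXAMPLE_RX_ERR_CNT is a made-up 16-bit,
 * clear-on-read register.
 */
#define EXAMPLE_RX_ERR_CNT	0x18

struct example_phy_priv {
	u64 rx_errors;		/* accumulated 64-bit counter */
};

static int example_update_stats(struct phy_device *phydev)
{
	struct example_phy_priv *priv = phydev->priv;
	int val;

	/* Each poll adds the delta since the previous (clearing) read, so
	 * the 16-bit hardware counter cannot silently wrap between polls
	 * as long as the state machine polls often enough.
	 */
	val = phy_read(phydev, EXAMPLE_RX_ERR_CNT);
	if (val < 0)
		return val;

	priv->rx_errors += val;
	return 0;
}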
-#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
+#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
-#define PHY_ANY_ID "MATCH ANY PHY"
-#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT_MASK GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL_MASK GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR_MASK GENMASK(31, 10)
-#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
-#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
-#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_EXACT_MASK
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_MODEL_MASK
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_VENDOR_MASK
/**
* phy_id_compare - compare @id1 with @id2 taking account of @mask
@@ -1204,6 +1534,32 @@ static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
}
/**
+ * phy_id_compare_vendor - compare @id with @vendor mask
+ * @id: PHY ID
+ * @vendor_mask: PHY Vendor mask
+ *
+ * Return: true if the bits from @id match @vendor_mask using the
+ * generic PHY Vendor mask.
+ */
+static inline bool phy_id_compare_vendor(u32 id, u32 vendor_mask)
+{
+ return phy_id_compare(id, vendor_mask, PHY_ID_MATCH_VENDOR_MASK);
+}
+
+/**
+ * phy_id_compare_model - compare @id with @model mask
+ * @id: PHY ID
+ * @model_mask: PHY Model mask
+ *
+ * Return: true if the bits from @id match @model_mask using the
+ * generic PHY Model mask.
+ */
+static inline bool phy_id_compare_model(u32 id, u32 model_mask)
+{
+ return phy_id_compare(id, model_mask, PHY_ID_MATCH_MODEL_MASK);
+}
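With the two-argument @match_phy_device signature shown earlier, these helpers support vendor-level matching; a hypothetical driver sketch (EXAMPLE_VENDOR_OUI is made up):

/* Hypothetical driver sketch, not part of this patch. */
#define EXAMPLE_VENDOR_OUI	0x012bc400

static int example_match_phy_device(struct phy_device *phydev,
				    const struct phy_driver *phydrv)
{
	/* Bind to every model from this vendor */
	return phy_id_compare_vendor(phydev->phy_id, EXAMPLE_VENDOR_OUI);
}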
+
+/**
* phydev_id_compare - compare @id with the PHY's Clause 22 ID
* @phydev: the PHY device
* @id: the PHY ID to be matched
@@ -1217,39 +1573,12 @@ static inline bool phydev_id_compare(struct phy_device *phydev, u32 id)
return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask);
}
-/* A Structure for boards to register fixups with the PHY Lib */
-struct phy_fixup {
- struct list_head list;
- char bus_id[MII_BUS_ID_SIZE + 3];
- u32 phy_uid;
- u32 phy_uid_mask;
- int (*run)(struct phy_device *phydev);
-};
-
const char *phy_speed_to_str(int speed);
const char *phy_duplex_to_str(unsigned int duplex);
const char *phy_rate_matching_to_str(int rate_matching);
int phy_interface_num_ports(phy_interface_t interface);
-/* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value
- */
-struct phy_setting {
- u32 speed;
- u8 duplex;
- u8 bit;
-};
-
-const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- bool exact);
-size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask);
-void of_set_phy_supported(struct phy_device *phydev);
-void of_set_phy_eee_broken(struct phy_device *phydev);
-int phy_speed_down_core(struct phy_device *phydev);
-
/**
* phy_is_started - Convenience function to check whether PHY is started
* @phydev: The phy_device struct
@@ -1259,9 +1588,53 @@ static inline bool phy_is_started(struct phy_device *phydev)
return phydev->state >= PHY_UP;
}
+/**
+ * phy_driver_is_genphy - Convenience function to check whether PHY is driven
+ * by one of the generic PHY drivers
+ * @phydev: The phy_device struct
+ * Return: true if PHY is driven by one of the genphy drivers
+ */
+static inline bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phydev->is_genphy_driven;
+}
+
+/**
+ * phy_disable_eee_mode - Don't advertise an EEE mode.
+ * @phydev: The phy_device struct
+ * @link_mode: The EEE mode to be disabled
+ */
+static inline void phy_disable_eee_mode(struct phy_device *phydev, u32 link_mode)
+{
+ WARN_ON(phy_is_started(phydev));
+
+ linkmode_set_bit(link_mode, phydev->eee_disabled_modes);
+ linkmode_clear_bit(link_mode, phydev->advertising_eee);
+}
+
+/**
+ * phy_can_wakeup() - indicate whether PHY has driver model wakeup capabilities
+ * @phydev: The phy_device struct
+ *
+ * Returns: true/false depending on the PHY driver's device_set_wakeup_capable()
+ * setting.
+ */
+static inline bool phy_can_wakeup(struct phy_device *phydev)
+{
+ return device_can_wakeup(&phydev->mdio.dev);
+}
+
+/**
+ * phy_may_wakeup() - indicate whether PHY has wakeup enabled
+ * @phydev: The phy_device struct
+ *
+ * Returns: true/false depending on the PHY driver's device_set_wakeup_enabled()
+ * setting if using the driver model, otherwise the legacy determination.
+ */
+bool phy_may_wakeup(struct phy_device *phydev);
+
void phy_resolve_aneg_pause(struct phy_device *phydev);
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
-void phy_check_downshift(struct phy_device *phydev);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -1366,12 +1739,13 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
* @regnum: The register on the MMD to read
* @val: Variable to read the register into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
* @sleep_before_read: if it is true, sleep @sleep_us before read.
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*/
@@ -1556,6 +1930,9 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
if (phydev->drv->flags & PHY_POLL_CABLE_TEST)
return true;
+ if (phydev->drv->update_stats)
+ return true;
+
return phydev->irq == PHY_POLL;
}
@@ -1565,7 +1942,7 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
*/
static inline bool phy_has_hwtstamp(struct phy_device *phydev)
{
- return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp_set;
}
/**
@@ -1600,7 +1977,7 @@ static inline int phy_hwtstamp(struct phy_device *phydev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack);
+ return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -1610,7 +1987,7 @@ static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
static inline int phy_ts_info(struct phy_device *phydev,
- struct ethtool_ts_info *tsinfo)
+ struct kernel_ethtool_ts_info *tsinfo)
{
return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
}
@@ -1622,12 +1999,18 @@ static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
/**
- * phy_is_internal - Convenience function for testing if a PHY is internal
- * @phydev: the phy_device struct
+ * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp
+ * @phydev: Pointer to phy_device
+ *
+ * This is used to determine the default timestamping device, taking into
+ * account the new API choice: timestamping is taken from the MAC by
+ * default, unless the phydev has the default_timestamp flag enabled.
+ *
+ * Return: True if phy is the default hw timestamp, false otherwise.
*/
-static inline bool phy_is_internal(struct phy_device *phydev)
+static inline bool phy_is_default_hwtstamp(struct phy_device *phydev)
{
- return phydev->is_internal;
+ return phy_has_hwtstamp(phydev) && phydev->default_timestamp;
}
/**
@@ -1684,6 +2067,9 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
+phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface,
+ bool mac_txid, bool mac_rxid);
+
int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
@@ -1697,70 +2083,29 @@ int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
-#if IS_ENABLED(CONFIG_PHYLIB)
int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
-struct phy_device *device_phy_find_device(struct device *dev);
struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
-#else
-static inline int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
-{
- return 0;
-}
-static inline
-struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
-{
- return 0;
-}
-
-static inline
-struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
-{
- return NULL;
-}
-
-static inline struct phy_device *device_phy_find_device(struct device *dev)
-{
- return NULL;
-}
-
-static inline
-struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline
-struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
-{
- return NULL;
-}
-
-static inline int phy_device_register(struct phy_device *phy)
-{
- return 0;
-}
-
-static inline void phy_device_free(struct phy_device *phydev) { }
-#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
-int phy_loopback(struct phy_device *phydev, bool enable);
+int phy_loopback(struct phy_device *phydev, bool enable, int speed);
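A brief usage sketch for the new speed argument of phy_loopback(), assuming a caller that prefers 1 Gb/s loopback but accepts any speed (illustrative only):

/* Illustrative sketch, not part of this patch. */
static int example_enable_loopback(struct phy_device *phydev)
{
	int err;

	/* Ask for 1 Gb/s loopback; fall back to any speed if the driver
	 * does not support speed selection.
	 */
	err = phy_loopback(phydev, true, SPEED_1000);
	if (err == -EOPNOTSUPP)
		err = phy_loopback(phydev, true, 0);

	return err;
}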
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy);
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy);
void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
int phy_sfp_probe(struct phy_device *phydev,
const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
-struct phy_device *phy_find_first(struct mii_bus *bus);
+struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
@@ -1777,6 +2122,9 @@ int phy_config_aneg(struct phy_device *phydev);
int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface);
+int phy_config_inband(struct phy_device *phydev, unsigned int modes);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
bool phy_check_valid(int speed, int duplex, unsigned long *features);
@@ -1784,6 +2132,15 @@ bool phy_check_valid(int speed, int duplex, unsigned long *features);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
+static inline struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ return phy_find_next(bus, NULL);
+}
+
+#define mdiobus_for_each_phy(_bus, _phydev) \
+ for (_phydev = phy_find_first(_bus); _phydev; \
+ _phydev = phy_find_next(_bus, _phydev))
+
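A short usage sketch for the iterator above (purely illustrative):

/* Illustrative sketch, not part of this patch. */
static void example_list_phys(struct mii_bus *bus)
{
	struct phy_device *phydev;

	mdiobus_for_each_phy(bus, phydev)
		dev_info(&phydev->mdio.dev, "PHY at address %d\n",
			 phydev->mdio.addr);
}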
#if IS_ENABLED(CONFIG_PHYLIB)
int phy_start_cable_test(struct phy_device *phydev,
struct netlink_ext_ack *extack);
@@ -1849,12 +2206,14 @@ char *phy_attached_info_irq(struct phy_device *phydev)
__malloc;
void phy_attached_info(struct phy_device *phydev);
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
+
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
-int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@@ -1864,7 +2223,7 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_read_master_slave(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
-int genphy_loopback(struct phy_device *phydev, bool enable);
+int genphy_loopback(struct phy_device *phydev, bool enable, int speed);
int genphy_soft_reset(struct phy_device *phydev);
irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
@@ -1906,7 +2265,7 @@ int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
int genphy_c45_baset1_read_status(struct phy_device *phydev);
int genphy_c45_config_aneg(struct phy_device *phydev);
-int genphy_c45_loopback(struct phy_device *phydev, bool enable);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed);
int genphy_c45_pma_resume(struct phy_device *phydev);
int genphy_c45_pma_suspend(struct phy_device *phydev);
int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
@@ -1916,18 +2275,17 @@ int genphy_c45_plca_set_cfg(struct phy_device *phydev,
const struct phy_plca_cfg *plca_cfg);
int genphy_c45_plca_get_status(struct phy_device *phydev,
struct phy_plca_status *plca_st);
-int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp, bool *is_enabled);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp);
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
struct ethtool_keee *data);
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
struct ethtool_keee *data);
-int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv);
int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
-int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
-
-/* Generic C45 PHY driver */
-extern struct phy_driver genphy_c45_driver;
+int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev);
+int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev,
+ bool *finished);
+int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev);
+int genphy_c45_oatc14_get_sqi(struct phy_device *phydev);
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1943,14 +2301,11 @@ static inline int phy_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
-void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
-void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
@@ -1975,6 +2330,7 @@ void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
void phy_support_eee(struct phy_device *phydev);
+void phy_disable_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -1982,14 +2338,19 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx);
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx);
+
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val);
+
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val);
void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
bool *tx_pause, bool *rx_pause);
-int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
int phy_register_fixup_for_id(const char *bus_id,
int (*run)(struct phy_device *));
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
@@ -1999,6 +2360,8 @@ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
int phy_unregister_fixup_for_id(const char *bus_id);
int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev);
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
@@ -2011,21 +2374,18 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
-int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
-void phy_package_leave(struct phy_device *phydev);
-int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int base_addr, size_t priv_size);
-int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
- size_t priv_size);
-
-int __init mdio_bus_init(void);
-void mdio_bus_exit(void);
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
+
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
+
int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
struct phy_plca_cfg *plca_cfg);
int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
@@ -2040,124 +2400,8 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
-static inline int phy_package_address(struct phy_device *phydev,
- unsigned int addr_offset)
-{
- struct phy_package_shared *shared = phydev->shared;
- u8 base_addr = shared->base_addr;
-
- if (addr_offset >= PHY_MAX_ADDR - base_addr)
- return -EIO;
-
- /* we know that addr will be in the range 0..31 and thus the
- * implicit cast to a signed int is not a problem.
- */
- return base_addr + addr_offset;
-}
-
-static inline int phy_package_read(struct phy_device *phydev,
- unsigned int addr_offset, u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- return mdiobus_read(phydev->mdio.bus, addr, regnum);
-}
-
-static inline int __phy_package_read(struct phy_device *phydev,
- unsigned int addr_offset, u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- return __mdiobus_read(phydev->mdio.bus, addr, regnum);
-}
-
-static inline int phy_package_write(struct phy_device *phydev,
- unsigned int addr_offset, u32 regnum,
- u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
-}
-
-static inline int __phy_package_write(struct phy_device *phydev,
- unsigned int addr_offset, u32 regnum,
- u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
-
- if (addr < 0)
- return addr;
-
- return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
-}
-
-int __phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum);
-
-int phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum);
-
-int __phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val);
-
-int phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val);
-
-static inline bool __phy_package_set_once(struct phy_device *phydev,
- unsigned int b)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return false;
-
- return !test_and_set_bit(b, &shared->flags);
-}
-
-static inline bool phy_package_init_once(struct phy_device *phydev)
-{
- return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
-}
-
-static inline bool phy_package_probe_once(struct phy_device *phydev)
-{
- return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
-}
-
extern const struct bus_type mdio_bus_type;
-
-struct mdio_board_info {
- const char *bus_id;
- char modalias[MDIO_NAME_SIZE];
- int mdio_addr;
- const void *platform_data;
-};
-
-#if IS_ENABLED(CONFIG_MDIO_DEVICE)
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n);
-#else
-static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
- unsigned int n)
-{
- return 0;
-}
-#endif
-
+extern const struct class mdio_bus_class;
/**
* phy_module_driver() - Helper macro for registering PHY drivers
@@ -2183,7 +2427,4 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
-bool phy_driver_is_genphy(struct phy_device *phydev);
-bool phy_driver_is_genphy_10g(struct phy_device *phydev);
-
#endif /* __PHY_H */
diff --git a/include/linux/phy/phy-hdmi.h b/include/linux/phy/phy-hdmi.h
new file mode 100644
index 000000000000..f0ec963c6e84
--- /dev/null
+++ b/include/linux/phy/phy-hdmi.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022,2024 NXP
+ */
+
+#ifndef __PHY_HDMI_H_
+#define __PHY_HDMI_H_
+
+/**
+ * struct phy_configure_opts_hdmi - HDMI configuration set
+ * @tmds_char_rate: HDMI TMDS Character Rate in Hertz.
+ * @bpc: Bits per color channel.
+ *
+ * This structure is used to represent the configuration state of a HDMI phy.
+ */
+struct phy_configure_opts_hdmi {
+ unsigned long long tmds_char_rate;
+ unsigned int bpc;
+};
+
+#endif /* __PHY_HDMI_H_ */
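A hedged consumer-side sketch showing how these options plug into the generic PHY API via union phy_configure_opts (the rate and bpc values are illustrative):

/* Illustrative sketch, not part of this patch. */
static int example_configure_hdmi_phy(struct phy *phy)
{
	union phy_configure_opts opts = {
		.hdmi = {
			.tmds_char_rate = 594000000ULL,	/* 4k@60, 8 bpc */
			.bpc = 8,
		},
	};

	return phy_configure(phy, &opts);
}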
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
index 91eb755ee73b..f3e7b13608e4 100644
--- a/include/linux/phy/phy-sun4i-usb.h
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -11,7 +11,7 @@
/**
* sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
* @phy: reference to a sun4i usb phy
- * @enabled: wether to enable or disable squelch detect
+ * @enabled: whether to enable or disable squelch detect
*/
void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 03cd5bae92d3..2af0d01ebb39 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-hdmi.h>
#include <linux/phy/phy-lvds.h>
#include <linux/phy/phy-mipi-dphy.h>
@@ -42,7 +43,8 @@ enum phy_mode {
PHY_MODE_MIPI_DPHY,
PHY_MODE_SATA,
PHY_MODE_LVDS,
- PHY_MODE_DP
+ PHY_MODE_DP,
+ PHY_MODE_HDMI,
};
enum phy_media {
@@ -51,6 +53,15 @@ enum phy_media {
PHY_MEDIA_DAC,
};
+enum phy_ufs_state {
+ PHY_UFS_HIBERN8_ENTER,
+ PHY_UFS_HIBERN8_EXIT,
+};
+
+union phy_notify {
+ enum phy_ufs_state ufs_state;
+};
+
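A sketch of how a UFS host controller driver might use this notification (phy_notify_state() is declared further below; the function name here is illustrative):

/* Illustrative sketch, not part of this patch. */
static int example_ufs_enter_hibern8(struct phy *phy)
{
	union phy_notify state = {
		.ufs_state = PHY_UFS_HIBERN8_ENTER,
	};

	return phy_notify_state(phy, state);
}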
/**
* union phy_configure_opts - Opaque generic phy configuration
*
@@ -60,11 +71,14 @@ enum phy_media {
* the DisplayPort protocol.
* @lvds: Configuration set applicable for phys supporting
* the LVDS phy mode.
+ * @hdmi: Configuration set applicable for phys supporting
+ * the HDMI phy mode.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
struct phy_configure_opts_dp dp;
struct phy_configure_opts_lvds lvds;
+ struct phy_configure_opts_hdmi hdmi;
};
/**
@@ -78,6 +92,7 @@ union phy_configure_opts {
* @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
* @calibrate: calibrate the phy
+ * @notify_phystate: notify and configure the phy for a particular state
* @release: ops to be performed while the consumer relinquishes the PHY
* @owner: the module owner containing the ops
*/
@@ -127,6 +142,7 @@ struct phy_ops {
int (*connect)(struct phy *phy, int port);
int (*disconnect)(struct phy *phy, int port);
+ int (*notify_phystate)(struct phy *phy, union phy_notify state);
void (*release)(struct phy *phy);
struct module *owner;
};
@@ -149,6 +165,7 @@ struct phy_attrs {
* @id: id of the phy device
* @ops: function pointers for performing phy operations
* @mutex: mutex to protect phy_ops
+ * @lockdep_key: lockdep information for this mutex
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
@@ -160,6 +177,7 @@ struct phy {
int id;
const struct phy_ops *ops;
struct mutex mutex;
+ struct lock_class_key lockdep_key;
int init_count;
int power_count;
struct phy_attrs attrs;
@@ -227,8 +245,6 @@ int phy_pm_runtime_get(struct phy *phy);
int phy_pm_runtime_get_sync(struct phy *phy);
int phy_pm_runtime_put(struct phy *phy);
int phy_pm_runtime_put_sync(struct phy *phy);
-void phy_pm_runtime_allow(struct phy *phy);
-void phy_pm_runtime_forbid(struct phy *phy);
int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
@@ -250,6 +266,7 @@ int phy_reset(struct phy *phy);
int phy_calibrate(struct phy *phy);
int phy_notify_connect(struct phy *phy, int port);
int phy_notify_disconnect(struct phy *phy, int port);
+int phy_notify_state(struct phy *phy, union phy_notify state);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -321,16 +338,6 @@ static inline int phy_pm_runtime_put_sync(struct phy *phy)
return -ENOSYS;
}
-static inline void phy_pm_runtime_allow(struct phy *phy)
-{
- return;
-}
-
-static inline void phy_pm_runtime_forbid(struct phy *phy)
-{
- return;
-}
-
static inline int phy_init(struct phy *phy)
{
if (!phy)
@@ -417,6 +424,13 @@ static inline int phy_notify_disconnect(struct phy *phy, int index)
return -ENOSYS;
}
+static inline int phy_notify_state(struct phy *phy, union phy_notify state)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_configure(struct phy *phy,
union phy_configure_opts *opts)
{
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 1acafd86ab13..436bff20f324 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -5,51 +5,35 @@
#include <linux/types.h>
struct fixed_phy_status {
- int link;
int speed;
int duplex;
- int pause;
- int asym_pause;
+ bool link:1;
+ bool pause:1;
+ bool asym_pause:1;
};
struct device_node;
-struct gpio_desc;
struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
-extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
-extern struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np);
-
-extern struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod);
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np);
+struct phy_device *fixed_phy_register_100fd(void);
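With the simplified registration API above, a sketch of registering a fixed 1 Gb/s full-duplex link (illustrative values):

/* Illustrative sketch, not part of this patch. */
static struct phy_device *example_register_fixed_phy(void)
{
	static const struct fixed_phy_status status = {
		.link = true,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};

	return fixed_phy_register(&status, NULL);
}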
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
#else
-static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
-{
- return -ENODEV;
-}
-static inline struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
+static inline struct phy_device *
+fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
-static inline struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+static inline struct phy_device *fixed_phy_register_100fd(void)
{
return ERR_PTR(-ENODEV);
}
@@ -57,16 +41,6 @@ fixed_phy_register_with_gpiod(unsigned int irq,
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
-static inline int fixed_phy_set_link_update(struct phy_device *phydev,
- int (*link_update)(struct net_device *,
- struct fixed_phy_status *))
-{
- return -ENODEV;
-}
-static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
-{
- return -EINVAL;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phy_link_topology.h b/include/linux/phy_link_topology.h
new file mode 100644
index 000000000000..68a59e25821c
--- /dev/null
+++ b/include/linux/phy_link_topology.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The PHY device list allows maintaining a list of PHY devices that are
+ * part of a netdevice's link topology. PHYs can for example be chained,
+ * as is the case when using a PHY that exposes an SFP module, to which an
+ * SFP transceiver embedding another PHY is connected.
+ *
+ * This list can then be used by userspace to leverage individual PHY
+ * capabilities.
+ */
+#ifndef __PHY_LINK_TOPOLOGY_H
+#define __PHY_LINK_TOPOLOGY_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct xarray;
+struct phy_device;
+struct sfp_bus;
+
+struct phy_link_topology {
+ struct xarray phys;
+ u32 next_phy_index;
+};
+
+struct phy_device_node {
+ enum phy_upstream upstream_type;
+
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ } upstream;
+
+ struct sfp_bus *parent_sfp_bus;
+
+ struct phy_device *phy;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream);
+
+void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy);
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return NULL;
+
+ pdn = xa_load(&topo->phys, phyindex);
+ if (pdn)
+ return pdn->phy;
+
+ return NULL;
+}
+
+#else
+static inline int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ return 0;
+}
+
+static inline void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+}
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __PHY_LINK_TOPOLOGY_H */
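A usage sketch for the lookup helper above, e.g. resolving a PHY index carried in an ethtool netlink request (names are illustrative):

/* Illustrative sketch, not part of this patch. */
static int example_get_phy_name(struct net_device *dev, u32 phy_index,
				char *buf, size_t len)
{
	struct phy_device *phydev = phy_link_topo_get_phy(dev, phy_index);

	if (!phydev)
		return -ENODEV;

	return snprintf(buf, len, "%s", phydev_name(phydev));
}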
diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
index 1279f48c8a70..9d2d6090c86d 100644
--- a/include/linux/phylib_stubs.h
+++ b/include/linux/phylib_stubs.h
@@ -5,6 +5,9 @@
#include <linux/rtnetlink.h>
+struct ethtool_eth_phy_stats;
+struct ethtool_link_ext_stats;
+struct ethtool_phy_stats;
struct kernel_hwtstamp_config;
struct netlink_ext_ack;
struct phy_device;
@@ -19,6 +22,11 @@ struct phylib_stubs {
int (*hwtstamp_set)(struct phy_device *phydev,
struct kernel_hwtstamp_config *config,
struct netlink_ext_ack *extack);
+ void (*get_phy_stats)(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+ void (*get_link_ext_stats)(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
};
static inline int phy_hwtstamp_get(struct phy_device *phydev,
@@ -50,6 +58,29 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
return phylib_stubs->hwtstamp_set(phydev, config, extack);
}
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats);
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_link_ext_stats(phydev, link_stats);
+}
+
#else
static inline int phy_hwtstamp_get(struct phy_device *phydev,
@@ -65,4 +96,15 @@ static inline int phy_hwtstamp_set(struct phy_device *phydev,
return -EOPNOTSUPP;
}
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+}
+
#endif
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 5ea6b2ad2396..38363e566ac3 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -5,6 +5,8 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <net/eee.h>
+
struct device_node;
struct ethtool_cmd;
struct fwnode_handle;
@@ -141,12 +143,22 @@ enum phylink_op_type {
* @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY.
* The PHY driver should start the clock signal as soon as
* possible and avoid stopping it during suspend events.
- * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than
+ * MLO_AN_PHY. A fixed-link specification will override.
+ * @eee_rx_clk_stop_enable: if true, PHY can stop the receive clock during LPI
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
* are supported by the MAC/PCS.
+ * @lpi_interfaces: bitmap describing which PHY interface modes can support
+ * LPI signalling.
* @mac_capabilities: MAC pause/speed/duplex capabilities.
+ * @lpi_capabilities: MAC speeds which can support LPI signalling
+ * @lpi_timer_default: Default EEE LPI timer setting.
+ * @eee_enabled_default: If set, EEE will be enabled by phylink at creation time
+ * @wol_phy_legacy: Use Wake-on-LAN with the PHY even if phy_can_wakeup() is false
+ * @wol_phy_speed_ctrl: Use phy speed control on suspend/resume
+ * @wol_mac_support: Bitmask of MAC supported %WAKE_* options
*/
struct phylink_config {
struct device *dev;
@@ -154,11 +166,21 @@ struct phylink_config {
bool poll_fixed_state;
bool mac_managed_pm;
bool mac_requires_rxc;
- bool ovr_an_inband;
+ bool default_an_inband;
+ bool eee_rx_clk_stop_enable;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ DECLARE_PHY_INTERFACE_MASK(lpi_interfaces);
unsigned long mac_capabilities;
+ unsigned long lpi_capabilities;
+ u32 lpi_timer_default;
+ bool eee_enabled_default;
+
+	/* Wake-on-LAN support */
+ bool wol_phy_legacy;
+ bool wol_phy_speed_ctrl;
+ u32 wol_mac_support;
};
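A hedged sketch of how a MAC driver might populate the new LPI-related fields at probe time (the capabilities, timer value and interface modes below are examples only):

/* Illustrative sketch, not part of this patch. */
static void example_setup_phylink_config(struct phylink_config *config)
{
	config->lpi_capabilities = MAC_100FD | MAC_1000FD;
	config->lpi_timer_default = 1000;	/* LPI timer, microseconds */
	config->eee_enabled_default = true;

	__set_bit(PHY_INTERFACE_MODE_SGMII, config->lpi_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RGMII, config->lpi_interfaces);
}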
void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
@@ -172,6 +194,9 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
* @mac_finish: finish a major reconfiguration of the interface.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
+ * @mac_disable_tx_lpi: disable LPI.
+ * @mac_enable_tx_lpi: enable and configure LPI.
+ * @mac_wol_set: configure Wake-on-LAN settings at the MAC.
*
* The individual methods are described more fully below.
*/
@@ -192,6 +217,12 @@ struct phylink_mac_ops {
struct phy_device *phy, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+ void (*mac_disable_tx_lpi)(struct phylink_config *config);
+ int (*mac_enable_tx_lpi)(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
+
+ int (*mac_wol_set)(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass);
};
#if 0 /* For kernel-doc purposes only. */
@@ -301,9 +332,8 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* If in 802.3z mode, the link speed is fixed, dependent on the
* @state->interface. Duplex and pause modes are negotiated via
* the in-band configuration word. Advertised pause modes are set
- * according to the @state->an_enabled and @state->advertising
- * flags. Beware of MACs which only support full duplex at gigabit
- * and higher speeds.
+ * according to @state->advertising. Beware of MACs which only
+ * support full duplex at gigabit and higher speeds.
*
* If in Cisco SGMII mode, the link speed and duplex mode are passed
* in the serial bitstream 16-bit configuration word, and the MAC
@@ -312,7 +342,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* responsible for reading the configuration word and configuring
* itself accordingly.
*
- * Valid state members: interface, an_enabled, pause, advertising.
+ * Valid state members: interface, pause, advertising.
*
* Implementations are expected to update the MAC to reflect the
* requested settings - i.o.w., if nothing has changed between two
@@ -342,23 +372,29 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
/**
- * mac_link_down() - take the link down
+ * mac_link_down() - notification that the link has gone down
* @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
*
- * If @mode is not an in-band negotiation mode (as defined by
- * phylink_autoneg_inband()), force the link down and disable any
- * Energy Efficient Ethernet MAC configuration. Interface type
- * selection must be done in mac_config().
+ * Notifies the MAC that the link has gone down. This will not be called
+ * unless mac_link_up() has been previously called.
+ *
+ * The MAC should stop processing packets for transmission and reception.
+ * phylink will have called netif_carrier_off() to notify the networking
+ * stack that the link has gone down, so MAC drivers should not make this
+ * call.
+ *
+ * If @mode is %MLO_AN_INBAND, then this function must not prevent the
+ * link coming up.
*/
void mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
/**
- * mac_link_up() - allow the link to come up
+ * mac_link_up() - notification that the link has come up
* @config: a pointer to a &struct phylink_config.
- * @phy: any attached phy
+ * @phy: any attached phy (deprecated - please use LPI interfaces)
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
@@ -366,7 +402,10 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* @tx_pause: link transmit pause enablement status
* @rx_pause: link receive pause enablement status
*
- * Configure the MAC for an established link.
+ * Notifies the MAC that the link has come up, and the parameters of the
+ * link as seen from the MACs point of view. If mac_link_up() has been
+ * called previously, there will be an intervening call to mac_link_down()
+ * before this method will be subsequently called.
*
* @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
* settings, and should be used to configure the MAC block appropriately
@@ -378,23 +417,67 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* that the user wishes to override the pause settings, and this should
* be allowed when considering the implementation of this method.
*
- * If in-band negotiation mode is disabled, allow the link to come up. If
- * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
- * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Once configured, the MAC may begin to process packets for transmission
+ * and reception.
+ *
* Interface type selection must be done in mac_config().
*/
void mac_link_up(struct phylink_config *config, struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause);
+
+/**
+ * mac_disable_tx_lpi() - disable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ *
+ * Disable generation of LPI at the MAC, effectively preventing the MAC
+ * from indicating that it is idle.
+ */
+void mac_disable_tx_lpi(struct phylink_config *config);
+
+/**
+ * mac_enable_tx_lpi() - configure and enable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ * @timer: LPI timeout in microseconds.
+ * @tx_clk_stop: allow xMII transmit clock to be stopped during LPI
+ *
+ * Configure the LPI timeout accordingly. This will only be called when
+ * the link is already up, to cater for situations where the hardware
+ * needs to be programmed according to the link speed.
+ *
+ * Enable LPI generation at the MAC, and configure whether the xMII transmit
+ * clock may be stopped.
+ *
+ * Returns: 0 on success. Please consult with rmk before returning an error.
+ */
+int mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
+
+/**
+ * mac_wol_set() - configure the Wake-on-LAN parameters
+ * @config: a pointer to a &struct phylink_config.
+ * @wolopts: Bitmask of %WAKE_* flags for enabled Wake-on-LAN modes.
+ * @sopass: SecureOn(tm) password; meaningful only for %WAKE_MAGICSECURE
+ *
+ * Enable the specified Wake-on-LAN options at the MAC. Options that the
+ * PHY can handle will have been removed from @wolopts.
+ *
+ * The presence of this method enables phylink-managed WoL support.
+ *
+ * Returns: 0 on success.
+ */
+int mac_wol_set(struct phylink_config *config, u32 wolopts,
+		const u8 *sopass);
#endif
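For the LPI methods documented in the block above, a minimal sketch of a MAC-side implementation; every EXAMPLE_* name and register offset is hypothetical:

/* Hypothetical MAC driver sketch, not part of this patch. */
#define EXAMPLE_LPI_TIMER	0x30
#define EXAMPLE_LPI_CTRL	0x34
#define EXAMPLE_LPI_EN		BIT(0)
#define EXAMPLE_LPI_TX_CLK_STOP	BIT(1)

struct example_mac {
	struct phylink_config phylink_config;
	void __iomem *base;
};

static int example_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				     bool tx_clk_stop)
{
	struct example_mac *mac = container_of(config, struct example_mac,
					       phylink_config);
	u32 ctrl = EXAMPLE_LPI_EN;

	if (tx_clk_stop)
		ctrl |= EXAMPLE_LPI_TX_CLK_STOP;

	/* Program the LPI entry timer (in microseconds), then enable LPI */
	writel(timer, mac->base + EXAMPLE_LPI_TIMER);
	writel(ctrl, mac->base + EXAMPLE_LPI_CTRL);
	return 0;
}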
struct phylink_pcs_ops;
/**
* struct phylink_pcs - PHYLINK PCS instance
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by this PCS.
* @ops: a pointer to the &struct phylink_pcs_ops structure
* @phylink: pointer to &struct phylink_config
- * @neg_mode: provide PCS neg mode via "mode" argument
* @poll: poll the PCS for link changes
* @rxc_always_on: The MAC driver requires the reference clock
* to always be on. Standalone PCS drivers which
@@ -408,9 +491,9 @@ struct phylink_pcs_ops;
* the PCS driver.
*/
struct phylink_pcs {
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
const struct phylink_pcs_ops *ops;
struct phylink *phylink;
- bool neg_mode;
bool poll;
bool rxc_always_on;
};
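A PCS driver would typically fill in @supported_interfaces once, before
registering the PCS. A sketch, assuming a PCS handling SGMII and
1000BASE-X (foo_pcs and foo_pcs_ops are hypothetical):

static void foo_pcs_init(struct foo_pcs *fpcs)
{
	fpcs->pcs.ops = &foo_pcs_ops;
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  fpcs->pcs.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_1000BASEX,
		  fpcs->pcs.supported_interfaces);
}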
@@ -418,6 +501,7 @@ struct phylink_pcs {
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
* @pcs_validate: validate the link configuration.
+ * @pcs_inband_caps: query inband support for interface mode.
* @pcs_enable: enable the PCS.
* @pcs_disable: disable the PCS.
* @pcs_pre_config: pre-mac_config method (for errata)
@@ -427,19 +511,25 @@ struct phylink_pcs {
* @pcs_an_restart: restart 802.3z BaseX autonegotiation.
* @pcs_link_up: program the PCS for the resolved link configuration
* (where necessary).
+ * @pcs_disable_eee: optional notification to PCS that EEE has been disabled
+ * at the MAC.
+ * @pcs_enable_eee: optional notification to PCS that EEE will be enabled at
+ * the MAC.
* @pcs_pre_init: configure PCS components necessary for MAC hardware
* initialization e.g. RX clock for stmmac.
*/
struct phylink_pcs_ops {
int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
+ unsigned int (*pcs_inband_caps)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
int (*pcs_enable)(struct phylink_pcs *pcs);
void (*pcs_disable)(struct phylink_pcs *pcs);
void (*pcs_pre_config)(struct phylink_pcs *pcs,
phy_interface_t interface);
int (*pcs_post_config)(struct phylink_pcs *pcs,
phy_interface_t interface);
- void (*pcs_get_state)(struct phylink_pcs *pcs,
+ void (*pcs_get_state)(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
@@ -448,6 +538,8 @@ struct phylink_pcs_ops {
void (*pcs_an_restart)(struct phylink_pcs *pcs);
void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
+ void (*pcs_disable_eee)(struct phylink_pcs *pcs);
+ void (*pcs_enable_eee)(struct phylink_pcs *pcs);
int (*pcs_pre_init)(struct phylink_pcs *pcs);
};
@@ -470,6 +562,20 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
/**
+ * pcs_inband_caps - query PCS in-band capabilities for interface mode.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @interface: interface mode to be queried
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PCS (e.g. because the PCS driver doesn't implement the method.) Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * for this interface mode.
+ */
+unsigned int pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface);
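For illustration, a PCS that supports SGMII with or without in-band
signalling, but requires it for 1000BASE-X, might report the following.
This is a sketch assuming the LINK_INBAND_* flags from
&enum link_inband_signalling:

static unsigned int foo_pcs_inband_caps(struct phylink_pcs *pcs,
					phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
	case PHY_INTERFACE_MODE_1000BASEX:
		return LINK_INBAND_ENABLE;
	default:
		return 0;	/* unknown: no capability information */
	}
}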
+
+/**
* pcs_enable() - enable the PCS.
* @pcs: a pointer to a &struct phylink_pcs.
*/
@@ -484,6 +590,7 @@ void pcs_disable(struct phylink_pcs *pcs);
/**
* pcs_get_state() - Read the current inband link state from the hardware
* @pcs: a pointer to a &struct phylink_pcs.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Read the current inband link state from the MAC PCS, reporting the
@@ -492,8 +599,11 @@ void pcs_disable(struct phylink_pcs *pcs);
* negotiation completion state in @state->an_complete, and link up state
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
+ *
+ * Note that the @neg_mode parameter is always the PHYLINK_PCS_NEG_xxx
+ * state, not MLO_AN_xxx.
*/
-void pcs_get_state(struct phylink_pcs *pcs,
+void pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
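A clause-22 PCS driver would simply forward the new argument to the
phylink helper whose prototype gains @neg_mode later in this patch. A
sketch, where foo_pcs_to_mdio() is a hypothetical accessor:

static void foo_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			      struct phylink_link_state *state)
{
	phylink_mii_c22_pcs_get_state(foo_pcs_to_mdio(pcs), neg_mode, state);
}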
/**
@@ -521,6 +631,14 @@ void pcs_get_state(struct phylink_pcs *pcs,
* The %neg_mode argument should be tested against the PHYLINK_PCS_NEG_*
* definitions.
+ *
+ * pcs_config() will be called when configuration of the PCS is required
+ * or when the advertisement is possibly updated. It must not unnecessarily
+ * disrupt an established link.
+ *
+ * When an autonegotiation restart is required for 802.3z modes, .pcs_config()
+ * should return a positive integer (e.g. 1) to indicate to phylink
+ * to call the pcs_an_restart() method.
*/
int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
@@ -556,6 +674,22 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
/**
+ * pcs_disable_eee() - Disable EEE at the PCS
+ * @pcs: a pointer to a &struct phylink_pcs
+ *
+ * Optional method informing the PCS that EEE has been disabled at the MAC.
+ */
+void pcs_disable_eee(struct phylink_pcs *pcs);
+
+/**
+ * pcs_enable_eee() - Enable EEE at the PCS
+ * @pcs: a pointer to a &struct phylink_pcs
+ *
+ * Optional method informing the PCS that EEE is about to be enabled at the MAC.
+ */
+void pcs_enable_eee(struct phylink_pcs *pcs);
+
+/**
* pcs_pre_init() - Configure PCS components necessary for MAC initialization
* @pcs: a pointer to a &struct phylink_pcs.
*
@@ -597,6 +731,8 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
const struct fwnode_handle *fwnode,
u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_set_fixed_link(struct phylink *,
+ const struct phylink_link_state *);
void phylink_mac_change(struct phylink *, bool up);
void phylink_pcs_change(struct phylink_pcs *, bool up);
@@ -606,7 +742,11 @@ int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs);
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
+void phylink_rx_clk_stop_block(struct phylink *);
+void phylink_rx_clk_stop_unblock(struct phylink *);
+
void phylink_suspend(struct phylink *pl, bool mac_wol);
+void phylink_prepare_resume(struct phylink *pl);
void phylink_resume(struct phylink *pl);
void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
@@ -622,7 +762,6 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
-int phylink_init_eee(struct phylink *, bool);
int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
@@ -653,6 +792,7 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface)
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return 1600000;
case PHY_INTERFACE_MODE_1000BASEX:
@@ -664,9 +804,22 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface)
}
}
+/**
+ * phylink_mac_implements_lpi() - determine if MAC implements LPI ops
+ * @ops: phylink_mac_ops structure
+ *
+ * Returns true if the phylink MAC operations structure indicates that the
+ * LPI operations have been implemented, false otherwise.
+ */
+static inline bool phylink_mac_implements_lpi(const struct phylink_mac_ops *ops)
+{
+ return ops && ops->mac_disable_tx_lpi && ops->mac_enable_tx_lpi;
+}
+
void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
- u16 bmsr, u16 lpa);
+ unsigned int neg_mode, u16 bmsr, u16 lpa);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state);
int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
const unsigned long *advertising);
diff --git a/include/linux/pid.h b/include/linux/pid.h
index a3aad9b4074c..003a1027d219 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -47,18 +47,23 @@
#define RESERVED_PIDS 300
+struct pidfs_attr;
+
struct upid {
int nr;
struct pid_namespace *ns;
};
-struct pid
-{
+struct pid {
refcount_t count;
unsigned int level;
spinlock_t lock;
- struct dentry *stashed;
- u64 ino;
+ struct {
+ u64 ino;
+ struct rb_node pidfs_node;
+ struct dentry *stashed;
+ struct pidfs_attr *attr;
+ };
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;
@@ -68,6 +73,7 @@ struct pid
struct upid numbers[];
};
+extern seqcount_spinlock_t pidmap_lock_seq;
extern struct pid init_struct_pid;
struct file;
@@ -75,7 +81,7 @@ struct file;
struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
-int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file);
void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
@@ -99,16 +105,13 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
* these helpers must be called with the tasklist_lock write-held.
*/
extern void attach_pid(struct task_struct *task, enum pid_type);
-extern void detach_pid(struct task_struct *task, enum pid_type);
-extern void change_pid(struct task_struct *task, enum pid_type,
- struct pid *pid);
+void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type);
+void change_pid(struct pid **pids, struct task_struct *task, enum pid_type,
+ struct pid *pid);
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-extern int pid_max;
-extern int pid_max_min, pid_max_max;
-
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
@@ -130,6 +133,7 @@ extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size);
extern void free_pid(struct pid *pid);
+void free_pids(struct pid **pids);
extern void disable_pid_allocation(struct pid_namespace *ns);
/*
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index f9f9931e02d6..0e7ae12c96d2 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -30,6 +30,7 @@ struct pid_namespace {
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
+ int pid_max;
struct pid_namespace *parent;
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
@@ -38,9 +39,14 @@ struct pid_namespace {
struct ucounts *ucounts;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+ struct work_struct work;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+#if defined(CONFIG_MEMFD_CREATE)
int memfd_noexec_scope;
#endif
+#endif
} __randomize_layout;
extern struct pid_namespace init_pid_ns;
@@ -48,10 +54,14 @@ extern struct pid_namespace init_pid_ns;
#define PIDNS_ADDING (1U << 31)
#ifdef CONFIG_PID_NS
+static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct pid_namespace, ns);
+}
+
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
- if (ns != &init_pid_ns)
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
@@ -72,12 +82,15 @@ static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
}
#endif
-extern struct pid_namespace *copy_pid_ns(unsigned long flags,
+extern struct pid_namespace *copy_pid_ns(u64 flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
extern void put_pid_ns(struct pid_namespace *ns);
+extern bool pidns_is_ancestor(struct pid_namespace *child,
+ struct pid_namespace *ancestor);
+
#else /* !CONFIG_PID_NS */
#include <linux/err.h>
@@ -91,7 +104,7 @@ static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
return 0;
}
-static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
+static inline struct pid_namespace *copy_pid_ns(u64 flags,
struct user_namespace *user_ns, struct pid_namespace *ns)
{
if (flags & CLONE_NEWPID)
@@ -112,11 +125,19 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
return 0;
}
+
+static inline bool pidns_is_ancestor(struct pid_namespace *child,
+ struct pid_namespace *ancestor)
+{
+ return false;
+}
#endif /* CONFIG_PID_NS */
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+int register_pidns_sysctls(struct pid_namespace *pidns);
+void unregister_pidns_sysctls(struct pid_namespace *pidns);
static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
index 75bdf9807802..3e08c33da2df 100644
--- a/include/linux/pidfs.h
+++ b/include/linux/pidfs.h
@@ -2,7 +2,18 @@
#ifndef _LINUX_PID_FS_H
#define _LINUX_PID_FS_H
+struct coredump_params;
+
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
void __init pidfs_init(void);
+void pidfs_add_pid(struct pid *pid);
+void pidfs_remove_pid(struct pid *pid);
+void pidfs_exit(struct task_struct *tsk);
+#ifdef CONFIG_COREDUMP
+void pidfs_coredump(const struct coredump_params *cprm);
+#endif
+extern const struct dentry_operations pidfs_dentry_operations;
+int pidfs_register_pid(struct pid *pid);
+void pidfs_free_pid(struct pid *pid);
#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 73de70362b98..63ce16191eb9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -48,6 +48,7 @@ int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_init_state(struct device *dev);
int pinctrl_pm_select_sleep_state(struct device *dev);
int pinctrl_pm_select_idle_state(struct device *dev);
#else
@@ -55,6 +56,10 @@ static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
@@ -143,6 +148,11 @@ static inline int pinctrl_pm_select_default_state(struct device *dev)
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index 673e96df453b..25620229b1d6 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -149,14 +149,18 @@ struct pinctrl_map {
#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
+struct device;
struct pinctrl_map;
#ifdef CONFIG_PINCTRL
-extern int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned int num_maps);
-extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
-extern void pinctrl_provide_dummies(void);
+int pinctrl_register_mappings(const struct pinctrl_map *map,
+ unsigned int num_maps);
+int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *map,
+ unsigned int num_maps);
+void pinctrl_unregister_mappings(const struct pinctrl_map *map);
+void pinctrl_provide_dummies(void);
#else
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
@@ -165,6 +169,13 @@ static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
return 0;
}
+static inline int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ return 0;
+}
+
static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map)
{
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a65d3d078e58..1be4032071c2 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,14 +81,20 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in
+ * schmitt-trigger mode. The argument is in uV.
* @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
* @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
+ * @PIN_CONFIG_LEVEL: setting this will configure the pin as an output and
+ * drive a value on the line. Use argument 1 to indicate high level,
+ * argument 0 to indicate low level. Conversely the value of the line
+ * can be read using this parameter, if and only if that value can be
+ *	represented as a binary 0 or 1, where 0 indicates a low voltage level
+ *	and 1 indicates a high voltage level.
+ * (Please see Documentation/driver-api/pin-control.rst,
* section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
* without driving a value there. For most platforms this reduces to
@@ -106,6 +112,12 @@ struct pinctrl_map;
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_SKEW_DELAY_INPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the clock skew only. The argument is in ps.
+ * @PIN_CONFIG_SKEW_DELAY_OUTPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the latch delay only. The argument is in ps.
* @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
@@ -132,14 +144,17 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
- PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_LEVEL,
PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_SKEW_DELAY_INPUT_PS,
+ PIN_CONFIG_SKEW_DELAY_OUTPUT_PS,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7F,
@@ -174,21 +189,28 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
return PIN_CONF_PACKED(param, argument);
}
-#define PCONFDUMP(a, b, c, d) { \
- .param = a, .display = b, .format = c, .has_arg = d \
+#define PCONFDUMP_WITH_VALUES(a, b, c, d, e, f) { \
+ .param = a, .display = b, .format = c, .has_arg = d, \
+ .values = e, .num_values = f \
}
+#define PCONFDUMP(a, b, c, d) PCONFDUMP_WITH_VALUES(a, b, c, d, NULL, 0)
+
struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
bool has_arg;
+ const char * const *values;
+ size_t num_values;
};
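A hypothetical debugfs dump table showing the new values array alongside
a plain entry; the level_names table is illustrative only:

static const char * const level_names[] = { "low", "high" };

static const struct pin_config_item conf_items[] = {
	PCONFDUMP_WITH_VALUES(PIN_CONFIG_LEVEL, "pin level", NULL, true,
			      level_names, ARRAY_SIZE(level_names)),
	PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
};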
struct pinconf_generic_params {
const char * const property;
enum pin_config_param param;
u32 default_value;
+ const char * const *values;
+ size_t num_values;
};
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
@@ -229,4 +251,8 @@ static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev
PIN_MAP_TYPE_INVALID);
}
+int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps);
#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 9a8189ffd0f2..1a8084e29405 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_PINCTRL_PINCTRL_H
#define __LINUX_PINCTRL_PINCTRL_H
+#include <linux/bits.h>
#include <linux/types.h>
struct device;
@@ -165,25 +166,25 @@ struct pinctrl_desc {
/* External interface to pin controller */
-extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+extern int pinctrl_register_and_init(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
extern int pinctrl_enable(struct pinctrl_dev *pctldev);
/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
-extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+extern struct pinctrl_dev *pinctrl_register(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
extern int devm_pinctrl_register_and_init(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data,
struct pinctrl_dev **pctldev);
/* Please use devm_pinctrl_register_and_init() instead */
extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data);
extern void devm_pinctrl_unregister(struct device *dev,
@@ -206,16 +207,20 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned int **pins,
unsigned int *num_pins);
+#define PINFUNCTION_FLAG_GPIO BIT(0)
+
/**
* struct pinfunction - Description about a function
* @name: Name of the function
* @groups: An array of groups for this function
* @ngroups: Number of groups in @groups
+ * @flags: Additional pin function flags
*/
struct pinfunction {
const char *name;
const char * const *groups;
size_t ngroups;
+ unsigned long flags;
};
/* Convenience macro to define a single named pinfunction */
@@ -226,6 +231,15 @@ struct pinfunction {
.ngroups = (_ngroups), \
}
+/* Same as PINCTRL_PINFUNCTION() but for the GPIO category of functions */
+#define PINCTRL_GPIO_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ .flags = PINFUNCTION_FLAG_GPIO, \
+ }
+
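Usage sketch, declaring a GPIO-category function; the group names are
hypothetical:

static const char * const foo_gpio_groups[] = { "gpio0_grp", "gpio1_grp" };

static const struct pinfunction foo_gpio_func =
	PINCTRL_GPIO_PINFUNCTION("gpio", foo_gpio_groups,
				 ARRAY_SIZE(foo_gpio_groups));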
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index d6f7b58d6ad0..094bbe2fd6fd 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -35,6 +35,16 @@ struct pinctrl_gpio_range;
* name can be used with the generic @pinctrl_ops to retrieve the
* actual pins affected. The applicable groups will be returned in
* @groups and the number of groups in @num_groups
+ * @function_is_gpio: determine if the indicated function selector passed
+ * corresponds to the GPIO function which is used by the accelerated GPIO
+ * functions @gpio_request_enable, @gpio_disable_free and
+ * @gpio_set_direction. When the pin control core can properly determine
+ * if a function is a GPIO function, it is easier to use the @strict mode
+ * on the pin controller. Since a single function is passed, this is
+ * only useful on pin controllers that use a specific function for GPIO,
+ * and that usually presupposes that a one-group-per-pin approach is
+ * used, so that a single function can be set on a single pin to turn
+ * it to GPIO mode.
* @set_mux: enable a certain muxing function with a certain pin group. The
* driver does not need to figure out whether enabling this function
* conflicts some other use of the pins in that group, such collisions
@@ -66,6 +76,8 @@ struct pinmux_ops {
unsigned int selector,
const char * const **groups,
unsigned int *num_groups);
+ bool (*function_is_gpio) (struct pinctrl_dev *pctldev,
+ unsigned int selector);
int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
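A sketch of the new callback for a controller that reserves a single
function selector for GPIO; FOO_FUNC_GPIO is hypothetical:

static bool foo_function_is_gpio(struct pinctrl_dev *pctldev,
				 unsigned int selector)
{
	return selector == FOO_FUNC_GPIO;
}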
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8ff23bf5a819..7f6a92ac9704 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -31,13 +31,39 @@ struct pipe_buffer {
unsigned long private;
};
+/*
+ * Really only alpha needs 32-bit fields, but
+ * might as well do it for 64-bit architectures
+ * since that's what we've historically done,
+ * and it makes 'head_tail' always be a simple
+ * 'unsigned long'.
+ */
+#ifdef CONFIG_64BIT
+typedef unsigned int pipe_index_t;
+#else
+typedef unsigned short pipe_index_t;
+#endif
+
+/**
+ * union pipe_index - pipe indices
+ * @head: The point of buffer production
+ * @tail: The point of buffer consumption
+ * @head_tail: unsigned long union of @head and @tail
+ */
+union pipe_index {
+ unsigned long head_tail;
+ struct {
+ pipe_index_t head;
+ pipe_index_t tail;
+ };
+};
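Because @head and @tail now share one unsigned long, both indices can be
snapshotted consistently with a single load. A sketch of the idiom
(pipe_read_indices() is illustrative, not part of this patch):

static inline union pipe_index pipe_read_indices(struct pipe_inode_info *pipe)
{
	union pipe_index idx;

	/* One READ_ONCE() yields a coherent head/tail pair, lock-free. */
	idx.head_tail = READ_ONCE(pipe->head_tail);
	return idx;
}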
+
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
* @rd_wait: reader wait point in case of empty pipe
* @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
+ * @head_tail: unsigned long union of @head and @tail
* @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
@@ -58,8 +84,9 @@ struct pipe_buffer {
struct pipe_inode_info {
struct mutex mutex;
wait_queue_head_t rd_wait, wr_wait;
- unsigned int head;
- unsigned int tail;
+
+	/* This must mirror 'union pipe_index' above (anonymous member) */
+	union {
+		unsigned long head_tail;
+		struct {
+			pipe_index_t head;
+			pipe_index_t tail;
+		};
+	};
+
unsigned int max_usage;
unsigned int ring_size;
unsigned int nr_accounted;
@@ -72,7 +99,7 @@ struct pipe_inode_info {
#ifdef CONFIG_WATCH_QUEUE
bool note_loss;
#endif
- struct page *tmp_page;
+ struct page *tmp_page[2];
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
@@ -141,23 +168,23 @@ static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
}
/**
- * pipe_empty - Return true if the pipe is empty
+ * pipe_occupancy - Return number of slots used in the pipe
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
*/
-static inline bool pipe_empty(unsigned int head, unsigned int tail)
+static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
- return head == tail;
+ return (pipe_index_t)(head - tail);
}
/**
- * pipe_occupancy - Return number of slots used in the pipe
+ * pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
*/
-static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
{
- return head - tail;
+ return !pipe_occupancy(head, tail);
}
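The cast to pipe_index_t keeps the subtraction correct across index
wraparound: with 16-bit indices, head = 2 and tail = 65534 gives
(pipe_index_t)(2 - 65534) == 4 occupied slots, and pipe_empty() is true
exactly when that difference is zero.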
/**
@@ -173,6 +200,33 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
+ * pipe_is_full - Return true if the pipe is full
+ * @pipe: the pipe
+ */
+static inline bool pipe_is_full(const struct pipe_inode_info *pipe)
+{
+ return pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+}
+
+/**
+ * pipe_is_empty - Return true if the pipe is empty
+ * @pipe: the pipe
+ */
+static inline bool pipe_is_empty(const struct pipe_inode_info *pipe)
+{
+ return pipe_empty(pipe->head, pipe->tail);
+}
+
+/**
+ * pipe_buf_usage - Return how many pipe buffers are in use
+ * @pipe: the pipe
+ */
+static inline unsigned int pipe_buf_usage(const struct pipe_inode_info *pipe)
+{
+ return pipe_occupancy(pipe->head, pipe->tail);
+}
+
+/**
* pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
* @pipe: The pipe to access
* @slot: The slot of interest
@@ -245,15 +299,6 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe,
return buf->ops->try_steal(pipe, buf);
}
-static inline void pipe_discard_from(struct pipe_inode_info *pipe,
- unsigned int old_head)
-{
- unsigned int mask = pipe->ring_size - 1;
-
- while (pipe->head > old_head)
- pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]);
-}
-
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
deleted file mode 100644
index 2f1b952d596a..000000000000
--- a/include/linux/pktcdvd.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
- * DVD-RW devices.
- *
- */
-#ifndef __PKTCDVD_H
-#define __PKTCDVD_H
-
-#include <linux/blkdev.h>
-#include <linux/completion.h>
-#include <linux/cdrom.h>
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/mempool.h>
-#include <uapi/linux/pktcdvd.h>
-
-/* default bio write queue congestion marks */
-#define PKT_WRITE_CONGESTION_ON 10000
-#define PKT_WRITE_CONGESTION_OFF 9000
-
-
-struct packet_settings
-{
- __u32 size; /* packet size in (512 byte) sectors */
- __u8 fp; /* fixed packets */
- __u8 link_loss; /* the rest is specified
- * as per Mt Fuji */
- __u8 write_type;
- __u8 track_mode;
- __u8 block_mode;
-};
-
-/*
- * Very crude stats for now
- */
-struct packet_stats
-{
- unsigned long pkt_started;
- unsigned long pkt_ended;
- unsigned long secs_w;
- unsigned long secs_rg;
- unsigned long secs_r;
-};
-
-struct packet_cdrw
-{
- struct list_head pkt_free_list;
- struct list_head pkt_active_list;
- spinlock_t active_list_lock; /* Serialize access to pkt_active_list */
- struct task_struct *thread;
- atomic_t pending_bios;
-};
-
-/*
- * Switch to high speed reading after reading this many kilobytes
- * with no interspersed writes.
- */
-#define HI_SPEED_SWITCH 512
-
-struct packet_iosched
-{
- atomic_t attention; /* Set to non-zero when queue processing is needed */
- int writing; /* Non-zero when writing, zero when reading */
- spinlock_t lock; /* Protecting read/write queue manipulations */
- struct bio_list read_queue;
- struct bio_list write_queue;
- sector_t last_write; /* The sector where the last write ended */
- int successive_reads;
-};
-
-/*
- * 32 buffers of 2048 bytes
- */
-#if (PAGE_SIZE % CD_FRAMESIZE) != 0
-#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
-#endif
-#define PACKET_MAX_SIZE 128
-#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE)
-#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
-
-enum packet_data_state {
- PACKET_IDLE_STATE, /* Not used at the moment */
- PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */
- /* we don't have to do as much */
- /* data gathering */
- PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */
- PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */
- PACKET_RECOVERY_STATE, /* Recover after read/write errors */
- PACKET_FINISHED_STATE, /* After write has finished */
-
- PACKET_NUM_STATES /* Number of possible states */
-};
-
-/*
- * Information needed for writing a single packet
- */
-struct pktcdvd_device;
-
-struct packet_data
-{
- struct list_head list;
-
- spinlock_t lock; /* Lock protecting state transitions and */
- /* orig_bios list */
-
- struct bio_list orig_bios; /* Original bios passed to pkt_make_request */
- /* that will be handled by this packet */
- int write_size; /* Total size of all bios in the orig_bios */
- /* list, measured in number of frames */
-
- struct bio *w_bio; /* The bio we will send to the real CD */
- /* device once we have all data for the */
- /* packet we are going to write */
- sector_t sector; /* First sector in this packet */
- int frames; /* Number of frames in this packet */
-
- enum packet_data_state state; /* Current state */
- atomic_t run_sm; /* Incremented whenever the state */
- /* machine needs to be run */
- long sleep_time; /* Set this to non-zero to make the state */
- /* machine run after this many jiffies. */
-
- atomic_t io_wait; /* Number of pending IO operations */
- atomic_t io_errors; /* Number of read/write errors during IO */
-
- struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
- struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
-
- int cache_valid; /* If non-zero, the data for the zone defined */
- /* by the sector variable is completely cached */
- /* in the pages[] vector. */
-
- int id; /* ID number for debugging */
- struct pktcdvd_device *pd;
-};
-
-struct pkt_rb_node {
- struct rb_node rb_node;
- struct bio *bio;
-};
-
-struct packet_stacked_data
-{
- struct bio *bio; /* Original read request bio */
- struct pktcdvd_device *pd;
-};
-#define PSD_POOL_SIZE 64
-
-struct pktcdvd_device
-{
- struct file *bdev_file; /* dev attached */
- /* handle acquired for bdev during pkt_open_dev() */
- struct file *f_open_bdev;
- dev_t pkt_dev; /* our dev */
- struct packet_settings settings;
- struct packet_stats stats;
- int refcnt; /* Open count */
- int write_speed; /* current write speed, kB/s */
- int read_speed; /* current read speed, kB/s */
- unsigned long offset; /* start offset */
- __u8 mode_offset; /* 0 / 8 */
- __u8 type;
- unsigned long flags;
- __u16 mmc3_profile;
- __u32 nwa; /* next writable address */
- __u32 lra; /* last recorded address */
- struct packet_cdrw cdrw;
- wait_queue_head_t wqueue;
-
- spinlock_t lock; /* Serialize access to bio_queue */
- struct rb_root bio_queue; /* Work queue of bios we need to handle */
- int bio_queue_size; /* Number of nodes in bio_queue */
- bool congested; /* Someone is waiting for bio_queue_size
- * to drop. */
- sector_t current_sector; /* Keep track of where the elevator is */
- atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
- /* needs to be run. */
- mempool_t rb_pool; /* mempool for pkt_rb_node allocations */
-
- struct packet_iosched iosched;
- struct gendisk *disk;
-
- int write_congestion_off;
- int write_congestion_on;
-
- struct device *dev; /* sysfs pktcdvd[0-7] dev */
-
- struct dentry *dfs_d_root; /* debugfs: devname directory */
- struct dentry *dfs_f_info; /* debugfs: info file */
-};
-
-#endif /* __PKTCDVD_H */
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
deleted file mode 100644
index d687ef5726c2..000000000000
--- a/include/linux/platform_data/ad5449.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
- * Converter driver.
- *
- * Copyright 2012 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
-#define __LINUX_PLATFORM_DATA_AD5449_H__
-
-/**
- * enum ad5449_sdo_mode - AD5449 SDO pin configuration
- * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
- * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength.
- * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
- * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
- * read back from the device.
- */
-enum ad5449_sdo_mode {
- AD5449_SDO_DRIVE_FULL = 0x0,
- AD5449_SDO_DRIVE_WEAK = 0x1,
- AD5449_SDO_OPEN_DRAIN = 0x2,
- AD5449_SDO_DISABLED = 0x3,
-};
-
-/**
- * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
- * @sdo_mode: SDO pin mode
- * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
- * outputs to midscale (true) or to zero scale(false).
- */
-struct ad5449_platform_data {
- enum ad5449_sdo_mode sdo_mode;
- bool hardware_clear_to_midscale;
-};
-
-#endif
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
new file mode 100644
index 000000000000..967a6ef31cf9
--- /dev/null
+++ b/include/linux/platform_data/amd_qdma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_QDMA_H
+#define _PLATDATA_AMD_QDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct qdma_queue_info - DMA queue information. This information is used to
+ * match queue when DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct qdma_queue_info {
+ enum dma_transfer_direction dir;
+};
+
+#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo))
+
+struct dma_slave_map;
+
+/**
+ * struct qdma_platdata - Platform specific data for QDMA engine
+ * @max_mm_channels: Maximum number of MM DMA channels in each direction
+ * @device_map: DMA slave map
+ * @irq_index: The index of first IRQ
+ * @dma_dev: The device pointer for dma operations
+ */
+struct qdma_platdata {
+ u32 max_mm_channels;
+ u32 irq_index;
+ struct dma_slave_map *device_map;
+ struct device *dma_dev;
+};
+
+#endif /* _PLATDATA_AMD_QDMA_H */
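A sketch of how a platform might wire this up; the device and channel
names are hypothetical:

static struct qdma_queue_info foo_h2c_info = {
	.dir = DMA_MEM_TO_DEV,		/* host-to-card MM queue */
};

static struct dma_slave_map foo_qdma_map[] = {
	{ "foo-dev", "h2c-0", QDMA_FILTER_PARAM(&foo_h2c_info) },
};

static struct qdma_platdata foo_qdma_pdata = {
	.max_mm_channels = 1,
	.irq_index = 0,
	.device_map = foo_qdma_map,
};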
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index f9c00f839e9f..085dd8e8af76 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -13,8 +13,6 @@
#include <linux/dmaengine.h>
-extern void s3c64xx_ac97_setup_gpio(int);
-
struct samsung_i2s_type {
/* If the Primary DAI has 5.1 Channels */
#define QUIRK_PRI_6CHAN (1 << 0)
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
deleted file mode 100644
index d8f8738629d2..000000000000
--- a/include/linux/platform_data/bcmgenet.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
-#define __LINUX_PLATFORM_DATA_BCMGENET_H__
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include <linux/phy.h>
-
-struct bcmgenet_platform_data {
- bool mdio_enabled;
- phy_interface_t phy_interface;
- int phy_address;
- int phy_speed;
- int phy_duplex;
- u8 mac_address[ETH_ALEN];
- int genet_version;
-};
-
-#endif
diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h
deleted file mode 100644
index e55dab1d578b..000000000000
--- a/include/linux/platform_data/clk-davinci-pll.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock driver for TI Davinci SoCs
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
-#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
-
-#include <linux/regmap.h>
-
-/**
- * davinci_pll_platform_data
- * @cfgchip: CFGCHIP syscon regmap
- */
-struct davinci_pll_platform_data {
- struct regmap *cfgchip;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ecc47d5fe239..69294f79cc88 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -1312,6 +1312,38 @@ enum ec_feature_code {
* The EC supports the AP composing VDMs for us to send.
*/
EC_FEATURE_TYPEC_AP_VDM_SEND = 46,
+ /*
+ * The EC supports system safe mode panic recovery.
+ */
+ EC_FEATURE_SYSTEM_SAFE_MODE = 47,
+ /*
+ * The EC will reboot on runtime assertion failures.
+ */
+ EC_FEATURE_ASSERT_REBOOTS = 48,
+ /*
+ * The EC image is built with tokenized logging enabled.
+ */
+ EC_FEATURE_TOKENIZED_LOGGING = 49,
+ /*
+ * The EC supports triggering an STB dump.
+ */
+ EC_FEATURE_AMD_STB_DUMP = 50,
+ /*
+ * The EC supports memory dump commands.
+ */
+ EC_FEATURE_MEMORY_DUMP = 51,
+ /*
+ * The EC supports DP2.1 capability
+ */
+ EC_FEATURE_TYPEC_DP2_1 = 52,
+ /*
+ * The MCU is System Companion Processor Core 1
+ */
+ EC_FEATURE_SCP_C1 = 53,
+ /*
+ * The EC supports UCSI PPM.
+ */
+ EC_FEATURE_UCSI_PPM = 54,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
@@ -1793,6 +1825,16 @@ struct ec_response_pwm_get_duty {
uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
} __ec_align2;
+#define EC_CMD_PWM_GET_FAN_DUTY 0x0027
+
+struct ec_params_pwm_get_fan_duty {
+ uint8_t fan_idx;
+} __ec_align1;
+
+struct ec_response_pwm_get_fan_duty {
+	uint32_t percent; /* Duty cycle percentage, ranging from 0 to 100 */
+} __ec_align4;
+
/*****************************************************************************/
/*
* Lightbar commands. This looks worse than it is. Since we only use one HOST
@@ -2356,6 +2398,12 @@ enum motionsense_command {
*/
MOTIONSENSE_CMD_SENSOR_SCALE = 18,
+ /*
+ * Activity management
+ * Retrieve current status of given activity.
+ */
+ MOTIONSENSE_CMD_GET_ACTIVITY = 20,
+
/* Number of motionsense sub-commands. */
MOTIONSENSE_NUM_CMDS
};
@@ -2415,6 +2463,11 @@ enum motionsensor_orientation {
MOTIONSENSE_ORIENTATION_UNKNOWN = 4,
};
+struct ec_response_activity_data {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+} __ec_todo_packed;
+
struct ec_response_motion_sensor_data {
/* Flags for each sensor. */
uint8_t flags;
@@ -2428,8 +2481,7 @@ struct ec_response_motion_sensor_data {
uint32_t timestamp;
};
struct __ec_todo_unpacked {
- uint8_t activity; /* motionsensor_activity */
- uint8_t state;
+ struct ec_response_activity_data activity_data;
int16_t add_info[2];
};
};
@@ -2462,6 +2514,7 @@ enum motionsensor_activity {
MOTIONSENSE_ACTIVITY_SIG_MOTION = 1,
MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2,
MOTIONSENSE_ACTIVITY_ORIENTATION = 3,
+ MOTIONSENSE_ACTIVITY_BODY_DETECTION = 4,
};
struct ec_motion_sense_activity {
@@ -2639,6 +2692,7 @@ struct ec_params_motion_sense {
uint32_t max_data_vector;
} fifo_read;
+ /* Used for MOTIONSENSE_CMD_SET_ACTIVITY */
struct ec_motion_sense_activity set_activity;
/* Used for MOTIONSENSE_CMD_LID_ANGLE */
@@ -2684,6 +2738,12 @@ struct ec_params_motion_sense {
*/
int16_t hys_degree;
} tablet_mode_threshold;
+
+ /* Used for MOTIONSENSE_CMD_GET_ACTIVITY */
+ struct __ec_todo_unpacked {
+ uint8_t sensor_num;
+ uint8_t activity; /* enum motionsensor_activity */
+ } get_activity;
};
} __ec_todo_packed;
@@ -2801,6 +2861,10 @@ struct ec_response_motion_sense {
uint16_t hys_degree;
} tablet_mode_threshold;
+	/* Used for MOTIONSENSE_CMD_GET_ACTIVITY. */
+ struct __ec_todo_unpacked {
+ uint8_t state;
+ } get_activity;
};
} __ec_todo_packed;
@@ -3073,14 +3137,31 @@ struct ec_params_thermal_set_threshold_v1 {
/****************************************************************************/
-/* Toggle automatic fan control */
+/* Set or get fan control mode */
#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052
+enum ec_auto_fan_ctrl_cmd {
+ EC_AUTO_FAN_CONTROL_CMD_SET = 0,
+ EC_AUTO_FAN_CONTROL_CMD_GET,
+};
+
/* Version 1 of input params */
struct ec_params_auto_fan_ctrl_v1 {
uint8_t fan_idx;
} __ec_align1;
+/* Version 2 of input params */
+struct ec_params_auto_fan_ctrl_v2 {
+ uint8_t fan_idx;
+ uint8_t cmd; /* enum ec_auto_fan_ctrl_cmd */
+	uint8_t set_auto; /* bool; only used with EC_AUTO_FAN_CONTROL_CMD_SET */
+} __ec_align4;
+
+struct ec_response_auto_fan_control {
+ uint8_t is_auto; /* bool */
+} __ec_align1;
+
/* Get/Set TMP006 calibration data */
#define EC_CMD_TMP006_GET_CALIBRATION 0x0053
#define EC_CMD_TMP006_SET_CALIBRATION 0x0054
@@ -3463,6 +3544,34 @@ union __ec_align_offset1 ec_response_get_next_data_v1 {
};
BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16);
+union __ec_align_offset1 ec_response_get_next_data_v3 {
+ uint8_t key_matrix[18];
+
+ /* Unaligned */
+ uint32_t host_event;
+ uint64_t host_event64;
+
+ struct __ec_todo_unpacked {
+ /* For aligning the fifo_info */
+ uint8_t reserved[3];
+ struct ec_response_motion_sense_fifo_info info;
+ } sensor_fifo;
+
+ uint32_t buttons;
+
+ uint32_t switches;
+
+ uint32_t fp_events;
+
+ uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+
+ uint8_t cec_message[16];
+};
+BUILD_ASSERT(sizeof(union ec_response_get_next_data_v3) == 18);
+
struct ec_response_get_next_event {
uint8_t event_type;
/* Followed by event data if any */
@@ -3475,6 +3584,12 @@ struct ec_response_get_next_event_v1 {
union ec_response_get_next_data_v1 data;
} __ec_align1;
+struct ec_response_get_next_event_v3 {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data_v3 data;
+} __ec_align1;
+
/* Bit indices for buttons and switches.*/
/* Buttons */
#define EC_MKBP_POWER_BUTTON 0
@@ -3809,16 +3924,61 @@ struct ec_params_i2c_write {
* discharge the battery.
*/
#define EC_CMD_CHARGE_CONTROL 0x0096
-#define EC_VER_CHARGE_CONTROL 1
+#define EC_VER_CHARGE_CONTROL 3
enum ec_charge_control_mode {
CHARGE_CONTROL_NORMAL = 0,
CHARGE_CONTROL_IDLE,
CHARGE_CONTROL_DISCHARGE,
+ /* Add no more entry below. */
+ CHARGE_CONTROL_COUNT,
+};
+
+#define EC_CHARGE_MODE_TEXT \
+ { \
+ [CHARGE_CONTROL_NORMAL] = "NORMAL", \
+ [CHARGE_CONTROL_IDLE] = "IDLE", \
+ [CHARGE_CONTROL_DISCHARGE] = "DISCHARGE", \
+ }
+
+enum ec_charge_control_cmd {
+ EC_CHARGE_CONTROL_CMD_SET = 0,
+ EC_CHARGE_CONTROL_CMD_GET,
+};
+
+enum ec_charge_control_flag {
+ EC_CHARGE_CONTROL_FLAG_NO_IDLE = BIT(0),
};
struct ec_params_charge_control {
- uint32_t mode; /* enum charge_control_mode */
+ uint32_t mode; /* enum charge_control_mode */
+
+ /* Below are the fields added in V2. */
+ uint8_t cmd; /* enum ec_charge_control_cmd. */
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ /*
+ * Lower and upper thresholds for battery sustainer. This struct isn't
+ * named to avoid tainting foreign projects' name spaces.
+ *
+ * If charge mode is explicitly set (e.g. DISCHARGE), battery sustainer
+ * will be disabled. To disable battery sustainer, set mode=NORMAL,
+ * lower=-1, upper=-1.
+ */
+ struct {
+ int8_t lower; /* Display SoC in percentage. */
+ int8_t upper; /* Display SoC in percentage. */
+ } sustain_soc;
+} __ec_align4;
+
+/* Added in v2 */
+struct ec_response_charge_control {
+ uint32_t mode; /* enum charge_control_mode */
+ struct { /* Battery sustainer thresholds */
+ int8_t lower;
+ int8_t upper;
+ } sustain_soc;
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ uint8_t reserved;
} __ec_align4;
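For example, a v2+ host might enable the battery sustainer between 70%
and 80% display SoC with the following request (sketch):

struct ec_params_charge_control p = {
	.cmd = EC_CHARGE_CONTROL_CMD_SET,
	.mode = CHARGE_CONTROL_NORMAL,
	.sustain_soc = {
		.lower = 70,	/* resume charging below this SoC */
		.upper = 80,	/* stop charging above this SoC */
	},
};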
/*****************************************************************************/
@@ -4933,8 +5093,12 @@ struct ec_response_pd_status {
#define PD_EVENT_POWER_CHANGE BIT(1)
#define PD_EVENT_IDENTITY_RECEIVED BIT(2)
#define PD_EVENT_DATA_SWAP BIT(3)
+#define PD_EVENT_TYPEC BIT(4)
+#define PD_EVENT_PPM BIT(5)
+#define PD_EVENT_INIT BIT(6)
+
struct ec_response_host_event_status {
- uint32_t status; /* PD MCU host event status */
+ uint32_t status; /* PD MCU host event status */
} __ec_align4;
/* Set USB type-C port role and muxes */
@@ -5994,6 +6158,29 @@ struct ec_response_typec_vdm_response {
#undef VDO_MAX_SIZE
+/*
+ * UCSI OPM-PPM commands
+ *
+ * These commands are used for communication between OPM and PPM.
+ * Only UCSI3.0 is tested.
+ */
+
+#define EC_CMD_UCSI_PPM_SET 0x0140
+
+/* The data size is stored in the host command protocol header. */
+struct ec_params_ucsi_ppm_set {
+ uint16_t offset;
+ uint8_t data[];
+} __ec_align2;
+
+#define EC_CMD_UCSI_PPM_GET 0x0141
+
+/* For 'GET' sub-commands, data will be returned as a raw payload. */
+struct ec_params_ucsi_ppm_get {
+ uint16_t offset;
+ uint8_t size;
+} __ec_align2;
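Illustrative only: a GET request for a 16-byte region at some PPM offset.
The offset value here is made up, not taken from the UCSI specification.

struct ec_params_ucsi_ppm_get req = {
	.offset = 0,	/* hypothetical offset into the PPM data region */
	.size = 16,
};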
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 8865e350c12a..de14923720a5 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -33,15 +33,26 @@
/*
* Max bus-specific overhead incurred by request/responses.
- * I2C requires 1 additional byte for requests.
- * I2C requires 2 additional bytes for responses.
- * SPI requires up to 32 additional bytes for responses.
+ *
+ * Request:
+ * - I2C requires 1 byte (see struct ec_host_request_i2c).
+ * - ISHTP requires 4 bytes (see struct cros_ish_out_msg).
+ *
+ * Response:
+ * - I2C requires 2 bytes (see struct ec_host_response_i2c).
+ * - ISHTP requires 4 bytes (see struct cros_ish_in_msg).
+ * - SPI requires 32 bytes (see EC_MSG_PREAMBLE_COUNT).
*/
#define EC_PROTO_VERSION_UNKNOWN 0
-#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_REQUEST_OVERHEAD 4
#define EC_MAX_RESPONSE_OVERHEAD 32
/*
+ * ACPI notify value for MKBP host event.
+ */
+#define ACPI_NOTIFY_CROS_EC_MKBP 0x80
+
+/*
* EC panic is not covered by the standard (0-F) ACPI notify values.
* Arbitrarily choosing B0 to notify ec panic, which is in the 84-BF
* device specific ACPI notify range.
@@ -117,6 +128,7 @@ struct cros_ec_command {
* @dout_size: Size of dout buffer to allocate (zero to use static dout).
* @wake_enabled: True if this device can wake the system from sleep.
* @suspended: True if this device had been suspended.
+ * @registered: True if this device had been registered.
* @cmd_xfer: Send command to EC and get response.
* Returns the number of bytes received if the communication
* succeeded, but that doesn't mean the EC was happy with the
@@ -175,6 +187,7 @@ struct cros_ec_device {
int dout_size;
bool wake_enabled;
bool suspended;
+ bool registered;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
int (*pkt_xfer)(struct cros_ec_device *ec,
@@ -185,7 +198,7 @@ struct cros_ec_device {
bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
- struct ec_response_get_next_event_v1 event_data;
+ struct ec_response_get_next_event_v3 event_data;
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
@@ -246,6 +259,8 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
+int cros_ec_rwsig_continue(struct cros_ec_device *ec_dev);
+
int cros_ec_query_all(struct cros_ec_device *ec_dev);
int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
@@ -261,6 +276,12 @@ int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
size_t outsize, void *indata, size_t insize);
+int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest);
+
+int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd);
+
+bool cros_ec_device_registered(struct cros_ec_device *ec_dev);
+
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
deleted file mode 100644
index 5dc9d2be384b..000000000000
--- a/include/linux/platform_data/cyttsp4.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP4_H_
-#define _CYTTSP4_H_
-
-#define CYTTSP4_MT_NAME "cyttsp4_mt"
-#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
-#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
-
-#define CY_TOUCH_SETTINGS_MAX 32
-
-struct touch_framework {
- const uint16_t *abs;
- uint8_t size;
- uint8_t enable_vkeys;
-} __packed;
-
-struct cyttsp4_mt_platform_data {
- struct touch_framework *frmwrk;
- unsigned short flags;
- char const *inp_dev_name;
-};
-
-struct touch_settings {
- const uint8_t *data;
- uint32_t size;
- uint8_t tag;
-} __packed;
-
-struct cyttsp4_core_platform_data {
- int irq_gpio;
- int rst_gpio;
- int level_irq_udelay;
- int (*xres)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- int (*init)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev);
- int (*power)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev, atomic_t *ignore_irq);
- int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
-};
-
-struct cyttsp4_platform_data {
- struct cyttsp4_core_platform_data *core_pdata;
- struct cyttsp4_mt_platform_data *mt_pdata;
-};
-
-#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
deleted file mode 100644
index eb9805bb3fe8..000000000000
--- a/include/linux/platform_data/dma-ep93xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-
-/*
- * M2P channels.
- *
- * Note that these values are also directly used for setting the PPALLOC
- * register.
- */
-#define EP93XX_DMA_I2S1 0
-#define EP93XX_DMA_I2S2 1
-#define EP93XX_DMA_AAC1 2
-#define EP93XX_DMA_AAC2 3
-#define EP93XX_DMA_AAC3 4
-#define EP93XX_DMA_I2S3 5
-#define EP93XX_DMA_UART1 6
-#define EP93XX_DMA_UART2 7
-#define EP93XX_DMA_UART3 8
-#define EP93XX_DMA_IRDA 9
-/* M2M channels */
-#define EP93XX_DMA_SSP 10
-#define EP93XX_DMA_IDE 11
-
-/**
- * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
- * @port: peripheral which is requesting the channel
- * @direction: TX/RX channel
- * @name: optional name for the channel, this is displayed in /proc/interrupts
- *
- * This information is passed as private channel parameter in a filter
- * function. Note that this is only needed for slave/cyclic channels. For
- * memcpy channels %NULL data should be passed.
- */
-struct ep93xx_dma_data {
- int port;
- enum dma_transfer_direction direction;
- const char *name;
-};
-
-/**
- * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
- * @name: name of the channel, used for getting the right clock for the channel
- * @base: mapped registers
- * @irq: interrupt number used by this channel
- */
-struct ep93xx_dma_chan_data {
- const char *name;
- void __iomem *base;
- int irq;
-};
-
-/**
- * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
- * @channels: array of channels which are passed to the driver
- * @num_channels: number of channels in the array
- *
- * This structure is passed to the DMA engine driver via platform data. For
- * M2P channels, contract is that even channels are for TX and odd for RX.
- * There is no requirement for the M2M channels.
- */
-struct ep93xx_dma_platform_data {
- struct ep93xx_dma_chan_data *channels;
- size_t num_channels;
-};
-
-static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
-}
-
-/**
- * ep93xx_dma_chan_direction - returns direction the channel can be used
- * @chan: channel
- *
- * This function can be used in filter functions to find out whether the
- * channel supports given DMA direction. Only M2P channels have such
- * limitation, for M2M channels the direction is configurable.
- */
-static inline enum dma_transfer_direction
-ep93xx_dma_chan_direction(struct dma_chan *chan)
-{
- if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_TRANS_NONE;
-
- /* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-}
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index 95d852aef130..726d89143842 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -36,9 +36,13 @@ struct omap_dm_timer_ops {
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger, int autoreload);
int (*get_pwm_status)(struct omap_dm_timer *timer);
+ int (*set_cap)(struct omap_dm_timer *timer,
+ int autoreload, bool config_period);
+ int (*get_cap_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ unsigned int (*read_cap)(struct omap_dm_timer *timer, bool is_period);
int (*write_counter)(struct omap_dm_timer *timer,
unsigned int value);
unsigned int (*read_status)(struct omap_dm_timer *timer);
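
The new capture hooks extend the ops table that dmtimer clients receive through their platform data. A hypothetical consumer sketch follows; the capture semantics (the autoreload argument and get_cap_status() returning nonzero once a value is latched) are assumptions, not documented behaviour:

/* Sketch: arm capture, poll its status, then read the captured period. */
static int example_read_capture(const struct omap_dm_timer_ops *ops,
				struct omap_dm_timer *timer)
{
	int ret = ops->set_cap(timer, 1, true);	/* autoreload, period mode */

	if (ret)
		return ret;
	if (ops->get_cap_status(timer) <= 0)
		return -EAGAIN;			/* nothing captured yet */

	return ops->read_cap(timer, true);	/* is_period = true */
}
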
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
index 54d672dd6f7d..76043a97f975 100644
--- a/include/linux/platform_data/emc2305.h
+++ b/include/linux/platform_data/emc2305.h
@@ -9,14 +9,20 @@
* struct emc2305_platform_data - EMC2305 driver platform data
* @max_state: maximum cooling state of the cooling device;
* @pwm_num: number of active channels;
+ * @pwm_output_mask: PWM output mask
+ * @pwm_polarity_mask: PWM polarity mask
* @pwm_separate: separate PWM settings for every channel;
* @pwm_min: array of minimum PWM per channel;
+ * @pwm_freq: array of PWM frequency per channel
*/
struct emc2305_platform_data {
u8 max_state;
u8 pwm_num;
+ u8 pwm_output_mask;
+ u8 pwm_polarity_mask;
bool pwm_separate;
u8 pwm_min[EMC2305_PWM_MAX];
+ u16 pwm_freq[EMC2305_PWM_MAX];
};
#endif
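
The two new masks and the per-channel frequency array slot into the existing platform data as sketched below; all values are illustrative, EMC2305_PWM_MAX is assumed to be 5 (one entry per fan channel), and pwm_freq is taken to be in Hz:

static struct emc2305_platform_data example_emc2305_pdata = {
	.max_state         = 10,
	.pwm_num           = 5,
	.pwm_output_mask   = 0x1f,	/* all five PWM outputs enabled */
	.pwm_polarity_mask = 0x00,	/* normal polarity on every channel */
	.pwm_separate      = true,
	.pwm_min           = { 10, 10, 10, 10, 10 },
	.pwm_freq          = { 25000, 25000, 25000, 25000, 25000 },
};
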
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
deleted file mode 100644
index 8eef637a804d..000000000000
--- a/include/linux/platform_data/eth-ep93xx.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
-#define _LINUX_PLATFORM_DATA_ETH_EP93XX
-
-struct ep93xx_eth_data {
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
deleted file mode 100644
index 3ea6dd942c27..000000000000
--- a/include/linux/platform_data/gpio-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Atheros AR7XXX/AR9XXX GPIO controller platform data
- *
- * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-
-struct ath79_gpio_platform_data {
- unsigned ngpios;
- bool oe_inverted;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
deleted file mode 100644
index b82e44662efe..000000000000
--- a/include/linux/platform_data/gpio-davinci.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * DaVinci GPIO Platform Related Defines
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- */
-
-#ifndef __DAVINCI_GPIO_PLATFORM_H
-#define __DAVINCI_GPIO_PLATFORM_H
-
-struct davinci_gpio_platform_data {
- bool no_auto_base;
- u32 base;
- u32 ngpio;
- u32 gpio_unbanked;
-};
-
-/* Convert GPIO signal to GPIO pin number */
-#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-
-#endif
diff --git a/include/linux/platform_data/huawei-gaokun-ec.h b/include/linux/platform_data/huawei-gaokun-ec.h
new file mode 100644
index 000000000000..faa15d315128
--- /dev/null
+++ b/include/linux/platform_data/huawei-gaokun-ec.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei Matebook E Go Embedded Controller
+ *
+ * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#ifndef __HUAWEI_GAOKUN_EC_H__
+#define __HUAWEI_GAOKUN_EC_H__
+
+#define GAOKUN_UCSI_CCI_SIZE 4
+#define GAOKUN_UCSI_MSGI_SIZE 16
+#define GAOKUN_UCSI_READ_SIZE (GAOKUN_UCSI_CCI_SIZE + GAOKUN_UCSI_MSGI_SIZE)
+#define GAOKUN_UCSI_WRITE_SIZE 24 /* 8B CTRL, 16B MSGO */
+
+#define GAOKUN_UCSI_NO_PORT_UPDATE (-1)
+
+#define GAOKUN_SMART_CHARGE_DATA_SIZE 4 /* mode, delay, start, end */
+
+/* -------------------------------------------------------------------------- */
+
+struct gaokun_ec;
+struct gaokun_ucsi_reg;
+struct notifier_block;
+
+#define GAOKUN_MOD_NAME "huawei_gaokun_ec"
+#define GAOKUN_DEV_PSY "psy"
+#define GAOKUN_DEV_UCSI "ucsi"
+
+/* -------------------------------------------------------------------------- */
+/* Common API */
+
+int gaokun_ec_register_notify(struct gaokun_ec *ec,
+ struct notifier_block *nb);
+void gaokun_ec_unregister_notify(struct gaokun_ec *ec,
+ struct notifier_block *nb);
+
+int gaokun_ec_read(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp);
+int gaokun_ec_write(struct gaokun_ec *ec, const u8 *req);
+int gaokun_ec_read_byte(struct gaokun_ec *ec, const u8 *req, u8 *byte);
+
+/* -------------------------------------------------------------------------- */
+/* API for PSY */
+
+int gaokun_ec_psy_multi_read(struct gaokun_ec *ec, u8 reg,
+ size_t resp_len, u8 *resp);
+
+static inline int gaokun_ec_psy_read_byte(struct gaokun_ec *ec,
+ u8 reg, u8 *byte)
+{
+ return gaokun_ec_psy_multi_read(ec, reg, sizeof(*byte), byte);
+}
+
+static inline int gaokun_ec_psy_read_word(struct gaokun_ec *ec,
+ u8 reg, u16 *word)
+{
+ return gaokun_ec_psy_multi_read(ec, reg, sizeof(*word), (u8 *)word);
+}
+
+int gaokun_ec_psy_get_smart_charge(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_SMART_CHARGE_DATA_SIZE]);
+int gaokun_ec_psy_set_smart_charge(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_SMART_CHARGE_DATA_SIZE]);
+
+int gaokun_ec_psy_get_smart_charge_enable(struct gaokun_ec *ec, bool *on);
+int gaokun_ec_psy_set_smart_charge_enable(struct gaokun_ec *ec, bool on);
+
+/* -------------------------------------------------------------------------- */
+/* API for UCSI */
+
+int gaokun_ec_ucsi_read(struct gaokun_ec *ec, u8 resp[GAOKUN_UCSI_READ_SIZE]);
+int gaokun_ec_ucsi_write(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_UCSI_WRITE_SIZE]);
+
+int gaokun_ec_ucsi_get_reg(struct gaokun_ec *ec, struct gaokun_ucsi_reg *ureg);
+int gaokun_ec_ucsi_pan_ack(struct gaokun_ec *ec, int port_id);
+
+#endif /* __HUAWEI_GAOKUN_EC_H__ */
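
A minimal consumer sketch for the common API, assuming a valid struct gaokun_ec handle is handed to a hypothetical sub-driver (e.g. via an auxiliary device); the event handler body is a placeholder:

#include <linux/notifier.h>

static int example_ec_event(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ec_event,
};

static int example_init(struct gaokun_ec *ec)
{
	u8 charge[GAOKUN_SMART_CHARGE_DATA_SIZE];
	int ret;

	ret = gaokun_ec_register_notify(ec, &example_nb);
	if (ret)
		return ret;

	/* charge[] holds mode, delay, start, end, per the define above */
	return gaokun_ec_psy_get_smart_charge(ec, charge);
}
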
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index 1707ad4147df..7d21e0c41037 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -33,14 +33,4 @@ struct s3c_hwmon_pdata {
struct s3c_hwmon_chcfg *in[8];
};
-/**
- * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device
- * @pd: Platform data to register to device.
- *
- * Register the given platform data for use with the S3C HWMON device.
- * The call will copy the platform data, so the board definitions can
- * make the structure itself __initdata.
- */
-extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
-
#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
deleted file mode 100644
index 98967df07468..000000000000
--- a/include/linux/platform_data/i2c-davinci.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * DaVinci I2C controller platform_device info
- *
- * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
-*/
-
-#ifndef __ASM_ARCH_I2C_H
-#define __ASM_ARCH_I2C_H
-
-/* All frequencies are expressed in kHz */
-struct davinci_i2c_platform_data {
- unsigned int bus_freq; /* standard bus frequency (kHz) */
- unsigned int bus_delay; /* post-transaction delay (usec) */
- bool gpio_recovery; /* Use GPIO recovery method */
- bool has_pfunc; /* Chip has a ICPFUNC register */
-};
-
-/* for board setup code */
-void davinci_init_i2c(struct davinci_i2c_platform_data *);
-
-#endif /* __ASM_ARCH_I2C_H */
diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h
index 816a4cd3ccb5..96843aab4d1e 100644
--- a/include/linux/platform_data/i2c-mux-gpio.h
+++ b/include/linux/platform_data/i2c-mux-gpio.h
@@ -19,6 +19,7 @@
* position
* @n_values: Number of multiplexer positions (busses to instantiate)
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ * @settle_time: Delay to wait when a new bus is selected
*/
struct i2c_mux_gpio_platform_data {
int parent;
@@ -26,6 +27,7 @@ struct i2c_mux_gpio_platform_data {
const unsigned *values;
int n_values;
unsigned idle;
+ u32 settle_time;
};
#endif /* _LINUX_I2C_MUX_GPIO_H */
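
For board code, the new field simply joins the existing initializer. A sketch with illustrative values, assuming settle_time is in microseconds (mirroring the settle-time-us devicetree property):

static const unsigned example_mux_values[] = { 0, 1 };

static struct i2c_mux_gpio_platform_data example_mux_pdata = {
	.parent      = 1,			/* parent adapter number */
	.values      = example_mux_values,
	.n_values    = ARRAY_SIZE(example_mux_values),
	.idle        = I2C_MUX_GPIO_NO_IDLE,
	.settle_time = 100,			/* wait 100 us after switching */
};
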
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
deleted file mode 100644
index 5e3ff653900c..000000000000
--- a/include/linux/platform_data/keyboard-spear.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar <rajeevkumar.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_KEYBOARD_H
-#define __PLAT_KEYBOARD_H
-
-#include <linux/bitops.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/types.h>
-
-#define DECLARE_9x9_KEYMAP(_name) \
-int _name[] = { \
- KEY(0, 0, KEY_ESC), \
- KEY(0, 1, KEY_1), \
- KEY(0, 2, KEY_2), \
- KEY(0, 3, KEY_3), \
- KEY(0, 4, KEY_4), \
- KEY(0, 5, KEY_5), \
- KEY(0, 6, KEY_6), \
- KEY(0, 7, KEY_7), \
- KEY(0, 8, KEY_8), \
- KEY(1, 0, KEY_9), \
- KEY(1, 1, KEY_MINUS), \
- KEY(1, 2, KEY_EQUAL), \
- KEY(1, 3, KEY_BACKSPACE), \
- KEY(1, 4, KEY_TAB), \
- KEY(1, 5, KEY_Q), \
- KEY(1, 6, KEY_W), \
- KEY(1, 7, KEY_E), \
- KEY(1, 8, KEY_R), \
- KEY(2, 0, KEY_T), \
- KEY(2, 1, KEY_Y), \
- KEY(2, 2, KEY_U), \
- KEY(2, 3, KEY_I), \
- KEY(2, 4, KEY_O), \
- KEY(2, 5, KEY_P), \
- KEY(2, 6, KEY_LEFTBRACE), \
- KEY(2, 7, KEY_RIGHTBRACE), \
- KEY(2, 8, KEY_ENTER), \
- KEY(3, 0, KEY_LEFTCTRL), \
- KEY(3, 1, KEY_A), \
- KEY(3, 2, KEY_S), \
- KEY(3, 3, KEY_D), \
- KEY(3, 4, KEY_F), \
- KEY(3, 5, KEY_G), \
- KEY(3, 6, KEY_H), \
- KEY(3, 7, KEY_J), \
- KEY(3, 8, KEY_K), \
- KEY(4, 0, KEY_L), \
- KEY(4, 1, KEY_SEMICOLON), \
- KEY(4, 2, KEY_APOSTROPHE), \
- KEY(4, 3, KEY_GRAVE), \
- KEY(4, 4, KEY_LEFTSHIFT), \
- KEY(4, 5, KEY_BACKSLASH), \
- KEY(4, 6, KEY_Z), \
- KEY(4, 7, KEY_X), \
- KEY(4, 8, KEY_C), \
- KEY(5, 0, KEY_V), \
- KEY(5, 1, KEY_B), \
- KEY(5, 2, KEY_N), \
- KEY(5, 3, KEY_M), \
- KEY(5, 4, KEY_COMMA), \
- KEY(5, 5, KEY_DOT), \
- KEY(5, 6, KEY_SLASH), \
- KEY(5, 7, KEY_RIGHTSHIFT), \
- KEY(5, 8, KEY_KPASTERISK), \
- KEY(6, 0, KEY_LEFTALT), \
- KEY(6, 1, KEY_SPACE), \
- KEY(6, 2, KEY_CAPSLOCK), \
- KEY(6, 3, KEY_F1), \
- KEY(6, 4, KEY_F2), \
- KEY(6, 5, KEY_F3), \
- KEY(6, 6, KEY_F4), \
- KEY(6, 7, KEY_F5), \
- KEY(6, 8, KEY_F6), \
- KEY(7, 0, KEY_F7), \
- KEY(7, 1, KEY_F8), \
- KEY(7, 2, KEY_F9), \
- KEY(7, 3, KEY_F10), \
- KEY(7, 4, KEY_NUMLOCK), \
- KEY(7, 5, KEY_SCROLLLOCK), \
- KEY(7, 6, KEY_KP7), \
- KEY(7, 7, KEY_KP8), \
- KEY(7, 8, KEY_KP9), \
- KEY(8, 0, KEY_KPMINUS), \
- KEY(8, 1, KEY_KP4), \
- KEY(8, 2, KEY_KP5), \
- KEY(8, 3, KEY_KP6), \
- KEY(8, 4, KEY_KPPLUS), \
- KEY(8, 5, KEY_KP1), \
- KEY(8, 6, KEY_KP2), \
- KEY(8, 7, KEY_KP3), \
- KEY(8, 8, KEY_KP0), \
-}
-
-#define DECLARE_6x6_KEYMAP(_name) \
-int _name[] = { \
- KEY(0, 0, KEY_RESERVED), \
- KEY(0, 1, KEY_1), \
- KEY(0, 2, KEY_2), \
- KEY(0, 3, KEY_3), \
- KEY(0, 4, KEY_4), \
- KEY(0, 5, KEY_5), \
- KEY(1, 0, KEY_Q), \
- KEY(1, 1, KEY_W), \
- KEY(1, 2, KEY_E), \
- KEY(1, 3, KEY_R), \
- KEY(1, 4, KEY_T), \
- KEY(1, 5, KEY_Y), \
- KEY(2, 0, KEY_D), \
- KEY(2, 1, KEY_F), \
- KEY(2, 2, KEY_G), \
- KEY(2, 3, KEY_H), \
- KEY(2, 4, KEY_J), \
- KEY(2, 5, KEY_K), \
- KEY(3, 0, KEY_B), \
- KEY(3, 1, KEY_N), \
- KEY(3, 2, KEY_M), \
- KEY(3, 3, KEY_COMMA), \
- KEY(3, 4, KEY_DOT), \
- KEY(3, 5, KEY_SLASH), \
- KEY(4, 0, KEY_F6), \
- KEY(4, 1, KEY_F7), \
- KEY(4, 2, KEY_F8), \
- KEY(4, 3, KEY_F9), \
- KEY(4, 4, KEY_F10), \
- KEY(4, 5, KEY_NUMLOCK), \
- KEY(5, 0, KEY_KP2), \
- KEY(5, 1, KEY_KP3), \
- KEY(5, 2, KEY_KP0), \
- KEY(5, 3, KEY_KPDOT), \
- KEY(5, 4, KEY_RO), \
- KEY(5, 5, KEY_ZENKAKUHANKAKU), \
-}
-
-#define KEYPAD_9x9 0
-#define KEYPAD_6x6 1
-#define KEYPAD_2x2 2
-
-/**
- * struct kbd_platform_data - spear keyboard platform data
- * keymap: pointer to keymap data (table and size)
- * rep: enables key autorepeat
- * mode: choose keyboard support(9x9, 6x6, 2x2)
- * suspended_rate: rate at which keyboard would operate in suspended mode
- *
- * This structure is supposed to be used by platform code to supply
- * keymaps to drivers that implement keyboards.
- */
-struct kbd_platform_data {
- const struct matrix_keymap_data *keymap;
- bool rep;
- unsigned int mode;
- unsigned int suspended_rate;
-};
-
-#endif /* __PLAT_KEYBOARD_H */
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
deleted file mode 100644
index 3054fced8509..000000000000
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KEYPAD_EP93XX_H
-#define __KEYPAD_EP93XX_H
-
-struct matrix_keymap_data;
-
-/* flags for the ep93xx_keypad driver */
-#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
-#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
-#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
-#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
-
-/**
- * struct ep93xx_keypad_platform_data - platform specific device structure
- * @keymap_data: pointer to &matrix_keymap_data
- * @debounce: debounce start count; terminal count is 0xff
- * @prescale: row/column counter pre-scaler load value
- * @flags: see above
- */
-struct ep93xx_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
- unsigned int debounce;
- unsigned int prescale;
- unsigned int flags;
- unsigned int clk_rate;
-};
-
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
-
-#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
deleted file mode 100644
index 7efabbca1dca..000000000000
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
- *
- * ux500 Scroll key and Keypad Encoder (SKE) header
- */
-
-#ifndef __SKE_H
-#define __SKE_H
-
-#include <linux/input/matrix_keypad.h>
-
-/* register definitions for SKE peripheral */
-#define SKE_CR 0x00
-#define SKE_VAL0 0x04
-#define SKE_VAL1 0x08
-#define SKE_DBCR 0x0C
-#define SKE_IMSC 0x10
-#define SKE_RIS 0x14
-#define SKE_MIS 0x18
-#define SKE_ICR 0x1C
-
-/*
- * Keypad module
- */
-
-/**
- * struct keypad_platform_data - structure for platform specific data
- * @init: pointer to keypad init function
- * @exit: pointer to keypad deinitialisation function
- * @keymap_data: matrix scan code table for keycodes
- * @krow: maximum number of rows
- * @kcol: maximum number of columns
- * @debounce_ms: platform specific debounce time
- * @no_autorepeat: flag for auto repetition
- * @wakeup_enable: allow waking up the system
- */
-struct ske_keypad_platform_data {
- int (*init)(void);
- int (*exit)(void);
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_ms;
- bool no_autorepeat;
- bool wakeup_enable;
-};
-#endif /*__SKE_KPD_H*/
diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h
deleted file mode 100644
index a376442b9935..000000000000
--- a/include/linux/platform_data/keypad-pxa27x.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
-#define __ASM_ARCH_PXA27x_KEYPAD_H
-
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-
-#define MAX_MATRIX_KEY_ROWS (8)
-#define MAX_MATRIX_KEY_COLS (8)
-#define MATRIX_ROW_SHIFT (3)
-#define MAX_DIRECT_KEY_NUM (8)
-
-/* pxa3xx keypad platform specific parameters
- *
- * NOTE:
- * 1. direct_key_num indicates the number of keys in the direct keypad
- * _plus_ the number of rotary-encoder sensor inputs, this can be
- * left as 0 if only rotary encoders are enabled, the driver will
- * automatically calculate this
- *
- * 2. direct_key_map is the key code map for the direct keys, if rotary
- * encoder(s) are enabled, direct key 0/1(2/3) will be ignored
- *
- * 3. rotary can be either interpreted as a relative input event (e.g.
- * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT)
- *
- * 4. matrix key and direct key will use the same debounce_interval by
- * default, which should be sufficient in most cases
- *
- * pxa168 keypad platform specific parameter
- *
- * NOTE:
- * clear_wakeup_event callback is a workaround required to clear the
- * keypad interrupt. The keypad wake must be cleared in addition to
- * reading the MI/DI bits in the KPC register.
- */
-struct pxa27x_keypad_platform_data {
-
- /* code map for the matrix keys */
- const struct matrix_keymap_data *matrix_keymap_data;
- unsigned int matrix_key_rows;
- unsigned int matrix_key_cols;
-
- /* direct keys */
- int direct_key_num;
- unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
- /* the key output may be low active */
- int direct_key_low_active;
- /* give board a chance to choose the start direct key */
- unsigned int direct_key_mask;
-
- /* rotary encoders 0 */
- int enable_rotary0;
- int rotary0_rel_code;
- int rotary0_up_key;
- int rotary0_down_key;
-
- /* rotary encoders 1 */
- int enable_rotary1;
- int rotary1_rel_code;
- int rotary1_up_key;
- int rotary1_down_key;
-
- /* key debounce interval */
- unsigned int debounce_interval;
-
- /* clear wakeup event requirement for pxa168 */
- void (*clear_wakeup_event)(void);
-};
-
-extern void pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info);
-
-#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */
diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h
deleted file mode 100644
index 260d596ba0af..000000000000
--- a/include/linux/platform_data/keyscan-davinci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Texas Instruments, Inc
- *
- * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- */
-
-#ifndef DAVINCI_KEYSCAN_H
-#define DAVINCI_KEYSCAN_H
-
-#include <linux/io.h>
-
-enum davinci_matrix_types {
- DAVINCI_KEYSCAN_MATRIX_4X4,
- DAVINCI_KEYSCAN_MATRIX_5X3,
-};
-
-struct davinci_ks_platform_data {
- int (*device_enable)(struct device *dev);
- unsigned short *keymap;
- u32 keymapsize;
- u8 rep:1;
- u8 strobe;
- u8 interval;
- u8 matrix_type;
-};
-
-#endif
-
diff --git a/include/linux/platform_data/lenovo-yoga-c630.h b/include/linux/platform_data/lenovo-yoga-c630.h
new file mode 100644
index 000000000000..5d1f9fb33cfc
--- /dev/null
+++ b/include/linux/platform_data/lenovo-yoga-c630.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+
+#ifndef _LENOVO_YOGA_C630_DATA_H
+#define _LENOVO_YOGA_C630_DATA_H
+
+struct yoga_c630_ec;
+struct notifier_block;
+
+#define YOGA_C630_MOD_NAME "lenovo_yoga_c630"
+
+#define YOGA_C630_DEV_UCSI "ucsi"
+#define YOGA_C630_DEV_PSY "psy"
+
+int yoga_c630_ec_read8(struct yoga_c630_ec *ec, u8 addr);
+int yoga_c630_ec_read16(struct yoga_c630_ec *ec, u8 addr);
+
+int yoga_c630_ec_register_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+
+#define YOGA_C630_UCSI_WRITE_SIZE 8
+#define YOGA_C630_UCSI_CCI_SIZE 4
+#define YOGA_C630_UCSI_DATA_SIZE 16
+#define YOGA_C630_UCSI_READ_SIZE (YOGA_C630_UCSI_CCI_SIZE + YOGA_C630_UCSI_DATA_SIZE)
+
+u16 yoga_c630_ec_ucsi_get_version(struct yoga_c630_ec *ec);
+int yoga_c630_ec_ucsi_write(struct yoga_c630_ec *ec,
+ const u8 req[YOGA_C630_UCSI_WRITE_SIZE]);
+int yoga_c630_ec_ucsi_read(struct yoga_c630_ec *ec,
+ u8 resp[YOGA_C630_UCSI_READ_SIZE]);
+
+#define LENOVO_EC_EVENT_USB 0x20
+#define LENOVO_EC_EVENT_UCSI 0x21
+#define LENOVO_EC_EVENT_HPD 0x22
+#define LENOVO_EC_EVENT_BAT_STATUS 0x24
+#define LENOVO_EC_EVENT_BAT_INFO 0x25
+#define LENOVO_EC_EVENT_BAT_ADPT_STATUS 0x37
+
+#endif
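
A hedged consumer sketch, assuming a valid struct yoga_c630_ec handle; the register address is hypothetical:

#include <linux/printk.h>

static int example_query(struct yoga_c630_ec *ec)
{
	int val = yoga_c630_ec_read16(ec, 0x10);	/* hypothetical EC register */

	if (val < 0)
		return val;

	pr_info("EC reg 0x10 = 0x%04x, UCSI version %u\n",
		val, yoga_c630_ec_ucsi_get_version(ec));
	return 0;
}
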
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ab222dd05bbc..3b4a891acefe 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -124,12 +124,12 @@ struct lp855x_rom_data {
};
/**
- * struct lp855x_platform_data
+ * struct lp855x_platform_data - lp855 platform-specific data
* @name : Backlight driver name. If it is not defined, default name is set.
* @device_control : value of DEVICE CONTROL register
* @initial_brightness : initial value of backlight brightness
* @period_ns : platform specific pwm period value. unit is nano.
- Only valid when mode is PWM_BASED.
+ * Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
*/
diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h
deleted file mode 100644
index 65bfdb4fdc15..000000000000
--- a/include/linux/platform_data/max6639.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_MAX6639_H
-#define _LINUX_MAX6639_H
-
-#include <linux/types.h>
-
-/* platform data for the MAX6639 temperature sensor and fan control */
-
-struct max6639_platform_data {
- bool pwm_polarity; /* Polarity low (0) or high (1, default) */
- int ppr; /* Pulses per rotation 1..4 (default == 2) */
- int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
-};
-
-#endif /* _LINUX_MAX6639_H */
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
deleted file mode 100644
index 6fbb70005541..000000000000
--- a/include/linux/platform_data/max6697.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * max6697.h
- * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
- */
-
-#ifndef MAX6697_H
-#define MAX6697_H
-
-#include <linux/types.h>
-
-/*
- * For all bit masks:
- * bit 0: local temperature
- * bit 1..7: remote temperatures
- */
-struct max6697_platform_data {
- bool smbus_timeout_disable; /* set to disable SMBus timeouts */
- bool extended_range_enable; /* set to enable extended temp range */
- bool beta_compensation; /* set to enable beta compensation */
- u8 alert_mask; /* set bit to 1 to disable alert */
- u8 over_temperature_mask; /* set bit to 1 to disable */
- u8 resistance_cancellation; /* set bit to 0 to disable
- * bit mask for MAX6581,
- * boolean for other chips
- */
- u8 ideality_mask; /* set bit to 0 to disable */
- u8 ideality_value; /* transistor ideality as per
- * MAX6581 datasheet
- */
-};
-
-#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
deleted file mode 100644
index fcc6f2a1f5c3..000000000000
--- a/include/linux/platform_data/mcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- */
-
-#ifndef __LINUX_MCS_H
-#define __LINUX_MCS_H
-
-#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
-#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
-#define MCS_KEY_CODE(v) ((v) & 0xffff)
-
-struct mcs_platform_data {
- void (*poweron)(bool);
- void (*cfg_pin)(void);
-
- /* touchscreen */
- unsigned int x_size;
- unsigned int y_size;
-
- /* touchkey */
- const u32 *keymap;
- unsigned int keymap_size;
- unsigned int key_maxval;
- bool no_autorepeat;
-};
-
-#endif /* __LINUX_MCS_H */
diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h
deleted file mode 100644
index 2a511a8fcda7..000000000000
--- a/include/linux/platform_data/media/omap4iss.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H
-#define ARCH_ARM_PLAT_OMAP4_ISS_H
-
-#include <linux/i2c.h>
-
-struct iss_device;
-
-enum iss_interface_type {
- ISS_INTERFACE_CSI2A_PHY1,
- ISS_INTERFACE_CSI2B_PHY2,
-};
-
-/**
- * struct iss_csiphy_lane: CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct iss_csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISS_CSIPHY1_NUM_DATA_LANES 4
-#define ISS_CSIPHY2_NUM_DATA_LANES 1
-
-/**
- * struct iss_csiphy_lanes_cfg - CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct iss_csiphy_lanes_cfg {
- struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES];
- struct iss_csiphy_lane clk;
-};
-
-/**
- * struct iss_csi2_platform_data - CSI2 interface platform data
- * @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
- */
-struct iss_csi2_platform_data {
- unsigned crc:1;
- unsigned vpclk_div:2;
- struct iss_csiphy_lanes_cfg lanecfg;
-};
-
-struct iss_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct iss_v4l2_subdevs_group {
- struct iss_subdev_i2c_board_info *subdevs;
- enum iss_interface_type interface;
- union {
- struct iss_csi2_platform_data csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct iss_platform_data {
- struct iss_v4l2_subdevs_group *subdevs;
- void (*set_constraints)(struct iss_device *iss, bool enable);
-};
-
-#endif
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 8c659db4da6b..028781ad4059 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -23,11 +23,14 @@
#include <linux/platform_data/dsa.h>
enum ksz_chip_id {
+ KSZ8463_CHIP_ID = 0x8463,
KSZ8563_CHIP_ID = 0x8563,
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
KSZ8765_CHIP_ID = 0x8765,
- KSZ8830_CHIP_ID = 0x8830,
+ KSZ88X3_CHIP_ID = 0x8830,
+ KSZ8864_CHIP_ID = 0x8864,
+ KSZ8895_CHIP_ID = 0x8895,
KSZ9477_CHIP_ID = 0x00947700,
KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
@@ -40,6 +43,7 @@ enum ksz_chip_id {
LAN9372_CHIP_ID = 0x00937200,
LAN9373_CHIP_ID = 0x00937300,
LAN9374_CHIP_ID = 0x00937400,
+ LAN9646_CHIP_ID = 0x00964600,
};
struct ksz_platform_data {
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 0b9f81a6f753..f6cca7a035c7 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -209,7 +209,7 @@ struct mlxreg_core_platform_data {
* @items: same type components with the hotplug capability;
* @irq: platform interrupt number;
* @regmap: register map of parent device;
- * @counter: number of the components with the hotplug capability;
+ * @count: number of the components with the hotplug capability;
* @cell: location of top aggregation interrupt register;
* @mask: top aggregation interrupt common mask;
* @cell_low: location of low aggregation interrupt register;
@@ -224,7 +224,7 @@ struct mlxreg_core_hotplug_platform_data {
struct mlxreg_core_item *items;
int irq;
void *regmap;
- int counter;
+ int count;
u32 cell;
u32 mask;
u32 cell_low;
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 7e44e84e7150..652f323b5ecc 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -7,6 +7,7 @@
struct device;
struct mmc_host;
+struct property_entry;
struct pxamci_platform_data {
unsigned int ocr_mask; /* available voltages */
@@ -18,7 +19,8 @@ struct pxamci_platform_data {
bool gpio_card_ro_invert; /* gpio ro is inverted */
};
-extern void pxa_set_mci_info(struct pxamci_platform_data *info);
+extern void pxa_set_mci_info(const struct pxamci_platform_data *info,
+ const struct property_entry *props);
extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info);
extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info);
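
With the new signature, board files pass generic device properties alongside the platform data. A minimal sketch with illustrative values:

#include <linux/init.h>
#include <linux/mmc/host.h>
#include <linux/property.h>

static const struct property_entry example_mci_props[] = {
	PROPERTY_ENTRY_U32("bus-width", 4),
	{ }
};

static const struct pxamci_platform_data example_mci_pdata = {
	.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
};

static void __init example_board_init(void)
{
	pxa_set_mci_info(&example_mci_pdata, example_mci_props);
}
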
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
deleted file mode 100644
index a49826214a39..000000000000
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * TI DaVinci AEMIF support
- *
- * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _MACH_DAVINCI_AEMIF_H
-#define _MACH_DAVINCI_AEMIF_H
-
-#include <linux/platform_device.h>
-
-#define NRCSR_OFFSET 0x00
-#define AWCCR_OFFSET 0x04
-#define A1CR_OFFSET 0x10
-
-#define ACR_ASIZE_MASK 0x3
-#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
-
-/* All timings in nanoseconds */
-struct davinci_aemif_timing {
- u8 wsetup;
- u8 wstrobe;
- u8 whold;
-
- u8 rsetup;
- u8 rstrobe;
- u8 rhold;
-
- u8 ta;
-};
-
-#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
deleted file mode 100644
index dd474dd44848..000000000000
--- a/include/linux/platform_data/mtd-davinci.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mach-davinci/nand.h
- *
- * Copyright © 2006 Texas Instruments.
- *
- * Ported to 2.6.23 Copyright © 2008 by
- * Sander Huijsen <Shuijsen@optelecom-nkf.com>
- * Troy Kisky <troy.kisky@boundarydevices.com>
- * Dirk Behme <Dirk.Behme@gmail.com>
- *
- * --------------------------------------------------------------------------
- */
-
-#ifndef __ARCH_ARM_DAVINCI_NAND_H
-#define __ARCH_ARM_DAVINCI_NAND_H
-
-#include <linux/mtd/rawnand.h>
-
-#define NANDFCR_OFFSET 0x60
-#define NANDFSR_OFFSET 0x64
-#define NANDF1ECC_OFFSET 0x70
-
-/* 4-bit ECC syndrome registers */
-#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
-#define NAND_4BIT_ECC1_OFFSET 0xc0
-#define NAND_4BIT_ECC2_OFFSET 0xc4
-#define NAND_4BIT_ECC3_OFFSET 0xc8
-#define NAND_4BIT_ECC4_OFFSET 0xcc
-#define NAND_ERR_ADD1_OFFSET 0xd0
-#define NAND_ERR_ADD2_OFFSET 0xd4
-#define NAND_ERR_ERRVAL1_OFFSET 0xd8
-#define NAND_ERR_ERRVAL2_OFFSET 0xdc
-
-/* NOTE: boards don't need to use these address bits
- * for ALE/CLE unless they support booting from NAND.
- * They're used unless platform data overrides them.
- */
-#define MASK_ALE 0x08
-#define MASK_CLE 0x10
-
-struct davinci_nand_pdata { /* platform_data */
- uint32_t mask_ale;
- uint32_t mask_cle;
-
- /*
- * 0-indexed chip-select number of the asynchronous
- * interface to which the NAND device has been connected.
- *
- * So, if you have NAND connected to CS3 of DA850, you
- * will pass '1' here. Since the asynchronous interface
- * on DA850 starts from CS2.
- */
- uint32_t core_chipsel;
-
- /* for packages using two chipselects */
- uint32_t mask_chipsel;
-
- /* board's default static partition info */
- struct mtd_partition *parts;
- unsigned nr_parts;
-
- /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_ENGINE_TYPE_SOFT
- * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
- *
- * All DaVinci-family chips support 1-bit hardware ECC.
- * Newer ones also support 4-bit ECC, but are awkward
- * using it with large page chips.
- */
- enum nand_ecc_engine_type engine_type;
- enum nand_ecc_placement ecc_placement;
- u8 ecc_bits;
-
- /* e.g. NAND_BUSWIDTH_16 */
- unsigned options;
- /* e.g. NAND_BBT_USE_FLASH */
- unsigned bbt_options;
-
- /* Main and mirror bbt descriptor overrides */
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- /* Access timings */
- struct davinci_aemif_timing *timing;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
deleted file mode 100644
index 25390fc3e795..000000000000
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - NAND device controller platform_device info
-*/
-
-#ifndef __MTD_NAND_S3C2410_H
-#define __MTD_NAND_S3C2410_H
-
-#include <linux/mtd/rawnand.h>
-
-/**
- * struct s3c2410_nand_set - define a set of one or more nand chips
- * @flash_bbt: Openmoko u-boot can create a Bad Block Table
- * Setting this flag will allow the kernel to
- * look for it at boot time and also skip the NAND
- * scan.
- * @options: Default value to set into 'struct nand_chip' options.
- * @nr_chips: Number of chips in this set
- * @nr_partitions: Number of partitions pointed to by @partitions
- * @name: Name of set (optional)
- * @nr_map: Map for low-layer logical to physical chip numbers (option)
- * @partitions: The mtd partition list
- *
- * define a set of one or more nand chips registered with an unique mtd. Also
- * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger
- * a warning at boot time.
- */
-struct s3c2410_nand_set {
- unsigned int flash_bbt:1;
-
- unsigned int options;
- int nr_chips;
- int nr_partitions;
- char *name;
- int *nr_map;
- struct mtd_partition *partitions;
- struct device_node *of_node;
-};
-
-struct s3c2410_platform_nand {
- /* timing information for controller, all times in nanoseconds */
-
- int tacls; /* time for active CLE/ALE to nWE/nOE */
- int twrph0; /* active time for nWE/nOE */
- int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
-
- unsigned int ignore_unset_ecc:1;
-
- enum nand_ecc_engine_type engine_type;
-
- int nr_sets;
- struct s3c2410_nand_set *sets;
-
- void (*select_chip)(struct s3c2410_nand_set *,
- int chip);
-};
-
-/**
- * s3c_nand_set_platdata() - register NAND platform data.
- * @nand: The NAND platform data to register with s3c_device_nand.
- *
- * This function copies the given NAND platform data, @nand and registers
- * it with the s3c_device_nand. This allows @nand to be __initdata.
-*/
-extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand);
-
-#endif /*__MTD_NAND_S3C2410_H */
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
index 8b79ab08af45..a88096bc74e4 100644
--- a/include/linux/platform_data/sa11x0-serial.h
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -10,7 +10,6 @@
#define SA11X0_SERIAL_H
struct uart_port;
-struct uart_info;
/*
* This is a temporary structure for registering these
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
deleted file mode 100644
index 2cb5cc70fd9d..000000000000
--- a/include/linux/platform_data/spi-davinci.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009 Texas Instruments.
- */
-
-#ifndef __ARCH_ARM_DAVINCI_SPI_H
-#define __ARCH_ARM_DAVINCI_SPI_H
-
-#include <linux/platform_data/edma.h>
-
-#define SPI_INTERN_CS 0xFF
-
-enum {
- SPI_VERSION_1, /* For DM355/DM365/DM6467 */
- SPI_VERSION_2, /* For DA8xx */
-};
-
-/**
- * davinci_spi_platform_data - Platform data for SPI master device on DaVinci
- *
- * @version: version of the SPI IP. Different DaVinci devices have slightly
- * varying versions of the same IP.
- * @num_chipselect: number of chipselects supported by this SPI master
- * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
- * controller withn the SoC. Possible values are 0 and 1.
- * @cshold_bug: set this to true if the SPI controller on your chip requires
- * a write to CSHOLD bit in between transfers (like in DM355).
- * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
- * device on the bus.
- */
-struct davinci_spi_platform_data {
- u8 version;
- u8 num_chipselect;
- u8 intr_line;
- u8 prescaler_limit;
- bool cshold_bug;
- enum dma_event_q dma_event_q;
-};
-
-/**
- * davinci_spi_config - Per-chip-select configuration for SPI slave devices
- *
- * @wdelay: amount of delay between transmissions. Measured in number of
- * SPI module clocks.
- * @odd_parity: polarity of parity flag at the end of transmit data stream.
- * 0 - odd parity, 1 - even parity.
- * @parity_enable: enable transmission of parity at end of each transmit
- * data stream.
- * @io_type: type of IO transfer. Choose between polled, interrupt and DMA.
- * @timer_disable: disable chip-select timers (setup and hold)
- * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks.
- * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks.
- * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured
- * in number of SPI clocks.
- * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in
- * number of SPI clocks.
- */
-struct davinci_spi_config {
- u8 wdelay;
- u8 odd_parity;
- u8 parity_enable;
-#define SPI_IO_TYPE_INTR 0
-#define SPI_IO_TYPE_POLL 1
-#define SPI_IO_TYPE_DMA 2
- u8 io_type;
- u8 timer_disable;
- u8 c2tdelay;
- u8 t2cdelay;
- u8 t2edelay;
- u8 c2edelay;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_SPI_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
deleted file mode 100644
index b439f2a896e0..000000000000
--- a/include/linux/platform_data/spi-ep93xx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_EP93XX_SPI_H
-#define __ASM_MACH_EP93XX_SPI_H
-
-struct spi_device;
-
-/**
- * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @use_dma: use DMA for the transfers
- */
-struct ep93xx_spi_info {
- bool use_dma;
-};
-
-#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h
deleted file mode 100644
index 2c089dd3e2bd..000000000000
--- a/include/linux/platform_data/syscon.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLATFORM_DATA_SYSCON_H
-#define PLATFORM_DATA_SYSCON_H
-
-struct syscon_platform_data {
- const char *label;
-};
-
-#endif
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
deleted file mode 100644
index 77625251df07..000000000000
--- a/include/linux/platform_data/ti-aemif.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI DaVinci AEMIF platform glue.
- *
- * Copyright (C) 2017 BayLibre SAS
- *
- * Author:
- * Bartosz Golaszewski <bgolaszewski@baylibre.com>
- */
-
-#ifndef __TI_DAVINCI_AEMIF_DATA_H__
-#define __TI_DAVINCI_AEMIF_DATA_H__
-
-#include <linux/of_platform.h>
-
-/**
- * struct aemif_abus_data - Async bus configuration parameters.
- *
- * @cs - Chip-select number.
- */
-struct aemif_abus_data {
- u32 cs;
-};
-
-/**
- * struct aemif_platform_data - Data to set up the TI aemif driver.
- *
- * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
- * subdevices.
- * @cs_offset: Lowest allowed chip-select number.
- * @abus_data: Array of async bus configuration entries.
- * @num_abus_data: Number of abus entries.
- * @sub_devices: Array of platform subdevices.
- * @num_sub_devices: Number of subdevices.
- */
-struct aemif_platform_data {
- struct of_dev_auxdata *dev_lookup;
- u32 cs_offset;
- struct aemif_abus_data *abus_data;
- size_t num_abus_data;
- struct platform_device *sub_devices;
- size_t num_sub_devices;
-};
-
-#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/platform_data/tmio.h b/include/linux/platform_data/tmio.h
new file mode 100644
index 000000000000..426291713b83
--- /dev/null
+++ b/include/linux/platform_data/tmio.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_TMIO_H
+#define MFD_TMIO_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* TMIO MMC platform flags */
+
+/*
+ * Some controllers can support a 2-byte block size when the bus width is
+ * configured in 4-bit mode.
+ */
+#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
+
+/* Some controllers can support SDIO IRQ signalling */
+#define TMIO_MMC_SDIO_IRQ BIT(2)
+
+/* Some features are only available or tested on R-Car Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2 BIT(3)
+
+/*
+ * Some controllers require waiting for the SD bus to become idle before
+ * writing to some registers.
+ */
+#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
+
+/*
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
+ */
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
+
+/* Some controllers have CMD12 automatically issue/non-issue register */
+#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
+
+/* Controller has some SDIO status bits which must be 1 */
+#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
+
+/* Some controllers have a 32-bit wide data port register */
+#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
+
+/* Some controllers allows to set SDx actual clock */
+#define TMIO_MMC_CLK_ACTUAL BIT(10)
+
+/* Some controllers have a CBSY bit */
+#define TMIO_MMC_HAVE_CBSY BIT(11)
+
+/* Some controllers have a 64-bit wide data port register */
+#define TMIO_MMC_64BIT_DATA_PORT BIT(12)
+
+struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
+ unsigned int hclk;
+ unsigned long capabilities;
+ unsigned long capabilities2;
+ unsigned long flags;
+ u32 ocr_mask; /* available voltages */
+ dma_addr_t dma_rx_offset;
+ unsigned int max_blk_count;
+ unsigned short max_segs;
+};
+#endif
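
A glue driver would fill the structure roughly as follows; the clock rate, capability and flag choices are illustrative only:

#include <linux/mmc/host.h>

static struct tmio_mmc_data example_tmio_data = {
	.hclk          = 100000000,		/* 100 MHz controller clock */
	.capabilities  = MMC_CAP_SD_HIGHSPEED,
	.flags         = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
	.ocr_mask      = MMC_VDD_32_33 | MMC_VDD_33_34,
	.max_blk_count = 0xffff,
};
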
diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h
deleted file mode 100644
index bf8d3b9d7c6a..000000000000
--- a/include/linux/platform_data/touchscreen-s3c2410.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
-*/
-
-#ifndef __TOUCHSCREEN_S3C2410_H
-#define __TOUCHSCREEN_S3C2410_H
-
-struct s3c2410_ts_mach_info {
- int delay;
- int presc;
- int oversampling_shift;
- void (*cfg_gpio)(struct platform_device *dev);
-};
-
-extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
-extern void s3c64xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
-
-/* defined by architecture to configure gpio */
-extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev);
-
-#endif /*__TOUCHSCREEN_S3C2410_H */
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
deleted file mode 100644
index 879f5c78b91a..000000000000
--- a/include/linux/platform_data/usb-davinci.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * USB related definitions
- *
- * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_USB_H
-#define __ASM_ARCH_USB_H
-
-/* Passed as the platform data to the OHCI driver */
-struct da8xx_ohci_root_hub {
- /* Time from power on to power good (in 2 ms units) */
- u8 potpgt;
-};
-
-void davinci_setup_usb(unsigned mA, unsigned potpgt_ms);
-
-#endif /* ifndef __ASM_ARCH_USB_H */
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index 6333bac166a5..38c24c77ba43 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -150,7 +150,6 @@ struct pxafb_mach_info {
};
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
-unsigned long pxafb_get_hsync_time(struct device *dev);
/* smartpanel related */
#define SMART_CMD_A0 (0x1 << 8)
diff --git a/include/linux/platform_data/x86/amd-fch.h b/include/linux/platform_data/x86/amd-fch.h
new file mode 100644
index 000000000000..2cf5153edbc2
--- /dev/null
+++ b/include/linux/platform_data/x86/amd-fch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_FCH_H_
+#define _ASM_X86_AMD_FCH_H_
+
+#define FCH_PM_BASE 0xFED80300
+
+/* Register offsets from PM base: */
+#define FCH_PM_DECODEEN 0x00
+#define FCH_PM_DECODEEN_SMBUS0SEL GENMASK(20, 19)
+#define FCH_PM_SCRATCH 0x80
+#define FCH_PM_S5_RESET_STATUS 0xC0
+
+#endif /* _ASM_X86_AMD_FCH_H_ */
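
A minimal sketch of how a driver might consume these defines, mapping the PM block and extracting the SMBus decode field; the mapping size is an assumption:

#include <linux/bitfield.h>
#include <linux/io.h>

static u32 example_smbus0sel(void)
{
	void __iomem *pm = ioremap(FCH_PM_BASE, 0x100);	/* size assumed */
	u32 val;

	if (!pm)
		return 0;
	val = readl(pm + FCH_PM_DECODEEN);
	iounmap(pm);
	return FIELD_GET(FCH_PM_DECODEEN_SMBUS0SEL, val);
}
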
diff --git a/include/linux/platform_data/x86/asus-wmi-leds-ids.h b/include/linux/platform_data/x86/asus-wmi-leds-ids.h
new file mode 100644
index 000000000000..034a039c4e37
--- /dev/null
+++ b/include/linux/platform_data/x86/asus-wmi-leds-ids.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
+#define __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
+
+#include <linux/dmi.h>
+#include <linux/types.h>
+
+/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */
+#if IS_REACHABLE(CONFIG_ASUS_WMI) || IS_REACHABLE(CONFIG_HID_ASUS)
+static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
+ },
+ },
+ { },
+};
+#endif
+
+#endif /* __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H */
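
Both drivers can then make the ownership decision with a single DMI scan, e.g.:

#include <linux/dmi.h>

/* true if the HID transport, not asus-wmi, should own kbd_brightness */
static bool example_kbd_led_is_hid(void)
{
	return dmi_check_system(asus_use_hid_led_dmi_ids);
}
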
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 3eb5cd6773ad..419491d4abca 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -5,6 +5,9 @@
#include <linux/errno.h>
#include <linux/types.h>
+#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
+#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
+
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
@@ -51,6 +54,10 @@
#define ASUS_WMI_DEVID_LED6 0x00020016
#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
+/* Disable Camera LED */
+#define ASUS_WMI_DEVID_CAMERA_LED_NEG 0x00060078 /* 0 = on (unused) */
+#define ASUS_WMI_DEVID_CAMERA_LED 0x00060079 /* 1 = on */
+
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
@@ -58,20 +65,24 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+#define ASUS_WMI_DEVID_OOBE 0x0005002F
/* This can only be used to disable the screen, not re-enable */
#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
/* Writing a brightness re-enables the screen if disabled */
#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019
/* Misc */
+#define ASUS_WMI_DEVID_PANEL_HD 0x0005001C
#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E
+#define ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS 0x0005002A
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -96,7 +107,7 @@
#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
-#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_PPT_PL3_FPPT 0x001200C1
#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
@@ -126,6 +137,11 @@
/* dgpu on/off */
#define ASUS_WMI_DEVID_DGPU 0x00090020
+#define ASUS_WMI_DEVID_APU_MEM 0x000600C1
+
+#define ASUS_WMI_DEVID_DGPU_BASE_TGP 0x00120099
+#define ASUS_WMI_DEVID_DGPU_SET_TGP 0x00120098
+
/* gpu mux switch, 0 = dGPU, 1 = Optimus */
#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
#define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026
@@ -150,9 +166,33 @@
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+enum asus_ally_mcu_hack {
+ ASUS_WMI_ALLY_MCU_HACK_INIT,
+ ASUS_WMI_ALLY_MCU_HACK_ENABLED,
+ ASUS_WMI_ALLY_MCU_HACK_DISABLED,
+};
+
#if IS_REACHABLE(CONFIG_ASUS_WMI)
+void set_ally_mcu_hack(enum asus_ally_mcu_hack status);
+void set_ally_mcu_powersave(bool enabled);
+int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval);
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval);
int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
#else
+static inline void set_ally_mcu_hack(enum asus_ally_mcu_hack status)
+{
+}
+static inline void set_ally_mcu_powersave(bool enabled)
+{
+}
+static inline int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval)
+{
+ return -ENODEV;
+}
+static inline int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval)
+{
+ return -ENODEV;
+}
static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
u32 *retval)
{
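
A short sketch of the newly exported accessors, using the camera LED DEVID documented above (1 = on); when CONFIG_ASUS_WMI is not reachable, the stub returns -ENODEV:

static int example_set_camera_led(bool on)
{
	u32 retval;

	return asus_wmi_set_devstate(ASUS_WMI_DEVID_CAMERA_LED,
				     on ? 1 : 0, &retval);
}
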
diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h
new file mode 100644
index 000000000000..b1b837583d54
--- /dev/null
+++ b/include/linux/platform_data/x86/int3472.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel INT3472 ACPI camera sensor power-management support
+ *
+ * Author: Dan Scally <djrscally@gmail.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_INT3472_H
+#define __PLATFORM_DATA_X86_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+#define INT3472_GPIO_TYPE_HANDSHAKE 0x12
+#define INT3472_GPIO_TYPE_HOTPLUG_DETECT 0x13
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+#define INT3472_MAX_REGULATORS 3
+
+/* E.g. "avdd\0" */
+#define GPIO_SUPPLY_NAME_LENGTH 5
+/* 12 chars for acpi_dev_name() + "-", e.g. "ABCD1234:00-" */
+#define GPIO_REGULATOR_NAME_LENGTH (12 + GPIO_SUPPLY_NAME_LENGTH)
+/* lower- and upper-case mapping */
+#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 2
+/*
+ * Ensure the GPIO is driven low/high for at least 2 ms before changing.
+ *
+ * 2 ms has been chosen because it is the minimum time ovXXXX sensors need to
+ * have their reset line driven logical high to properly register a reset.
+ */
+#define GPIO_REGULATOR_ENABLE_TIME (2 * USEC_PER_MSEC)
+#define GPIO_REGULATOR_OFF_ON_DELAY (2 * USEC_PER_MSEC)
+
+#define INT3472_LED_MAX_NAME_LEN 32
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _ops, _enable_time, _off_on_delay) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ .enable_time = _enable_time, \
+ .off_on_delay = _off_on_delay, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct dmi_system_id;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[10];
+ u8 clock_source;
+ u8 reserved2[17];
+};
+
+struct int3472_discrete_quirks {
+ /* For models where AVDD GPIO is shared between sensors */
+ const char *avdd_second_sensor;
+};
+
+struct int3472_gpio_regulator {
+ /* SUPPLY_MAP_COUNT * 2 to make room for second sensor mappings */
+ struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2];
+ char supply_name_upper[GPIO_SUPPLY_NAME_LENGTH];
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ struct int3472_gpio_regulator regulators[INT3472_MAX_REGULATORS];
+
+ struct int3472_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ u32 frequency;
+ u8 imgclk_index;
+ } clock;
+
+ struct int3472_pled {
+ struct led_classdev classdev;
+ struct led_lookup_data lookup;
+ char name[INT3472_LED_MAX_NAME_LEN];
+ struct gpio_desc *gpio;
+ } pled;
+
+ struct int3472_discrete_quirks quirks;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ unsigned int n_regulator_gpios; /* how many have we mapped to a regulator */
+ struct gpiod_lookup_table gpios;
+};
+
+extern const struct dmi_system_id skl_int3472_discrete_quirks[];
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
+
+int int3472_discrete_parse_crs(struct int3472_discrete_device *int3472);
+void int3472_discrete_cleanup(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio);
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio,
+ unsigned int enable_time,
+ const char *supply_name,
+ const char *second_sensor);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio);
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
+
+#endif
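
The INT3472_REGULATOR() initializer pairs with the enable/off-on delays defined above. A sketch, where example_gpio_regulator_ops is a placeholder ops table:

static const struct regulator_ops example_gpio_regulator_ops;

static void example_fill_rdesc(struct int3472_gpio_regulator *reg)
{
	reg->rdesc = INT3472_REGULATOR(reg->regulator_name,
				       &example_gpio_regulator_ops,
				       GPIO_REGULATOR_ENABLE_TIME,
				       GPIO_REGULATOR_OFF_ON_DELAY);
}
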
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/x86/intel-mid_wdt.h
index 8dba70b4b020..e5c0210d0fec 100644
--- a/include/linux/platform_data/intel-mid_wdt.h
+++ b/include/linux/platform_data/x86/intel-mid_wdt.h
@@ -6,8 +6,8 @@
* Contact: David Cohen <david.a.cohen@linux.intel.com>
*/
-#ifndef __INTEL_MID_WDT_H__
-#define __INTEL_MID_WDT_H__
+#ifndef __PLATFORM_X86_INTEL_MID_WDT_H_
+#define __PLATFORM_X86_INTEL_MID_WDT_H_
#include <linux/platform_device.h>
@@ -16,4 +16,4 @@ struct intel_mid_wdt_pdata {
int (*probe)(struct platform_device *pdev);
};
-#endif /*__INTEL_MID_WDT_H__*/
+#endif /* __PLATFORM_X86_INTEL_MID_WDT_H_ */
diff --git a/include/linux/platform_data/x86/intel_pmc_ipc.h b/include/linux/platform_data/x86/intel_pmc_ipc.h
new file mode 100644
index 000000000000..85ea381e4a27
--- /dev/null
+++ b/include/linux/platform_data/x86/intel_pmc_ipc.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+#ifndef INTEL_PMC_IPC_H
+#define INTEL_PMC_IPC_H
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+
+#define IPC_SOC_REGISTER_ACCESS 0xAA
+#define IPC_SOC_SUB_CMD_READ 0x00
+#define IPC_SOC_SUB_CMD_WRITE 0x01
+#define PMC_IPCS_PARAM_COUNT 7
+#define VALID_IPC_RESPONSE 5
+
+struct pmc_ipc_cmd {
+ u32 cmd;
+ u32 sub_cmd;
+ u32 size;
+ u32 wbuf[4];
+};
+
+struct pmc_ipc_rbuf {
+ u32 buf[4];
+};
+
+/**
+ * intel_pmc_ipc() - PMC IPC Mailbox accessor
+ * @ipc_cmd: Prepared input command to send
+ * @rbuf: Allocated array for returned IPC data
+ *
+ * Return: 0 on success, non-zero on mailbox error.
+ */
+static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf *rbuf)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object params[PMC_IPCS_PARAM_COUNT] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ };
+ struct acpi_object_list arg_list = { PMC_IPCS_PARAM_COUNT, params };
+ int status;
+
+ if (!ipc_cmd || !rbuf)
+ return -EINVAL;
+
+ /*
+ * 0: IPC Command
+ * 1: IPC Sub Command
+ * 2: Size
+ * 3-6: Write Buffer for offset
+ */
+ params[0].integer.value = ipc_cmd->cmd;
+ params[1].integer.value = ipc_cmd->sub_cmd;
+ params[2].integer.value = ipc_cmd->size;
+ params[3].integer.value = ipc_cmd->wbuf[0];
+ params[4].integer.value = ipc_cmd->wbuf[1];
+ params[5].integer.value = ipc_cmd->wbuf[2];
+ params[6].integer.value = ipc_cmd->wbuf[3];
+
+ status = acpi_evaluate_object(NULL, "\\IPCS", &arg_list, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ union acpi_object *obj __free(kfree) = buffer.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_PACKAGE &&
+ obj->package.count == VALID_IPC_RESPONSE) {
+ const union acpi_object *objs = obj->package.elements;
+
+ if ((u8)objs[0].integer.value != 0)
+ return -EINVAL;
+
+ rbuf->buf[0] = objs[1].integer.value;
+ rbuf->buf[1] = objs[2].integer.value;
+ rbuf->buf[2] = objs[3].integer.value;
+ rbuf->buf[3] = objs[4].integer.value;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif /* CONFIG_ACPI */
+}
+
+#endif /* INTEL_PMC_IPC_H */
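As a usage illustration (not part of this patch): a caller fills a pmc_ipc_cmd and reads the reply out of the rbuf. The helper name pmc_read_reg(), and the assumptions that wbuf[0] carries a register offset and that size is the byte width of the access, are illustrative only; just the constants and types come from the header above.

#include <linux/platform_data/x86/intel_pmc_ipc.h>

/* Hypothetical helper: read one 32-bit SoC register via the PMC mailbox. */
static int pmc_read_reg(u32 offset, u32 *value)
{
	struct pmc_ipc_cmd cmd = {
		.cmd = IPC_SOC_REGISTER_ACCESS,
		.sub_cmd = IPC_SOC_SUB_CMD_READ,
		.size = sizeof(*value),	/* assumed: byte count of the access */
		.wbuf = { offset },	/* assumed: wbuf[0] holds the offset */
	};
	struct pmc_ipc_rbuf rbuf = {};
	int ret;

	ret = intel_pmc_ipc(&cmd, &rbuf);
	if (ret)
		return ret;	/* -EINVAL or -ENODEV from the accessor */

	*value = rbuf.buf[0];
	return 0;
}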
diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h
new file mode 100644
index 000000000000..b287627759f7
--- /dev/null
+++ b/include/linux/platform_data/x86/intel_scu_ipc.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_
+#define __PLATFORM_X86_INTEL_SCU_IPC_H_
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+struct intel_scu_ipc_dev;
+
+/**
+ * struct intel_scu_ipc_data - Data used to configure SCU IPC
+ * @mem: Base address of SCU IPC MMIO registers
+ * @irq: The IRQ number used for SCU (optional)
+ */
+struct intel_scu_ipc_data {
+ struct resource mem;
+ int irq;
+};
+
+struct intel_scu_ipc_dev *
+__intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define intel_scu_ipc_register(parent, scu_data) \
+ __intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu);
+
+struct intel_scu_ipc_dev *
+__devm_intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define devm_intel_scu_ipc_register(parent, scu_data) \
+ __devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void);
+void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu);
+struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev);
+
+int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 *data);
+int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data);
+int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+
+int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data, u8 mask);
+
+int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub);
+int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ size_t size, void *out, size_t outlen);
+
+static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+ return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen,
+ inlen, out, outlen);
+}
+
+#endif
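For orientation, a sketch of how a parent driver might register an SCU IPC instance with this API; the MMIO range, IRQ index, register address, and value are placeholders, not taken from any real platform.

#include <linux/platform_data/x86/intel_scu_ipc.h>
#include <linux/platform_device.h>

static int example_scu_parent_probe(struct platform_device *pdev)
{
	struct intel_scu_ipc_data scu_data = {
		.mem = DEFINE_RES_MEM(0xff000000, 0x100),	/* placeholder */
		.irq = platform_get_irq_optional(pdev, 0),	/* optional */
	};
	struct intel_scu_ipc_dev *scu;

	/* Managed variant: unregistered automatically on driver unbind. */
	scu = devm_intel_scu_ipc_register(&pdev->dev, &scu_data);
	if (IS_ERR(scu))
		return PTR_ERR(scu);

	/* Consumers can then access SCU registers, e.g.: */
	return intel_scu_ipc_dev_iowrite8(scu, 0x10, 0x01);	/* placeholders */
}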
diff --git a/include/linux/platform_data/x86/pwm-lpss.h b/include/linux/platform_data/x86/pwm-lpss.h
index 752c06b47cc8..f0349edb47f4 100644
--- a/include/linux/platform_data/x86/pwm-lpss.h
+++ b/include/linux/platform_data/x86/pwm-lpss.h
@@ -15,9 +15,36 @@ struct pwm_lpss_boardinfo {
unsigned int npwm;
unsigned long base_unit_bits;
/*
- * Some versions of the IP may get stuck in the state machine if enable
- * bit is not set, and hence update bit will show busy status till
- * the reset. For the rest it may be otherwise.
+ * NOTE:
+ * Intel Broxton, Apollo Lake, and Gemini Lake have different programming flow.
+ *
+ * Initial Enable or First Activation
+ * 1. Program the base unit and on time divisor values.
+ * 2. Set the software update bit.
+ * 3. Poll in a loop on the PWMCTRL bit until software update bit is cleared.+
+ * 4. Enable the PWM output by setting PWM Enable.
+ * 5. Repeat the above steps for the next PWM Module.
+ *
+ * Dynamic update while PWM is Enabled
+ * 1. Program the base unit and on-time divisor values.
+ * 2. Set the software update bit.
+ * 3. Repeat the above steps for the next PWM module.
+ *
+ * + After setting PWMCTRL register's SW update bit, hardware automatically
+ * deasserts the SW update bit after a brief delay. It was observed that
+ * setting of PWM enable is typically done via read-modify-write of the PWMCTRL
+ * register. If there is no/little delay between setting software update bit
+ * and setting enable bit via read-modify-write, it is possible that the read
+ * could return with software update as 1. In that case, the last write to set
+ * enable to 1 could also set sw_update to 1. If this happens, sw_update gets
+ * stuck and the driver code can hang as it explicitly waits for sw_update bit
+ * to be 0 after setting the enable bit to 1. To avoid this race condition,
+ * SW should poll on the software update bit to make sure that it is 0 before
+ * doing the read-modify-write to set the enable bit to 1.
+ *
+ * Also, we noted that if the sw_update bit was set in step #1 above, then when
+ * it is set again in step #2, sw_update never gets cleared and the flow hangs.
+ * As such, we need to make sure that sw_update bit is 0 when doing step #1.
*/
bool bypass;
/*
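A sketch of the race-free enable sequence the note above prescribes; the register bit positions and the 500 us poll timeout are assumptions for illustration, not the pwm-lpss driver's actual values.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define PWM_SW_UPDATE	BIT(30)	/* assumed bit positions */
#define PWM_ENABLE	BIT(31)

static int pwm_lpss_safe_enable(void __iomem *pwmctrl, u32 on_time_div)
{
	u32 ctrl;
	int ret;

	/* Make sure sw_update is 0 before step 1 (see last paragraph above). */
	ret = readl_poll_timeout(pwmctrl, ctrl, !(ctrl & PWM_SW_UPDATE), 1, 500);
	if (ret)
		return ret;

	writel(on_time_div | PWM_SW_UPDATE, pwmctrl);	/* steps 1 and 2 */

	/* Step 3: wait for HW to deassert sw_update before touching enable. */
	ret = readl_poll_timeout(pwmctrl, ctrl, !(ctrl & PWM_SW_UPDATE), 1, 500);
	if (ret)
		return ret;

	/* Step 4: the read-modify-write of the enable bit is now race-free. */
	writel(readl(pwmctrl) | PWM_ENABLE, pwmctrl);
	return 0;
}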
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
index a5705189e2ac..f981907a5cb0 100644
--- a/include/linux/platform_data/x86/soc.h
+++ b/include/linux/platform_data/x86/soc.h
@@ -20,7 +20,7 @@
static inline bool soc_intel_is_##soc(void) \
{ \
static const struct x86_cpu_id soc##_cpu_ids[] = { \
- X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \
+ X86_MATCH_VFM(type, NULL), \
{} \
}; \
const struct x86_cpu_id *id; \
@@ -31,11 +31,11 @@ static inline bool soc_intel_is_##soc(void) \
return false; \
}
-SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
-SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
-SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
-SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
-SOC_INTEL_IS_CPU(cml, KABYLAKE_L);
+SOC_INTEL_IS_CPU(byt, INTEL_ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, INTEL_ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, INTEL_ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, INTEL_ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, INTEL_KABYLAKE_L);
#undef SOC_INTEL_IS_CPU
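Call sites are unaffected by the X86_MATCH_VFM() conversion; a consumer still just tests the generated helper, e.g.:

#include <linux/platform_data/x86/soc.h>

static bool example_needs_atom_quirk(void)
{
	return soc_intel_is_byt() || soc_intel_is_cht();
}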
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
deleted file mode 100644
index 2463a4a856a6..000000000000
--- a/include/linux/platform_data/zforce_ts.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* drivers/input/touchscreen/zforce.c
- *
- * Copyright (C) 2012-2013 MundoReader S.L.
- */
-
-#ifndef _LINUX_INPUT_ZFORCE_TS_H
-#define _LINUX_INPUT_ZFORCE_TS_H
-
-struct zforce_ts_platdata {
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 7a41c72c1959..813da101b5bf 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -52,7 +52,7 @@ struct platform_device {
extern int platform_device_register(struct platform_device *);
extern void platform_device_unregister(struct platform_device *);
-extern struct bus_type platform_bus_type;
+extern const struct bus_type platform_bus_type;
extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
@@ -80,7 +80,7 @@ static inline void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
@@ -88,20 +88,22 @@ static inline void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
static inline void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name)
{
- return ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
}
#endif
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
+extern int platform_get_irq_affinity(struct platform_device *, unsigned int,
+ const struct cpumask **);
extern int platform_irq_count(struct platform_device *);
extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
struct irq_affinity *affd,
@@ -232,21 +234,11 @@ extern int platform_device_add_data(struct platform_device *pdev,
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
+DEFINE_FREE(platform_device_put, struct platform_device *, if (_T) platform_device_put(_T))
struct platform_driver {
int (*probe)(struct platform_device *);
-
- /*
- * Traditionally the remove callback returned an int which however is
- * ignored by the driver core. This led to wrong expectations by driver
- * authors who thought returning an error code was a valid error
- * handling strategy. To convert to a callback returning void, new
- * drivers should implement .remove_new() until the conversion is done
- * that eventually makes .remove() return void.
- */
- int (*remove)(struct platform_device *);
- void (*remove_new)(struct platform_device *);
-
+ void (*remove)(struct platform_device *);
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
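A sketch of what the new DEFINE_FREE() enables: scope-based cleanup of a platform device reference. The device name is a placeholder; return_ptr() comes from <linux/cleanup.h>.

#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct platform_device *example_register_child(void)
{
	struct platform_device *pdev __free(platform_device_put) =
		platform_device_alloc("example-dev", PLATFORM_DEVID_NONE);
	int ret;

	if (!pdev)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add(pdev);
	if (ret)
		return ERR_PTR(ret);	/* pdev is put automatically here */

	return_ptr(pdev);	/* success: hand the reference to the caller */
}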
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index f5492ed413f3..855b28340e95 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -9,6 +9,7 @@
#ifndef _PLATFORM_PROFILE_H_
#define _PLATFORM_PROFILE_H_
+#include <linux/device.h>
#include <linux/bitops.h>
/*
@@ -23,20 +24,38 @@ enum platform_profile_option {
PLATFORM_PROFILE_BALANCED,
PLATFORM_PROFILE_BALANCED_PERFORMANCE,
PLATFORM_PROFILE_PERFORMANCE,
+ PLATFORM_PROFILE_MAX_POWER,
+ PLATFORM_PROFILE_CUSTOM,
PLATFORM_PROFILE_LAST, /* must always be last */
};
-struct platform_profile_handler {
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
- int (*profile_get)(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile);
- int (*profile_set)(struct platform_profile_handler *pprof,
- enum platform_profile_option profile);
+/**
+ * struct platform_profile_ops - platform profile operations
+ * @probe: Callback to setup choices available to the new class device. These
+ * choices will only be enforced when setting a new profile, not when
+ * getting the current one.
+ * @hidden_choices: Callback to setup choices that are not visible to the user
+ * but can be set by the driver.
+ * @profile_get: Callback that will be called when showing the current platform
+ * profile in sysfs.
+ * @profile_set: Callback that will be called when storing a new platform
+ * profile in sysfs.
+ */
+struct platform_profile_ops {
+ int (*probe)(void *drvdata, unsigned long *choices);
+ int (*hidden_choices)(void *drvdata, unsigned long *choices);
+ int (*profile_get)(struct device *dev, enum platform_profile_option *profile);
+ int (*profile_set)(struct device *dev, enum platform_profile_option profile);
};
-int platform_profile_register(struct platform_profile_handler *pprof);
-int platform_profile_remove(void);
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
+void platform_profile_remove(struct device *dev);
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
int platform_profile_cycle(void);
-void platform_profile_notify(void);
+void platform_profile_notify(struct device *dev);
#endif /* _PLATFORM_PROFILE_H_ */
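To illustrate the reworked interface, a minimal driver wiring sketch; the profile choices, the "example" name, and the empty get/set bodies are placeholders.

static int example_profile_probe(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int example_profile_get(struct device *dev,
			       enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* would query firmware here */
	return 0;
}

static int example_profile_set(struct device *dev,
			       enum platform_profile_option profile)
{
	return 0;	/* would program firmware here */
}

static const struct platform_profile_ops example_profile_ops = {
	.probe = example_profile_probe,
	.profile_get = example_profile_get,
	.profile_set = example_profile_set,
};

static int example_parent_probe(struct device *dev)
{
	struct device *ppdev;

	/* Managed variant: the class device goes away on driver unbind. */
	ppdev = devm_platform_profile_register(dev, "example", NULL,
					       &example_profile_ops);
	return PTR_ERR_OR_ZERO(ppdev);
}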
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
index 0fc831338226..f5047983004f 100644
--- a/include/linux/pldmfw.h
+++ b/include/linux/pldmfw.h
@@ -125,9 +125,17 @@ struct pldmfw_ops;
* a pointer to their own data, used to implement the device specific
* operations.
*/
+
+enum pldmfw_update_mode {
+ PLDMFW_UPDATE_MODE_FULL,
+ PLDMFW_UPDATE_MODE_SINGLE_COMPONENT,
+};
+
struct pldmfw {
const struct pldmfw_ops *ops;
struct device *dev;
+ u16 component_identifier;
+ enum pldmfw_update_mode mode;
};
bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
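A short sketch of how a driver might opt into the new single-component mode; the component identifier value is a placeholder.

static void example_setup_pldmfw(struct pldmfw *context, struct device *dev,
				 const struct pldmfw_ops *ops)
{
	context->ops = ops;
	context->dev = dev;
	context->mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
	context->component_identifier = 0x0001;	/* placeholder ID */
}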
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 97b0e23363c8..98a899858ece 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -8,14 +8,15 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/completion.h>
#include <linux/export.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
+#include <linux/hrtimer_types.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/hrtimer.h>
-#include <linux/completion.h>
+#include <linux/workqueue_types.h>
/*
* Callbacks for platform drivers to implement.
@@ -24,11 +25,12 @@ extern void (*pm_power_off)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
-extern void pm_vt_switch_required(struct device *dev, bool required);
+extern int pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
-static inline void pm_vt_switch_required(struct device *dev, bool required)
+static inline int pm_vt_switch_required(struct device *dev, bool required)
{
+ return 0;
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
@@ -384,12 +386,8 @@ const struct dev_pm_ops name = { \
#ifdef CONFIG_PM
#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
#else
#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
-#define EXPORT_PM_FN_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
#ifdef CONFIG_PM_SLEEP
@@ -510,6 +508,7 @@ const struct dev_pm_ops name = { \
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
+ * POWEROFF	 System will power off, call ->poweroff() for all devices.
*
* The following PM_EVENT_ messages are defined for internal use by
* kernel subsystems. They are never issued by the PM core.
@@ -540,6 +539,7 @@ const struct dev_pm_ops name = { \
#define PM_EVENT_USER 0x0100
#define PM_EVENT_REMOTE 0x0200
#define PM_EVENT_AUTO 0x0400
+#define PM_EVENT_POWEROFF 0x0800
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
@@ -554,6 +554,7 @@ const struct dev_pm_ops name = { \
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_POWEROFF ((struct pm_message){ .event = PM_EVENT_POWEROFF, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
@@ -570,7 +571,8 @@ const struct dev_pm_ops name = { \
{ .event = PM_EVENT_AUTO_RESUME, })
#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
-
+#define PMSG_NO_WAKEUP(msg) (((msg).event & \
+ (PM_EVENT_FREEZE | PM_EVENT_QUIESCE)) != 0)
/*
* Device run-time power management status.
*
@@ -600,6 +602,7 @@ enum rpm_status {
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
+ RPM_BLOCKED,
};
/*
@@ -681,9 +684,12 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- bool async_in_progress:1; /* Owned by the PM core */
+ bool work_in_progress:1; /* Owned by the PM core */
+ bool smart_suspend:1; /* Owned by the PM core */
bool must_resume:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
+ bool out_band_wakeup:1;
+ bool strict_midlayer:1;
#else
bool should_wakeup:1;
#endif
@@ -721,6 +727,7 @@ struct dev_pm_info {
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
+ bool detach_power_off:1; /* Owned by the driver core */
};
extern int dev_pm_get_subsys_data(struct device *dev);
@@ -840,10 +847,8 @@ extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
-extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
-extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
@@ -885,10 +890,8 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void
#define pm_generic_resume_noirq NULL
#define pm_generic_resume NULL
#define pm_generic_freeze_noirq NULL
-#define pm_generic_freeze_late NULL
#define pm_generic_freeze NULL
#define pm_generic_thaw_noirq NULL
-#define pm_generic_thaw_early NULL
#define pm_generic_thaw NULL
#define pm_generic_restore_noirq NULL
#define pm_generic_restore_early NULL
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 68669ce18720..c3b46fa358d3 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -41,9 +41,7 @@ extern int pm_clk_create(struct device *dev);
extern void pm_clk_destroy(struct device *dev);
extern int pm_clk_add(struct device *dev, const char *con_id);
extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
-extern int of_pm_clk_add_clk(struct device *dev, const char *name);
extern int of_pm_clk_add_clks(struct device *dev);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
@@ -76,9 +74,6 @@ static inline int of_pm_clk_add_clks(struct device *dev)
{
return -EINVAL;
}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
#define pm_clk_suspend NULL
#define pm_clk_resume NULL
static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f24546a3d3db..93ba0143ca47 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,7 +16,7 @@
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/time64.h>
/*
@@ -30,9 +30,22 @@
* supplier and its PM domain when creating the
* device-links.
*
+ * PD_FLAG_REQUIRED_OPP: Assign required_devs for the required OPPs. The
+ * index of the required OPP must correspond to the
+ * index in the array of the pd_names. If pd_names
+ * isn't specified, the index just follows the
+ * index for the attached PM domain.
+ *
+ * PD_FLAG_ATTACH_POWER_ON: Power on the domain during attach.
+ *
+ * PD_FLAG_DETACH_POWER_OFF: Power off the domain during detach.
+ *
*/
#define PD_FLAG_NO_DEV_LINK BIT(0)
#define PD_FLAG_DEV_LINK_ON BIT(1)
+#define PD_FLAG_REQUIRED_OPP BIT(2)
+#define PD_FLAG_ATTACH_POWER_ON BIT(3)
+#define PD_FLAG_DETACH_POWER_OFF BIT(4)
struct dev_pm_domain_attach_data {
const char * const *pd_names;
@@ -43,6 +56,7 @@ struct dev_pm_domain_attach_data {
struct dev_pm_domain_list {
struct device **pd_devs;
struct device_link **pd_links;
+ u32 *opp_tokens;
u32 num_pds;
};
@@ -92,6 +106,21 @@ struct dev_pm_domain_list {
* GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
* but its corresponding OPP tables are not
* described in DT, but are given directly by FW.
+ *
+ * GENPD_FLAG_DEV_NAME_FW:	Instructs genpd to generate a unique device name
+ *				using an IDA. It is used by genpd providers which
+ * get their genpd-names directly from FW.
+ *
+ * GENPD_FLAG_NO_SYNC_STATE: The ->sync_state() support is implemented in a
+ * genpd provider specific way, likely through a
+ *				parent device node. This flag makes genpd
+ *				skip its internal support for this.
+ *
+ * GENPD_FLAG_NO_STAY_ON:	For genpd OF providers, a powered-on PM domain at
+ *				initialization is prevented from being
+ *				powered off until the ->sync_state() callback is
+ * invoked. This flag informs genpd to allow a
+ * power-off without waiting for ->sync_state().
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -101,6 +130,9 @@ struct dev_pm_domain_list {
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
+#define GENPD_FLAG_DEV_NAME_FW (1U << 8)
+#define GENPD_FLAG_NO_SYNC_STATE (1U << 9)
+#define GENPD_FLAG_NO_STAY_ON (1U << 10)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -114,7 +146,14 @@ enum genpd_notication {
GENPD_NOTIFY_ON,
};
+enum genpd_sync_state {
+ GENPD_SYNC_STATE_OFF = 0,
+ GENPD_SYNC_STATE_SIMPLE,
+ GENPD_SYNC_STATE_ONECELL,
+};
+
struct dev_power_governor {
+ bool (*system_power_down_ok)(struct dev_pm_domain *domain);
bool (*power_down_ok)(struct dev_pm_domain *domain);
bool (*suspend_ok)(struct device *dev);
};
@@ -129,16 +168,21 @@ struct genpd_governor_data {
bool max_off_time_changed;
ktime_t next_wakeup;
ktime_t next_hrtimer;
+ ktime_t last_enter;
+ bool reflect_residency;
bool cached_power_down_ok;
bool cached_power_down_state_idx;
};
struct genpd_power_state {
+ const char *name;
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
u64 usage;
u64 rejected;
+ u64 above;
+ u64 below;
struct fwnode_handle *fwnode;
u64 idle_time;
void *data;
@@ -163,11 +207,14 @@ struct generic_pm_domain {
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
unsigned int device_count; /* Number of devices */
+ unsigned int device_id; /* unique device id */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
bool synced_poweroff; /* A consumer needs a synced poweroff */
+ bool stay_on; /* Stay powered-on during boot. */
+ enum genpd_sync_state sync_state; /* How sync_state is managed. */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
@@ -175,6 +222,10 @@ struct generic_pm_domain {
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
+ int (*set_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev, bool enable);
+ bool (*get_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -194,8 +245,11 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
+ struct {
+ raw_spinlock_t raw_slock;
+ unsigned long raw_lock_flags;
+ };
};
-
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -237,6 +291,9 @@ struct generic_pm_domain_data {
unsigned int performance_state;
unsigned int default_pstate;
unsigned int rpm_pstate;
+ unsigned int opp_token;
+ bool hw_mode;
+ bool rpm_always_on;
void *data;
};
@@ -260,6 +317,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
+void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx);
struct device *dev_to_genpd_dev(struct device *dev);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
@@ -267,6 +326,10 @@ int dev_pm_genpd_remove_notifier(struct device *dev);
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
void dev_pm_genpd_synced_poweroff(struct device *dev);
+int dev_pm_genpd_set_hwmode(struct device *dev, bool enable);
+bool dev_pm_genpd_get_hwmode(struct device *dev);
+int dev_pm_genpd_rpm_always_on(struct device *dev, bool on);
+bool dev_pm_genpd_is_on(struct device *dev);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -308,6 +371,10 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -EOPNOTSUPP;
}
+static inline void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx)
+{ }
+
static inline struct device *dev_to_genpd_dev(struct device *dev)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -340,6 +407,26 @@ static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
{ }
+static inline int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool dev_pm_genpd_get_hwmode(struct device *dev)
+{
+ return false;
+}
+
+static inline int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool dev_pm_genpd_is_on(struct device *dev)
+{
+ return false;
+}
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
@@ -378,6 +465,7 @@ int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
+void of_genpd_sync_state(struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -423,6 +511,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
+static inline void of_genpd_sync_state(struct device_node *np) {}
+
static inline int genpd_dev_pm_attach(struct device *dev)
{
return 0;
@@ -448,7 +538,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
-int dev_pm_domain_attach(struct device *dev, bool power_on);
+int dev_pm_domain_attach(struct device *dev, u32 flags);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
@@ -456,13 +546,16 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
int dev_pm_domain_attach_list(struct device *dev,
const struct dev_pm_domain_attach_data *data,
struct dev_pm_domain_list **list);
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
#else
-static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
+static inline int dev_pm_domain_attach(struct device *dev, u32 flags)
{
return 0;
}
@@ -482,6 +575,14 @@ static inline int dev_pm_domain_attach_list(struct device *dev,
{
return 0;
}
+
+static inline int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
+
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)
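A sketch of attaching multiple PM domains with the new devm_ variant and the flags documented above; the domain names and the flag combination are placeholders.

#include <linux/pm_domain.h>

static int example_attach_domains(struct device *dev,
				  struct dev_pm_domain_list **pd_list)
{
	static const char * const pd_names[] = { "perf", "mem" };
	const struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		.pd_flags = PD_FLAG_DEV_LINK_ON | PD_FLAG_REQUIRED_OPP,
	};

	/* Managed variant: domains are detached on driver unbind. */
	return devm_pm_domain_attach_list(dev, &pd_data, pd_list);
}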
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dd7c8441af42..789406d95e69 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/cleanup.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
@@ -62,11 +63,8 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw: Array of hierarchy of versions to match.
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
- * @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach. Mutually exclusive with required_devs.
- * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
- * exclusive with required_devs.
- * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
+ * @required_dev: The required OPP device.
+ * @required_dev_index: The index of the required OPP for the @required_dev.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -79,9 +77,8 @@ struct dev_pm_opp_config {
const unsigned int *supported_hw;
unsigned int supported_hw_count;
const char * const *regulator_names;
- const char * const *genpd_names;
- struct device ***virt_devs;
- struct device **required_devs;
+ struct device *required_dev;
+ unsigned int required_dev_index;
};
#define OPP_LEVEL_UNSET U32_MAX
@@ -101,11 +98,33 @@ struct dev_pm_opp_data {
unsigned long u_volt;
};
+/**
+ * struct dev_pm_opp_key - Key used to identify OPP entries
+ * @freq: Frequency in Hz. Use 0 if frequency is not to be matched.
+ * @level: Performance level associated with the OPP entry.
+ * Use OPP_LEVEL_UNSET if level is not to be matched.
+ * @bw: Bandwidth associated with the OPP entry.
+ * Use 0 if bandwidth is not to be matched.
+ *
+ * This structure is used to uniquely identify an OPP entry based on
+ * frequency, performance level, and bandwidth. Each field can be
+ * selectively ignored during matching by setting it to its respective
+ * NOP value.
+ */
+struct dev_pm_opp_key {
+ unsigned long freq;
+ unsigned int level;
+ u32 bw;
+};
+
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
+struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
+unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index);
+
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
@@ -131,6 +150,10 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available);
+
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available);
@@ -162,6 +185,7 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned int *bw, int index);
+struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp);
void dev_pm_opp_put(struct dev_pm_opp *opp);
int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
@@ -196,6 +220,7 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_sync_regulators(struct device *dev);
+
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
@@ -207,8 +232,18 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ return opp_table;
+}
+
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
+static inline unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
+{
+ return 0;
+}
+
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
return 0;
@@ -277,6 +312,13 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
@@ -338,6 +380,11 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
+{
+ return opp;
+}
+
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
static inline int
@@ -474,6 +521,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+bool dev_pm_opp_of_has_required_opp(struct device *dev);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
@@ -552,12 +600,23 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
return -EOPNOTSUPP;
}
+static inline bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ return false;
+}
+
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
return -EOPNOTSUPP;
}
#endif
+/* Scope based cleanup macro for OPP reference counting */
+DEFINE_FREE(put_opp, struct dev_pm_opp *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put(_T))
+
+/* Scope based cleanup macro for OPP table reference counting */
+DEFINE_FREE(put_opp_table, struct opp_table *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put_opp_table(_T))
+
/* OPP Configuration helpers */
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
@@ -669,36 +728,6 @@ static inline void dev_pm_opp_put_config_regulators(int token)
dev_pm_opp_clear_config(token);
}
-/* genpd helpers */
-static inline int dev_pm_opp_attach_genpd(struct device *dev,
- const char * const *names,
- struct device ***virt_devs)
-{
- struct dev_pm_opp_config config = {
- .genpd_names = names,
- .virt_devs = virt_devs,
- };
-
- return dev_pm_opp_set_config(dev, &config);
-}
-
-static inline void dev_pm_opp_detach_genpd(int token)
-{
- dev_pm_opp_clear_config(token);
-}
-
-static inline int devm_pm_opp_attach_genpd(struct device *dev,
- const char * const *names,
- struct device ***virt_devs)
-{
- struct dev_pm_opp_config config = {
- .genpd_names = names,
- .virt_devs = virt_devs,
- };
-
- return devm_pm_opp_set_config(dev, &config);
-}
-
/* prop-name helpers */
static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
@@ -719,4 +748,14 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
return dev_pm_opp_get_freq_indexed(opp, 0);
}
+static inline int dev_pm_opp_set_level(struct device *dev, unsigned int level)
+{
+ struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_level_exact(dev, level);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ return dev_pm_opp_set_opp(dev, opp);
+}
+
#endif /* __LINUX_OPP_H__ */
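A sketch of the new key-based lookup together with the scope-based put defined above; matching here is on frequency only, with level and bandwidth left at their NOP values.

static int example_pick_opp(struct device *dev, unsigned long freq_hz)
{
	struct dev_pm_opp_key key = {
		.freq = freq_hz,
		.level = OPP_LEVEL_UNSET,	/* do not match on level */
		.bw = 0,			/* do not match on bandwidth */
	};
	struct dev_pm_opp *opp __free(put_opp) =
		dev_pm_opp_find_key_exact(dev, &key, true);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	return dev_pm_opp_set_opp(dev, opp);	/* opp ref dropped on return */
}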
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 4a69d4af3ff8..6cea4455f867 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -162,6 +162,15 @@ static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
#endif
+#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP
+s32 cpu_wakeup_latency_qos_limit(void);
+#else
+static inline s32 cpu_wakeup_latency_qos_limit(void)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
+#endif
+
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
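As a consumer-side sketch, an idle-state filter might compare exit latencies against the new limit; the governor context here is hypothetical.

static bool example_state_allowed(s32 exit_latency_us)
{
	/* Without CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP this is "no constraint". */
	return exit_latency_us <= cpu_wakeup_latency_qos_limit();
}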
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d39dc863f612..41037c513f06 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -21,6 +21,7 @@
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+#define RPM_TRANSPARENT 0x10 /* Succeed if runtime PM is disabled */
/*
* Use this for defining a set of PM operations to be used in all situations
@@ -67,7 +68,6 @@ static inline bool queue_pm_work(struct work_struct *work)
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
-extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
@@ -76,7 +76,9 @@ extern int pm_runtime_get_if_active(struct device *dev);
extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
-extern int pm_runtime_barrier(struct device *dev);
+extern void pm_runtime_barrier(struct device *dev);
+extern bool pm_runtime_block_if_disabled(struct device *dev);
+extern void pm_runtime_unblock(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
@@ -93,7 +95,9 @@ extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);
+int devm_pm_runtime_set_active_enabled(struct device *dev);
extern int devm_pm_runtime_enable(struct device *dev);
+int devm_pm_runtime_get_noresume(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
@@ -197,6 +201,17 @@ static inline bool pm_runtime_enabled(struct device *dev)
}
/**
+ * pm_runtime_blocked - Check if runtime PM enabling is blocked.
+ * @dev: Target device.
+ *
+ * Do not call this function outside system suspend/resume code paths.
+ */
+static inline bool pm_runtime_blocked(struct device *dev)
+{
+ return dev->power.last_status == RPM_BLOCKED;
+}
+
+/**
* pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
* @dev: Target device.
*
@@ -242,7 +257,6 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
-static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
@@ -270,13 +284,18 @@ static inline int pm_runtime_get_if_active(struct device *dev)
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
-static inline int pm_runtime_barrier(struct device *dev) { return 0; }
+static inline void pm_runtime_barrier(struct device *dev) {}
+static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; }
+static inline void pm_runtime_unblock(struct device *dev) {}
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
+static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
@@ -308,6 +327,18 @@ static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
+
+bool pm_runtime_need_not_resume(struct device *dev);
+int pm_runtime_force_resume(struct device *dev);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
+static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; }
+
+#endif /* CONFIG_PM_SLEEP */
+
/**
* pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
* @dev: Target device.
@@ -315,6 +346,19 @@ static inline void pm_runtime_release_supplier(struct device_link *link) {}
* Invoke the "idle check" callback of @dev and, depending on its return value,
* set up autosuspend of @dev or suspend it (depending on whether or not
* autosuspend has been enabled for it).
+ *
+ * Return:
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM idle and suspend callbacks.
*/
static inline int pm_runtime_idle(struct device *dev)
{
@@ -324,6 +368,19 @@ static inline int pm_runtime_idle(struct device *dev)
/**
* pm_runtime_suspend - Suspend a device synchronously.
* @dev: Target device.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_suspend(struct device *dev)
{
@@ -331,14 +388,30 @@ static inline int pm_runtime_suspend(struct device *dev)
}
/**
- * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
+ * pm_runtime_autosuspend - Update the last access time and set up autosuspend
+ * of a device.
* @dev: Target device.
*
- * Set up autosuspend of @dev or suspend it (depending on whether or not
- * autosuspend is enabled for it) without engaging its "idle check" callback.
+ * First update the last access time, then set up autosuspend of @dev or suspend
+ * it (depending on whether or not autosuspend is enabled for it) without
+ * engaging its "idle check" callback.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_AUTO);
}
@@ -357,6 +430,17 @@ static inline int pm_runtime_resume(struct device *dev)
*
* Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
* asynchronously.
+ *
+ * Return:
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_idle(struct device *dev)
{
@@ -373,14 +457,28 @@ static inline int pm_request_resume(struct device *dev)
}
/**
- * pm_request_autosuspend - Queue up autosuspend of a device.
+ * pm_request_autosuspend - Update the last access time and queue up autosuspend
+ * of a device.
* @dev: Target device.
*
- * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev
- * asynchronously.
+ * Update the last access time of a device and queue up a work item to run an
+ * equivalent pm_runtime_autosuspend() for @dev asynchronously.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
@@ -415,6 +513,19 @@ static inline int pm_runtime_get_sync(struct device *dev)
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
+static inline int pm_runtime_get_active(struct device *dev, int rpmflags)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT | rpmflags);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
* @dev: Target device.
@@ -425,15 +536,7 @@ static inline int pm_runtime_get_sync(struct device *dev)
*/
static inline int pm_runtime_resume_and_get(struct device *dev)
{
- int ret;
-
- ret = __pm_runtime_resume(dev, RPM_GET_PUT);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_get_active(dev, 0);
}
/**
@@ -442,6 +545,18 @@ static inline int pm_runtime_resume_and_get(struct device *dev)
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_idle().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put(struct device *dev)
{
@@ -454,6 +569,18 @@ static inline int pm_runtime_put(struct device *dev)
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int __pm_runtime_put_autosuspend(struct device *dev)
{
@@ -461,18 +588,79 @@ static inline int __pm_runtime_put_autosuspend(struct device *dev)
}
/**
- * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * pm_runtime_put_autosuspend - Update the last access time of a device, drop
+ * its usage counter and queue autosuspend if the usage counter becomes 0.
* @dev: Target device.
*
- * Decrement the runtime PM usage counter of @dev and if it turns out to be
- * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ * Update the last access time of @dev, decrement runtime PM usage counter of
+ * @dev and if it turns out to be equal to 0, queue up a work item for @dev like
+ * in pm_request_autosuspend().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
- return __pm_runtime_suspend(dev,
- RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
+ pm_runtime_mark_last_busy(dev);
+ return __pm_runtime_put_autosuspend(dev);
}
+DEFINE_GUARD(pm_runtime_noresume, struct device *,
+ pm_runtime_get_noresume(_T), pm_runtime_put_noidle(_T));
+
+DEFINE_GUARD(pm_runtime_active, struct device *,
+ pm_runtime_get_sync(_T), pm_runtime_put(_T));
+DEFINE_GUARD(pm_runtime_active_auto, struct device *,
+ pm_runtime_get_sync(_T), pm_runtime_put_autosuspend(_T));
+/*
+ * Use the following guards with ACQUIRE()/ACQUIRE_ERR().
+ *
+ * The difference between the "_try" and "_try_enabled" variants is that the
+ * former do not produce an error when runtime PM is disabled for the given
+ * device.
+ */
+DEFINE_GUARD_COND(pm_runtime_active, _try,
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
+ pm_runtime_resume_and_get(_T), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
+ pm_runtime_resume_and_get(_T), _RET == 0)
+
+/* ACQUIRE() wrapper macros for the guards defined above. */
+
+#define PM_RUNTIME_ACQUIRE(_dev, _var) \
+ ACQUIRE(pm_runtime_active_try, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_AUTOSUSPEND(_dev, _var) \
+ ACQUIRE(pm_runtime_active_auto_try, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_IF_ENABLED(_dev, _var) \
+ ACQUIRE(pm_runtime_active_try_enabled, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_IF_ENABLED_AUTOSUSPEND(_dev, _var) \
+ ACQUIRE(pm_runtime_active_auto_try_enabled, _var)(_dev)
+
+/*
+ * ACQUIRE_ERR() wrapper macro for guard pm_runtime_active.
+ *
+ * Always check PM_RUNTIME_ACQUIRE_ERR() after using one of the
+ * PM_RUNTIME_ACQUIRE*() macros defined above (yes, it can be used with
+ * any of them) and if it is nonzero, avoid accessing the given device.
+ */
+#define PM_RUNTIME_ACQUIRE_ERR(_var_ptr) \
+ ACQUIRE_ERR(pm_runtime_active, _var_ptr)
+
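Putting the two macro families together, a sketch of the documented acquire-then-check pattern; the function and the hardware access it stands in for are hypothetical.

static int example_read_hw(struct device *dev)
{
	PM_RUNTIME_ACQUIRE(dev, pm);
	int ret = PM_RUNTIME_ACQUIRE_ERR(&pm);

	if (ret)
		return ret;	/* not resumed: do not touch the device */

	/* Device is RPM_ACTIVE here; the usage count drops at scope exit. */
	return 0;
}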
/**
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
* @dev: Target device.
@@ -482,9 +670,21 @@ static inline int pm_runtime_put_autosuspend(struct device *dev)
* return value, set up autosuspend of @dev or suspend it (depending on whether
* or not autosuspend has been enabled for it).
*
- * The possible return values of this function are the same as for
- * pm_runtime_idle() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync(struct device *dev)
{
@@ -498,9 +698,21 @@ static inline int pm_runtime_put_sync(struct device *dev)
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, carry out runtime-suspend of @dev synchronously.
*
- * The possible return values of this function are the same as for
- * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
@@ -508,19 +720,35 @@ static inline int pm_runtime_put_sync_suspend(struct device *dev)
}
/**
- * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * pm_runtime_put_sync_autosuspend - Update the last access time of a device,
+ * drop device usage counter and autosuspend if 0.
* @dev: Target device.
*
- * Decrement the runtime PM usage counter of @dev and if it turns out to be
- * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
- * on whether or not autosuspend has been enabled for it).
- *
- * The possible return values of this function are the same as for
- * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * Update the last access time of @dev, decrement the runtime PM usage counter
+ * of @dev and if it turns out to be equal to 0, set up autosuspend of @dev or
+ * suspend it synchronously (depending on whether or not autosuspend has been
+ * enabled for it).
+ *
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
@@ -556,11 +784,18 @@ static inline int pm_runtime_set_suspended(struct device *dev)
* pm_runtime_disable - Disable runtime PM for a device.
* @dev: Target device.
*
- * Prevent the runtime PM framework from working with @dev (by incrementing its
- * "blocking" counter).
+ * Prevent the runtime PM framework from working with @dev by incrementing its
+ * "disable" counter.
+ *
+ * If the counter is zero when this function runs and there is a pending runtime
+ * resume request for @dev, it will be resumed. If the counter is still zero at
+ * that point, all of the pending runtime PM requests for @dev will be canceled,
+ * and this function will wait for all runtime PM operations in progress
+ * involving @dev to complete.
*
- * For each invocation of this function for @dev there must be a matching
- * pm_runtime_enable() call in order for runtime PM to be enabled for it.
+ * For each invocation of this function for @dev, there must be a matching
+ * pm_runtime_enable() call, so that runtime PM is eventually enabled for it
+ * again.
*/
static inline void pm_runtime_disable(struct device *dev)
{
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index d9642c6cf852..25b63ed51b76 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -10,6 +10,7 @@ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
+extern int devm_pm_set_wake_irq(struct device *dev, int irq);
#else /* !CONFIG_PM */
@@ -32,5 +33,10 @@ static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
+static inline int devm_pm_set_wake_irq(struct device *dev, int irq)
+{
+ return 0;
+}
+
#endif /* CONFIG_PM */
#endif /* _LINUX_PM_WAKEIRQ_H */
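/*
 * Illustrative sketch, not part of this patch: the new devm_pm_set_wake_irq()
 * removes the manual dev_pm_clear_wake_irq() unwind from error and remove
 * paths. foo_probe() is hypothetical.
 */
static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* cleared automatically when the device is detached from its driver */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}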
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 76cd1f9f1365..41e8f344a205 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -10,7 +10,7 @@
#define _LINUX_PM_WAKEUP_H
#ifndef _DEVICE_H_
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
#include <linux/types.h>
@@ -94,11 +94,17 @@ static inline void device_set_wakeup_path(struct device *dev)
dev->power.wakeup_path = true;
}
+static inline void device_set_out_band_wakeup(struct device *dev)
+{
+ dev->power.out_band_wakeup = true;
+}
+
+static inline bool device_out_band_wakeup(struct device *dev)
+{
+ return dev->power.out_band_wakeup;
+}
+
/* drivers/base/power/wakeup.c */
-extern struct wakeup_source *wakeup_source_create(const char *name);
-extern void wakeup_source_destroy(struct wakeup_source *ws);
-extern void wakeup_source_add(struct wakeup_source *ws);
-extern void wakeup_source_remove(struct wakeup_source *ws);
extern struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
@@ -129,17 +135,6 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline struct wakeup_source *wakeup_source_create(const char *name)
-{
- return NULL;
-}
-
-static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_add(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_remove(struct wakeup_source *ws) {}
-
static inline struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name)
{
@@ -177,6 +172,13 @@ static inline bool device_wakeup_path(struct device *dev)
static inline void device_set_wakeup_path(struct device *dev) {}
+static inline void device_set_out_band_wakeup(struct device *dev) {}
+
+static inline bool device_out_band_wakeup(struct device *dev)
+{
+ return false;
+}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
@@ -205,17 +207,17 @@ static inline void device_set_awake_path(struct device *dev)
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
- return pm_wakeup_ws_event(ws, msec, false);
+ pm_wakeup_ws_event(ws, msec, false);
}
static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
{
- return pm_wakeup_dev_event(dev, msec, false);
+ pm_wakeup_dev_event(dev, msec, false);
}
static inline void pm_wakeup_hard_event(struct device *dev)
{
- return pm_wakeup_dev_event(dev, 0, true);
+ pm_wakeup_dev_event(dev, 0, true);
}
/**
@@ -240,4 +242,21 @@ static inline int device_init_wakeup(struct device *dev, bool enable)
return 0;
}
+static void device_disable_wakeup(void *dev)
+{
+ device_init_wakeup(dev, false);
+}
+
+/**
+ * devm_device_init_wakeup - Resource managed device wakeup initialization.
+ * @dev: Device to handle.
+ *
+ * This function is the devm-managed version of device_init_wakeup(dev, true).
+ */
+static inline int devm_device_init_wakeup(struct device *dev)
+{
+ device_init_wakeup(dev, true);
+ return devm_add_action_or_reset(dev, device_disable_wakeup, dev);
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
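/*
 * Illustrative sketch, not part of this patch: using the devm variant above,
 * wakeup is disabled automatically when the device is unbound, so no
 * device_init_wakeup(dev, false) is needed on teardown. foo_probe() is
 * hypothetical.
 */
static int foo_probe(struct device *dev)
{
	int ret = devm_device_init_wakeup(dev);

	if (ret)
		return ret;

	/* ... rest of probe; no wakeup-related cleanup required ... */
	return 0;
}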
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index fa9f08164c36..884040e1383b 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -73,6 +73,20 @@
*/
#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+/*
+ * PMBUS_OP_PROTECTED
+ * Set if the chip OPERATION command is protected and protection is not
+ * determined by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_OP_PROTECTED BIT(6)
+
+/*
+ * PMBUS_VOUT_PROTECTED
+ * Set if the chip VOUT_COMMAND command is protected and protection is not
+ * determined by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_VOUT_PROTECTED BIT(7)
+
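/*
 * Illustrative sketch, not part of this patch: a client driver would pass the
 * new protection flags through pmbus_platform_data. The initializer below is
 * hypothetical.
 */
static struct pmbus_platform_data foo_pmbus_pdata = {
	.flags = PMBUS_OP_PROTECTED | PMBUS_VOUT_PROTECTED,
};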
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 82561242cda4..23fe3eaf242d 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -290,7 +290,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
}
struct pnp_fixup {
- char id[7];
+ char id[8];
void (*quirk_function) (struct pnp_dev *dev); /* fixup function */
};
@@ -383,7 +383,7 @@ struct pnp_driver {
struct device_driver driver;
};
-#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver)
+#define to_pnp_driver(drv) container_of_const(drv, struct pnp_driver, driver)
struct pnp_card_driver {
struct list_head global_list;
@@ -435,8 +435,6 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern const struct bus_type pnp_bus_type;
-
#if defined(CONFIG_PNP)
/* device management */
@@ -469,7 +467,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id);
int pnp_register_driver(struct pnp_driver *drv);
void pnp_unregister_driver(struct pnp_driver *drv);
-#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type)
+bool dev_is_pnp(const struct device *dev);
#else
@@ -502,7 +500,7 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
-#define dev_is_pnp(d) false
+static inline bool dev_is_pnp(const struct device *dev) { return false; }
#endif /* CONFIG_PNP */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 1f0ee2459f2a..299e2dd7da6d 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -38,11 +38,8 @@
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
-#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
-#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
-
-#define SLUB_RED_INACTIVE 0xbb
-#define SLUB_RED_ACTIVE 0xcc
+#define SLUB_RED_INACTIVE 0xbb /* when obj is inactive */
+#define SLUB_RED_ACTIVE 0xcc /* when obj is active */
/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
@@ -52,12 +49,6 @@
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
-/********** arch/ia64/hp/common/sba_iommu.c **********/
-/*
- * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
- * value of "SBAIOMMU POISON\0" for spill-over poisoning.
- */
-
/********** fs/jbd/journal.c **********/
#define JBD_POISON_FREE 0x5b
#define JBD2_POISON_FREE 0x5c
@@ -79,6 +70,10 @@
#define KEY_DESTROY 0xbd
/********** net/core/page_pool.c **********/
+/*
+ * page_pool uses additional free bits within this value to store data; see the
+ * definition of PP_DMA_INDEX_MASK in mm.h.
+ */
#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
/********** net/core/skbuff.c **********/
@@ -95,4 +90,7 @@
/********** lib/stackdepot.c **********/
#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+/********** io_uring/ **********/
+#define IO_URING_PTR_POISON ((void *)(0x1091UL + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d1ea4f3714a8..12bb18e8b978 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -25,14 +25,14 @@
struct poll_table_struct;
-/*
+/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
/*
- * Do not touch the structure directly, use the access functions
- * poll_does_not_wait() and poll_requested_events() instead.
+ * Do not touch the structure directly, use the access function
+ * poll_requested_events() instead.
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
@@ -41,18 +41,16 @@ typedef struct poll_table_struct {
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
- if (p && p->_qproc && wait_address)
+ if (p && p->_qproc) {
p->_qproc(filp, wait_address, p);
-}
-
-/*
- * Return true if it is guaranteed that poll will not wait. This is the case
- * if the poll() of another file descriptor in the set got an event, so there
- * is no need for waiting.
- */
-static inline bool poll_does_not_wait(const poll_table *p)
-{
- return p == NULL || p->_qproc == NULL;
+ /*
+ * This memory barrier is paired with the one in wq_has_sleeper().
+ * See the comment above prepare_to_wait(), we need to
+ * ensure that subsequent tests in this thread can't be
+ * reordered with __add_wait_queue() in _qproc() paths.
+ */
+ smp_mb();
+ }
}
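/*
 * Illustrative sketch, not part of this patch: a typical f_op->poll method
 * relying on the barrier now issued inside poll_wait(). struct foo_device and
 * its members are hypothetical.
 */
struct foo_device {
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_device *foo = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &foo->waitq, wait);	/* __add_wait_queue() + smp_mb() */

	/* This read cannot be reordered before the queueing above. */
	if (foo->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}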
/*
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index ef8619f48920..a500d3160fe8 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -95,10 +95,13 @@ struct posix_clock {
* struct posix_clock_context - represents clock file operations context
*
* @clk: Pointer to the clock
+ * @fp: Pointer to the file used to open the clock
* @private_clkdata: Pointer to user data
*
* Drivers should use struct posix_clock_context during specific character
- * device file operation methods to access the posix clock.
+ * device file operation methods to access the posix clock. In particular,
+ * the file pointer can be used to verify correct access mode for ioctl()
+ * calls.
*
* Drivers can store a private data structure during the open operation
* if they have specific information that is required in other file
@@ -106,6 +109,7 @@ struct posix_clock {
*/
struct posix_clock_context {
struct posix_clock *clk;
+ struct file *fp;
void *private_clkdata;
};
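/*
 * Illustrative sketch, not part of this patch: with the new @fp member, a
 * clock driver's ioctl handler can reject state-changing commands on
 * read-only opens. foo_clock_ioctl() is hypothetical.
 */
static long foo_clock_ioctl(struct posix_clock_context *pccontext,
			    unsigned int cmd, unsigned long arg)
{
	if (!(pccontext->fp->f_mode & FMODE_WRITE))
		return -EACCES;	/* opened read-only; deny mutating ioctls */

	/* ... handle cmd ... */
	return 0;
}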
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index dc7b738de299..4d3dbcef379e 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -5,12 +5,16 @@
#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/pid.h>
#include <linux/posix-timers_types.h>
+#include <linux/rcuref.h>
#include <linux/spinlock.h>
#include <linux/timerqueue.h>
struct kernel_siginfo;
struct task_struct;
+struct sigqueue;
+struct k_itimer;
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
@@ -33,8 +37,15 @@ static inline int clockid_to_fd(const clockid_t clk)
return ~(clk >> 3);
}
+static inline bool clockid_aux_valid(clockid_t id)
+{
+ return IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) && id >= CLOCK_AUX && id <= CLOCK_AUX_LAST;
+}
+
#ifdef CONFIG_POSIX_TIMERS
+#include <linux/signal_types.h>
+
/**
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
@@ -42,6 +53,7 @@ static inline int clockid_to_fd(const clockid_t clk)
* @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
+ * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer
* @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
@@ -49,7 +61,8 @@ struct cpu_timer {
struct timerqueue_head *head;
struct pid *pid;
struct list_head elist;
- int firing;
+ bool firing;
+ bool nanosleep;
struct task_struct __rcu *handling;
};
@@ -101,6 +114,13 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
pct->bases[CPUCLOCK_SCHED].nextevt = runtime;
}
+void posixtimer_rearm_itimer(struct task_struct *p);
+bool posixtimer_init_sigqueue(struct sigqueue *q);
+void posixtimer_send_sigqueue(struct k_itimer *tmr);
+bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq);
+void posixtimer_free_timer(struct k_itimer *timer);
+long posixtimer_create_prctl(unsigned long ctrl);
+
/* Init task static initializer */
#define INIT_CPU_TIMERBASE(b) { \
.nextevt = U64_MAX, \
@@ -122,6 +142,11 @@ struct cpu_timer { };
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
u64 cpu_limit) { }
+static inline void posixtimer_rearm_itimer(struct task_struct *p) { }
+static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info,
+ struct sigqueue *timer_sigq) { return false; }
+static inline void posixtimer_free_timer(struct k_itimer *timer) { }
+static inline long posixtimer_create_prctl(unsigned long ctrl) { return -EINVAL; }
#endif
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
@@ -132,50 +157,59 @@ static inline void clear_posix_cputimers_work(struct task_struct *p) { }
static inline void posix_cputimers_init_work(void) { }
#endif
-#define REQUEUE_PENDING 1
-
/**
* struct k_itimer - POSIX.1b interval timer structure.
- * @list: List head for binding the timer to signals->posix_timers
+ * @list: List node for binding the timer to tsk::signal::posix_timers
+ * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers
* @t_hash: Entry in the posix timer hash table
* @it_lock: Lock protecting the timer
* @kclock: Pointer to the k_clock struct handling this timer
* @it_clock: The posix timer clock id
* @it_id: The posix timer id for identifying the timer
- * @it_active: Marker that timer is active
+ * @it_status: The status of the timer
+ * @it_sig_periodic: The periodic status at signal delivery
* @it_overrun: The overrun counter for pending signals
* @it_overrun_last: The overrun at the time of the last delivered signal
- * @it_requeue_pending: Indicator that timer waits for being requeued on
- * signal delivery
+ * @it_signal_seq: Sequence count to control signal delivery
+ * @it_sigqueue_seq: The sequence count at the point where the signal was queued
* @it_sigev_notify: The notify word of sigevent struct for signal delivery
* @it_interval: The interval for periodic timers
* @it_signal: Pointer to the creator's signal struct
* @it_pid: The pid of the process/task targeted by the signal
* @it_process: The task to wakeup on clock_nanosleep (CPU timers)
- * @sigq: Pointer to preallocated sigqueue
+ * @rcuref: Reference count for lifetime management
+ * @sigq: Embedded sigqueue
* @it: Union representing the various posix timer type
* internals.
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
- struct list_head list;
+ /* 1st cacheline contains read-mostly fields */
struct hlist_node t_hash;
- spinlock_t it_lock;
- const struct k_clock *kclock;
- clockid_t it_clock;
+ struct hlist_node list;
timer_t it_id;
- int it_active;
+ clockid_t it_clock;
+ int it_sigev_notify;
+ enum pid_type it_pid_type;
+ struct signal_struct *it_signal;
+ const struct k_clock *kclock;
+
+ /* 2nd cacheline and above contain fields which are modified regularly */
+ spinlock_t it_lock;
+ int it_status;
+ bool it_sig_periodic;
s64 it_overrun;
s64 it_overrun_last;
- int it_requeue_pending;
- int it_sigev_notify;
+ unsigned int it_signal_seq;
+ unsigned int it_sigqueue_seq;
ktime_t it_interval;
- struct signal_struct *it_signal;
+ struct hlist_node ignored_list;
union {
struct pid *it_pid;
struct task_struct *it_process;
};
- struct sigqueue *sigq;
+ struct sigqueue sigq;
+ rcuref_t rcuref;
union {
struct {
struct hrtimer timer;
@@ -186,7 +220,7 @@ struct k_itimer {
} alarm;
} it;
struct rcu_head rcu;
-};
+} ____cacheline_aligned_in_smp;
void run_posix_cpu_timers(void);
void posix_cpu_timers_exit(struct task_struct *task);
@@ -196,5 +230,36 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
-void posixtimer_rearm(struct kernel_siginfo *info);
+#ifdef CONFIG_POSIX_TIMERS
+static inline void posixtimer_putref(struct k_itimer *tmr)
+{
+ if (rcuref_put(&tmr->rcuref))
+ posixtimer_free_timer(tmr);
+}
+
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ WARN_ON_ONCE(!rcuref_get(&tmr->rcuref));
+}
+
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ posixtimer_putref(tmr);
+}
+
+static inline bool posixtimer_valid(const struct k_itimer *timer)
+{
+ unsigned long val = (unsigned long)timer->it_signal;
+
+ return !(val & 0x1UL);
+}
+#else /* CONFIG_POSIX_TIMERS */
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { }
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { }
+#endif /* !CONFIG_POSIX_TIMERS */
+
#endif
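/*
 * Illustrative sketch, not part of this patch: taking and dropping a
 * reference on the embedded rcuref. A lookup path acquires the reference
 * under RCU and releases it with posixtimer_putref(), which frees the timer
 * once the count drops to zero. foo_use_timer() is hypothetical.
 */
static void foo_use_timer(struct k_itimer *tmr)
{
	if (!rcuref_get(&tmr->rcuref))
		return;			/* timer is already on its way out */

	/* ... safely operate on tmr ... */

	posixtimer_putref(tmr);		/* may invoke posixtimer_free_timer() */
}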
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 0e65b3d634d9..62d497763e25 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -27,11 +27,16 @@ struct posix_acl_entry {
};
struct posix_acl {
- refcount_t a_refcount;
- struct rcu_head a_rcu;
- unsigned int a_count;
- struct posix_acl_entry a_entries[];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(posix_acl_hdr, hdr,
+ refcount_t a_refcount;
+ unsigned int a_count;
+ struct rcu_head a_rcu;
+ );
+ struct posix_acl_entry a_entries[] __counted_by(a_count);
};
+static_assert(offsetof(struct posix_acl, a_entries) == sizeof(struct posix_acl_hdr),
+ "struct member likely outside of struct_group_tagged()");
#define FOREACH_ACL_ENTRY(pa, acl, pe) \
for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++)
@@ -62,7 +67,7 @@ posix_acl_release(struct posix_acl *acl)
/* posix_acl.c */
extern void posix_acl_init(struct posix_acl *, int);
-extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern struct posix_acl *posix_acl_alloc(unsigned int count, gfp_t flags);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 5180dc9f1706..d56e1276aafe 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -61,6 +61,8 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ int voltage_min_design;
+ int voltage_max_design;
bool removed;
unsigned long last_update;
union power_supply_propval last_status;
diff --git a/include/linux/power/max77705_charger.h b/include/linux/power/max77705_charger.h
new file mode 100644
index 000000000000..b3950ce0625e
--- /dev/null
+++ b/include/linux/power/max77705_charger.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Maxim MAX77705 definitions.
+ *
+ * Copyright (C) 2015 Samsung Electronics, Inc.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#ifndef __MAX77705_CHARGER_H
+#define __MAX77705_CHARGER_H __FILE__
+
+#include <linux/regmap.h>
+
+/* MAX77705_CHG_REG_CHG_INT */
+#define MAX77705_BYP_I (0)
+#define MAX77705_INP_LIMIT_I (1)
+#define MAX77705_BATP_I (2)
+#define MAX77705_BAT_I (3)
+#define MAX77705_CHG_I (4)
+#define MAX77705_WCIN_I (5)
+#define MAX77705_CHGIN_I (6)
+#define MAX77705_AICL_I (7)
+
+/* MAX77705_CHG_REG_CHG_INT_OK */
+#define MAX77705_BYP_OK BIT(MAX77705_BYP_I)
+#define MAX77705_DISQBAT_OK BIT(MAX77705_INP_LIMIT_I)
+#define MAX77705_BATP_OK BIT(MAX77705_BATP_I)
+#define MAX77705_BAT_OK BIT(MAX77705_BAT_I)
+#define MAX77705_CHG_OK BIT(MAX77705_CHG_I)
+#define MAX77705_WCIN_OK BIT(MAX77705_WCIN_I)
+#define MAX77705_CHGIN_OK BIT(MAX77705_CHGIN_I)
+#define MAX77705_AICL_OK BIT(MAX77705_AICL_I)
+
+/* MAX77705_CHG_REG_DETAILS_00 */
+#define MAX77705_BATP_DTLS BIT(0)
+#define MAX77705_WCIN_DTLS GENMASK(4, 3)
+#define MAX77705_WCIN_DTLS_SHIFT 3
+#define MAX77705_CHGIN_DTLS GENMASK(6, 5)
+#define MAX77705_CHGIN_DTLS_SHIFT 5
+
+/* MAX77705_CHG_REG_DETAILS_01 */
+#define MAX77705_CHG_DTLS GENMASK(3, 0)
+#define MAX77705_CHG_DTLS_SHIFT 0
+#define MAX77705_BAT_DTLS GENMASK(6, 4)
+#define MAX77705_BAT_DTLS_SHIFT 4
+
+/* MAX77705_CHG_REG_DETAILS_02 */
+#define MAX77705_BYP_DTLS GENMASK(3, 0)
+#define MAX77705_BYP_DTLS_SHIFT 0
+
+/* MAX77705_CHG_REG_CNFG_00 */
+#define MAX77705_CHG_SHIFT 0
+#define MAX77705_UNO_SHIFT 1
+#define MAX77705_OTG_SHIFT 1
+#define MAX77705_BUCK_SHIFT 2
+#define MAX77705_BOOST_SHIFT 3
+#define MAX77705_WDTEN_SHIFT 4
+#define MAX77705_CHG_MASK BIT(MAX77705_CHG_SHIFT)
+#define MAX77705_UNO_MASK BIT(MAX77705_UNO_SHIFT)
+#define MAX77705_OTG_MASK BIT(MAX77705_OTG_SHIFT)
+#define MAX77705_BUCK_MASK BIT(MAX77705_BUCK_SHIFT)
+#define MAX77705_BOOST_MASK BIT(MAX77705_BOOST_SHIFT)
+#define MAX77705_WDTEN_MASK BIT(MAX77705_WDTEN_SHIFT)
+#define MAX77705_UNO_CTRL (MAX77705_UNO_MASK | MAX77705_BOOST_MASK)
+#define MAX77705_OTG_CTRL (MAX77705_OTG_MASK | MAX77705_BOOST_MASK)
+
+/* MAX77705_CHG_REG_CNFG_01 */
+#define MAX77705_FCHGTIME_DISABLE 0
+#define MAX77705_CHG_RSTRT_DISABLE 0x3
+
+#define MAX77705_CHG_PQEN_DISABLE 0
+#define MAX77705_CHG_PQEN_ENABLE 1
+
+/* MAX77705_CHG_REG_CNFG_02 */
+#define MAX77705_OTG_ILIM_500 0
+#define MAX77705_OTG_ILIM_900 1
+#define MAX77705_OTG_ILIM_1200 2
+#define MAX77705_OTG_ILIM_1500 3
+
+/* MAX77705_CHG_REG_CNFG_03 */
+#define MAX77705_TO_ITH_150MA 0
+#define MAX77705_TO_TIME_30M 3
+#define MAX77705_SYS_TRACK_ENABLE 0
+#define MAX77705_SYS_TRACK_DISABLE 1
+
+/* MAX77705_CHG_REG_CNFG_04 */
+#define MAX77705_CHG_MINVSYS_SHIFT 6
+#define MAX77705_CHG_MINVSYS_MASK GENMASK(7, 6)
+
+/* MAX77705_CHG_REG_CNFG_05 */
+#define MAX77705_B2SOVRC_DISABLE 0
+#define MAX77705_B2SOVRC_4_5A 6
+#define MAX77705_B2SOVRC_4_8A 8
+#define MAX77705_B2SOVRC_5_0A 9
+
+/* MAX77705_CHG_CNFG_06 */
+#define MAX77705_WDTCLR_SHIFT 0
+#define MAX77705_WDTCLR_MASK GENMASK(1, 0)
+#define MAX77705_WDTCLR 1
+#define MAX77705_CHGPROT_UNLOCKED 3
+#define MAX77705_SLOWEST_LX_SLOPE 3
+
+/* MAX77705_CHG_REG_CNFG_07 */
+#define MAX77705_CHG_FMBST 4
+#define MAX77705_REG_FMBST_SHIFT 2
+#define MAX77705_REG_FMBST_MASK BIT(MAX77705_REG_FMBST_SHIFT)
+#define MAX77705_REG_FGSRC_SHIFT 1
+#define MAX77705_REG_FGSRC_MASK BIT(MAX77705_REG_FGSRC_SHIFT)
+
+/* MAX77705_CHG_REG_CNFG_08 */
+#define MAX77705_CHG_FSW_3MHz 0
+#define MAX77705_CHG_FSW_2MHz 1
+#define MAX77705_CHG_FSW_1_5MHz 2
+
+/* MAX77705_CHG_REG_CNFG_09 */
+#define MAX77705_CHG_DISABLE 0
+
+/* MAX77705_CHG_REG_CNFG_12 */
+/* REG=4.5V, UVLO=4.7V */
+#define MAX77705_VCHGIN_4_5 0
+/* REG=4.5V, UVLO=4.7V */
+#define MAX77705_WCIN_4_5 0
+#define MAX77705_DISABLE_SKIP 1
+#define MAX77705_AUTO_SKIP 0
+
+#define AICL_WORK_DELAY_MS 100
+
+/* uA */
+#define MAX77705_CURRENT_CHGIN_STEP 25000
+#define MAX77705_CURRENT_CHG_STEP 50000
+#define MAX77705_CURRENT_CHGIN_MIN 100000
+#define MAX77705_CURRENT_CHGIN_MAX 3200000
+
+enum max77705_field_idx {
+ MAX77705_CHGPROT,
+ MAX77705_CHG_EN,
+ MAX77705_CHG_CC_LIM,
+ MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CHG_CV_PRM,
+ MAX77705_CHG_PQEN,
+ MAX77705_CHG_RSTRT,
+ MAX77705_CHG_WCIN,
+ MAX77705_FCHGTIME,
+ MAX77705_LX_SLOPE,
+ MAX77705_MODE,
+ MAX77705_OTG_ILIM,
+ MAX77705_REG_B2SOVRC,
+ MAX77705_REG_DISKIP,
+ MAX77705_REG_FSW,
+ MAX77705_SYS_TRACK,
+ MAX77705_TO,
+ MAX77705_TO_TIME,
+ MAX77705_VBYPSET,
+ MAX77705_VCHGIN,
+ MAX77705_WCIN,
+ MAX77705_N_REGMAP_FIELDS,
+};
+
+static const struct reg_field max77705_reg_field[MAX77705_N_REGMAP_FIELDS] = {
+ [MAX77705_MODE] = REG_FIELD(MAX77705_CHG_REG_CNFG_00, 0, 3),
+ [MAX77705_FCHGTIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 0, 2),
+ [MAX77705_CHG_RSTRT] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 4, 5),
+ [MAX77705_CHG_PQEN] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 7, 7),
+ [MAX77705_CHG_CC_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 0, 5),
+ [MAX77705_OTG_ILIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 6, 7),
+ [MAX77705_TO] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 0, 2),
+ [MAX77705_TO_TIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 3, 5),
+ [MAX77705_SYS_TRACK] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 7, 7),
+ [MAX77705_CHG_CV_PRM] = REG_FIELD(MAX77705_CHG_REG_CNFG_04, 0, 5),
+ [MAX77705_REG_B2SOVRC] = REG_FIELD(MAX77705_CHG_REG_CNFG_05, 0, 3),
+ [MAX77705_CHGPROT] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 2, 3),
+ [MAX77705_LX_SLOPE] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 5, 6),
+ [MAX77705_REG_FSW] = REG_FIELD(MAX77705_CHG_REG_CNFG_08, 0, 1),
+ [MAX77705_CHG_CHGIN_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 0, 6),
+ [MAX77705_CHG_EN] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 7, 7),
+ [MAX77705_CHG_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_10, 0, 5),
+ [MAX77705_VBYPSET] = REG_FIELD(MAX77705_CHG_REG_CNFG_11, 0, 6),
+ [MAX77705_REG_DISKIP] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 0, 0),
+ [MAX77705_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 1, 2),
+ [MAX77705_VCHGIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 3, 4),
+};
+
+struct max77705_charger_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX77705_N_REGMAP_FIELDS];
+ struct power_supply_battery_info *bat_info;
+ struct workqueue_struct *wqueue;
+ struct work_struct chgin_work;
+ struct power_supply *psy_chg;
+};
+
+#endif /* __MAX77705_CHARGER_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8e5705a56b85..360ffdf272da 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
@@ -40,7 +42,7 @@ enum {
};
/* What algorithm is the charger using? */
-enum {
+enum power_supply_charge_type {
POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */
@@ -58,6 +60,7 @@ enum {
POWER_SUPPLY_HEALTH_OVERHEAT,
POWER_SUPPLY_HEALTH_DEAD,
POWER_SUPPLY_HEALTH_OVERVOLTAGE,
+ POWER_SUPPLY_HEALTH_UNDERVOLTAGE,
POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
@@ -68,6 +71,8 @@ enum {
POWER_SUPPLY_HEALTH_COOL,
POWER_SUPPLY_HEALTH_HOT,
POWER_SUPPLY_HEALTH_NO_BATTERY,
+ POWER_SUPPLY_HEALTH_BLOWN_FUSE,
+ POWER_SUPPLY_HEALTH_CELL_IMBALANCE,
};
enum {
@@ -99,6 +104,7 @@ enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -170,6 +176,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -208,6 +216,7 @@ enum power_supply_usb_type {
enum power_supply_charge_behaviour {
POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE,
POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
};
@@ -225,7 +234,6 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
- struct device_node *of_node;
struct fwnode_handle *fwnode;
/* Driver private data */
@@ -236,6 +244,8 @@ struct power_supply_config {
char **supplied_to;
size_t num_supplicants;
+
+ bool no_wakeup_source;
};
/* Description of power supply */
@@ -243,8 +253,8 @@ struct power_supply_desc {
const char *name;
enum power_supply_type type;
u8 charge_behaviours;
- const enum power_supply_usb_type *usb_types;
- size_t num_usb_types;
+ u32 charge_types;
+ u32 usb_types;
const enum power_supply_property *properties;
size_t num_properties;
@@ -268,7 +278,6 @@ struct power_supply_desc {
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
- void (*set_charged)(struct power_supply *psy);
/*
* Set if thermal zone should not be created for this power supply.
@@ -280,6 +289,29 @@ struct power_supply_desc {
int use_for_apm;
};
+struct power_supply_ext {
+ const char *const name;
+ u8 charge_behaviours;
+ u32 charge_types;
+ const enum power_supply_property *properties;
+ size_t num_properties;
+
+ int (*get_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+ int (*set_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+ int (*property_is_writeable)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp);
+};
+
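/*
 * Illustrative sketch, not part of this patch: a driver extending an existing
 * supply with one extra read-only property via the new struct
 * power_supply_ext. All "foo" names and the returned value are hypothetical.
 */
static const enum power_supply_property foo_ext_props[] = {
	POWER_SUPPLY_PROP_TEMP,
};

static int foo_ext_get(struct power_supply *psy,
		       const struct power_supply_ext *ext, void *data,
		       enum power_supply_property psp,
		       union power_supply_propval *val)
{
	val->intval = 250;	/* 25.0 C, made-up reading */
	return 0;
}

static const struct power_supply_ext foo_ext = {
	.name		= "foo",
	.properties	= foo_ext_props,
	.num_properties	= ARRAY_SIZE(foo_ext_props),
	.get_property	= foo_ext_get,
};

/* registered with: power_supply_register_extension(psy, &foo_ext, dev, NULL); */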
struct power_supply {
const struct power_supply_desc *desc;
@@ -288,7 +320,6 @@ struct power_supply {
char **supplied_from;
size_t num_supplies;
- struct device_node *of_node;
/* Driver private data */
void *drv_data;
@@ -299,29 +330,29 @@ struct power_supply {
struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
+ bool update_groups;
bool initialized;
bool removing;
atomic_t use_cnt;
struct power_supply_battery_info *battery_info;
+ struct rw_semaphore extensions_sem; /* protects "extensions" */
+ struct list_head extensions;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
#endif
#ifdef CONFIG_LEDS_TRIGGERS
- struct led_trigger *charging_full_trig;
- char *charging_full_trig_name;
+ struct led_trigger *trig;
struct led_trigger *charging_trig;
- char *charging_trig_name;
struct led_trigger *full_trig;
- char *full_trig_name;
- struct led_trigger *online_trig;
- char *online_trig_name;
struct led_trigger *charging_blink_full_solid_trig;
- char *charging_blink_full_solid_trig_name;
+ struct led_trigger *charging_orange_full_green_trig;
#endif
};
+#define dev_to_psy(__dev) container_of_const(__dev, struct power_supply, dev)
+
/*
* This is recommended structure to specify static power supply parameters.
* Generic one, parametrizable for different power supplies. Power supply
@@ -741,7 +772,7 @@ struct power_supply_battery_info {
int overvoltage_limit_uv;
int constant_charge_current_max_ua;
int constant_charge_voltage_max_uv;
- struct power_supply_maintenance_charge_table *maintenance_charge;
+ const struct power_supply_maintenance_charge_table *maintenance_charge;
int maintenance_charge_size;
int alert_low_temp_charge_current_ua;
int alert_low_temp_charge_voltage_uv;
@@ -756,13 +787,13 @@ struct power_supply_battery_info {
int temp_alert_max;
int temp_min;
int temp_max;
- struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
+ const struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
- struct power_supply_resistance_temp_table *resist_table;
+ const struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
- struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ const struct power_supply_vbat_ri_table *vbat2ri_discharging;
int vbat2ri_discharging_size;
- struct power_supply_vbat_ri_table *vbat2ri_charging;
+ const struct power_supply_vbat_ri_table *vbat2ri_charging;
int vbat2ri_charging_size;
int bti_resistance_ohm;
int bti_resistance_tolerance;
@@ -778,19 +809,10 @@ static inline void power_supply_put(struct power_supply *psy) {}
static inline struct power_supply *power_supply_get_by_name(const char *name)
{ return NULL; }
#endif
-#ifdef CONFIG_OF
-extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
- const char *property);
-extern struct power_supply *devm_power_supply_get_by_phandle(
+extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
+ const char *property);
+extern struct power_supply *devm_power_supply_get_by_reference(
struct device *dev, const char *property);
-#else /* !CONFIG_OF */
-static inline struct power_supply *
-power_supply_get_by_phandle(struct device_node *np, const char *property)
-{ return NULL; }
-static inline struct power_supply *
-devm_power_supply_get_by_phandle(struct device *dev, const char *property)
-{ return NULL; }
-#endif /* CONFIG_OF */
extern const enum power_supply_property power_supply_battery_info_properties[];
extern const size_t power_supply_battery_info_properties_size;
@@ -803,19 +825,19 @@ extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info
extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
enum power_supply_property psp,
union power_supply_propval *val);
-extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
+extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table,
int table_len, int ocv);
-extern struct power_supply_battery_ocv_table *
+extern const struct power_supply_battery_ocv_table *
power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
int temp, int *table_len);
extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
int ocv, int temp);
extern int
-power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table,
int table_len, int temp);
extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
int vbat_uv, bool charging);
-extern struct power_supply_maintenance_charge_table *
+extern const struct power_supply_maintenance_charge_table *
power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
int resistance);
@@ -824,12 +846,11 @@ extern int power_supply_am_i_supplied(struct power_supply *psy);
int power_supply_get_property_from_supplier(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
-extern int power_supply_set_battery_charged(struct power_supply *psy);
static inline bool
power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
{
- struct power_supply_maintenance_charge_table *mt;
+ const struct power_supply_maintenance_charge_table *mt;
mt = power_supply_get_maintenance_charging_setting(info, 0);
@@ -859,18 +880,24 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val);
#else
static inline int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{ return 0; }
+static inline int power_supply_set_property_direct(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
#endif
-extern int power_supply_property_is_writeable(struct power_supply *psy,
- enum power_supply_property psp);
extern void power_supply_external_power_changed(struct power_supply *psy);
extern struct power_supply *__must_check
@@ -878,24 +905,24 @@ power_supply_register(struct device *parent,
const struct power_supply_desc *desc,
const struct power_supply_config *cfg);
extern struct power_supply *__must_check
-power_supply_register_no_ws(struct device *parent,
- const struct power_supply_desc *desc,
- const struct power_supply_config *cfg);
-extern struct power_supply *__must_check
devm_power_supply_register(struct device *parent,
const struct power_supply_desc *desc,
const struct power_supply_config *cfg);
-extern struct power_supply *__must_check
-devm_power_supply_register_no_ws(struct device *parent,
- const struct power_supply_desc *desc,
- const struct power_supply_config *cfg);
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+extern int __must_check
+power_supply_register_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev,
+ void *data);
+extern void power_supply_unregister_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext);
+
#define to_power_supply(device) container_of(device, struct power_supply, dev)
extern void *power_supply_get_drvdata(struct power_supply *psy);
-extern int power_supply_for_each_device(void *data, int (*fn)(struct device *dev, void *data));
+extern int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data));
static inline bool power_supply_is_amp_property(enum power_supply_property psp)
{
@@ -951,19 +978,6 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
return false;
}
-#ifdef CONFIG_POWER_SUPPLY_HWMON
-int power_supply_add_hwmon_sysfs(struct power_supply *psy);
-void power_supply_remove_hwmon_sysfs(struct power_supply *psy);
-#else
-static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy)
-{
- return 0;
-}
-
-static inline
-void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {}
-#endif
-
#ifdef CONFIG_SYSFS
ssize_t power_supply_charge_behaviour_show(struct device *dev,
unsigned int available_behaviours,
@@ -971,6 +985,11 @@ ssize_t power_supply_charge_behaviour_show(struct device *dev,
char *buf);
int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf);
+int power_supply_charge_types_parse(unsigned int available_types, const char *buf);
#else
static inline
ssize_t power_supply_charge_behaviour_show(struct device *dev,
@@ -986,6 +1005,20 @@ static inline int power_supply_charge_behaviour_parse(unsigned int available_beh
{
return -EOPNOTSUPP;
}
+
+static inline
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_types_parse(unsigned int available_types, const char *buf)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif /* __LINUX_POWER_SUPPLY_H__ */
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 45e6e427ceb8..f73fbea0dbc2 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -42,8 +42,7 @@ struct ppp_channel {
int hdrlen; /* amount of headroom channel needs */
void *ppp; /* opaque to channel */
int speed; /* transfer rate (bytes/second) */
- /* the following is not used at present */
- int latency; /* overhead time in milliseconds */
+ bool direct_xmit; /* no qdisc, xmit directly */
};
#ifdef __KERNEL__
diff --git a/include/linux/pps_gen_kernel.h b/include/linux/pps_gen_kernel.h
new file mode 100644
index 000000000000..6214c8aa2e02
--- /dev/null
+++ b/include/linux/pps_gen_kernel.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PPS generator API kernel header
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#ifndef LINUX_PPS_GEN_KERNEL_H
+#define LINUX_PPS_GEN_KERNEL_H
+
+#include <linux/pps_gen.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+
+/*
+ * Global defines
+ */
+
+#define PPS_GEN_MAX_SOURCES 16 /* should be enough... */
+
+struct pps_gen_device;
+
+/**
+ * struct pps_gen_source_info - the specific PPS generator info
+ * @use_system_clock: true if the system clock is used to generate pulses
+ * @get_time: query the time stored into the generator clock
+ * @enable: enable/disable the PPS pulses generation
+ *
+ * This is the main generator struct where all needed information must be
+ * placed before calling the pps_gen_register_source().
+ */
+struct pps_gen_source_info {
+ bool use_system_clock;
+
+ int (*get_time)(struct pps_gen_device *pps_gen,
+ struct timespec64 *time);
+ int (*enable)(struct pps_gen_device *pps_gen, bool enable);
+
+/* private: internal use only */
+ struct module *owner;
+ struct device *parent; /* for device_create */
+};
+
+/* The main struct */
+struct pps_gen_device {
+ const struct pps_gen_source_info *info; /* PPS generator info */
+ bool enabled; /* PPS generator status */
+
+ unsigned int event;
+ unsigned int sequence;
+
+ unsigned int last_ev; /* last PPS event id */
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS generator unique ID */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
+};
+
+/*
+ * Global variables
+ */
+
+extern const struct attribute_group *pps_gen_groups[];
+
+/*
+ * Exported functions
+ */
+
+extern struct pps_gen_device *pps_gen_register_source(
+ const struct pps_gen_source_info *info);
+extern void pps_gen_unregister_source(struct pps_gen_device *pps_gen);
+extern void pps_gen_event(struct pps_gen_device *pps_gen,
+ unsigned int event, void *data);
+
+#endif /* LINUX_PPS_GEN_KERNEL_H */
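/*
 * Illustrative sketch, not part of this patch: minimal registration against
 * the API above. foo_get_time() and foo_enable() are hypothetical driver
 * callbacks, and the error handling assumes an ERR_PTR-style return.
 */
static int foo_get_time(struct pps_gen_device *pps_gen, struct timespec64 *time);
static int foo_enable(struct pps_gen_device *pps_gen, bool enable);

static const struct pps_gen_source_info foo_gen_info = {
	.use_system_clock	= false,
	.get_time		= foo_get_time,
	.enable			= foo_enable,
};

static struct pps_gen_device *foo_gen;

static int foo_register(void)
{
	foo_gen = pps_gen_register_source(&foo_gen_info);
	return IS_ERR(foo_gen) ? PTR_ERR(foo_gen) : 0;
}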
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 78c8ac4951b5..aab0aebb529e 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -52,12 +52,12 @@ struct pps_device {
int current_mode; /* PPS mode at event time */
unsigned int last_ev; /* last PPS event id */
+ unsigned int last_fetched_ev; /* last fetched PPS event id */
wait_queue_head_t queue; /* PPS event queue */
unsigned int id; /* PPS source unique ID */
void const *lookup_cookie; /* For pps_lookup_dev() only */
- struct cdev cdev;
- struct device *dev;
+ struct device dev;
struct fasync_struct *async_queue; /* fasync method */
spinlock_t lock;
};
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index f7f1e5251c67..ff7dcc3fa105 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/once.h>
+#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {
@@ -46,10 +47,4 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
state->s4 = __seed(i, 128U);
}
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
-}
-
#endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 7233e9cf1bab..d964f965c8ff 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -134,11 +134,9 @@ static __always_inline unsigned char interrupt_context_level(void)
/*
* The following macros are deprecated and should not be used in new code:
- * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
*/
-#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
@@ -319,6 +317,7 @@ do { \
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
+struct task_struct;
/**
* preempt_ops - notifiers called when a task is preempted and rescheduled
@@ -368,12 +367,10 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
-#ifdef CONFIG_SMP
-
/*
* Migrate-Disable and why it is undesired.
*
- * When a preempted task becomes elegible to run under the ideal model (IOW it
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
@@ -388,7 +385,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* - a lower priority tasks; which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
- * in a migrate_disable() section, reducing it's available bandwidth.
+ * in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
@@ -400,7 +397,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
- * all these primitives employ migirate_disable() to restore this implicit
+ * all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
@@ -408,7 +405,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
- * a schedulability theory that does not depend on abritrary migration.
+ * a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
@@ -425,15 +422,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* work-conserving schedulers.
*
*/
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-#else
-
-static inline void migrate_disable(void) { }
-static inline void migrate_enable(void) { }
-
-#endif /* CONFIG_SMP */
/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
@@ -479,6 +467,54 @@ static __always_inline void preempt_enable_nested(void)
DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
-DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
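/*
 * Illustrative sketch, not part of this patch: the guards defined above give
 * scope-based preempt_disable()/preempt_enable() pairing. The per-CPU counter
 * is hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, foo_events);

static void foo_count_event(void)
{
	guard(preempt)();		/* preempt_disable() until scope exit */
	__this_cpu_inc(foo_events);	/* safe: no migration inside the guard */
}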
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+extern bool preempt_model_lazy(void);
+
+#else
+
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+static inline bool preempt_model_lazy(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
+
+extern const char *preempt_model_str(void);
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+ return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
+}
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 40afab23881a..45c663124c9b 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -9,6 +9,8 @@
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>
+struct console;
+
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -60,8 +62,9 @@ static inline const char *printk_skip_headers(const char *buffer)
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
-int add_preferred_console_match(const char *match, const char *name,
- const short idx);
+int match_devname_and_update_preferred_console(const char *match,
+ const char *name,
+ const short idx);
extern int console_printk[];
@@ -151,6 +154,8 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
+__printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
int _printk(const char *fmt, ...);
@@ -160,15 +165,19 @@ int _printk(const char *fmt, ...);
*/
__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
-extern void __printk_safe_enter(void);
-extern void __printk_safe_exit(void);
+extern void __printk_deferred_enter(void);
+extern void __printk_deferred_exit(void);
+
+extern void printk_force_console_enter(void);
+extern void printk_force_console_exit(void);
+
/*
* The printk_deferred_enter/exit macros are available only as a hack for
* some code paths that need to defer all printk console printing. Interrupts
* must be disabled for the deferred duration.
*/
-#define printk_deferred_enter __printk_safe_enter
-#define printk_deferred_exit __printk_safe_exit
+#define printk_deferred_enter() __printk_deferred_enter()
+#define printk_deferred_exit() __printk_deferred_exit()
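/*
 * Illustrative sketch, not part of this patch: deferring console output in a
 * section that must not re-enter console drivers. As the comment above notes,
 * interrupts must stay disabled across the deferred region. foo_report() is
 * hypothetical.
 */
static void foo_report(void)
{
	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();
	pr_warn("foo: reporting from a printk-unsafe path\n");
	printk_deferred_exit();
	local_irq_restore(flags);
}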
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -195,13 +204,23 @@ void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
void printk_trigger_flush(void);
-void console_replay_all(void);
+void console_try_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
+extern bool nbcon_device_try_acquire(struct console *con);
+extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
+bool pr_flush(int timeout_ms, bool reset_on_progress);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
{
return 0;
}
+static inline __printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args)
+{
+ return 0;
+}
static inline __printf(1, 2) __cold
int _printk(const char *s, ...)
{
@@ -221,6 +240,14 @@ static inline void printk_deferred_exit(void)
{
}
+static inline void printk_force_console_enter(void)
+{
+}
+
+static inline void printk_force_console_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -275,12 +302,33 @@ static inline void dump_stack(void)
static inline void printk_trigger_flush(void)
{
}
-static inline void console_replay_all(void)
+static inline void console_try_replay_all(void)
{
}
-#endif
-bool this_cpu_in_panic(void);
+static inline void printk_legacy_allow_panic_sync(void)
+{
+}
+
+static inline bool nbcon_device_try_acquire(struct console *con)
+{
+ return false;
+}
+
+static inline void nbcon_device_release(struct console *con)
+{
+}
+
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+ return true;
+}
+
+#endif
#ifdef CONFIG_SMP
extern int __printk_cpu_sync_try_get(void);
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
index 24da8364b919..8cdc987de963 100644
--- a/include/linux/prmt.h
+++ b/include/linux/prmt.h
@@ -1,7 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/uuid.h>
+
#ifdef CONFIG_ACPI_PRMT
void init_prmt(void);
+bool acpi_prm_handler_available(const guid_t *handler_guid);
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer);
#else
static inline void init_prmt(void) { }
+static inline bool acpi_prm_handler_available(const guid_t *handler_guid) { return false; }
+static inline int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ return -EOPNOTSUPP;
+}
#endif
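/*
 * Illustrative sketch, not part of this patch: check for a PRM handler before
 * invoking it. The GUID bytes and the parameter buffer layout are
 * hypothetical; a real handler defines its own ABI.
 */
static int foo_run_prm(void)
{
	static const guid_t foo_guid =
		GUID_INIT(0x12345678, 0x1234, 0x1234,
			  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78);
	u64 param = 0;

	if (!acpi_prm_handler_available(&foo_guid))
		return -ENODEV;

	return acpi_call_prm_handler(foo_guid, &param);
}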
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 0b2a89854440..19d1c5e5f335 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -20,10 +20,16 @@ enum {
* If in doubt, ignore this flag.
*/
#ifdef MODULE
- PROC_ENTRY_PERMANENT = 0U,
+ PROC_ENTRY_PERMANENT = 0U,
#else
- PROC_ENTRY_PERMANENT = 1U << 0,
+ PROC_ENTRY_PERMANENT = 1U << 0,
#endif
+
+ PROC_ENTRY_proc_read_iter = 1U << 1,
+ PROC_ENTRY_proc_compat_ioctl = 1U << 2,
+ PROC_ENTRY_proc_lseek = 1U << 3,
+
+ PROC_ENTRY_FORCE_LOOKUP = 1U << 7,
};
struct proc_ops {
@@ -60,8 +66,6 @@ enum proc_pidonly {
struct proc_fs_info {
struct pid_namespace *pid_ns;
- struct dentry *proc_self; /* For /proc/self */
- struct dentry *proc_thread_self; /* For /proc/thread-self */
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 5ea470eb4d76..e81b8e596e4f 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -5,7 +5,8 @@
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H
-#include <linux/ns_common.h>
+#include <linux/nsfs.h>
+#include <uapi/linux/nsfs.h>
struct pid_namespace;
struct nsset;
@@ -16,7 +17,6 @@ struct inode;
struct proc_ns_operations {
const char *name;
const char *real_ns_name;
- int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
int (*install)(struct nsset *nsset, struct ns_common *ns);
@@ -39,13 +39,14 @@ extern const struct proc_ns_operations timens_for_children_operations;
* We always define these enumerators
*/
enum {
- PROC_ROOT_INO = 1,
- PROC_IPC_INIT_INO = 0xEFFFFFFFU,
- PROC_UTS_INIT_INO = 0xEFFFFFFEU,
- PROC_USER_INIT_INO = 0xEFFFFFFDU,
- PROC_PID_INIT_INO = 0xEFFFFFFCU,
- PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
- PROC_TIME_INIT_INO = 0xEFFFFFFAU,
+ PROC_IPC_INIT_INO = IPC_NS_INIT_INO,
+ PROC_UTS_INIT_INO = UTS_NS_INIT_INO,
+ PROC_USER_INIT_INO = USER_NS_INIT_INO,
+ PROC_PID_INIT_INO = PID_NS_INIT_INO,
+ PROC_CGROUP_INIT_INO = CGROUP_NS_INIT_INO,
+ PROC_TIME_INIT_INO = TIME_NS_INIT_INO,
+ PROC_NET_INIT_INO = NET_NS_INIT_INO,
+ PROC_MNT_INIT_INO = MNT_NS_INIT_INO,
};
#ifdef CONFIG_PROC_FS
@@ -64,25 +65,6 @@ static inline void proc_free_inum(unsigned int inum) {}
#endif /* CONFIG_PROC_FS */
-static inline int ns_alloc_inum(struct ns_common *ns)
-{
- WRITE_ONCE(ns->stashed, NULL);
- return proc_alloc_inum(&ns->inum);
-}
-
-#define ns_free_inum(ns) proc_free_inum((ns)->inum)
-
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern int ns_get_path(struct path *path, struct task_struct *task,
- const struct proc_ns_operations *ns_ops);
-typedef struct ns_common *ns_get_path_helper_t(void *);
-extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
- void *private_data);
-
-extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
-
-extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
- const struct proc_ns_operations *ns_ops);
-extern void nsfs_init(void);
#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 04ae5ebcb637..3f53cdb0c27c 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -4,14 +4,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/cpumask.h>
#include <linux/cache.h>
#include <asm/errno.h>
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
-#define SLEEP_PROFILING 3
#define KVM_PROFILING 4
struct proc_dir_entry;
diff --git a/include/linux/property.h b/include/linux/property.h
index 61fc20e5f81f..272bfbdea7bf 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -17,6 +17,7 @@
#include <linux/fwnode.h>
#include <linux/stddef.h>
#include <linux/types.h>
+#include <linux/util_macros.h>
struct device;
@@ -37,6 +38,7 @@ struct fwnode_handle *__dev_fwnode(struct device *dev);
struct device *: __dev_fwnode)(dev)
bool device_property_present(const struct device *dev, const char *propname);
+bool device_property_read_bool(const struct device *dev, const char *propname);
int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval);
int device_property_read_u16_array(const struct device *dev, const char *propname,
@@ -54,6 +56,8 @@ int device_property_match_string(const struct device *dev,
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname);
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname);
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
@@ -164,10 +168,24 @@ struct fwnode_handle *fwnode_get_next_available_child_node(
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
+#define fwnode_for_each_named_child_node(fwnode, child, name) \
+ fwnode_for_each_child_node(fwnode, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
#define fwnode_for_each_available_child_node(fwnode, child) \
for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
child = fwnode_get_next_available_child_node(fwnode, child))
+#define fwnode_for_each_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_child_node(fwnode, child))
+
+#define fwnode_for_each_available_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_available_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_available_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child);
@@ -175,11 +193,19 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+#define device_for_each_named_child_node(dev, child, name) \
+ device_for_each_child_node(dev, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
#define device_for_each_child_node_scoped(dev, child) \
for (struct fwnode_handle *child __free(fwnode_handle) = \
device_get_next_child_node(dev, NULL); \
child; child = device_get_next_child_node(dev, child))
+#define device_for_each_named_child_node_scoped(dev, child, name) \
+ device_for_each_child_node_scoped(dev, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname);
struct fwnode_handle *device_get_named_child_node(const struct device *dev,
@@ -205,12 +231,19 @@ DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T))
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
-unsigned int device_get_child_node_count(const struct device *dev);
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode);
+
+static inline unsigned int device_get_child_node_count(const struct device *dev)
+{
+ return fwnode_get_child_node_count(dev_fwnode(dev));
+}
-static inline bool device_property_read_bool(const struct device *dev,
- const char *propname)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+ const char *name);
+static inline unsigned int device_get_named_child_node_count(const struct device *dev,
+ const char *name)
{
- return device_property_present(dev, propname);
+ return fwnode_get_named_child_node_count(dev_fwnode(dev), name);
}
static inline int device_property_read_u8(const struct device *dev,
@@ -263,12 +296,6 @@ static inline int device_property_string_array_count(const struct device *dev,
return device_property_read_string_array(dev, propname, NULL, 0);
}
-static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
- const char *propname)
-{
- return fwnode_property_present(fwnode, propname);
-}
-
static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
@@ -328,19 +355,26 @@ struct software_node;
/**
* struct software_node_ref_args - Reference property with additional arguments
- * @node: Reference to a software node
+ * @swnode: Reference to a software node
+ * @fwnode: Alternative reference to a firmware node handle
* @nargs: Number of elements in @args array
* @args: Integer arguments
*/
struct software_node_ref_args {
- const struct software_node *node;
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
unsigned int nargs;
u64 args[NR_FWNODE_REFERENCE_ARGS];
};
#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
(const struct software_node_ref_args) { \
- .node = _ref_, \
+ .swnode = _Generic(_ref_, \
+ const struct software_node *: _ref_, \
+ default: NULL), \
+ .fwnode = _Generic(_ref_, \
+ struct fwnode_handle *: _ref_, \
+ default: NULL), \
.nargs = COUNT_ARGS(__VA_ARGS__), \
.args = { __VA_ARGS__ }, \
}
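
An aside on the _Generic selection used above: the macro populates whichever member matches the static type of its argument, so one initializer accepts either a software node or a firmware node handle. A minimal sketch of the idiom (both nodes are invented; note the software node must be const-qualified to match the association exactly):

/* _Generic picks the matching initializer at compile time. */
static const struct software_node example_swnode = { .name = "example" };
static struct fwnode_handle example_fwnode;

/* Selects .swnode = &example_swnode; .fwnode becomes NULL. */
static const struct software_node_ref_args example_ref_sw =
	SOFTWARE_NODE_REFERENCE(&example_swnode);

/* Selects .fwnode = &example_fwnode; .swnode becomes NULL. */
static const struct software_node_ref_args example_ref_fw =
	SOFTWARE_NODE_REFERENCE(&example_fwnode);
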
@@ -557,8 +591,8 @@ const struct software_node *
software_node_find_by_name(const struct software_node *parent,
const char *name);
-int software_node_register_node_group(const struct software_node **node_group);
-void software_node_unregister_node_group(const struct software_node **node_group);
+int software_node_register_node_group(const struct software_node * const *node_group);
+void software_node_unregister_node_group(const struct software_node * const *node_group);
int software_node_register(const struct software_node *node);
void software_node_unregister(const struct software_node *node);
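
The scoped iterator variants added above pair the loop variable with __free(fwnode_handle), so the child reference taken at each step is dropped automatically on every exit path, including early returns. A hedged consumer sketch; the "port" child name and "enable" property are invented for illustration:

/* Count children named "port" that set a hypothetical "enable" property. */
static unsigned int example_count_enabled_ports(struct device *dev)
{
	unsigned int enabled = 0;

	device_for_each_named_child_node_scoped(dev, child, "port") {
		if (fwnode_property_read_bool(child, "enable"))
			enabled++;
	}

	/* No fwnode_handle_put() needed: the scoped loop cleans up. */
	return enabled;
}
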
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
index c9a31c567e85..2e18fef1a2e1 100644
--- a/include/linux/pruss_driver.h
+++ b/include/linux/pruss_driver.h
@@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss,
static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 *mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_gpimode(struct pruss *pruss,
enum pruss_pru_id pru_id,
enum pruss_gpi_mode mode)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
enum pru_type pru_type,
- bool enable);
+ bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_PRUSS */
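
The stub fixes above restore the kernel's error-return conventions: an int-returning function reports failure as a negative errno value, while ERR_PTR() encodes an errno into a pointer and belongs only in pointer-returning functions; the last stub additionally carried a stray semicolon that had turned its definition into a bare prototype. A small sketch of the two patterns, not taken from this header:

/* int-returning stub: negative errno directly. */
static inline int example_int_stub(void)
{
	return -EOPNOTSUPP;
}

/* Pointer-returning stub: errno wrapped with ERR_PTR(). */
static inline struct pruss *example_ptr_stub(void)
{
	return ERR_PTR(-EOPNOTSUPP);
}
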
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index 6d07c95dabb9..4e5696cfade7 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -5,12 +5,55 @@
#ifndef _LINUX_PSE_CONTROLLER_H
#define _LINUX_PSE_CONTROLLER_H
-#include <linux/ethtool.h>
#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/kfifo.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <linux/regulator/driver.h>
+/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
+#define MAX_PI_CURRENT 1920000
+/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */
+#define MAX_PI_PW 99900
+
+struct net_device;
struct phy_device;
struct pse_controller_dev;
+struct netlink_ext_ack;
+
+/* C33 PSE extended state and substate. */
+struct ethtool_c33_pse_ext_state_info {
+ enum ethtool_c33_pse_ext_state c33_pse_ext_state;
+ union {
+ enum ethtool_c33_pse_ext_substate_error_condition error_condition;
+ enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable;
+ enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted;
+ enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim;
+ enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected;
+ enum ethtool_c33_pse_ext_substate_power_not_available power_not_available;
+ enum ethtool_c33_pse_ext_substate_short_detected short_detected;
+ u32 __c33_pse_ext_substate;
+ };
+};
+
+struct ethtool_c33_pse_pw_limit_range {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct pse_irq_desc - notification sender description for IRQ-based events.
+ *
+ * @name: the visible name for the IRQ
+ * @map_event: driver callback to map the IRQ status into PSE device events.
+ */
+struct pse_irq_desc {
+ const char *name;
+ int (*map_event)(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask);
+};
/**
* struct pse_control_config - PSE control/channel configuration.
@@ -26,8 +69,54 @@ struct pse_control_config {
};
/**
- * struct pse_control_status - PSE control/channel status.
+ * struct pse_admin_state - PSE operational state
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @c33_admin_state: operational state of the PSE
+ * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
+ */
+struct pse_admin_state {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_c33_pse_admin_state c33_admin_state;
+};
+
+/**
+ * struct pse_pw_status - PSE power detection status
+ *
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @c33_pw_status: power detection status of the PSE.
+ * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ */
+struct pse_pw_status {
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+ enum ethtool_c33_pse_pw_d_status c33_pw_status;
+};
+
+/**
+ * struct pse_ext_state_info - PSE extended state information
+ *
+ * @c33_ext_state_info: extended state information of the PSE
+ */
+struct pse_ext_state_info {
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+};
+
+/**
+ * struct pse_pw_limit_ranges - PSE power limit configuration range
*
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation.
+ */
+struct pse_pw_limit_ranges {
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+};
+
+/**
+ * struct ethtool_pse_control_status - PSE control/channel status.
+ *
+ * @pw_d_id: PSE power domain index.
* @podl_admin_state: operational state of the PoDL PSE
* functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
* @podl_pw_status: power detection status of the PoDL PSE.
@@ -36,38 +125,100 @@ struct pse_control_config {
* functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
* @c33_pw_status: power detection status of the PSE.
* IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ * @c33_pw_class: detected class of a powered PD
+ * IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification
+ * @c33_actual_pw: power currently delivered by the PSE in mW
+ * IEEE 802.3-2022 30.9.1.1.23 aPSEActualPower
+ * @c33_ext_state_info: extended state information of the PSE
+ * @c33_avail_pw_limit: available power limit of the PSE in mW
+ * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation
+ * @c33_pw_limit_nb_ranges: number of supported power limit configuration
+ * ranges
+ * @prio_max: max priority allowed for the c33_prio variable value.
+ * @prio: priority of the PSE. Managed by PSE core in case of static budget
+ * evaluation strategy.
*/
-struct pse_control_status {
+struct ethtool_pse_control_status {
+ u32 pw_d_id;
enum ethtool_podl_pse_admin_state podl_admin_state;
enum ethtool_podl_pse_pw_d_status podl_pw_status;
enum ethtool_c33_pse_admin_state c33_admin_state;
enum ethtool_c33_pse_pw_d_status c33_pw_status;
+ u32 c33_pw_class;
+ u32 c33_actual_pw;
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+ u32 c33_avail_pw_limit;
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+ u32 c33_pw_limit_nb_ranges;
+ u32 prio_max;
+ u32 prio;
};
/**
* struct pse_controller_ops - PSE controller driver callbacks
*
- * @ethtool_get_status: get PSE control status for ethtool interface
- * @setup_pi_matrix: setup PI matrix of the PSE controller
- * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not.
- * May also return negative errno.
+ * @setup_pi_matrix: Setup PI matrix of the PSE controller.
+ * The PSE PIs devicetree nodes have already been parsed by
+ * of_load_pse_pis() and the pcdev->pi[x]->pairset[y].np
+ * populated. This callback should establish the
+ * relationship between the PSE controller hardware ports
+ * and the PSE Power Interfaces, either through software
+ * mapping or hardware configuration.
+ * @pi_get_admin_state: Get the operational state of the PSE PI. This ops
+ * is mandatory.
+ * @pi_get_pw_status: Get the power detection status of the PSE PI. This
+ * ops is mandatory.
+ * @pi_get_ext_state: Get the extended state of the PSE PI.
+ * @pi_get_pw_class: Get the power class of the PSE PI.
+ * @pi_get_actual_pw: Get actual power of the PSE PI in mW.
* @pi_enable: Configure the PSE PI as enabled.
* @pi_disable: Configure the PSE PI as disabled.
+ * @pi_get_voltage: Return voltage similarly to get_voltage regulator
+ * callback in uV.
+ * @pi_get_pw_limit: Get the configured power limit of the PSE PI in mW.
+ * @pi_set_pw_limit: Configure the power limit of the PSE PI in mW.
+ * @pi_get_pw_limit_ranges: Get the supported power limit configuration
+ * range. The driver is in charge of the memory
+ * allocation and should return the number of
+ * ranges.
+ * @pi_get_prio: Get the PSE PI priority.
+ * @pi_set_prio: Configure the PSE PI priority.
+ * @pi_get_pw_req: Get the power requested by a PD before enabling the PSE PI.
+ * This is only relevant when an interrupt is registered using
+ * devm_pse_irq_helper().
*/
struct pse_controller_ops {
- int (*ethtool_get_status)(struct pse_controller_dev *pcdev,
- unsigned long id, struct netlink_ext_ack *extack,
- struct pse_control_status *status);
int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
- int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_admin_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state);
+ int (*pi_get_pw_status)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status);
+ int (*pi_get_ext_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info);
+ int (*pi_get_pw_class)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_actual_pw)(struct pse_controller_dev *pcdev, int id);
int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev,
+ int id);
+ int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev,
+ int id, int max_mW);
+ int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges);
+ int (*pi_get_prio)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_set_prio)(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio);
+ int (*pi_get_pw_req)(struct pse_controller_dev *pcdev, int id);
};
struct module;
struct device_node;
struct of_phandle_args;
struct pse_control;
+struct ethtool_pse_control_status;
/* PSE PI pairset pinout can either be Alternative A or Alternative B */
enum pse_pi_pairset_pinout {
@@ -95,12 +246,35 @@ struct pse_pi_pairset {
* @np: device node pointer of the PSE PI node
* @rdev: regulator represented by the PSE PI
* @admin_state_enabled: PI enabled state
+ * @pw_d: Power domain of the PSE PI
+ * @prio: Priority of the PSE PI. Used in static budget evaluation strategy
+ * @isr_pd_detected: PSE PI detection status managed by the interrupt
+ * handler. This variable is relevant when the power enable
+ * management is handled in software, as in the static
+ * budget evaluation strategy.
+ * @pw_allocated_mW: Power allocated to a PSE PI to manage power budget in
+ * static budget evaluation strategy.
*/
struct pse_pi {
struct pse_pi_pairset pairset[2];
struct device_node *np;
struct regulator_dev *rdev;
bool admin_state_enabled;
+ struct pse_power_domain *pw_d;
+ int prio;
+ bool isr_pd_detected;
+ int pw_allocated_mW;
+};
+
+/**
+ * struct pse_ntf - PSE notification element
+ *
+ * @id: ID of the PSE control
+ * @notifs: PSE notifications to be reported
+ */
+struct pse_ntf {
+ int id;
+ unsigned long notifs;
};
/**
@@ -117,6 +291,13 @@ struct pse_pi {
* @types: types of the PSE controller
* @pi: table of PSE PIs described in this controller device
* @no_of_pse_pi: flag set if the pse_pis devicetree node is not used
+ * @irq: PSE interrupt
+ * @pis_prio_max: Maximum value allowed for the PSE PIs priority
+ * @supp_budget_eval_strategies: budget evaluation strategies supported
+ * by the PSE
+ * @ntf_work: workqueue for PSE notification management
+ * @ntf_fifo: PSE notifications FIFO
+ * @ntf_fifo_lock: protect @ntf_fifo writer
*/
struct pse_controller_dev {
const struct pse_controller_ops *ops;
@@ -130,6 +311,30 @@ struct pse_controller_dev {
enum ethtool_pse_types types;
struct pse_pi *pi;
bool no_of_pse_pi;
+ int irq;
+ unsigned int pis_prio_max;
+ u32 supp_budget_eval_strategies;
+ struct work_struct ntf_work;
+ DECLARE_KFIFO_PTR(ntf_fifo, struct pse_ntf);
+ spinlock_t ntf_fifo_lock; /* Protect @ntf_fifo writer */
+};
+
+/**
+ * enum pse_budget_eval_strategies - PSE budget evaluation strategies.
+ * @PSE_BUDGET_EVAL_STRAT_DISABLED: Budget evaluation strategy disabled.
+ * @PSE_BUDGET_EVAL_STRAT_STATIC: PSE static budget evaluation strategy.
+ * Budget evaluation strategy based on the power requested during PD
+ * classification. This strategy is managed by the PSE core.
+ * @PSE_BUDGET_EVAL_STRAT_DYNAMIC: PSE dynamic budget evaluation
+ * strategy. Budget evaluation strategy based on the current consumption
+ * per port compared to the total power budget. This mode is managed by
+ * the PSE controller.
+ */
+
+enum pse_budget_eval_strategies {
+ PSE_BUDGET_EVAL_STRAT_DISABLED = 1 << 0,
+ PSE_BUDGET_EVAL_STRAT_STATIC = 1 << 1,
+ PSE_BUDGET_EVAL_STRAT_DYNAMIC = 1 << 2,
};
#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
@@ -138,23 +343,33 @@ void pse_controller_unregister(struct pse_controller_dev *pcdev);
struct device;
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev);
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d);
-struct pse_control *of_pse_control_get(struct device_node *node);
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev);
void pse_control_put(struct pse_control *psec);
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status);
+ struct ethtool_pse_control_status *status);
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config);
+int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit);
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio);
bool pse_has_podl(struct pse_control *psec);
bool pse_has_c33(struct pse_control *psec);
#else
-static inline struct pse_control *of_pse_control_get(struct device_node *node)
+static inline struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
{
return ERR_PTR(-ENOENT);
}
@@ -165,16 +380,30 @@ static inline void pse_control_put(struct pse_control *psec)
static inline int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
- struct pse_control_status *status)
+ struct ethtool_pse_control_status *status)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ return -EOPNOTSUPP;
}
static inline bool pse_has_podl(struct pse_control *psec)
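
For context on the pse.h rework: the single ethtool_get_status callback is split into per-property getters, of which pi_get_admin_state and pi_get_pw_status are mandatory. A hedged sketch of a driver wiring up just that pair; the admin state is derived from the framework-maintained admin_state_enabled flag, and a real driver would read its hardware for the power status (pi_enable/pi_disable would be filled in the same way):

static int example_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
				      struct pse_admin_state *admin_state)
{
	admin_state->c33_admin_state = pcdev->pi[id].admin_state_enabled ?
		ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED :
		ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
	return 0;
}

static int example_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
				    struct pse_pw_status *pw_status)
{
	/* Placeholder: a real driver reads a hardware status register. */
	pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
	return 0;
}

static const struct pse_controller_ops example_pse_ops = {
	.pi_get_admin_state = example_pi_get_admin_state,
	.pi_get_pw_status = example_pi_get_pw_status,
};
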
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index 730f77381d55..a651e60d9410 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,9 +5,11 @@
struct pseudo_fs_context {
const struct super_operations *ops;
+ const struct export_operations *eops;
const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
+ unsigned int s_d_flags;
};
struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index f1fd3a8044e0..dd10c22299ab 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -84,11 +84,9 @@ enum psi_aggregators {
struct psi_group_cpu {
/* 1st cacheline updated by the scheduler */
- /* Aggregator needs to know of concurrent changes */
- seqcount_t seq ____cacheline_aligned_in_smp;
-
/* States of the tasks belonging to this group */
- unsigned int tasks[NR_PSI_TASK_COUNTS];
+ unsigned int tasks[NR_PSI_TASK_COUNTS]
+ ____cacheline_aligned_in_smp;
/* Aggregate pressure state derived from the tasks */
u32 state_mask;
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
index c1dc87fc536b..540abf7de048 100644
--- a/include/linux/psp-platform-access.h
+++ b/include/linux/psp-platform-access.h
@@ -6,8 +6,11 @@
#include <linux/psp.h>
enum psp_platform_access_msg {
- PSP_CMD_NONE = 0x0,
- PSP_I2C_REQ_BUS_CMD = 0x64,
+ PSP_CMD_NONE = 0x0,
+ PSP_SFS_GET_FW_VERSIONS,
+ PSP_SFS_UPDATE,
+ PSP_CMD_HSTI_QUERY = 0x14,
+ PSP_I2C_REQ_BUS_CMD = 0x64,
PSP_DYNAMIC_BOOST_GET_NONCE,
PSP_DYNAMIC_BOOST_SET_UID,
PSP_DYNAMIC_BOOST_GET_PARAMETER,
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 3705c2044fc0..69ffa4b4d1fa 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -14,6 +14,39 @@
#include <uapi/linux/psp-sev.h>
+/* As defined by SEV API, under "Guest Policy". */
+#define SEV_POLICY_MASK_NODBG BIT(0)
+#define SEV_POLICY_MASK_NOKS BIT(1)
+#define SEV_POLICY_MASK_ES BIT(2)
+#define SEV_POLICY_MASK_NOSEND BIT(3)
+#define SEV_POLICY_MASK_DOMAIN BIT(4)
+#define SEV_POLICY_MASK_SEV BIT(5)
+#define SEV_POLICY_MASK_API_MAJOR GENMASK(23, 16)
+#define SEV_POLICY_MASK_API_MINOR GENMASK(31, 24)
+
+/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
+#define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0)
+#define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8)
+#define SNP_POLICY_MASK_SMT BIT_ULL(16)
+#define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17)
+#define SNP_POLICY_MASK_MIGRATE_MA BIT_ULL(18)
+#define SNP_POLICY_MASK_DEBUG BIT_ULL(19)
+#define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20)
+#define SNP_POLICY_MASK_CXL_ALLOW BIT_ULL(21)
+#define SNP_POLICY_MASK_MEM_AES_256_XTS BIT_ULL(22)
+#define SNP_POLICY_MASK_RAPL_DIS BIT_ULL(23)
+#define SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM BIT_ULL(24)
+#define SNP_POLICY_MASK_PAGE_SWAP_DISABLE BIT_ULL(25)
+
+/* Base SEV-SNP policy bitmask for minimum supported SEV firmware version */
+#define SNP_POLICY_MASK_BASE (SNP_POLICY_MASK_API_MINOR | \
+ SNP_POLICY_MASK_API_MAJOR | \
+ SNP_POLICY_MASK_SMT | \
+ SNP_POLICY_MASK_RSVD_MBO | \
+ SNP_POLICY_MASK_MIGRATE_MA | \
+ SNP_POLICY_MASK_DEBUG | \
+ SNP_POLICY_MASK_SINGLE_SOCKET)
+
#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
/**
@@ -107,7 +140,15 @@ enum sev_cmd {
SEV_CMD_SNP_DOWNLOAD_FIRMWARE_EX = 0x0CA,
SEV_CMD_SNP_COMMIT = 0x0CB,
SEV_CMD_SNP_VLEK_LOAD = 0x0CD,
-
+ SEV_CMD_SNP_FEATURE_INFO = 0x0CE,
+
+ /* SEV-TIO commands */
+ SEV_CMD_TIO_STATUS = 0x0D0,
+ SEV_CMD_TIO_INIT = 0x0D1,
+ SEV_CMD_TIO_DEV_CREATE = 0x0D2,
+ SEV_CMD_TIO_DEV_RECLAIM = 0x0D3,
+ SEV_CMD_TIO_DEV_CONNECT = 0x0D4,
+ SEV_CMD_TIO_DEV_DISCONNECT = 0x0D5,
SEV_CMD_MAX,
};
@@ -594,6 +635,7 @@ struct sev_data_snp_addr {
* @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the
* purpose of guest-assisted migration.
* @rsvd: reserved
+ * @desired_tsc_khz: hypervisor-desired mean TSC frequency of the guest in kHz
* @gosvw: guest OS-visible workarounds, as defined by hypervisor
*/
struct sev_data_snp_launch_start {
@@ -603,6 +645,7 @@ struct sev_data_snp_launch_start {
u32 ma_en:1; /* In */
u32 imi_en:1; /* In */
u32 rsvd:30;
+ u32 desired_tsc_khz; /* In */
u8 gosvw[16]; /* In */
} __packed;
@@ -658,6 +701,7 @@ struct sev_data_snp_launch_update {
* @id_auth_paddr: system physical address of ID block authentication structure
* @id_block_en: indicates whether ID block is present
* @auth_key_en: indicates whether author key is present in authentication structure
+ * @vcek_disabled: indicates whether use of VCEK is allowed for attestation reports
* @rsvd: reserved
* @host_data: host-supplied data for guest, not interpreted by firmware
*/
@@ -667,7 +711,8 @@ struct sev_data_snp_launch_finish {
u64 id_auth_paddr;
u8 id_block_en:1;
u8 auth_key_en:1;
- u64 rsvd:62;
+ u8 vcek_disabled:1;
+ u64 rsvd:61;
u8 host_data[32];
} __packed;
@@ -743,10 +788,14 @@ struct sev_data_snp_guest_request {
struct sev_data_snp_init_ex {
u32 init_rmp:1;
u32 list_paddr_en:1;
- u32 rsvd:30;
+ u32 rapl_dis:1;
+ u32 ciphertext_hiding_en:1;
+ u32 tio_en:1;
+ u32 rsvd:27;
u32 rsvd1;
u64 list_paddr;
- u8 rsvd2[48];
+ u16 max_snp_asid;
+ u8 rsvd2[46];
} __packed;
/**
@@ -795,10 +844,13 @@ struct sev_data_snp_shutdown_ex {
* @probe: True if this is being called as part of CCP module probe, which
* will defer SEV_INIT/SEV_INIT_EX firmware initialization until needed
* unless psp_init_on_probe module param is set
+ * @max_snp_asid: When non-zero, enable ciphertext hiding and specify the
+ * maximum ASID that can be used for an SEV-SNP guest.
*/
struct sev_platform_init_args {
int error;
bool probe;
+ unsigned int max_snp_asid;
};
/**
@@ -810,9 +862,55 @@ struct sev_data_snp_commit {
u32 len;
} __packed;
+/**
+ * struct sev_data_snp_feature_info - SEV_SNP_FEATURE_INFO structure
+ *
+ * @length: length of the command buffer read by the PSP
+ * @ecx_in: subfunction index
+ * @feature_info_paddr: system physical address of the FEATURE_INFO structure
+ */
+struct sev_data_snp_feature_info {
+ u32 length;
+ u32 ecx_in;
+ u64 feature_info_paddr;
+} __packed;
+
+/**
+ * struct snp_feature_info - FEATURE_INFO structure
+ *
+ * @eax: output of SNP_FEATURE_INFO command
+ * @ebx: output of SNP_FEATURE_INFO command
+ * @ecx: output of SNP_FEATURE_INFO command
+ * @edx: output of SNP_FEATURE_INFO command
+ */
+struct snp_feature_info {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+} __packed;
+
+/* Feature bits in ECX */
+#define SNP_RAPL_DISABLE_SUPPORTED BIT(2)
+#define SNP_CIPHER_TEXT_HIDING_SUPPORTED BIT(3)
+#define SNP_AES_256_XTS_POLICY_SUPPORTED BIT(4)
+#define SNP_CXL_ALLOW_POLICY_SUPPORTED BIT(5)
+
+/* Feature bits in EBX */
+#define SNP_SEV_TIO_SUPPORTED BIT(1)
+
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
+ * sev_module_init - perform PSP SEV module initialization
+ *
+ * Returns:
+ * 0 if the PSP module is successfully initialized
+ * negative value if the PSP module initialization fails
+ */
+int sev_module_init(void);
+
+/**
* sev_platform_init - perform SEV INIT command
*
* @args: struct sev_platform_init_args to pass in arguments
@@ -942,7 +1040,11 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret);
void *psp_copy_user_blob(u64 uaddr, u32 len);
void *snp_alloc_firmware_page(gfp_t mask);
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked);
void snp_free_firmware_page(void *addr);
+void sev_platform_shutdown(void);
+bool sev_is_snp_ciphertext_hiding_supported(void);
+u64 sev_get_snp_policy_bits(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
@@ -975,8 +1077,17 @@ static inline void *snp_alloc_firmware_page(gfp_t mask)
return NULL;
}
+static inline int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+{
+ return -ENODEV;
+}
+
static inline void snp_free_firmware_page(void *addr) { }
+static inline void sev_platform_shutdown(void) { }
+
+static inline bool sev_is_snp_ciphertext_hiding_supported(void) { return false; }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */
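
The SNP_POLICY_MASK_* definitions reduce guest-policy validation to bit arithmetic. A hedged sketch of the kind of check a hypervisor could perform, taking the supported set from sev_get_snp_policy_bits() declared above (the example_ helper itself is not a kernel API):

static bool example_snp_policy_acceptable(u64 policy)
{
	u64 supported = sev_get_snp_policy_bits();

	/* Bit 17 is reserved-must-be-one per the SEV-SNP firmware ABI. */
	if (!(policy & SNP_POLICY_MASK_RSVD_MBO))
		return false;

	/* Reject any requested bit the platform does not support. */
	return !(policy & ~supported);
}
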
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 638507a3c8ff..fed601053c51 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -182,7 +182,7 @@ struct pstore_info {
struct module *owner;
const char *name;
- spinlock_t buf_lock;
+ raw_spinlock_t buf_lock;
char *buf;
size_t bufsize;
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
index 8dbd51ea8626..240bd3bff18d 100644
--- a/include/linux/ptdump.h
+++ b/include/linux/ptdump.h
@@ -11,10 +11,17 @@ struct ptdump_range {
};
struct ptdump_state {
- /* level is 0:PGD to 4:PTE, or -1 if unknown */
- void (*note_page)(struct ptdump_state *st, unsigned long addr,
- int level, u64 val);
- void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+ void (*note_page_pte)(struct ptdump_state *st, unsigned long addr, pte_t pte);
+ void (*note_page_pmd)(struct ptdump_state *st, unsigned long addr, pmd_t pmd);
+ void (*note_page_pud)(struct ptdump_state *st, unsigned long addr, pud_t pud);
+ void (*note_page_p4d)(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
+ void (*note_page_pgd)(struct ptdump_state *st, unsigned long addr, pgd_t pgd);
+ void (*note_page_flush)(struct ptdump_state *st);
+ void (*effective_prot_pte)(struct ptdump_state *st, pte_t pte);
+ void (*effective_prot_pmd)(struct ptdump_state *st, pmd_t pmd);
+ void (*effective_prot_pud)(struct ptdump_state *st, pud_t pud);
+ void (*effective_prot_p4d)(struct ptdump_state *st, p4d_t p4d);
+ void (*effective_prot_pgd)(struct ptdump_state *st, pgd_t pgd);
const struct ptdump_range *range;
};
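
With per-level hooks, a dumper receives typed page-table entries instead of a (level, u64) pair and no longer has to switch on a level integer. A hedged sketch of two of the hooks (printing only; a real dumper would normally populate every callback and point .range at its address ranges):

static void example_note_pte(struct ptdump_state *st, unsigned long addr,
			     pte_t pte)
{
	pr_info("pte @ %#lx: %#llx\n", addr, (unsigned long long)pte_val(pte));
}

static void example_note_pgd(struct ptdump_state *st, unsigned long addr,
			     pgd_t pgd)
{
	pr_info("pgd @ %#lx: %#llx\n", addr, (unsigned long long)pgd_val(pgd));
}

static struct ptdump_state example_state = {
	.note_page_pte = example_note_pte,
	.note_page_pgd = example_note_pgd,
	/* .range = example_ranges, an array of struct ptdump_range */
};
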
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index 1b5a953c6bbc..3a74f69e0b59 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,7 +10,7 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ip.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6e4b8206c7d0..884364596dd3 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -47,10 +47,12 @@ struct system_device_crosststamp;
* struct ptp_system_timestamp - system time corresponding to a PHC timestamp
* @pre_ts: system timestamp before capturing PHC
* @post_ts: system timestamp after capturing PHC
+ * @clockid: clock-base used for capturing the system timestamps
*/
struct ptp_system_timestamp {
struct timespec64 pre_ts;
struct timespec64 post_ts;
+ clockid_t clockid;
};
/**
@@ -65,7 +67,25 @@ struct ptp_system_timestamp {
* @n_ext_ts: The number of external time stamp channels.
* @n_per_out: The number of programmable periodic signals.
* @n_pins: The number of programmable pins.
+ * @n_per_lp: The number of channels that support looping back the
+ * periodic output signal.
* @pps: Indicates whether the clock supports a PPS callback.
+ *
+ * @supported_perout_flags: The set of flags the driver supports for the
+ * PTP_PEROUT_REQUEST ioctl. The PTP core will
+ * reject a request with any flag not specified
+ * here.
+ *
+ * @supported_extts_flags: The set of flags the driver supports for the
+ * PTP_EXTTS_REQUEST ioctl. The PTP core will use
+ * this list to reject unsupported requests.
+ * PTP_ENABLE_FEATURE is assumed and does not need to
+ * be included. If PTP_STRICT_FLAGS is *not* set,
+ * then both PTP_RISING_EDGE and PTP_FALLING_EDGE
+ * will be assumed. Note that PTP_STRICT_FLAGS must
+ * be set if the driver wants to honor
+ * PTP_EXTTS_REQUEST2 and any future flags.
+ *
* @pin_config: Array of length 'n_pins'. If the number of
* programmable pins is nonzero, then drivers must
* allocate and initialize this array.
@@ -157,6 +177,11 @@ struct ptp_system_timestamp {
* scheduling time (>=0) or negative value in case further
* scheduling is not required.
*
+ * @perout_loopback: Request driver to enable or disable the periodic output
+ * signal loopback.
+ * parameter index: index of the periodic output signal channel.
+ * parameter on: caller passes one to enable or zero to disable.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -171,7 +196,10 @@ struct ptp_clock_info {
int n_ext_ts;
int n_per_out;
int n_pins;
+ int n_per_lp;
int pps;
+ unsigned int supported_perout_flags;
+ unsigned int supported_extts_flags;
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
@@ -193,6 +221,8 @@ struct ptp_clock_info {
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
long (*do_aux_work)(struct ptp_clock_info *ptp);
+ int (*perout_loopback)(struct ptp_clock_info *ptp, unsigned int index,
+ int on);
};
struct ptp_clock;
@@ -305,7 +335,7 @@ static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
*
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * Returns: a valid pointer on success or PTR_ERR on failure. If PHC
* support is missing at the configuration level, this function
* returns NULL, and drivers are expected to gracefully handle that
* case separately.
@@ -341,6 +371,24 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
+ * ptp_clock_index_by_of_node() - obtain the device index of
+ * a PTP clock based on the PTP device of_node
+ *
+ * @np: The device of_node pointer of the PTP device.
+ * Return: The PHC index on success or -1 on failure.
+ */
+int ptp_clock_index_by_of_node(struct device_node *np);
+
+/**
+ * ptp_clock_index_by_dev() - obtain the device index of
+ * a PTP clock based on the PTP device.
+ *
+ * @parent: The parent device (PTP device) pointer of the PTP clock.
+ * Return: The PHC index on success or -1 on failure.
+ */
+int ptp_clock_index_by_dev(struct device *parent);
+
+/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
* The caller must hold ptp_clock::pincfg_mux. Drivers do not have
@@ -405,6 +453,10 @@ static inline void ptp_clock_event(struct ptp_clock *ptp,
{ }
static inline int ptp_clock_index(struct ptp_clock *ptp)
{ return -1; }
+static inline int ptp_clock_index_by_of_node(struct device_node *np)
+{ return -1; }
+static inline int ptp_clock_index_by_dev(struct device *parent)
+{ return -1; }
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
@@ -443,7 +495,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
* @hwtstamp: timestamp
* @vclock_index: phc index of ptp vclock.
*
- * Returns converted timestamp, or 0 on error.
+ * Returns: converted timestamp, or 0 on error.
*/
ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
#else
@@ -458,13 +510,13 @@ static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
{
if (sts)
- ktime_get_real_ts64(&sts->pre_ts);
+ ktime_get_clock_ts64(sts->clockid, &sts->pre_ts);
}
static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
{
if (sts)
- ktime_get_real_ts64(&sts->post_ts);
+ ktime_get_clock_ts64(sts->clockid, &sts->post_ts);
}
#endif
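
Two consumer-visible points from the ptp_clock_kernel.h changes: the core now rejects PTP_PEROUT_REQUEST/PTP_EXTTS_REQUEST flags a driver did not advertise, and ptp_read_system_prets()/ptp_read_system_postts() sample the clock selected by sts->clockid (set by the core) rather than always CLOCK_REALTIME. A hedged sketch of a driver advertising its capabilities; the example_ callbacks are assumed to exist:

static const struct ptp_clock_info example_ptp_info = {
	.owner = THIS_MODULE,
	.name = "example-phc",
	.max_adj = 100000000,
	.n_per_out = 1,
	.n_ext_ts = 1,
	/* The core rejects PTP_PEROUT_REQUEST flags not listed here. */
	.supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
	/*
	 * PTP_ENABLE_FEATURE is implied; PTP_STRICT_FLAGS opts in to
	 * exact matching for PTP_EXTTS_REQUEST2 and any future flags.
	 */
	.supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE |
				 PTP_STRICT_FLAGS,
	.adjfine = example_adjfine,
	.adjtime = example_adjtime,
	.gettimex64 = example_gettimex64,
	.settime64 = example_settime64,
	.enable = example_enable,
};
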
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index fd037c127bb0..534531807d95 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -243,6 +243,24 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
return ret;
}
+/* Zero entries from tail to specified head.
+ * NB: if consumer_head can be >= r->size, the caller needs to fix up the tail later.
+ */
+static inline void __ptr_ring_zero_tail(struct ptr_ring *r, int consumer_head)
+{
+ int head = consumer_head;
+
+ /* Zero out entries in the reverse order: this way we touch the
+ * cache line that producer might currently be reading the last;
+ * producer won't make progress and touch other cache lines
+ * besides the first one until we write out all entries.
+ */
+ while (likely(head > r->consumer_tail))
+ r->queue[--head] = NULL;
+
+ r->consumer_tail = consumer_head;
+}
+
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
@@ -261,8 +279,7 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
* to work correctly.
*/
- int consumer_head = r->consumer_head;
- int head = consumer_head++;
+ int consumer_head = r->consumer_head + 1;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
@@ -270,16 +287,9 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
* but helps keep the implementation simple.
*/
if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
- consumer_head >= r->size)) {
- /* Zero out entries in the reverse order: this way we touch the
- * cache line that producer might currently be reading the last;
- * producer won't make progress and touch other cache lines
- * besides the first one until we write out all entries.
- */
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = consumer_head;
- }
+ consumer_head >= r->size))
+ __ptr_ring_zero_tail(r, consumer_head);
+
if (unlikely(consumer_head >= r->size)) {
consumer_head = 0;
r->consumer_tail = 0;
@@ -513,7 +523,6 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
void (*destroy)(void *))
{
unsigned long flags;
- int head;
spin_lock_irqsave(&r->consumer_lock, flags);
spin_lock(&r->producer_lock);
@@ -525,17 +534,14 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
* Clean out buffered entries (for simplicity). This way following code
* can test entries for NULL and if not assume they are valid.
*/
- head = r->consumer_head - 1;
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ __ptr_ring_zero_tail(r, r->consumer_head);
/*
* Go over entries in batch, start moving head back and copy entries.
* Stop when we run into previously unconsumed entries.
*/
while (n) {
- head = r->consumer_head - 1;
+ int head = r->consumer_head - 1;
if (head < 0)
head = r->size - 1;
if (r->queue[head]) {
@@ -615,15 +621,14 @@ static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
- * In particular if you consume ring in interrupt or BH context, you must
- * disable interrupts/BH when doing so.
+ * In particular if you consume ring in BH context, you must
+ * disable BH when doing so.
*/
-static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
- unsigned int nrings,
- int size,
- gfp_t gfp, void (*destroy)(void *))
+static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
+ unsigned int nrings,
+ int size, gfp_t gfp,
+ void (*destroy)(void *))
{
- unsigned long flags;
void ***queues;
int i;
@@ -638,12 +643,12 @@ static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock_bh(&(rings[i])->consumer_lock);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
- spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
+ spin_unlock_bh(&(rings[i])->consumer_lock);
}
for (i = 0; i < nrings; ++i)
@@ -662,8 +667,8 @@ nomem:
noqueues:
return -ENOMEM;
}
-#define ptr_ring_resize_multiple(...) \
- alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__))
+#define ptr_ring_resize_multiple_bh(...) \
+ alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
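
The _bh suffix makes the resize helper's locking contract explicit: consumer locks are taken with spin_lock_bh(), matching consumers that run in softirq context, where the old variant disabled interrupts outright. A hedged usage sketch for rings holding kfree()-able entries:

/*
 * Resize several rings at once; entries displaced during the swap go
 * to the destroy callback, here kfree().
 */
static int example_resize_rings(struct ptr_ring **rings, unsigned int nrings)
{
	return ptr_ring_resize_multiple_bh(rings, nrings, 1024, GFP_KERNEL,
					   kfree);
}
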
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 60b92c2c75ef..b11ae91723f8 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -2,11 +2,16 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
+#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+MODULE_IMPORT_NS("PWM");
+
struct pwm_chip;
/**
@@ -46,6 +51,31 @@ enum {
PWMF_EXPORTED = 1,
};
+/**
+ * struct pwm_waveform - description of a PWM waveform
+ * @period_length_ns: PWM period
+ * @duty_length_ns: PWM duty cycle
+ * @duty_offset_ns: offset of the rising edge from the period's start
+ *
+ * This is a representation of a PWM waveform alternative to struct pwm_state
+ * below. It's more expressive than struct pwm_state as it contains a
+ * duty_offset_ns and so can represent offsets other than zero (with .polarity =
+ * PWM_POLARITY_NORMAL) and period - duty_cycle (.polarity =
+ * PWM_POLARITY_INVERSED).
+ *
+ * Note there is no explicit bool for enabled. A "disabled" PWM is represented
+ * by .period_length_ns = 0. Note further that the behaviour of a "disabled" PWM
+ * is undefined. Depending on the hardware's capabilities it might drive the
+ * active or inactive level, go high-z or even continue to toggle.
+ *
+ * The unit for all three members is nanoseconds.
+ */
+struct pwm_waveform {
+ u64 period_length_ns;
+ u64 duty_length_ns;
+ u64 duty_offset_ns;
+};
+
/*
* struct pwm_state - state of a PWM channel
* @period: PWM period (in nanoseconds)
@@ -190,6 +220,8 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
*
* pwm_get_state(pwm, &state);
* duty = pwm_get_relative_duty_cycle(&state, 100);
+ *
+ * Returns: rounded relative duty cycle multiplied by @scale
*/
static inline unsigned int
pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
@@ -216,8 +248,8 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
* pwm_set_relative_duty_cycle(&state, 50, 100);
* pwm_apply_might_sleep(pwm, &state);
*
- * This functions returns -EINVAL if @duty_cycle and/or @scale are
- * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ * Returns: 0 on success or ``-EINVAL`` if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale)
*/
static inline int
pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
@@ -243,21 +275,37 @@ struct pwm_capture {
unsigned int duty_cycle;
};
+#define PWM_WFHWSIZE 20
+
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @capture: capture and report PWM signal
+ * @sizeof_wfhw: size (in bytes) of the driver-specific waveform representation
+ * @round_waveform_tohw: convert a struct pwm_waveform to the driver-specific representation
+ * @round_waveform_fromhw: convert a driver-specific waveform representation to struct pwm_waveform
+ * @read_waveform: read the driver-specific waveform representation from hardware
+ * @write_waveform: write the driver-specific waveform representation to hardware
* @apply: atomically apply a new PWM config
- * @get_state: get the current PWM state. This function is only
- * called once per PWM device when the PWM chip is
- * registered.
+ * @get_state: get the current PWM state.
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
+
+ size_t sizeof_wfhw;
+ int (*round_waveform_tohw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_waveform *wf, void *wfhw);
+ int (*round_waveform_fromhw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw, struct pwm_waveform *wf);
+ int (*read_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ void *wfhw);
+ int (*write_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw);
+
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state);
int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -267,17 +315,23 @@ struct pwm_ops {
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
+ * @cdev: &struct cdev for this device
* @ops: callbacks for this PWM controller
* @owner: module providing this chip
* @id: unique number of this PWM chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
* @atomic: can the driver's ->apply() be called in atomic context
+ * @gpio: &struct gpio_chip to operate this PWM chip's lines as GPO
* @uses_pwmchip_alloc: signals if pwmchip_allow was used to allocate this chip
+ * @operational: signals if the chip can be used (or is already deregistered)
+ * @nonatomic_lock: mutex for nonatomic chips
+ * @atomic_lock: mutex for atomic chips
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device dev;
+ struct cdev cdev;
const struct pwm_ops *ops;
struct module *owner;
unsigned int id;
@@ -288,16 +342,44 @@ struct pwm_chip {
bool atomic;
/* only used internally by the PWM framework */
+ struct gpio_chip gpio;
bool uses_pwmchip_alloc;
+ bool operational;
+ union {
+ /*
+ * depending on the chip being atomic or not either the mutex or
+ * the spinlock is used. It protects .operational and
+ * synchronizes the callbacks in .ops
+ */
+ struct mutex nonatomic_lock;
+ spinlock_t atomic_lock;
+ };
struct pwm_device pwms[] __counted_by(npwm);
};
+/**
+ * pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
+ * @chip: The pwm_chip to test
+ *
+ * Returns: true iff the pwm chip supports the waveform functions like
+ * pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
+ */
+static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
+{
+ /*
+ * Only check for .write_waveform(). If that is available,
+ * .round_waveform_tohw() and .round_waveform_fromhw() are asserted to be
+ * available, too, in pwmchip_add().
+ */
+ return chip->ops->write_waveform != NULL;
+}
+
static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
{
return chip->dev.parent;
}
-static inline void *pwmchip_get_drvdata(struct pwm_chip *chip)
+static inline void *pwmchip_get_drvdata(const struct pwm_chip *chip)
{
return dev_get_drvdata(&chip->dev);
}
@@ -307,10 +389,15 @@ static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data)
dev_set_drvdata(&chip->dev, data);
}
-#if IS_ENABLED(CONFIG_PWM)
-/* PWM user APIs */
+#if IS_REACHABLE(CONFIG_PWM)
+
+/* PWM consumer APIs */
+int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_set_waveform_might_sleep(struct pwm_device *pwm, const struct pwm_waveform *wf, bool exact);
int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -393,9 +480,6 @@ static inline bool pwm_might_sleep(struct pwm_device *pwm)
}
/* PWM provider APIs */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout);
-
void pwmchip_put(struct pwm_chip *chip);
struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
@@ -404,13 +488,15 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
void pwmchip_remove(struct pwm_chip *chip);
+/*
+ * For FFI wrapper use only:
+ * The Rust PWM abstraction needs this to properly free the pwm_chip.
+ */
+void pwmchip_release(struct device *dev);
+
int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label);
-
struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
@@ -442,6 +528,11 @@ static inline int pwm_apply_atomic(struct pwm_device *pwm,
return -EOPNOTSUPP;
}
+static inline int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
return -EOPNOTSUPP;
@@ -465,13 +556,6 @@ static inline void pwm_disable(struct pwm_device *pwm)
might_sleep();
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
-{
- return -EINVAL;
-}
-
static inline void pwmchip_put(struct pwm_chip *chip)
{
}
@@ -505,14 +589,6 @@ static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
return -EINVAL;
}
-static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
-{
- might_sleep();
- return ERR_PTR(-ENODEV);
-}
-
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
@@ -541,46 +617,6 @@ devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
}
#endif
-static inline void pwm_apply_args(struct pwm_device *pwm)
-{
- struct pwm_state state = { };
-
- /*
- * PWM users calling pwm_apply_args() expect to have a fresh config
- * where the polarity and period are set according to pwm_args info.
- * The problem is, polarity can only be changed when the PWM is
- * disabled.
- *
- * PWM drivers supporting hardware readout may declare the PWM device
- * as enabled, and prevent polarity setting, which changes from the
- * existing behavior, where all PWM devices are declared as disabled
- * at startup (even if they are actually enabled), thus authorizing
- * polarity setting.
- *
- * To fulfill this requirement, we apply a new state which disables
- * the PWM device and set the reference period and polarity config.
- *
- * Note that PWM users requiring a smooth handover between the
- * bootloader and the kernel (like critical regulators controlled by
- * PWM devices) will have to switch to the atomic API and avoid calling
- * pwm_apply_args().
- */
-
- state.enabled = false;
- state.polarity = pwm->args.polarity;
- state.period = pwm->args.period;
- state.usage_power = false;
-
- pwm_apply_might_sleep(pwm, &state);
-}
-
-/* only for backwards-compatibility, new code should not use this */
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
-{
- return pwm_apply_might_sleep(pwm, state);
-}
-
struct pwm_lookup {
struct list_head list;
const char *provider;
@@ -608,7 +644,7 @@ struct pwm_lookup {
PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \
_polarity, NULL)
-#if IS_ENABLED(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
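
To make the waveform representation concrete: a 1 kHz output at 25 % duty whose rising edge sits a quarter period into each cycle (something struct pwm_state cannot express) would be requested as below. A hedged sketch; reaching the chip through pwm->chip from consumer context is an assumption of this example:

static int example_apply_waveform(struct pwm_device *pwm)
{
	struct pwm_waveform wf = {
		.period_length_ns = 1000000,	/* 1 kHz */
		.duty_length_ns = 250000,	/* 25 % duty cycle */
		.duty_offset_ns = 250000,	/* rising edge at T/4 */
	};

	if (!pwmchip_supports_waveform(pwm->chip))
		return -EOPNOTSUPP;

	/* exact = false: let the driver round to what hardware can do. */
	return pwm_set_waveform_might_sleep(pwm, &wf, false);
}
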
diff --git a/include/linux/pwrseq/consumer.h b/include/linux/pwrseq/consumer.h
new file mode 100644
index 000000000000..7d583b4f266e
--- /dev/null
+++ b/include/linux/pwrseq/consumer.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_CONSUMER_H__
+#define __POWER_SEQUENCING_CONSUMER_H__
+
+#include <linux/err.h>
+
+struct device;
+struct pwrseq_desc;
+
+#if IS_ENABLED(CONFIG_POWER_SEQUENCING)
+
+struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target);
+void pwrseq_put(struct pwrseq_desc *desc);
+
+struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target);
+
+int pwrseq_power_on(struct pwrseq_desc *desc);
+int pwrseq_power_off(struct pwrseq_desc *desc);
+
+#else /* CONFIG_POWER_SEQUENCING */
+
+static inline struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void pwrseq_put(struct pwrseq_desc *desc)
+{
+}
+
+static inline struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int pwrseq_power_on(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int pwrseq_power_off(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_POWER_SEQUENCING */
+
+#endif /* __POWER_SEQUENCING_CONSUMER_H__ */
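
Typical consumer flow for this API, sketched under the assumption of a target named "wlan" (valid target names come from the matched sequencer, not from this header):

static int example_probe(struct device *dev)
{
	struct pwrseq_desc *pwrseq;
	int ret;

	pwrseq = devm_pwrseq_get(dev, "wlan");
	if (IS_ERR(pwrseq))
		return dev_err_probe(dev, PTR_ERR(pwrseq),
				     "failed to get power sequencer\n");

	ret = pwrseq_power_on(pwrseq);
	if (ret)
		return ret;

	/* ... call pwrseq_power_off(pwrseq) on the teardown path ... */
	return 0;
}
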
diff --git a/include/linux/pwrseq/provider.h b/include/linux/pwrseq/provider.h
new file mode 100644
index 000000000000..33b3d2c2e39d
--- /dev/null
+++ b/include/linux/pwrseq/provider.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_PROVIDER_H__
+#define __POWER_SEQUENCING_PROVIDER_H__
+
+struct device;
+struct module;
+struct pwrseq_device;
+
+typedef int (*pwrseq_power_state_func)(struct pwrseq_device *);
+typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *);
+
+#define PWRSEQ_NO_MATCH 0
+#define PWRSEQ_MATCH_OK 1
+
+/**
+ * struct pwrseq_unit_data - Configuration of a single power sequencing
+ * unit.
+ * @name: Name of the unit.
+ * @deps: Units that must be enabled before this one and disabled after it
+ * in the order they come in this array. Must be NULL-terminated.
+ * @enable: Callback running the part of the power-on sequence provided by
+ * this unit.
+ * @disable: Callback running the part of the power-off sequence provided
+ * by this unit.
+ */
+struct pwrseq_unit_data {
+ const char *name;
+ const struct pwrseq_unit_data **deps;
+ pwrseq_power_state_func enable;
+ pwrseq_power_state_func disable;
+};
+
+/**
+ * struct pwrseq_target_data - Configuration of a power sequencing target.
+ * @name: Name of the target.
+ * @unit: Final unit that this target must reach in order to be considered
+ * enabled.
+ * @post_enable: Callback run after the target unit has been enabled, *after*
+ * the state lock has been released. It's useful for implementing
+ * boot-up delays without blocking other users from powering up
+ * using the same power sequencer.
+ */
+struct pwrseq_target_data {
+ const char *name;
+ const struct pwrseq_unit_data *unit;
+ pwrseq_power_state_func post_enable;
+};
+
+/**
+ * struct pwrseq_config - Configuration used for registering a new provider.
+ * @parent: Parent device for the sequencer. Must be set.
+ * @owner: Module providing this device.
+ * @drvdata: Private driver data.
+ * @match: Provider callback used to match the consumer device to the sequencer.
+ * @targets: Array of targets for this power sequencer. Must be NULL-terminated.
+ */
+struct pwrseq_config {
+ struct device *parent;
+ struct module *owner;
+ void *drvdata;
+ pwrseq_match_func match;
+ const struct pwrseq_target_data **targets;
+};
+
+struct pwrseq_device *
+pwrseq_device_register(const struct pwrseq_config *config);
+void pwrseq_device_unregister(struct pwrseq_device *pwrseq);
+struct pwrseq_device *
+devm_pwrseq_device_register(struct device *dev,
+ const struct pwrseq_config *config);
+
+void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq);
+
+#endif /* __POWER_SEQUENCING_PROVIDER_H__ */
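
On the provider side, units form a dependency chain and each target names the final unit a consumer powers up to. A hedged sketch of a two-unit description (all names and enable/disable callbacks are invented); the targets array would then be referenced from a struct pwrseq_config passed to pwrseq_device_register():

static const struct pwrseq_unit_data example_regulators_unit = {
	.name = "regulators",
	.enable = example_enable_regulators,
	.disable = example_disable_regulators,
};

/* NULL-terminated dependency list: regulators come up first. */
static const struct pwrseq_unit_data *example_clock_deps[] = {
	&example_regulators_unit,
	NULL,
};

static const struct pwrseq_unit_data example_clock_unit = {
	.name = "clock",
	.deps = example_clock_deps,
	.enable = example_enable_clock,
	.disable = example_disable_clock,
};

static const struct pwrseq_target_data example_radio_target = {
	.name = "radio",
	.unit = &example_clock_unit,
};

/* NULL-terminated, assigned to struct pwrseq_config::targets. */
static const struct pwrseq_target_data *example_targets[] = {
	&example_radio_target,
	NULL,
};
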
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 5b67cd03276e..aa29ac53b833 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -267,7 +267,7 @@ struct qed_ll2_ops {
int qed_ll2_alloc_if(struct qed_dev *);
void qed_ll2_dealloc_if(struct qed_dev *);
#else
-static const struct qed_ll2_ops qed_ll2_ops_pass = {
+static __maybe_unused const struct qed_ll2_ops qed_ll2_ops_pass = {
.start = NULL,
.stop = NULL,
.start_xmit = NULL,
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 07071e64abf3..89a0d83ddad0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -526,7 +526,7 @@ struct quota_info {
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-int register_quota_format(struct quota_format_type *fmt);
+void register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 06cc8888199e..c334f82ed385 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -19,7 +19,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
return &sb->s_dquot;
}
-/* i_mutex must being held */
+/* i_rwsem must be held */

static inline bool is_quota_modification(struct mnt_idmap *idmap,
struct inode *inode, struct iattr *ia)
{
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 98030accf641..2467b3be15c9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -11,8 +11,13 @@
#ifdef __KERNEL__
#include <linux/blkdev.h>
+#include <linux/mm.h>
-extern const char raid6_empty_zero_page[PAGE_SIZE];
+/* This should be const but the raid6 code is too convoluted for that. */
+static inline void *raid6_get_zero_page(void)
+{
+ return page_address(ZERO_PAGE(0));
+}
#else /* ! __KERNEL__ */
/* Used for testing in user space */
@@ -108,6 +113,10 @@ extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
extern const struct raid6_calls raid6_lsx;
extern const struct raid6_calls raid6_lasx;
+extern const struct raid6_calls raid6_rvvx1;
+extern const struct raid6_calls raid6_rvvx2;
+extern const struct raid6_calls raid6_rvvx4;
+extern const struct raid6_calls raid6_rvvx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -125,6 +134,7 @@ extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
extern const struct raid6_recov_calls raid6_recov_lsx;
extern const struct raid6_recov_calls raid6_recov_lasx;
+extern const struct raid6_recov_calls raid6_recov_rvv;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
@@ -186,6 +196,11 @@ static inline uint32_t raid6_jiffies(void)
return tv.tv_sec*1000 + tv.tv_usec/1000;
}
+static inline void *raid6_get_zero_page(void)
+{
+ return raid6_empty_zero_page;
+}
+
#endif /* ! __KERNEL__ */
#endif /* LINUX_RAID_RAID6_H */
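Where old kernel-side code read from the removed raid6_empty_zero_page array, callers now go through the accessor. A hedged fragment (the surrounding recovery setup is illustrative only):

/* Illustrative fragment: substitute the shared zero page for absent blocks. */
static void point_missing_at_zero(void **ptrs, int faila, int failb)
{
	void *zero = raid6_get_zero_page();

	ptrs[faila] = zero;	/* read-only source of zeroes ... */
	ptrs[failb] = zero;	/* ... must never be written through */
}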
diff --git a/include/linux/random.h b/include/linux/random.h
index b0a940af4fff..8a8064dc3970 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -130,28 +130,6 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
return ret;
}
-#define declare_get_random_var_wait(name, ret_type) \
- static inline int get_random_ ## name ## _wait(ret_type *out) { \
- int ret = wait_for_random_bytes(); \
- if (unlikely(ret)) \
- return ret; \
- *out = get_random_ ## name(); \
- return 0; \
- }
-declare_get_random_var_wait(u8, u8)
-declare_get_random_var_wait(u16, u16)
-declare_get_random_var_wait(u32, u32)
-declare_get_random_var_wait(u64, u32)
-declare_get_random_var_wait(long, unsigned long)
-#undef declare_get_random_var
-
-/*
- * This is designed to be standalone for just prandom
- * users, but for now we include it from <linux/random.h>
- * for legacy reasons.
- */
-#include <linux/prandom.h>
-
#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 6d92b68efbf6..1d982dbdd0d0 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -32,13 +32,19 @@ DECLARE_PER_CPU(u32, kstack_offset);
#endif
/*
- * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
- * "VLA" from being unbounded (see above). 10 bits leaves enough room for
- * per-arch offset masks to reduce entropy (by removing higher bits, since
- * high entropy may overly constrain usable stack space), and for
- * compiler/arch-specific stack alignment to remove the lower bits.
+ * Use, at most, 6 bits of entropy (on 64-bit; 8 on 32-bit). This cap is
+ * to keep the "VLA" from being unbounded (see above). Additionally clear
+ * the bottom 4 bits (on 64-bit systems, 2 for 32-bit), since stack
+ * alignment will always be at least word size. This makes the compiler
+ * code gen better when it is applying the actual per-arch alignment to
+ * the final offset. The resulting randomness is reasonable without overly
+ * constraining usable stack space.
*/
-#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+#ifdef CONFIG_64BIT
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111110000)
+#else
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100)
+#endif
/**
* add_random_kstack_offset - Increase stack utilization by previously
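Concretely, 0b1111110000 == 0x3f0 keeps bits 4-9: 6 bits of entropy applied in 16-byte steps, giving offsets 0 to 1008. 0b1111111100 == 0x3fc keeps bits 2-9: 8 bits in 4-byte steps, offsets 0 to 1020. A standalone sanity check of that arithmetic:

#include <assert.h>
#include <stdint.h>

#define KSTACK_OFFSET_MAX_64(x)	((x) & 0b1111110000)
#define KSTACK_OFFSET_MAX_32(x)	((x) & 0b1111111100)

int main(void)
{
	/* 64-bit: 6 usable bits, 16-byte granularity, max offset 1008. */
	assert(KSTACK_OFFSET_MAX_64(UINT32_MAX) == 1008);
	assert(KSTACK_OFFSET_MAX_64(0x0f) == 0);
	/* 32-bit: 8 usable bits, 4-byte granularity, max offset 1020. */
	assert(KSTACK_OFFSET_MAX_32(UINT32_MAX) == 1020);
	return 0;
}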
diff --git a/include/linux/range.h b/include/linux/range.h
index 6ad0b73cb7ad..d7f98e1285d7 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -13,11 +13,20 @@ static inline u64 range_len(const struct range *range)
return range->end - range->start + 1;
}
-static inline bool range_contains(struct range *r1, struct range *r2)
+/* True if r1 completely contains r2 */
+static inline bool range_contains(const struct range *r1,
+ const struct range *r2)
{
return r1->start <= r2->start && r1->end >= r2->end;
}
+/* True if any part of r1 overlaps r2 */
+static inline bool range_overlaps(const struct range *r1,
+ const struct range *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
@@ -31,4 +40,10 @@ int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
+#define DEFINE_RANGE(_start, _end) \
+(struct range) { \
+ .start = (_start), \
+ .end = (_end), \
+ }
+
#endif
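A short sketch of the semantics added here; since struct range bounds are inclusive, two ranges overlap whenever they share at least one address:

	struct range a = DEFINE_RANGE(0x1000, 0x1fff);
	struct range b = DEFINE_RANGE(0x1800, 0x27ff);
	struct range c = DEFINE_RANGE(0x1800, 0x18ff);

	/* b shares [0x1800, 0x1fff] with a, but a does not contain b. */
	WARN_ON(!range_overlaps(&a, &b));
	WARN_ON(range_contains(&a, &b));
	/* c lies entirely inside a, so both predicates hold. */
	WARN_ON(!range_contains(&a, &c));
	/* Bounds are inclusive: range_len(&a) == 0x1fff - 0x1000 + 1. */
	WARN_ON(range_len(&a) != 0x1000);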
diff --git a/include/linux/ras.h b/include/linux/ras.h
index a64182bc72ad..468941bfe855 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -24,8 +24,7 @@ int __init parse_cec_param(char *str);
void log_non_standard_event(const guid_t *sec_type,
const guid_t *fru_id, const char *fru_text,
const u8 sev, const u8 *err, const u32 len);
-void log_arm_hw_error(struct cper_sec_proc_arm *err);
-
+void log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev);
#else
static inline void
log_non_standard_event(const guid_t *sec_type,
@@ -33,7 +32,7 @@ log_non_standard_event(const guid_t *sec_type,
const u8 sev, const u8 *err, const u32 len)
{ return; }
static inline void
-log_arm_hw_error(struct cper_sec_proc_arm *err) { return; }
+log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev) { return; }
#endif
struct atl_err {
@@ -53,4 +52,15 @@ static inline unsigned long
amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err) { return -EINVAL; }
#endif /* CONFIG_AMD_ATL */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#include <asm/smp_plat.h>
+/*
+ * Include ARM-specific SMP header which provides a function mapping mpidr to
+ * CPU logical index.
+ */
+#define GET_LOGICAL_INDEX(mpidr) get_logical_index(mpidr & MPIDR_HWID_BITMASK)
+#else
+#define GET_LOGICAL_INDEX(mpidr) -EINVAL
+#endif /* CONFIG_ARM || CONFIG_ARM64 */
+
#endif /* __RAS_H__ */
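A hedged sketch of how the two additions compose: a handler translates the MPIDR from an ARM processor error record to a logical CPU (or gets -EINVAL on non-ARM builds) and forwards the record together with its severity. The handler itself is invented; the mpidr field of struct cper_sec_proc_arm is real.

/* Illustrative fragment, not from the patch. */
static void sample_report_arm_error(struct cper_sec_proc_arm *err, u8 sev)
{
	int cpu = GET_LOGICAL_INDEX(err->mpidr);

	if (cpu < 0)
		return;		/* no MPIDR mapping on this architecture */

	log_arm_hw_error(err, sev);
}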
diff --git a/include/linux/raspberrypi/vchiq.h b/include/linux/raspberrypi/vchiq.h
new file mode 100644
index 000000000000..ee4469f4fc51
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_H
+#define VCHIQ_H
+
+#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
+ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
+
+enum vchiq_reason {
+ VCHIQ_SERVICE_OPENED, /* service, -, - */
+ VCHIQ_SERVICE_CLOSED, /* service, -, - */
+ VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
+ VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
+};
+
+enum vchiq_bulk_mode {
+ VCHIQ_BULK_MODE_CALLBACK,
+ VCHIQ_BULK_MODE_BLOCKING,
+ VCHIQ_BULK_MODE_NOCALLBACK,
+ VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
+};
+
+enum vchiq_service_option {
+ VCHIQ_SERVICE_OPTION_AUTOCLOSE,
+ VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
+ VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
+ VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
+ VCHIQ_SERVICE_OPTION_TRACE
+};
+
+struct vchiq_header {
+ /* The message identifier - opaque to applications. */
+ int msgid;
+
+ /* Size of message data. */
+ unsigned int size;
+
+ char data[]; /* message */
+};
+
+struct vchiq_element {
+ const void __user *data;
+ unsigned int size;
+};
+
+struct vchiq_instance;
+struct vchiq_state;
+
+struct vchiq_service_base {
+ int fourcc;
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+ void *userdata;
+};
+
+struct vchiq_completion_data_kernel {
+ enum vchiq_reason reason;
+ struct vchiq_header *header;
+ void *service_userdata;
+ void *cb_data;
+ void __user *cb_userdata;
+};
+
+struct vchiq_service_params_kernel {
+ int fourcc;
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+ void *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
+extern int vchiq_initialise(struct vchiq_state *state,
+ struct vchiq_instance **pinstance);
+extern int vchiq_shutdown(struct vchiq_instance *instance);
+extern int vchiq_connect(struct vchiq_instance *instance);
+extern int vchiq_open_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *pservice);
+extern int vchiq_close_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern int vchiq_use_service(struct vchiq_instance *instance, unsigned int service);
+extern int vchiq_release_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header);
+extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service,
+ struct vchiq_header *header);
+extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size);
+extern int vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service,
+ const void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service);
+extern int vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle,
+ short *peer_version);
+extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle);
+
+#endif /* VCHIQ_H */
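A consumer-side sketch: the fourcc names the service rendezvous, and the callback receives every completion for the handle. All sample_* names are invented; only the vchiq_* API declared above is real.

static int sample_callback(struct vchiq_instance *instance,
			   enum vchiq_reason reason,
			   struct vchiq_header *header,
			   unsigned int handle,
			   void *cb_data, void __user *cb_userdata)
{
	/* Hand the slot back once the message has been consumed. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(instance, handle, header);
	return 0;
}

static int sample_open(struct vchiq_instance *instance, unsigned int *handle)
{
	const struct vchiq_service_params_kernel params = {
		.fourcc = VCHIQ_MAKE_FOURCC('S', 'M', 'P', 'L'),
		.callback = sample_callback,
		.version = 1,
		.version_min = 1,
	};

	return vchiq_open_service(instance, &params, handle);
}

vchiq_queue_kernel_message() and the bulk transfer helpers then operate on the returned handle.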
diff --git a/include/linux/raspberrypi/vchiq_arm.h b/include/linux/raspberrypi/vchiq_arm.h
new file mode 100644
index 000000000000..e32b02f99024
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_arm.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ */
+
+#ifndef VCHIQ_ARM_H
+#define VCHIQ_ARM_H
+
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+#include "vchiq_core.h"
+#include "vchiq_debugfs.h"
+
+/* Some per-instance constants */
+#define MAX_COMPLETIONS 128
+#define MAX_SERVICES 64
+#define MAX_ELEMENTS 8
+#define MSG_QUEUE_SIZE 128
+
+#define VCHIQ_DRV_MAX_CALLBACKS 10
+
+struct rpi_firmware;
+struct vchiq_device;
+
+enum USE_TYPE_E {
+ USE_TYPE_SERVICE,
+ USE_TYPE_VCHIQ
+};
+
+struct vchiq_platform_info {
+ unsigned int cache_line_size;
+};
+
+struct vchiq_drv_mgmt {
+ struct rpi_firmware *fw;
+ const struct vchiq_platform_info *info;
+
+ bool connected;
+ int num_deferred_callbacks;
+ /* Protects connected and num_deferred_callbacks */
+ struct mutex connected_mutex;
+
+ void (*deferred_callback[VCHIQ_DRV_MAX_CALLBACKS])(void);
+
+ struct semaphore free_fragments_sema;
+ struct semaphore free_fragments_mutex;
+ char *fragments_base;
+ char *free_fragments;
+ unsigned int fragments_size;
+
+ void __iomem *regs;
+
+ struct vchiq_state state;
+};
+
+struct user_service {
+ struct vchiq_service *service;
+ void __user *userdata;
+ struct vchiq_instance *instance;
+ char is_vchi;
+ char dequeue_pending;
+ char close_pending;
+ int message_available_pos;
+ int msg_insert;
+ int msg_remove;
+ struct completion insert_event;
+ struct completion remove_event;
+ struct completion close_event;
+ struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
+};
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance {
+ struct vchiq_state *state;
+ struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
+ int completion_insert;
+ int completion_remove;
+ struct completion insert_event;
+ struct completion remove_event;
+ struct mutex completion_mutex;
+
+ int connected;
+ int closing;
+ int pid;
+ int mark;
+ int use_close_delivered;
+ int trace;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+
+ struct vchiq_debugfs_node debugfs_node;
+};
+
+int
+vchiq_use_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern int
+vchiq_release_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern int
+vchiq_check_service(struct vchiq_service *service);
+
+extern void
+vchiq_dump_service_use_state(struct vchiq_state *state);
+
+extern int
+vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ enum USE_TYPE_E use_type);
+extern int
+vchiq_release_internal(struct vchiq_state *state,
+ struct vchiq_service *service);
+
+extern struct vchiq_debugfs_node *
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_use_count(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_pid(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_trace(struct vchiq_instance *instance);
+
+extern void
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
+
+extern void
+vchiq_add_connected_callback(struct vchiq_device *device,
+ void (*callback)(void));
+
+#if IS_ENABLED(CONFIG_VCHIQ_CDEV)
+
+extern void
+vchiq_deregister_chrdev(void);
+
+extern int
+vchiq_register_chrdev(struct device *parent);
+
+#else
+
+static inline void vchiq_deregister_chrdev(void) { }
+static inline int vchiq_register_chrdev(struct device *parent) { return 0; }
+
+#endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */
+
+extern int
+service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason,
+ struct vchiq_header *header, unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+
+extern void
+free_bulk_waiter(struct vchiq_instance *instance);
+
+#endif /* VCHIQ_ARM_H */
diff --git a/include/linux/raspberrypi/vchiq_bus.h b/include/linux/raspberrypi/vchiq_bus.h
new file mode 100644
index 000000000000..9de179b39f85
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_bus.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Ideas On Board Oy
+ */
+
+#ifndef _VCHIQ_DEVICE_H
+#define _VCHIQ_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct vchiq_drv_mgmt;
+
+struct vchiq_device {
+ struct device dev;
+ struct vchiq_drv_mgmt *drv_mgmt;
+};
+
+struct vchiq_driver {
+ int (*probe)(struct vchiq_device *device);
+ void (*remove)(struct vchiq_device *device);
+ int (*resume)(struct vchiq_device *device);
+ int (*suspend)(struct vchiq_device *device,
+ pm_message_t state);
+
+ const struct vchiq_device_id *id_table;
+ struct device_driver driver;
+};
+
+static inline struct vchiq_device *to_vchiq_device(struct device *d)
+{
+ return container_of(d, struct vchiq_device, dev);
+}
+
+static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d)
+{
+ return container_of(d, struct vchiq_driver, driver);
+}
+
+extern const struct bus_type vchiq_bus_type;
+
+struct vchiq_device *
+vchiq_device_register(struct device *parent, const char *name);
+void vchiq_device_unregister(struct vchiq_device *dev);
+
+int vchiq_driver_register(struct vchiq_driver *vchiq_drv);
+void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv);
+
+/**
+ * module_vchiq_driver() - Helper macro for registering a vchiq driver
+ * @__vchiq_driver: vchiq driver struct
+ *
+ * Helper macro for vchiq drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_vchiq_driver(__vchiq_driver) \
+ module_driver(__vchiq_driver, vchiq_driver_register, vchiq_driver_unregister)
+
+#endif /* _VCHIQ_DEVICE_H */
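A skeleton driver built from the declarations above, registered with the boilerplate-saving macro; the sample_* names are placeholders.

static int sample_probe(struct vchiq_device *device)
{
	dev_info(&device->dev, "sample vchiq device bound\n");
	return 0;
}

static void sample_remove(struct vchiq_device *device)
{
}

static const struct vchiq_device_id sample_id_table[] = {
	{ .name = "sample" },
	{}
};

static struct vchiq_driver sample_driver = {
	.probe = sample_probe,
	.remove = sample_remove,
	.id_table = sample_id_table,
	.driver = {
		.name = "sample-vchiq",
	},
};
module_vchiq_driver(sample_driver);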
diff --git a/include/linux/raspberrypi/vchiq_cfg.h b/include/linux/raspberrypi/vchiq_cfg.h
new file mode 100644
index 000000000000..a16d0299996c
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_cfg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2014 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_CFG_H
+#define VCHIQ_CFG_H
+
+#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
+/* The version of VCHIQ - change with any non-trivial change */
+#define VCHIQ_VERSION 8
+/*
+ * The minimum compatible version - update to match VCHIQ_VERSION with any
+ * incompatible change
+ */
+#define VCHIQ_VERSION_MIN 3
+
+/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */
+#define VCHIQ_VERSION_LIB_VERSION 7
+
+/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */
+#define VCHIQ_VERSION_CLOSE_DELIVERED 7
+
+/* The version that made it safe to use SYNCHRONOUS mode */
+#define VCHIQ_VERSION_SYNCHRONOUS_MODE 8
+
+#define VCHIQ_MAX_STATES 1
+#define VCHIQ_MAX_SERVICES 4096
+#define VCHIQ_MAX_SLOTS 128
+#define VCHIQ_MAX_SLOTS_PER_SIDE 64
+
+#define VCHIQ_NUM_CURRENT_BULKS 32
+#define VCHIQ_NUM_SERVICE_BULKS 4
+
+#ifndef VCHIQ_ENABLE_DEBUG
+#define VCHIQ_ENABLE_DEBUG 1
+#endif
+
+#ifndef VCHIQ_ENABLE_STATS
+#define VCHIQ_ENABLE_STATS 1
+#endif
+
+#endif /* VCHIQ_CFG_H */
diff --git a/include/linux/raspberrypi/vchiq_core.h b/include/linux/raspberrypi/vchiq_core.h
new file mode 100644
index 000000000000..e7bf7a114985
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_core.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_CORE_H
+#define VCHIQ_CORE_H
+
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dev_printk.h>
+#include <linux/kthread.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "vchiq.h"
+#include "vchiq_cfg.h"
+
+/* Do this so that we can test-build the code on non-rpi systems */
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+
+#else
+
+#ifndef dsb
+#define dsb(a)
+#endif
+
+#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
+
+#define VCHIQ_SERVICE_HANDLE_INVALID 0
+
+#define VCHIQ_SLOT_SIZE 4096
+#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
+
+#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
+#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
+#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
+ VCHIQ_SLOT_SIZE)
+
+#define BITSET_SIZE(b) (((b) + 31) >> 5)
+#define BITSET_WORD(b) ((b) >> 5)
+#define BITSET_BIT(b) (1 << ((b) & 31))
+#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
+#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
+
+enum {
+ DEBUG_ENTRIES,
+#if VCHIQ_ENABLE_DEBUG
+ DEBUG_SLOT_HANDLER_COUNT,
+ DEBUG_SLOT_HANDLER_LINE,
+ DEBUG_PARSE_LINE,
+ DEBUG_PARSE_HEADER,
+ DEBUG_PARSE_MSGID,
+ DEBUG_AWAIT_COMPLETION_LINE,
+ DEBUG_DEQUEUE_MESSAGE_LINE,
+ DEBUG_SERVICE_CALLBACK_LINE,
+ DEBUG_MSG_QUEUE_FULL_COUNT,
+ DEBUG_COMPLETION_QUEUE_FULL_COUNT,
+#endif
+ DEBUG_MAX
+};
+
+#if VCHIQ_ENABLE_DEBUG
+
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
+#define DEBUG_TRACE(d) \
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
+#define DEBUG_VALUE(d, v) \
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
+#define DEBUG_COUNT(d) \
+ do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
+
+#else /* VCHIQ_ENABLE_DEBUG */
+
+#define DEBUG_INITIALISE(local)
+#define DEBUG_TRACE(d)
+#define DEBUG_VALUE(d, v)
+#define DEBUG_COUNT(d)
+
+#endif /* VCHIQ_ENABLE_DEBUG */
+
+enum vchiq_connstate {
+ VCHIQ_CONNSTATE_DISCONNECTED,
+ VCHIQ_CONNSTATE_CONNECTING,
+ VCHIQ_CONNSTATE_CONNECTED,
+ VCHIQ_CONNSTATE_PAUSING,
+ VCHIQ_CONNSTATE_PAUSE_SENT,
+ VCHIQ_CONNSTATE_PAUSED,
+ VCHIQ_CONNSTATE_RESUMING,
+ VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
+ VCHIQ_CONNSTATE_RESUME_TIMEOUT
+};
+
+enum {
+ VCHIQ_SRVSTATE_FREE,
+ VCHIQ_SRVSTATE_HIDDEN,
+ VCHIQ_SRVSTATE_LISTENING,
+ VCHIQ_SRVSTATE_OPENING,
+ VCHIQ_SRVSTATE_OPEN,
+ VCHIQ_SRVSTATE_OPENSYNC,
+ VCHIQ_SRVSTATE_CLOSESENT,
+ VCHIQ_SRVSTATE_CLOSERECVD,
+ VCHIQ_SRVSTATE_CLOSEWAIT,
+ VCHIQ_SRVSTATE_CLOSED
+};
+
+enum vchiq_bulk_dir {
+ VCHIQ_BULK_TRANSMIT,
+ VCHIQ_BULK_RECEIVE
+};
+
+struct vchiq_bulk {
+ short mode;
+ short dir;
+ void *cb_data;
+ void __user *cb_userdata;
+ struct bulk_waiter *waiter;
+ dma_addr_t dma_addr;
+ int size;
+ void *remote_data;
+ int remote_size;
+ int actual;
+ void *offset;
+ void __user *uoffset;
+};
+
+struct vchiq_bulk_queue {
+ int local_insert; /* Where to insert the next local bulk */
+ int remote_insert; /* Where to insert the next remote bulk (master) */
+ int process; /* Bulk to transfer next */
+ int remote_notify; /* Bulk to notify the remote client of next (mstr) */
+ int remove; /* Bulk to notify the local client of, and remove, next */
+ struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
+};
+
+/*
+ * Remote events provide a way of presenting several virtual doorbells to a
+ * peer (ARM host to VPU) using only one physical doorbell. They can be thought
+ * of as a way for the peer to signal a semaphore, in this case implemented as
+ * a workqueue.
+ *
+ * Remote events remain signalled until acknowledged by the receiver, and they
+ * are non-counting. They are designed in such a way as to minimise the number
+ * of interrupts and avoid unnecessary waiting.
+ *
+ * A remote_event is a small data structure that lives in shared memory. It
+ * comprises two booleans - armed and fired:
+ *
+ * The sender sets fired when they signal the receiver.
+ * If fired is set, the receiver has been signalled and need not wait.
+ * The receiver sets the armed field before they begin to wait.
+ * If armed is set, the receiver is waiting and wishes to be woken by interrupt.
+ */
+struct remote_event {
+ int armed;
+ int fired;
+ u32 __unused;
+};
+
+struct opaque_platform_state;
+
+struct vchiq_slot {
+ char data[VCHIQ_SLOT_SIZE];
+};
+
+struct vchiq_slot_info {
+ /* Use two counters rather than one to avoid the need for a mutex. */
+ short use_count;
+ short release_count;
+};
+
+/*
+ * VCHIQ is a reliable connection-oriented datagram protocol.
+ *
+ * A VCHIQ service is equivalent to a TCP connection, except:
+ * + FOURCCs are used for the rendezvous, and port numbers are assigned at the
+ * time the connection is established.
+ * + There is less of a distinction between server and client sockets, the only
+ * difference being which end makes the first move.
+ * + For a multi-client server, the server creates new "listening" services as
+ * the existing one becomes connected - there is no need to specify the
+ * maximum number of clients up front.
+ * + Data transfer is reliable but packetized (messages have defined ends).
+ * + Messages can be either short (capable of fitting in a slot) and in-band,
+ * or copied between external buffers (bulk transfers).
+ */
+struct vchiq_service {
+ struct vchiq_service_base base;
+ unsigned int handle;
+ struct kref ref_count;
+ struct rcu_head rcu;
+ int srvstate;
+ void (*userdata_term)(void *userdata);
+ unsigned int localport;
+ unsigned int remoteport;
+ int public_fourcc;
+ int client_id;
+ char auto_close;
+ char sync;
+ char closing;
+ char trace;
+ atomic_t poll_flags;
+ short version;
+ short version_min;
+ short peer_version;
+
+ struct vchiq_state *state;
+ struct vchiq_instance *instance;
+
+ int service_use_count;
+
+ struct vchiq_bulk_queue bulk_tx;
+ struct vchiq_bulk_queue bulk_rx;
+
+ struct completion remove_event;
+ struct completion bulk_remove_event;
+ struct mutex bulk_mutex;
+
+ struct service_stats_struct {
+ int quota_stalls;
+ int slot_stalls;
+ int bulk_stalls;
+ int error_count;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int bulk_tx_count;
+ int bulk_rx_count;
+ int bulk_aborted_count;
+ u64 ctrl_tx_bytes;
+ u64 ctrl_rx_bytes;
+ u64 bulk_tx_bytes;
+ u64 bulk_rx_bytes;
+ } stats;
+
+ int msg_queue_read;
+ int msg_queue_write;
+ struct completion msg_queue_pop;
+ struct completion msg_queue_push;
+ struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
+};
+
+/*
+ * The quota information is outside struct vchiq_service so that it can
+ * be statically allocated, since for accounting reasons a service's slot
+ * usage is carried over between users of the same port number.
+ */
+struct vchiq_service_quota {
+ unsigned short slot_quota;
+ unsigned short slot_use_count;
+ unsigned short message_quota;
+ unsigned short message_use_count;
+ struct completion quota_event;
+ int previous_tx_index;
+};
+
+struct vchiq_shared_state {
+ /* A non-zero value here indicates that the content is valid. */
+ int initialised;
+
+ /* The first and last (inclusive) slots allocated to the owner. */
+ int slot_first;
+ int slot_last;
+
+ /* The slot allocated to synchronous messages from the owner. */
+ int slot_sync;
+
+ /*
+ * Signalling this event indicates that the owner's slot handler thread
+ * should run.
+ */
+ struct remote_event trigger;
+
+ /*
+ * Indicates the byte position within the stream where the next message
+ * will be written. The least significant bits are an index into the
+ * slot. The next bits are the index of the slot in slot_queue.
+ */
+ int tx_pos;
+
+ /* This event should be signalled when a slot is recycled. */
+ struct remote_event recycle;
+
+ /* The slot_queue index where the next recycled slot will be written. */
+ int slot_queue_recycle;
+
+ /* This event should be signalled when a synchronous message is sent. */
+ struct remote_event sync_trigger;
+
+ /*
+ * This event should be signalled when a synchronous message has been
+ * released.
+ */
+ struct remote_event sync_release;
+
+ /* A circular buffer of slot indexes. */
+ int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
+
+ /* Debugging state */
+ int debug[DEBUG_MAX];
+};
+
+/*
+ * vchiq_slot_zero describes the memory shared between the ARM host and the
+ * VideoCore VPU. The "master" and "slave" states are owned by the respective
+ * sides but visible to the other; the slots are shared, and the remaining
+ * fields are read-only.
+ *
+ * In the configuration used by this implementation, the memory is allocated
+ * by the host, the VPU is the master (the side which controls the DMA for bulk
+ * transfers), and the host is the slave.
+ *
+ * The ownership of slots changes with use:
+ * + When empty they are owned by the sender.
+ * + When partially filled they are shared with the receiver.
+ * + When completely full they are owned by the receiver.
+ * + When the receiver has finished processing the contents, they are recycled
+ * back to the sender.
+ */
+struct vchiq_slot_zero {
+ int magic;
+ short version;
+ short version_min;
+ int slot_zero_size;
+ int slot_size;
+ int max_slots;
+ int max_slots_per_side;
+ int platform_data[2];
+ struct vchiq_shared_state master;
+ struct vchiq_shared_state slave;
+ struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
+};
+
+/*
+ * This is the private runtime state used by each side. The same structure was
+ * originally used by both sides, but implementations have since diverged.
+ */
+struct vchiq_state {
+ struct device *dev;
+ int id;
+ int initialised;
+ enum vchiq_connstate conn_state;
+ short version_common;
+
+ struct vchiq_shared_state *local;
+ struct vchiq_shared_state *remote;
+ struct vchiq_slot *slot_data;
+
+ unsigned short default_slot_quota;
+ unsigned short default_message_quota;
+
+ /* Event indicating connect message received */
+ struct completion connect;
+
+ /* Mutex protecting services */
+ struct mutex mutex;
+ struct vchiq_instance **instance;
+
+ /* Processes all incoming messages which aren't synchronous */
+ struct task_struct *slot_handler_thread;
+
+ /*
+ * Slots which have been fully processed and released by the (peer)
+ * receiver are added to the receiver queue, which is asynchronously
+ * processed by the recycle thread.
+ */
+ struct task_struct *recycle_thread;
+
+ /*
+ * Processes incoming synchronous messages
+ *
+ * The synchronous message channel is shared between all synchronous
+ * services, and provides a way for urgent messages to bypass
+ * potentially long queues of asynchronous messages in the normal slots.
+ *
+ * There can be only one outstanding synchronous message in
+ * each direction, and as a precious shared resource synchronous
+ * services should be used sparingly.
+ */
+ struct task_struct *sync_thread;
+
+ /* Local implementation of the trigger remote event */
+ wait_queue_head_t trigger_event;
+
+ /* Local implementation of the recycle remote event */
+ wait_queue_head_t recycle_event;
+
+ /* Local implementation of the sync trigger remote event */
+ wait_queue_head_t sync_trigger_event;
+
+ /* Local implementation of the sync release remote event */
+ wait_queue_head_t sync_release_event;
+
+ char *tx_data;
+ char *rx_data;
+ struct vchiq_slot_info *rx_info;
+
+ struct mutex slot_mutex;
+
+ struct mutex recycle_mutex;
+
+ struct mutex sync_mutex;
+
+ spinlock_t msg_queue_spinlock;
+
+ spinlock_t bulk_waiter_spinlock;
+
+ spinlock_t quota_spinlock;
+
+ /*
+ * Indicates the byte position within the stream from where the next
+ * message will be read. The least significant bits are an index into
+ * the slot. The next bits are the index of the slot in
+ * remote->slot_queue.
+ */
+ int rx_pos;
+
+ /*
+ * A cached copy of local->tx_pos. Only write to local->tx_pos, and read
+ * from remote->tx_pos.
+ */
+ int local_tx_pos;
+
+ /* The slot_queue index of the slot to become available next. */
+ int slot_queue_available;
+
+ /* A flag to indicate if any poll has been requested */
+ int poll_needed;
+
+ /* The index of the previous slot used for data messages. */
+ int previous_data_index;
+
+ /* The number of slots occupied by data messages. */
+ unsigned short data_use_count;
+
+ /* The maximum number of slots to be occupied by data messages. */
+ unsigned short data_quota;
+
+ /* An array of bit sets indicating which services must be polled. */
+ atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+
+ /* The number of the first unused service */
+ int unused_service;
+
+ /* Signalled when a free slot becomes available. */
+ struct completion slot_available_event;
+
+ /* Signalled when a free data slot becomes available. */
+ struct completion data_quota_event;
+
+ struct state_stats_struct {
+ int slot_stalls;
+ int data_stalls;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int error_count;
+ } stats;
+
+ struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
+ struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
+ struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
+
+ struct opaque_platform_state *platform_state;
+};
+
+struct pagelist {
+ u32 length;
+ u16 type;
+ u16 offset;
+ u32 addrs[1]; /* N.B. 12 LSBs hold the number
+ * of following pages at consecutive
+ * addresses.
+ */
+};
+
+struct vchiq_pagelist_info {
+ struct pagelist *pagelist;
+ size_t pagelist_buffer_size;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ unsigned int num_pages;
+ unsigned int pages_need_release;
+ struct page **pages;
+ struct scatterlist *scatterlist;
+ unsigned int scatterlist_mapped;
+};
+
+static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
+{
+ return state->remote && state->remote->initialised;
+}
+
+struct bulk_waiter {
+ struct vchiq_bulk *bulk;
+ struct completion event;
+ int actual;
+};
+
+struct vchiq_config {
+ unsigned int max_msg_size;
+ unsigned int bulk_threshold; /* The message size above which it
+ * is better to use a bulk transfer
+ * (<= max_msg_size)
+ */
+ unsigned int max_outstanding_bulks;
+ unsigned int max_services;
+ short version; /* The version of VCHIQ */
+ short version_min; /* The minimum compatible version of VCHIQ */
+};
+
+extern spinlock_t bulk_waiter_spinlock;
+
+extern const char *
+get_conn_state_name(enum vchiq_connstate conn_state);
+
+extern struct vchiq_slot_zero *
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);
+
+extern int
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);
+
+extern int
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
+
+struct vchiq_service *
+vchiq_add_service_internal(struct vchiq_state *state,
+ const struct vchiq_service_params_kernel *params,
+ int srvstate, struct vchiq_instance *instance,
+ void (*userdata_term)(void *userdata));
+
+extern int
+vchiq_open_service_internal(struct vchiq_service *service, int client_id);
+
+extern int
+vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
+
+extern void
+vchiq_terminate_service_internal(struct vchiq_service *service);
+
+extern void
+vchiq_free_service_internal(struct vchiq_service *service);
+
+extern void
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
+
+extern void
+remote_event_pollall(struct vchiq_state *state);
+
+extern int
+vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle,
+ struct bulk_waiter *userdata);
+
+extern int
+vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk);
+
+extern int
+vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk);
+
+extern void
+vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
+
+extern void
+request_poll(struct vchiq_state *state, struct vchiq_service *service,
+ int poll_type);
+
+struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_service_by_port(struct vchiq_state *state, unsigned int localport);
+
+extern struct vchiq_service *
+find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+__next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx);
+
+extern struct vchiq_service *
+next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx);
+
+extern void
+vchiq_service_get(struct vchiq_service *service);
+
+extern void
+vchiq_service_put(struct vchiq_service *service);
+
+extern int
+vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
+
+void vchiq_dump_platform_state(struct seq_file *f);
+
+void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f);
+
+void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);
+
+int vchiq_use_service_internal(struct vchiq_service *service);
+
+int vchiq_release_service_internal(struct vchiq_service *service);
+
+void vchiq_on_remote_use(struct vchiq_state *state);
+
+void vchiq_on_remote_release(struct vchiq_state *state);
+
+int vchiq_platform_init_state(struct vchiq_state *state);
+
+int vchiq_check_service(struct vchiq_service *service);
+
+int vchiq_send_remote_use(struct vchiq_state *state);
+
+int vchiq_send_remote_use_active(struct vchiq_state *state);
+
+void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate);
+
+void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
+
+void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
+ const void *void_mem, size_t num_bytes);
+
+int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
+
+int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);
+
+void vchiq_get_config(struct vchiq_config *config);
+
+int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
+ enum vchiq_service_option option, int value);
+
+#endif
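As a recap of the remote_event comment above, the armed/fired handshake reduces to a few lines. This is a simplified local sketch: the real implementation adds memory barriers and writes the physical doorbell register, both omitted here, and ring_doorbell() is hypothetical.

/* Simplified sketch of the armed/fired handshake; barriers omitted. */
static void remote_event_signal_sketch(struct remote_event *event)
{
	event->fired = 1;
	if (event->armed)
		ring_doorbell();	/* hypothetical: interrupt the peer */
}

static int remote_event_wait_sketch(wait_queue_head_t *wq,
				    struct remote_event *event)
{
	int ret = 0;

	if (!event->fired) {
		event->armed = 1;
		ret = wait_event_interruptible(*wq, event->fired);
		event->armed = 0;
	}
	if (!ret)
		event->fired = 0;	/* acknowledge; events don't count */
	return ret;
}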
diff --git a/include/linux/raspberrypi/vchiq_debugfs.h b/include/linux/raspberrypi/vchiq_debugfs.h
new file mode 100644
index 000000000000..b29e6693c949
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_debugfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. */
+
+#ifndef VCHIQ_DEBUGFS_H
+#define VCHIQ_DEBUGFS_H
+
+struct vchiq_state;
+struct vchiq_instance;
+
+struct vchiq_debugfs_node {
+ struct dentry *dentry;
+};
+
+void vchiq_debugfs_init(struct vchiq_state *state);
+
+void vchiq_debugfs_deinit(void);
+
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance);
+
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance);
+
+#endif /* VCHIQ_DEBUGFS_H */
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index b17e0cd0a30c..7aaad158ee37 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -22,16 +22,43 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
DEFAULT_RATELIMIT_BURST);
}
+static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
+{
+ atomic_inc(&rs->missed);
+}
+
+static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
+{
+ return atomic_read(&rs->missed);
+}
+
+static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
+{
+ return atomic_xchg_relaxed(&rs->missed, 0);
+}
+
+static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rs->lock, flags);
+ rs->interval = interval_init;
+ rs->flags &= ~RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
+ ratelimit_state_reset_miss(rs);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+}
+
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
{
+ int m;
+
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
return;
- if (rs->missed) {
- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
+ m = ratelimit_state_reset_miss(rs);
+ if (m)
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
}
static inline void
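With the miss counter now a fetch-and-clear atomic, suppression totals can be reported without holding rs->lock. A hedged fragment of the intended pattern (sample_* names invented):

static DEFINE_RATELIMIT_STATE(sample_rs, 5 * HZ, 10);

static void sample_event(void)
{
	if (!__ratelimit(&sample_rs))
		return;		/* suppressed this time around */
	pr_info("sample event\n");
}

static void sample_teardown(void)
{
	/* Atomically fetch and clear the suppression total, lock-free. */
	int missed = ratelimit_state_reset_miss(&sample_rs);

	if (missed)
		pr_warn("%d sample events suppressed\n", missed);
}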
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index 002266693e50..b19c4354540a 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -11,16 +11,17 @@
/* issue num suppressed message on exit */
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+#define RATELIMIT_INITIALIZED BIT(1)
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
- int printed;
- int missed;
+ atomic_t rs_n_left;
+ atomic_t missed;
+ unsigned int flags;
unsigned long begin;
- unsigned long flags;
};
#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index f7edca369eda..4091e978aef2 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -43,8 +43,36 @@ extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
-extern struct rb_node *rb_first(const struct rb_root *);
-extern struct rb_node *rb_last(const struct rb_root *);
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+static inline struct rb_node *rb_first(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_left)
+ n = n->rb_left;
+ return n;
+}
+
+/*
+ * This function returns the last node (in sort order) of the tree.
+ */
+static inline struct rb_node *rb_last(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_right)
+ n = n->rb_right;
+ return n;
+}
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
@@ -211,6 +239,43 @@ rb_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_cached() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ int (*cmp)(const struct rb_node *new, const struct rb_node *exist))
+{
+ bool leftmost = true;
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0) {
+ link = &parent->rb_left;
+ } else if (c > 0) {
+ link = &parent->rb_right;
+ leftmost = false;
+ } else {
+ return parent;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+ return NULL;
+}
+
+/**
* rb_find_add() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
* @tree: tree to search / modify
@@ -245,6 +310,42 @@ rb_find_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_rcu() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Adds a Store-Release for link_node.
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
* rb_find() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
@@ -273,6 +374,37 @@ rb_find(const void *key, const struct rb_root *tree,
}
/**
+ * rb_find_rcu() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Notably, tree descent vs concurrent tree rotations is unsound and can result
+ * in false-negatives.
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_rcu(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
* rb_find_first() - find the first @key in @tree
* @key: key to match
* @tree: tree to search
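Stepping back to rb_find_add_cached() above, a usage sketch under invented names (struct sample_node and its key field): the comparator defines the order, and the cached root keeps rb_first_cached() O(1).

struct sample_node {
	struct rb_node rb;
	u64 key;
};

static int sample_cmp(const struct rb_node *new, const struct rb_node *exist)
{
	u64 a = rb_entry(new, struct sample_node, rb)->key;
	u64 b = rb_entry(exist, struct sample_node, rb)->key;

	return a < b ? -1 : a > b ? 1 : 0;
}

/* Insert @sn unless its key is taken; return the clashing node or NULL. */
static struct sample_node *sample_insert(struct rb_root_cached *root,
					 struct sample_node *sn)
{
	struct rb_node *exist = rb_find_add_cached(&sn->rb, root, sample_cmp);

	return exist ? rb_entry(exist, struct sample_node, rb) : NULL;
}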
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 6a0999c26c7c..2f630eb8307e 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -14,7 +14,7 @@
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
*
* Removes @node from the trees @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
 * reused or freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root,
unsigned int seq;
do {
- seq = raw_read_seqcount_latch(&root->seq);
+ seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (raw_read_seqcount_latch_retry(&root->seq, seq));
+ } while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
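The begin/latch/end discipline used above generalizes to any double-buffered data; a minimal sketch under an invented struct sample_data (the writer flips which copy a concurrent reader lands in, so readers never block):

struct latched_pair {
	seqcount_latch_t seq;
	struct sample_data copy[2];	/* sample_data is hypothetical */
};

static void latched_pair_update(struct latched_pair *p,
				const struct sample_data *new)
{
	write_seqcount_latch_begin(&p->seq);
	p->copy[0] = *new;		/* readers now see copy[1] */
	write_seqcount_latch(&p->seq);
	p->copy[1] = *new;		/* readers now see copy[0] */
	write_seqcount_latch_end(&p->seq);
}

static struct sample_data latched_pair_read(struct latched_pair *p)
{
	struct sample_data d;
	unsigned int seq;

	do {
		seq = read_seqcount_latch(&p->seq);
		d = p->copy[seq & 1];
	} while (read_seqcount_latch_retry(&p->seq, seq));

	return d;
}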
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 659d13a7ddaa..2fdc2208f1ca 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -80,36 +80,35 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
- * | allowing nocb_timer to be armed. |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * --------------------------------------- ----------------------------------|
- * | SEGCBLIST_RCU_CORE | | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
- * | | | |
- * | | | |
- * | CB kthread woke up and | | GP kthread woke up and |
- * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
- * | Processes callbacks concurrently | | |
- * | with rcu_core(), holding | | |
- * | nocb_lock. | | |
- * --------------------------------------- -----------------------------------
- * | |
- * -----------------------------------
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | + unparked CB kthread |
+ * | |
+ * | CB kthread got unparked and processes callbacks concurrently with |
+ * | rcu_core(), holding nocb_lock. |
+ * ---------------------------------------------------------------------------
+ * |
+ * v
+ * ---------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged nocb_lock. |
+ * ---------------------------------------- -----------------------------------
* |
* v
* |--------------------------------------------------------------------------|
- * | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP | |
- * | SEGCBLIST_KTHREAD_CB |
+ * | + unparked CB kthread |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. |
@@ -125,8 +124,8 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | ignores callbacks. Bypass enqueue is enabled. |
@@ -137,11 +136,11 @@ struct rcu_cblist {
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
- * | handles callbacks concurrently. Bypass enqueue is enabled. |
+ * | handles callbacks concurrently. Bypass enqueue is disabled. |
* | Invoke RCU core so we make sure not to preempt it in the middle with |
* | leaving some urgent work unattended within a jiffy. |
* ----------------------------------------------------------------------------
@@ -150,42 +149,31 @@ struct rcu_cblist {
* |--------------------------------------------------------------------------|
* | SEGCBLIST_RCU_CORE | |
* | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
- * | bypass enqueue. |
+ * | holding nocb_lock. Wake up GP kthread if necessary. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * ---------------------------------------------------------------------------|
- * | | |
- * | SEGCBLIST_RCU_CORE | | SEGCBLIST_RCU_CORE | |
- * | SEGCBLIST_LOCKING | | SEGCBLIST_LOCKING | |
- * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
- * | | |
- * | GP kthread woke up and | CB kthread woke up and |
- * | acknowledged the fact that | acknowledged the fact that |
- * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
- * | | The CB kthread goes to sleep |
- * | The callbacks from the target CPU | until it ever gets re-offloaded. |
- * | will be ignored from the GP kthread | |
- * | loop. | |
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED |
+ * | got cleared. The callbacks from the target CPU will be ignored from the|
+ * | GP kthread loop. |
* ----------------------------------------------------------------------------
- * | |
- * -----------------------------------
* |
* v
* ----------------------------------------------------------------------------
* | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | + parked CB kthread |
* | |
- * | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
- * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or |
+ * | local rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
@@ -197,11 +185,7 @@ struct rcu_cblist {
* ----------------------------------------------------------------------------
*/
#define SEGCBLIST_ENABLED BIT(0)
-#define SEGCBLIST_RCU_CORE BIT(1)
-#define SEGCBLIST_LOCKING BIT(2)
-#define SEGCBLIST_KTHREAD_CB BIT(3)
-#define SEGCBLIST_KTHREAD_GP BIT(4)
-#define SEGCBLIST_OFFLOADED BIT(5)
+#define SEGCBLIST_OFFLOADED BIT(1)
struct rcu_segcblist {
struct rcu_head *head;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 3dc1e58865f7..2abba7552605 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -30,6 +30,27 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* way, we must not access it directly
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+/*
+ * Return the ->prev pointer of a list_head in an rcu safe way. Don't
+ * access it directly.
+ *
+ * Any list traversed with list_bidir_prev_rcu() must never use
+ * list_del_rcu(). Doing so will poison the ->prev pointer that
+ * list_bidir_prev_rcu() relies on, which will result in segfaults.
+ * To prevent these segfaults, use list_bidir_del_rcu() instead
+ * of list_del_rcu().
+ */
+#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
+
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
/**
* list_tail_rcu - returns the prev pointer of the head of the list
@@ -159,6 +180,39 @@ static inline void list_del_rcu(struct list_head *entry)
}
/**
+ * list_bidir_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * In contrast to list_del_rcu() doesn't poison the prev pointer thus
+ * allowing backwards traversal via list_bidir_prev_rcu().
+ *
+ * Note: list_empty() on entry does not return true after this because
+ * the entry is in a special undefined state that permits RCU-based
+ * lockfree reverse traversal. In particular this means that we can not
+ * poison the forward and backwards pointers that may still be used for
+ * walking the list.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another list-mutation
+ * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
+ * this same list. However, it is perfectly legal to run concurrently
+ * with the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
+ * the same list.
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_bidir_del_rcu(struct list_head *entry)
+{
+ __list_del_entry(entry);
+}
+
+/**
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
@@ -191,7 +245,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
+ *
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
@@ -519,7 +576,9 @@ static inline void hlist_del_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
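
A minimal usage sketch of the new bidirectional primitives (not part of the
patch; the struct, lock, and function names are illustrative). Readers may
walk backwards via list_bidir_prev_rcu() only if updaters use
list_bidir_del_rcu() exclusively on that list:

  #include <linux/rculist.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct item {
      int val;
      struct list_head node;
      struct rcu_head rcu;
  };

  static LIST_HEAD(items);
  static DEFINE_SPINLOCK(items_lock);    /* serializes updaters */

  /* Reader: forward walk with the new list_for_each_rcu(). */
  static int sum_items(void)
  {
      struct list_head *pos;
      int sum = 0;

      rcu_read_lock();
      list_for_each_rcu(pos, &items)
          sum += list_entry(pos, struct item, node)->val;
      rcu_read_unlock();
      return sum;
  }

  /* Updater: list_bidir_del_rcu() keeps ->prev unpoisoned so concurrent
   * list_bidir_prev_rcu() walks stay safe; list_del_rcu() must never be
   * mixed in on this list.
   */
  static void remove_item(struct item *it)
  {
      spin_lock(&items_lock);
      list_bidir_del_rcu(&it->node);
      spin_unlock(&items_lock);
      kfree_rcu(it, rcu);    /* defer freeing past a grace period */
  }
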
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 89186c499dd4..a97c3bcb1656 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -53,6 +53,13 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
/**
+ * hlist_nulls_pprev_rcu - returns the dereferenced pprev of @node.
+ * @node: element of the list.
+ */
+#define hlist_nulls_pprev_rcu(node) \
+ (*((struct hlist_nulls_node __rcu __force **)(node)->pprev))
+
+/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
@@ -138,7 +145,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
if (last) {
WRITE_ONCE(n->next, last->next);
- n->pprev = &last->next;
+ WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
@@ -148,8 +155,60 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
/* after that hlist_nulls_del will work */
static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
{
- n->pprev = &n->next;
- n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
+ WRITE_ONCE(n->pprev, &n->next);
+ WRITE_ONCE(n->next, (struct hlist_nulls_node *)NULLS_MARKER(NULL));
+}
+
+/**
+ * hlist_nulls_replace_rcu - replace an old entry by a new one
+ * @old: the element to be replaced
+ * @new: the new element to insert
+ *
+ * Description:
+ * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
+ * permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary (such as holding
+ * appropriate locks) to avoid racing with another list-mutation primitive, such
+ * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
+ * list. However, it is perfectly legal to run concurrently with the _rcu
+ * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_replace_rcu(struct hlist_nulls_node *old,
+ struct hlist_nulls_node *new)
+{
+ struct hlist_nulls_node *next = old->next;
+
+ WRITE_ONCE(new->next, next);
+ WRITE_ONCE(new->pprev, old->pprev);
+ rcu_assign_pointer(hlist_nulls_pprev_rcu(new), new);
+ if (!is_a_nulls(next))
+ WRITE_ONCE(next->pprev, &new->next);
+}
+
+/**
+ * hlist_nulls_replace_init_rcu - replace an old entry by a new one and
+ * initialize the old
+ * @old: the element to be replaced
+ * @new: the new element to insert
+ *
+ * Description:
+ * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
+ * permitting racing traversals, and reinitialize the old entry.
+ *
+ * Note: @old must be hashed.
+ *
+ * The caller must take whatever precautions are necessary (such as holding
+ * appropriate locks) to avoid racing with another list-mutation primitive, such
+ * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
+ * list. However, it is perfectly legal to run concurrently with the _rcu
+ * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
+ struct hlist_nulls_node *new)
+{
+ hlist_nulls_replace_rcu(old, new);
+ WRITE_ONCE(old->pprev, NULL);
}
/**
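
A sketch of the new replacement helper in use, assuming updaters are
serialized by a per-bucket spinlock (all names here are illustrative):

  /* Swap @old for @new in an RCU-protected nulls chain while lookups run
   * concurrently; @old may be freed only after a grace period elapses.
   */
  static void bucket_swap(spinlock_t *bucket_lock,
                          struct hlist_nulls_node *old,
                          struct hlist_nulls_node *new)
  {
      spin_lock(bucket_lock);
      hlist_nulls_replace_init_rcu(old, new);
      spin_unlock(bucket_lock);
  }
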
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dfd2399f2cde..c5b30054cd01 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -24,21 +24,22 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/processor.h>
-#include <linux/cpumask.h>
#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
+
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
-void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
struct rcu_gp_oldstate;
@@ -94,9 +95,9 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
- preempt_enable();
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
rcu_read_unlock_strict();
+ preempt_enable();
}
static inline int rcu_preempt_depth(void)
@@ -120,12 +121,6 @@ void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
-#ifdef CONFIG_TASKS_RCU_GENERIC
-void rcu_init_tasks_generic(void);
-#else
-static inline void rcu_init_tasks_generic(void) { }
-#endif
-
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
@@ -134,10 +129,10 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
-static inline void rcu_irq_work_resched(void) { }
+static __always_inline void rcu_irq_work_resched(void) { }
#endif
#ifdef CONFIG_RCU_NOCB_CPU
@@ -145,11 +140,18 @@ void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
@@ -166,6 +168,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
@@ -192,6 +195,7 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
rcu_tasks_trace_qs_blkd(t); \
} \
} while (0)
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
@@ -203,13 +207,12 @@ do { \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
-void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
-void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
@@ -218,7 +221,6 @@ void exit_tasks_rcu_finish(void);
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
-static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
@@ -393,7 +395,7 @@ static inline int debug_lockdep_rcu_enabled(void)
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
- static bool __section(".data.unlikely") __warned; \
+ static bool __section(".data..unlikely") __warned; \
if (debug_lockdep_rcu_enabled() && (c) && \
debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
@@ -421,11 +423,71 @@ static inline void rcu_preempt_sleep_check(void) { }
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
+// See RCU_LOCKDEP_WARN() for an explanation of the double call to
+// debug_lockdep_rcu_enabled().
+static inline bool lockdep_assert_rcu_helper(bool c)
+{
+ return debug_lockdep_rcu_enabled() &&
+ (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
+ debug_lockdep_rcu_enabled();
+}
+
+/**
+ * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
+ */
+#define lockdep_assert_in_rcu_read_lock() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
+ * Note that local_bh_disable() and friends do not suffice here, instead an
+ * actual rcu_read_lock_bh() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_bh() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_sched()
+ * in effect. Note that preempt_disable() and friends do not suffice here,
+ * instead an actual rcu_read_lock_sched() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_sched() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
+ *
+ * Splats if lockdep is enabled and there is no RCU reader of any
+ * type in effect. Note that regions of code protected by things like
+ * preempt_disable(), local_bh_disable(), and local_irq_disable() all qualify
+ * as RCU readers.
+ *
+ * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
+ * kernels that are not also built with PREEMPT_COUNT. But if you have
+ * lockdep enabled, you might as well also enable PREEMPT_COUNT.
+ */
+#define lockdep_assert_in_rcu_reader() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
+ !lock_is_held(&rcu_bh_lock_map) && \
+ !lock_is_held(&rcu_sched_lock_map) && \
+ preemptible()))
+
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
+#define lockdep_assert_in_rcu_reader() do { } while (0)
+
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/*
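
A sketch of where the new assertions might sit, for a hypothetical lookup
whose callers are required to hold rcu_read_lock() (names illustrative):

  struct cfg {
      int mtu;
  };

  static struct cfg __rcu *cur_cfg;    /* assumed never NULL once set */

  static int cfg_mtu(void)
  {
      /* Splats under CONFIG_PROVE_RCU if the caller forgot rcu_read_lock(). */
      lockdep_assert_in_rcu_read_lock();
      return rcu_dereference(cur_cfg)->mtu;
  }
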
@@ -651,6 +713,24 @@ do { \
(c) || rcu_read_lock_sched_held(), \
__rcu)
+/**
+ * rcu_dereference_all_check() - rcu_dereference_all with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is similar to rcu_dereference_check(), but allows protection
+ * by all forms of vanilla RCU readers, including preemption disabled,
+ * bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
+ * RCU" excludes SRCU and the various Tasks RCU flavors. Please note
+ * that this macro should not be backported to any Linux-kernel version
+ * preceding v5.0 due to changes in synchronize_rcu() semantics prior
+ * to that version.
+ */
+#define rcu_dereference_all_check(p, c) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_any_held(), \
+ __rcu)
+
/*
* The tracing infrastructure traces RCU (we want that), but unfortunately
* some of the RCU checks causes tracing to lock up the system.
@@ -706,6 +786,14 @@ do { \
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
+ * rcu_dereference_all() - fetch RCU-all-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_all(p) rcu_dereference_all_check(p, 0)
+
+/**
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
* @p: The pointer to hand off
*
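
A sketch of rcu_dereference_all() covering a reader that relies on an
interrupt-disabled region rather than rcu_read_lock() (names illustrative):

  struct stats {
      u64 packets;
  };

  static struct stats __rcu *cur_stats;

  static u64 read_packets(void)
  {
      unsigned long flags;
      u64 n;

      local_irq_save(flags);    /* irq-off region is a vanilla RCU reader */
      n = rcu_dereference_all(cur_stats)->packets;
      local_irq_restore(flags);
      return n;
  }
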
@@ -738,11 +826,9 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
* until after the all the other CPUs exit their critical sections.
*
- * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
- * wait for regions of code with preemption disabled, including regions of
- * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which
- * define synchronize_sched(), only code enclosed within rcu_read_lock()
- * and rcu_read_unlock() are guaranteed to be waited for.
+ * Both synchronize_rcu() and call_rcu() also wait for regions of code
+ * with preemption disabled, including regions of code with interrupts or
+ * softirqs disabled.
*
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
@@ -797,11 +883,10 @@ static __always_inline void rcu_read_lock(void)
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
* In almost all situations, rcu_read_unlock() is immune from deadlock.
- * In recent kernels that have consolidated synchronize_sched() and
- * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
- * also extends to the scheduler's runqueue and priority-inheritance
- * spinlocks, courtesy of the quiescent-state deferral that is carried
- * out when rcu_read_unlock() is invoked with interrupts disabled.
+ * This deadlock immunity also extends to the scheduler's runqueue
+ * and priority-inheritance spinlocks, courtesy of the quiescent-state
+ * deferral that is carried out when rcu_read_unlock() is invoked with
+ * interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -903,6 +988,20 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
preempt_enable_notrace();
}
+static __always_inline void rcu_read_lock_dont_migrate(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_disable();
+ rcu_read_lock();
+}
+
+static inline void rcu_read_unlock_migrate(void)
+{
+ rcu_read_unlock();
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_enable();
+}
+
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
* @p: The pointer to be initialized.
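
A sketch of the migration-pinned reader pair, useful when the read side
relies on per-CPU state (the per-CPU counter is illustrative):

  static DEFINE_PER_CPU(unsigned long, local_hits);

  static void bump_local_hits(void)
  {
      rcu_read_lock_dont_migrate();
      /* On PREEMPT_RCU kernels migrate_disable() pins the task here;
       * with non-preemptible RCU, rcu_read_lock() already does.
       */
      this_cpu_inc(local_hits);
      rcu_read_unlock_migrate();
  }
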
@@ -957,12 +1056,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_POINTER_INITIALIZER(p, v) \
.p = RCU_INITIALIZER(v)
-/*
- * Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kvfree_rcu()?
- */
-#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
-
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree for double-argument invocations.
@@ -973,11 +1066,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
- * The kfree_rcu() function handles this issue. Rather than encoding a
- * function address in the embedded rcu_head structure, kfree_rcu() instead
- * encodes the offset of the rcu_head structure within the base structure.
- * Because the functions are not allowed in the low-order 4096 bytes of
- * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * The kfree_rcu() function handles this issue. In order to have a universal
+ * callback function handling different offsets of rcu_head, the callback needs
+ * to determine the starting address of the freed object, which can be a large
+ * kmalloc or vmalloc allocation. To allow simply aligning the pointer down to
+ * a page boundary for those, only offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
@@ -1014,14 +1107,23 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+/*
+ * In mm/slab_common.c, no suitable header to include here.
+ */
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+
+/*
+ * The BUILD_BUG_ON() makes sure the rcu_head offset can be handled. See the
+ * comment of kfree_rcu() for details.
+ */
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
- if (___p) { \
- BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
- kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
- } \
+ if (___p) { \
+ BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096); \
+ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
+ } \
} while (0)
#define kvfree_rcu_arg_1(ptr) \
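
The offset rule in practice, as a sketch: the BUILD_BUG_ON() constrains only
where the rcu_head sits, not the total object size:

  struct blob {
      struct rcu_head rh;     /* offset 0, well under the 4096 limit */
      char payload[8192];     /* a large tail is fine */
  };

  static void free_blob(struct blob *b)
  {
      kvfree_rcu(b, rh);
  }
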
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index eda493200663..e6c44eb428ab 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/rcupdate.h>
+#include <linux/cleanup.h>
extern struct lockdep_map rcu_trace_lock_map;
@@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
+ rcu_read_lock_trace(),
+ rcu_read_unlock_trace())
+
#endif /* __LINUX_RCUPDATE_TRACE_H */
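
A sketch of the new scope-based guard; the surrounding type is illustrative:

  struct target {
      bool live;
  };

  static bool target_is_live(struct target *t)
  {
      guard(rcu_tasks_trace)();    /* rcu_read_unlock_trace() at scope exit */
      return READ_ONCE(t->live);
  }
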
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 303ab9bee155..4c92d4291cce 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -16,6 +16,9 @@
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
+
+ /* This is for debugging. */
+ struct rcu_gp_oldstate oldstate;
};
void wakeme_after_rcu(struct rcu_head *head);
@@ -65,4 +68,15 @@ static inline void cond_resched_rcu(void)
#endif
}
+// Has the current task blocked within its current RCU read-side
+// critical section?
+static inline bool has_rcu_reader_blocked(void)
+{
+#ifdef CONFIG_PREEMPT_RCU
+ return !list_empty(&current->rcu_node_entry);
+#else
+ return false;
+#endif
+}
+
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
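
A debugging sketch for the new helper: verify that a section expected to
stay non-blocking really did not block (do_lookup() is a placeholder):

  static void checked_lookup(void)
  {
      rcu_read_lock();
      do_lookup();
      /* Always false on non-PREEMPT_RCU kernels. */
      WARN_ON_ONCE(has_rcu_reader_blocked());
      rcu_read_unlock();
  }
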
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
index 2c8bfd0f1b6b..2fb2af6d9824 100644
--- a/include/linux/rcuref.h
+++ b/include/linux/rcuref.h
@@ -30,7 +30,11 @@ static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
* rcuref_read - Read the number of held reference counts of a rcuref
* @ref: Pointer to the reference count
*
- * Return: The number of held references (0 ... N)
+ * Return: The number of held references (0 ... N). The value 0 does not
+ * indicate that it is safe to schedule the object, protected by this reference
+ * counter, for deconstruction.
+ * If you want to know if the reference counter has been marked DEAD (as
+ * signaled by rcuref_put()), please use rcuref_is_dead().
*/
static inline unsigned int rcuref_read(rcuref_t *ref)
{
@@ -40,6 +44,22 @@ static inline unsigned int rcuref_read(rcuref_t *ref)
return c >= RCUREF_RELEASED ? 0 : c + 1;
}
+/**
+ * rcuref_is_dead - Check if the rcuref has been already marked dead
+ * @ref: Pointer to the reference count
+ *
+ * Return: True if the object has been marked DEAD. This signals that a previous
+ * invocation of rcuref_put() returned true on this reference counter meaning
+ * the protected object can safely be scheduled for deconstruction.
+ * Otherwise, returns false.
+ */
+static inline bool rcuref_is_dead(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
+}
+
extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
/**
@@ -71,27 +91,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
return rcuref_get_slowpath(ref);
}
-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
/*
* Internal helper. Do not invoke directly.
*/
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
+ int cnt;
+
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
"suspicious rcuref_put_rcusafe() usage");
/*
* Unconditionally decrease the reference count. The saturation and
* dead zones provide enough tolerance for this.
*/
- if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+ cnt = atomic_sub_return_release(1, &ref->refcnt);
+ if (likely(cnt >= 0))
return false;
/*
* Handle the last reference drop and cases inside the saturation
* and dead zones.
*/
- return rcuref_put_slowpath(ref);
+ return rcuref_put_slowpath(ref, cnt);
}
/**
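
A sketch distinguishing the two queries; struct obj is illustrative:

  struct obj {
      rcuref_t ref;
  };

  static bool obj_can_destroy(struct obj *o)
  {
      /* rcuref_read() == 0 alone is NOT a license to free: only a
       * rcuref_put() that returned true marks the count DEAD.
       */
      return rcuref_is_dead(&o->ref);
  }
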
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d9ac7b136aea..f519cd680228 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -90,36 +90,6 @@ static inline void synchronize_rcu_expedited(void)
synchronize_rcu();
}
-/*
- * Add one more declaration of kvfree() here. It is
- * not so straight forward to just include <linux/mm.h>
- * where it is defined due to getting many compile
- * errors caused by that include.
- */
-extern void kvfree(const void *addr);
-
-static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
- if (head) {
- call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
- return;
- }
-
- // kvfree_rcu(one_arg) call.
- might_sleep();
- synchronize_rcu();
- kvfree(ptr);
-}
-
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-#else
-static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
- __kvfree_call_rcu(head, ptr);
-}
-#endif
-
void rcu_qs(void);
static inline void rcu_softirq_qs(void)
@@ -158,9 +128,7 @@ void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
-static inline void kfree_rcu_scheduler_running(void) { }
-static inline bool rcu_gp_might_be_stalled(void) { return false; }
+static inline void rcu_momentary_eqs(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 254244202ea9..9d2d7bd251d4 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,12 +34,9 @@ static inline void rcu_virt_note_context_switch(void)
}
void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
-void kfree_rcu_scheduler_running(void);
-bool rcu_gp_might_be_stalled(void);
+void rcu_momentary_eqs(void);
struct rcu_gp_oldstate {
unsigned long rgos_norm;
@@ -103,7 +100,7 @@ extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
-#ifndef CONFIG_PREEMPTION
+#ifndef CONFIG_PREEMPT_RCU
void rcu_all_qs(void);
#endif
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 27343424225c..9ad134a04b41 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -4,18 +4,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
-
-/*
- * rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner.
- *
- * The only time @task is non-nil is when a user is blocked (or
- * checking if it needs to) on a condition, and reset as soon as we
- * know that the condition has succeeded and are awoken.
- */
-struct rcuwait {
- struct task_struct __rcu *task;
-};
+#include <linux/types.h>
#define __RCUWAIT_INITIALIZER(name) \
{ .task = NULL, }
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index abcdde4df697..aa08c3bbbf59 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -177,16 +177,38 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
-static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
-{
- __hw_protection_shutdown(reason, ms_until_forced, false);
-}
+/**
+ * enum hw_protection_action - Hardware protection action
+ *
+ * @HWPROT_ACT_DEFAULT:
+ * The default action should be taken. This is HWPROT_ACT_SHUTDOWN
+ * by default, but can be overridden.
+ * @HWPROT_ACT_SHUTDOWN:
+ * The system should be shut down (powered off) for HW protection.
+ * @HWPROT_ACT_REBOOT:
+ * The system should be rebooted for HW protection.
+ */
+enum hw_protection_action { HWPROT_ACT_DEFAULT, HWPROT_ACT_SHUTDOWN, HWPROT_ACT_REBOOT };
-static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+ enum hw_protection_action action);
+
+/**
+ * hw_protection_trigger - Trigger default emergency system hardware protection action
+ *
+ * @reason: Reason of emergency shutdown or reboot to be printed.
+ * @ms_until_forced: Time to wait for orderly shutdown or reboot before
+ * triggering it. Negative value disables the forced
+ * shutdown or reboot.
+ *
+ * Initiate an emergency system shutdown or reboot in order to protect
+ * hardware from further damage. The exact action taken is controllable at
+ * runtime and defaults to shutdown.
+ */
+static inline void hw_protection_trigger(const char *reason, int ms_until_forced)
{
- __hw_protection_shutdown(reason, ms_until_forced, true);
+ __hw_protection_trigger(reason, ms_until_forced, HWPROT_ACT_DEFAULT);
}
/*
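
A sketch for a hardware-monitor driver using the renamed entry point; the
action actually taken follows the runtime-configured default:

  static void overheat_critical(void)
  {
      /* Orderly action first; forced after 5 s if it does not complete. */
      hw_protection_trigger("critical temperature reached", 5000);
  }
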
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
index 8eac4f3d5254..d10563afd91c 100644
--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -6,6 +6,8 @@
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
+#define __ostream_printf __printf(2, 3)
+
struct ref_tracker;
struct ref_tracker_dir {
@@ -17,15 +19,45 @@ struct ref_tracker_dir {
bool dead;
struct list_head list; /* List of active trackers */
struct list_head quarantine; /* List of dead trackers */
- char name[32];
+ const char *class; /* object classname */
#endif
};
#ifdef CONFIG_REF_TRACKER
+#ifdef CONFIG_DEBUG_FS
+
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir);
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ref_tracker_dir_init - initialize a ref_tracker dir
+ * @dir: ref_tracker_dir to be initialized
+ * @quarantine_count: max number of entries to be tracked
+ * @class: pointer to static string that describes object type
+ *
+ * Initialize a ref_tracker_dir. If debugfs is configured, then a file
+ * will also be created for it under the top-level ref_tracker debugfs
+ * directory.
+ *
+ * Note that @class must point to a static string.
+ */
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
{
INIT_LIST_HEAD(&dir->list);
INIT_LIST_HEAD(&dir->quarantine);
@@ -34,7 +66,8 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
dir->dead = false;
refcount_set(&dir->untracked, 1);
refcount_set(&dir->no_tracker, 1);
- strscpy(dir->name, name, sizeof(dir->name));
+ dir->class = class;
+ ref_tracker_dir_debugfs(dir);
stack_depot_init();
}
@@ -58,7 +91,16 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
unsigned int quarantine_count,
- const char *name)
+ const char *class)
+{
+}
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
{
}
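
A sketch of the new class-based initialization; "connection" and the
symlink format are illustrative:

  static struct ref_tracker_dir conn_refs;

  static void conn_refs_setup(int id)
  {
      /* @class must be a static string; with debugfs enabled a matching
       * file appears under the top-level ref_tracker directory.
       */
      ref_tracker_dir_init(&conn_refs, 16, "connection");
      ref_tracker_dir_symlink(&conn_refs, "conn-%d", id);
  }
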
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 59b3b752394d..80dc023ac2bf 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -87,6 +87,15 @@
* The decrements dec_and_test() and sub_and_test() also provide acquire
* ordering on success.
*
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide
+ * acquire and release ordering for cases when the memory occupied by the
+ * object might be reused to store another object. This is important for the
+ * cases where secondary validation is required to detect such reuse, e.g.
+ * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after
+ * the refcount is taken, hence acquire order is necessary. Similarly, when the
+ * object is initialized, all stores to its attributes should be visible before
+ * the refcount is set, otherwise a stale attribute value might be used by
+ * another task which succeeds in taking a refcount to the new object.
*/
#ifndef _LINUX_REFCOUNT_H
@@ -126,6 +135,31 @@ static inline void refcount_set(refcount_t *r, int n)
}
/**
+ * refcount_set_release - set a refcount's value with release ordering
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides release memory ordering which will order previous memory operations
+ * against this store. This ensures all updates to this object are visible
+ * once the refcount is set and stale values from the object previously
+ * occupying this memory are overwritten with new ones.
+ *
+ * This function should be called only after the new object is fully initialized.
+ * After this call the object should be considered visible to other tasks even
+ * if it was not yet added into an object collection normally used to discover
+ * it. This is because other tasks might have discovered the object previously
+ * occupying the same memory and after memory reuse they can succeed in taking
+ * a refcount to the new object and start using it.
+ */
+static inline void refcount_set_release(refcount_t *r, int n)
+{
+ atomic_set_release(&r->refs, n);
+}
+
+/**
* refcount_read - get a refcount's value
* @r: the refcount
*
@@ -178,6 +212,71 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
return __refcount_add_not_zero(i, r, NULL);
}
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
+ int limit)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+
+ if (i > limit - old) {
+ if (oldp)
+ *oldp = old;
+ return false;
+ }
+ } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
+static inline __must_check bool
+__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
+{
+ return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
+}
+
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
+}
+
+/**
+ * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0
+ *
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success; it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc_not_zero_acquire() should instead be used to increment a
+ * reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r)
+{
+ return __refcount_add_not_zero_acquire(i, r, NULL);
+}
+
static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
@@ -236,6 +335,32 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return __refcount_inc_not_zero(r, NULL);
}
+static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_acquire(1, r, oldp);
+}
+
+/**
+ * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on
+ * success.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success; it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
+static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r)
+{
+ return __refcount_inc_not_zero_acquire(r, NULL);
+}
+
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
@@ -266,12 +391,12 @@ bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
if (oldp)
*oldp = old;
- if (old == i) {
+ if (old > 0 && old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
- if (unlikely(old < 0 || old - i < 0))
+ if (unlikely(old <= 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
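
A sketch of the SLAB_TYPESAFE_BY_RCU pattern these orderings serve; the
lookup and put helpers and the field names are illustrative:

  struct conn {
      u32 key;
      refcount_t ref;
  };

  static struct conn *conn_lookup(u32 key)
  {
      struct conn *c;

      rcu_read_lock();
      c = conn_find_rcu(key);                    /* hypothetical lookup */
      if (c && refcount_inc_not_zero_acquire(&c->ref)) {
          /* Acquire orders this revalidation after the ref grab. */
          if (c->key != key) {                   /* slab memory was reused */
              conn_put(c);                       /* hypothetical release */
              c = NULL;
          }
      } else {
          c = NULL;
      }
      rcu_read_unlock();
      return c;
  }

  static void conn_init(struct conn *c, u32 key)
  {
      c->key = key;                      /* initialize everything first... */
      refcount_set_release(&c->ref, 1);  /* ...then publish the refcount */
  }
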
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a6bc2980a98b..b0b9be750d93 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -54,12 +54,24 @@ struct sdw_slave;
#define REGMAP_UPSHIFT(s) (-(s))
#define REGMAP_DOWNSHIFT(s) (s)
-/* An enum of all the supported cache types */
+/*
+ * The supported cache types, the default is no cache. Any new caches should
+ * usually use the maple tree cache unless they specifically require that there
+ * are never any allocations at runtime in which case they should use the sparse
+ * flat cache. The rbtree cache *may* have some performance advantage for very
+ * low end systems that make heavy use of cache syncs but is mainly legacy.
+ * These caches are sparse and entries will be initialized from hardware if no
+ * default has been provided.
+ * The non-sparse flat cache is provided for compatibility with existing users
+ * and will zero-initialize cache entries for which no defaults are provided.
+ * New users should use the sparse flat cache.
+ */
enum regcache_type {
REGCACHE_NONE,
REGCACHE_RBTREE,
REGCACHE_FLAT,
REGCACHE_MAPLE,
+ REGCACHE_FLAT_S,
};
/**
@@ -106,17 +118,17 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
 * error return value in case of an error read. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
@@ -133,20 +145,20 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops).
- * Should be less than ~10us since udelay is used
- * (see Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
- * error return value in case of a error read. In the two former cases,
- * the last read value at @addr is stored in @val.
- *
* This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
*
* Note: In general regmap cannot be used in atomic context. If you want to use
* this macro then first setup your regmap for atomic use (flat or no cache
* and MMIO regmap).
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of an error read. In the two former cases,
+ * the last read value at @addr is stored in @val.
*/
#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
({ \
@@ -177,17 +189,17 @@ struct reg_sequence {
* @field: Regmap field to read from
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
 * error return value in case of an error read. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
({ \
@@ -499,6 +511,32 @@ struct regmap_range_cfg {
unsigned int window_len;
};
+/**
+ * struct regmap_sdw_mbq_cfg - Configuration for Multi-Byte Quantities
+ *
+ * @mbq_size: Callback returning the actual size of the given register.
+ * @deferrable: Callback returning true if the hardware can defer
+ * transactions to the given register. Deferral should
+ * only be used by SDCA parts and typically which controls
+ * are deferrable will be specified in either as a hard
+ * coded list or from the DisCo tables in the platform
+ * firmware.
+ *
+ * @timeout_us: The time in microseconds after which waiting for a deferred
+ * transaction should time out.
+ * @retry_us: The time in microseconds between polls of the function busy
+ * status whilst waiting for an opportunity to retry a deferred
+ * transaction.
+ *
+ * Provides additional configuration required for SoundWire MBQ register maps.
+ */
+struct regmap_sdw_mbq_cfg {
+ int (*mbq_size)(struct device *dev, unsigned int reg);
+ bool (*deferrable)(struct device *dev, unsigned int reg);
+ unsigned long timeout_us;
+ unsigned long retry_us;
+};
+
struct regmap_async;
typedef int (*regmap_hw_write)(void *context, const void *data,
@@ -643,8 +681,9 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
-struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
@@ -704,8 +743,9 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
-struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
@@ -878,7 +918,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
+ * a struct regmap. Implies 'fast_io'.
*/
#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
__regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
@@ -892,7 +932,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
+ * a struct regmap. Implies 'fast_io'.
*/
#define regmap_init_mmio(dev, regs, config) \
regmap_init_mmio_clk(dev, NULL, regs, config)
@@ -935,7 +975,22 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*/
#define regmap_init_sdw_mbq(sdw, config) \
__regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
- sdw, config)
+ &sdw->dev, sdw, config, NULL)
+
+/**
+ * regmap_init_sdw_mbq_cfg() - Initialise MBQ SDW register map with config
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ * @mbq_config: Properties for the MBQ registers
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ dev, sdw, config, mbq_config)
/**
* regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
@@ -1088,7 +1143,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
- * device management code.
+ * device management code. Implies 'fast_io'.
*/
#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
__regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
@@ -1103,7 +1158,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
- * device management code.
+ * device management code. Implies 'fast_io'.
*/
#define devm_regmap_init_mmio(dev, regs, config) \
devm_regmap_init_mmio_clk(dev, NULL, regs, config)
@@ -1148,7 +1203,23 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*/
#define devm_regmap_init_sdw_mbq(sdw, config) \
__regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
- sdw, config)
+ &sdw->dev, sdw, config, NULL)
+
+/**
+ * devm_regmap_init_sdw_mbq_cfg() - Initialise managed MBQ SDW register map with config
+ *
+ * @dev: Device that will be interacted with
+ * @sdw: SoundWire Device that will be interacted with
+ * @config: Configuration for register map
+ * @mbq_config: Properties for the MBQ registers
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, \
+ #config, dev, sdw, config, mbq_config)
/**
* devm_regmap_init_slimbus() - Initialise managed register map
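
A probe-time sketch of the configured MBQ variant; the callbacks, regmap
config, and constants are illustrative:

  static int my_mbq_size(struct device *dev, unsigned int reg)
  {
      return 2;    /* e.g. all MBQ registers are 16 bit in this sketch */
  }

  static bool my_mbq_deferrable(struct device *dev, unsigned int reg)
  {
      return false;    /* no deferrable controls in this sketch */
  }

  static const struct regmap_sdw_mbq_cfg my_mbq_cfg = {
      .mbq_size   = my_mbq_size,
      .deferrable = my_mbq_deferrable,
      .timeout_us = 10000,
      .retry_us   = 100,
  };

  static int my_probe(struct sdw_slave *sdw)
  {
      struct regmap *map;

      map = devm_regmap_init_sdw_mbq_cfg(&sdw->dev, sdw,
                                         &my_regmap_config, &my_mbq_cfg);
      return PTR_ERR_OR_ZERO(map);
  }
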
@@ -1237,6 +1308,8 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *reg, void *val,
+ size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
@@ -1285,6 +1358,7 @@ bool regmap_can_raw_write(struct regmap *map);
size_t regmap_get_raw_read_max(struct regmap *map);
size_t regmap_get_raw_write_max(struct regmap *map);
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
unsigned int max);
@@ -1326,6 +1400,15 @@ static inline int regmap_clear_bits(struct regmap *map,
return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ if (value)
+ return regmap_set_bits(map, reg, bits);
+ else
+ return regmap_clear_bits(map, reg, bits);
+}
+
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
/**
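
The new helper collapses the usual set/clear branch; a sketch with
illustrative register constants:

  static int codec_set_mute(struct regmap *map, bool mute)
  {
      /* Equivalent to regmap_set_bits()/regmap_clear_bits() on @mute. */
      return regmap_assign_bits(map, CODEC_CTRL, CODEC_MUTE_BIT, mute);
  }
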
@@ -1519,6 +1602,9 @@ struct regmap_irq_chip_data;
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
+ * @domain_suffix: Name suffix to be appended to end of IRQ domain name. Needed
+ * when multiple regmap-IRQ controllers are created from same
+ * device.
*
* @main_status: Base main status register address. For chips which have
* interrupts arranged in separate sub-irq blocks with own IRQ
@@ -1561,7 +1647,9 @@ struct regmap_irq_chip_data;
* @ack_invert: Inverted ack register: cleared bits for ack.
* @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
* @status_invert: Inverted status register: cleared bits are active interrupts.
- * @wake_invert: Inverted wake register: cleared bits are wake enabled.
+ * @status_is_level: Status register is the actual signal level: XOR status
+ *	register with previous value to get active interrupts.
+ * @wake_invert: Inverted wake register: cleared bits are wake disabled.
* @type_in_mask: Use the mask registers for controlling irq type. Use this if
* the hardware provides separate bits for rising/falling edge
* or low/high level interrupts and they should be combined into
@@ -1604,10 +1692,11 @@ struct regmap_irq_chip_data;
*/
struct regmap_irq_chip {
const char *name;
+ const char *domain_suffix;
unsigned int main_status;
unsigned int num_main_status_bits;
- struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ const struct regmap_irq_sub_irq_map *sub_reg_offsets;
int num_main_regs;
unsigned int status_base;
@@ -1623,6 +1712,7 @@ struct regmap_irq_chip {
unsigned int ack_invert:1;
unsigned int clear_ack:1;
unsigned int status_invert:1;
+ unsigned int status_is_level:1;
unsigned int wake_invert:1;
unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1;
@@ -1790,6 +1880,13 @@ static inline int regmap_clear_bits(struct regmap *map,
return -EINVAL;
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_test_bits(struct regmap *map,
unsigned int reg, unsigned int bits)
{
@@ -1956,6 +2053,12 @@ static inline bool regmap_might_sleep(struct regmap *map)
return true;
}
+static inline void regcache_sort_defaults(struct reg_default *defaults,
+ unsigned int ndefaults)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regset.h b/include/linux/regset.h
index 9061266dd8de..ad1ca6fe04f4 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -151,7 +151,8 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
* @align: Required alignment, in bytes.
* @bias: Bias from natural indexing.
* @core_note_type: ELF note @n_type value used in core dumps.
- * @get: Function to fetch values.
+ * @core_note_name: ELF note name to qualify the note type.
+ * @regset_get: Function to fetch values.
* @set: Function to store values.
* @active: Function to report if regset is active, or %NULL.
* @writeback: Function to write data back to user memory, or %NULL.
@@ -190,6 +191,10 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
*
* If nonzero, @core_note_type gives the n_type field (NT_* value)
* of the core file note in which this regset's data appears.
+ * @core_note_name specifies the note name. The preferred way to
+ * specify these two fields is to use the @USER_REGSET_NOTE_TYPE()
+ * macro.
+ *
* NT_PRSTATUS is a special case in that the regset data starts at
* offsetof(struct elf_prstatus, pr_reg) into the note data; that is
* part of the per-machine ELF formats userland knows about. In
@@ -207,8 +212,13 @@ struct user_regset {
unsigned int align;
unsigned int bias;
unsigned int core_note_type;
+ const char *core_note_name;
};
+#define USER_REGSET_NOTE_TYPE(type) \
+ .core_note_type = (NT_ ## type), \
+ .core_note_name = (NN_ ## type)
+
/**
* struct user_regset_view - available regsets
* @name: Identifier, e.g. UTS_MACHINE string.
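
A sketch of the macro in an architecture's regset table; the fetch/store
helpers are illustrative and ELF_NGREG/elf_greg_t are arch-specific:

  static const struct user_regset my_regsets[] = {
      [0] = {
          USER_REGSET_NOTE_TYPE(PRSTATUS), /* sets n_type and note name */
          .n          = ELF_NGREG,
          .size       = sizeof(elf_greg_t),
          .align      = sizeof(elf_greg_t),
          .regset_get = gpr_get,
          .set        = gpr_set,
      },
  };
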
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 59d0b9a79e6e..56fe2693d9b2 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -128,11 +128,11 @@ struct regulator;
*
* @supply: The name of the supply. Initialised by the user before
* using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
* @init_load_uA: After getting the regulator, regulator_set_load() will be
* called with this load. Initialised by the user before
* using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -140,8 +140,8 @@ struct regulator;
*/
struct regulator_bulk_data {
const char *supply;
- int init_load_uA;
struct regulator *consumer;
+ int init_load_uA;
/* private: Internal use */
int ret;
@@ -200,8 +200,6 @@ int regulator_disable_deferred(struct regulator *regulator, int ms);
int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
-int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
- struct regulator_bulk_data **consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
@@ -235,6 +233,11 @@ int regulator_sync_voltage(struct regulator *regulator);
int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA);
int regulator_get_current_limit(struct regulator *regulator);
+int regulator_get_unclaimed_power_budget(struct regulator *regulator);
+int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req);
+void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw);
int regulator_set_mode(struct regulator *regulator, unsigned int mode);
unsigned int regulator_get_mode(struct regulator *regulator);
@@ -250,6 +253,7 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
unsigned *vsel_mask);
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector);
+int regulator_hardware_enable(struct regulator *regulator, bool enable);
/* regulator notifier block */
int regulator_register_notifier(struct regulator *regulator,
@@ -445,8 +449,10 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
}
-static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
- struct regulator_bulk_data **consumers)
+static inline int devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers)
{
return 0;
}
@@ -525,6 +531,22 @@ static inline int regulator_get_current_limit(struct regulator *regulator)
return 0;
}
+static inline int regulator_get_unclaimed_power_budget(struct regulator *regulator)
+{
+ return INT_MAX;
+}
+
+static inline int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw)
+{
+}
+
static inline int regulator_set_mode(struct regulator *regulator,
unsigned int mode)
{
@@ -571,6 +593,12 @@ static inline int regulator_list_hardware_vsel(struct regulator *regulator,
return -EOPNOTSUPP;
}
+static inline int regulator_hardware_enable(struct regulator *regulator,
+ bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb)
{
@@ -648,6 +676,44 @@ regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_REGULATOR)
+struct regulator *__must_check of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers);
+#else
+static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers)
+{
+ return 0;
+}
+
+#endif
+
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
int min_uV, int target_uV,
int max_uV)
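
A consumer-side sketch of the new power-budget API; the function name and
milliwatt figure are illustrative:

  static int camera_power_on(struct regulator *reg, unsigned int mw)
  {
      int ret;

      ret = regulator_request_power_budget(reg, mw);
      if (ret)
          return ret;    /* budget exhausted (or -EOPNOTSUPP without support) */

      ret = regulator_enable(reg);
      if (ret)
          regulator_free_power_budget(reg, mw);    /* undo on failure */
      return ret;
  }
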
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 73291f280a23..5e314a4294fb 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -8,7 +8,8 @@
#ifndef __LINUX_REGULATOR_COUPLER_H_
#define __LINUX_REGULATOR_COUPLER_H_
-#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
#include <linux/suspend.h>
struct regulator_coupler;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index f230a472ccd3..978cf593b662 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -269,6 +269,11 @@ enum regulator_type {
* config but it cannot store it for later usage.
* Callback should return 0 on success or negative ERRNO
* indicating failure.
+ * @init_cb: Optional callback called after the parsing of init_data.
+ * Allows the regulator to perform runtime init if necessary,
+ * such as synching the regulator and the parsed constraints.
+ * Callback should return 0 on success or negative ERRNO
+ * indicating failure.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -365,6 +370,8 @@ struct regulator_desc {
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
struct regulator_config *);
+ int (*init_cb)(struct regulator_dev *,
+ struct regulator_config *);
int id;
unsigned int continuous_voltage_range:1;
unsigned n_voltages;
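
A hedged sketch of how a driver might wire up the new init_cb hook; the descriptor name and the callback body are illustrative only:

	#include <linux/regulator/driver.h>

	/* Called once init_data has been parsed; a driver could use it to
	 * bring the hardware in line with the parsed constraints.
	 */
	static int example_init_cb(struct regulator_dev *rdev,
				   struct regulator_config *cfg)
	{
		return 0;	/* runtime init would go here */
	}

	static const struct regulator_desc example_desc = {
		.name    = "example-ldo",	/* hypothetical */
		.init_cb = example_init_cb,
		/* .id, .ops, voltage tables, etc. omitted */
	};
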
@@ -649,6 +656,11 @@ struct regulator_dev {
int cached_err;
bool use_cached_err;
spinlock_t err_lock;
+
+ int pw_requested_mW;
+
+ /* regulator notification forwarding */
+ struct notifier_block supply_fwd_nb;
};
/*
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0cd76d264727..1fc440c5c4c7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -113,6 +113,7 @@ struct notification_limit {
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
* @ilim_uA: Maximum input current.
+ * @pw_budget_mW: Power budget for the regulator in mW.
* @system_load: Load that isn't captured by any consumer requests.
*
* @over_curr_limits: Limits for acting on over current.
@@ -185,6 +186,7 @@ struct regulation_constraints {
int max_uA;
int ilim_uA;
+ int pw_budget_mW;
int system_load;
/* used for coupled regulators */
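
On the machine side, the budget is a plain constraint field. A minimal board-data sketch; the "vsys" name and the 2000 mW figure are invented:

	#include <linux/regulator/machine.h>

	static struct regulator_init_data example_init_data = {
		.constraints = {
			.name         = "vsys",
			.pw_budget_mW = 2000,	/* 2 W available to consumers */
		},
	};
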
@@ -273,8 +275,6 @@ struct regulator_consumer_supply {
* be usable.
* @num_consumer_supplies: Number of consumer device supplies.
* @consumer_supplies: Consumer device supply configuration.
- *
- * @regulator_init: Callback invoked when the regulator has been registered.
* @driver_data: Data passed to regulator_init.
*/
struct regulator_init_data {
@@ -285,8 +285,7 @@ struct regulator_init_data {
int num_consumer_supplies;
struct regulator_consumer_supply *consumer_supplies;
- /* optional regulator machine specific init */
- int (*regulator_init)(void *driver_data);
+ /* optional regulator machine specific data */
void *driver_data; /* core does not touch this */
};
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 8712c091abf0..61dcd8e00a2f 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -2,7 +2,7 @@
/*
* max8952.h - Voltage regulation for the Maxim 8952
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
diff --git a/include/linux/regulator/mt6363-regulator.h b/include/linux/regulator/mt6363-regulator.h
new file mode 100644
index 000000000000..60761f01d3ad
--- /dev/null
+++ b/include/linux/regulator/mt6363-regulator.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd
+ */
+
+#include <linux/bits.h>
+
+#ifndef __LINUX_REGULATOR_MT6363_H
+#define __LINUX_REGULATOR_MT6363_H
+
+/* Register */
+#define MT6363_TOP_TRAP 0x6
+#define MT6363_TOP_TMA_KEY_L 0x36e
+#define MT6363_RG_BUCK0_EN_ADDR 0x210
+#define MT6363_RG_BUCK_VS2_EN_BIT 0
+#define MT6363_RG_BUCK_VBUCK1_EN_BIT 1
+#define MT6363_RG_BUCK_VBUCK2_EN_BIT 2
+#define MT6363_RG_BUCK_VBUCK3_EN_BIT 3
+#define MT6363_RG_BUCK_VBUCK4_EN_BIT 4
+#define MT6363_RG_BUCK_VBUCK5_EN_BIT 5
+#define MT6363_RG_BUCK_VBUCK6_EN_BIT 6
+#define MT6363_RG_BUCK_VBUCK7_EN_BIT 7
+#define MT6363_RG_BUCK1_EN_ADDR 0x213
+#define MT6363_RG_BUCK_VS1_EN_BIT 0
+#define MT6363_RG_BUCK_VS3_EN_BIT 1
+#define MT6363_RG_LDO_VSRAM_DIGRF_EN_BIT 4
+#define MT6363_RG_LDO_VSRAM_MDFE_EN_BIT 5
+#define MT6363_RG_LDO_VSRAM_MODEM_EN_BIT 6
+#define MT6363_RG_BUCK0_LP_ADDR 0x216
+#define MT6363_RG_BUCK_VS2_LP_BIT 0
+#define MT6363_RG_BUCK_VBUCK1_LP_BIT 1
+#define MT6363_RG_BUCK_VBUCK2_LP_BIT 2
+#define MT6363_RG_BUCK_VBUCK3_LP_BIT 3
+#define MT6363_RG_BUCK_VBUCK4_LP_BIT 4
+#define MT6363_RG_BUCK_VBUCK5_LP_BIT 5
+#define MT6363_RG_BUCK_VBUCK6_LP_BIT 6
+#define MT6363_RG_BUCK_VBUCK7_LP_BIT 7
+#define MT6363_RG_BUCK1_LP_ADDR 0x219
+#define MT6363_RG_BUCK_VS1_LP_BIT 0
+#define MT6363_RG_BUCK_VS3_LP_BIT 1
+#define MT6363_RG_LDO_VSRAM_DIGRF_LP_BIT 4
+#define MT6363_RG_LDO_VSRAM_MDFE_LP_BIT 5
+#define MT6363_RG_LDO_VSRAM_MODEM_LP_BIT 6
+#define MT6363_RG_BUCK_VS2_VOSEL_ADDR 0x21c
+#define MT6363_RG_BUCK_VS2_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK1_VOSEL_ADDR 0x21d
+#define MT6363_RG_BUCK_VBUCK1_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK2_VOSEL_ADDR 0x21e
+#define MT6363_RG_BUCK_VBUCK2_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK3_VOSEL_ADDR 0x21f
+#define MT6363_RG_BUCK_VBUCK3_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK4_VOSEL_ADDR 0x220
+#define MT6363_RG_BUCK_VBUCK4_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK5_VOSEL_ADDR 0x221
+#define MT6363_RG_BUCK_VBUCK5_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK6_VOSEL_ADDR 0x222
+#define MT6363_RG_BUCK_VBUCK6_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK7_VOSEL_ADDR 0x223
+#define MT6363_RG_BUCK_VBUCK7_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VS1_VOSEL_ADDR 0x224
+#define MT6363_RG_BUCK_VS1_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VS3_VOSEL_ADDR 0x225
+#define MT6363_RG_BUCK_VS3_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_ADDR 0x228
+#define MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_ADDR 0x229
+#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_ADDR 0x22a
+#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_BUCK_TOP_KEY_PROT_LO 0x13fa
+#define MT6363_BUCK_VS2_WDTDBG_VOSEL_ADDR 0x13fc
+#define MT6363_BUCK_VBUCK1_WDTDBG_VOSEL_ADDR 0x13fd
+#define MT6363_BUCK_VBUCK2_WDTDBG_VOSEL_ADDR 0x13fe
+#define MT6363_BUCK_VBUCK3_WDTDBG_VOSEL_ADDR 0x13ff
+#define MT6363_BUCK_VBUCK4_WDTDBG_VOSEL_ADDR 0x1400
+#define MT6363_BUCK_VBUCK5_WDTDBG_VOSEL_ADDR 0x1401
+#define MT6363_BUCK_VBUCK6_WDTDBG_VOSEL_ADDR 0x1402
+#define MT6363_BUCK_VBUCK7_WDTDBG_VOSEL_ADDR 0x1403
+#define MT6363_BUCK_VS1_WDTDBG_VOSEL_ADDR 0x1404
+#define MT6363_BUCK_VS3_WDTDBG_VOSEL_ADDR 0x1405
+#define MT6363_RG_BUCK_EFUSE_RSV1 0x1417
+#define MT6363_RG_BUCK_EFUSE_RSV1_MASK GENMASK(7, 4)
+#define MT6363_BUCK_VS2_OP_EN_0 0x145d
+#define MT6363_BUCK_VS2_HW_LP_MODE 0x1468
+#define MT6363_BUCK_VBUCK1_OP_EN_0 0x14dd
+#define MT6363_BUCK_VBUCK1_HW_LP_MODE 0x14e8
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_EN_ADDR 0x14ea
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_ADDR 0x14eb
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK2_OP_EN_0 0x155d
+#define MT6363_BUCK_VBUCK2_HW_LP_MODE 0x1568
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_EN_ADDR 0x156a
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_ADDR 0x156b
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK3_OP_EN_0 0x15dd
+#define MT6363_BUCK_VBUCK3_HW_LP_MODE 0x15e8
+#define MT6363_BUCK_VBUCK4_OP_EN_0 0x165d
+#define MT6363_BUCK_VBUCK4_HW_LP_MODE 0x1668
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_EN_ADDR 0x166a
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_ADDR 0x166b
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK5_OP_EN_0 0x16dd
+#define MT6363_BUCK_VBUCK5_HW_LP_MODE 0x16e8
+#define MT6363_BUCK_VBUCK6_OP_EN_0 0x175d
+#define MT6363_BUCK_VBUCK6_HW_LP_MODE 0x1768
+#define MT6363_BUCK_VBUCK7_OP_EN_0 0x17dd
+#define MT6363_BUCK_VBUCK7_HW_LP_MODE 0x17e8
+#define MT6363_BUCK_VS1_OP_EN_0 0x185d
+#define MT6363_BUCK_VS1_HW_LP_MODE 0x1868
+#define MT6363_BUCK_VS3_OP_EN_0 0x18dd
+#define MT6363_BUCK_VS3_HW_LP_MODE 0x18e8
+#define MT6363_RG_VS1_FCCM_ADDR 0x1964
+#define MT6363_RG_VS1_FCCM_BIT 0
+#define MT6363_RG_VS3_FCCM_ADDR 0x1973
+#define MT6363_RG_VS3_FCCM_BIT 0
+#define MT6363_RG_BUCK0_FCCM_ADDR 0x1a02
+#define MT6363_RG_VBUCK1_FCCM_BIT 0
+#define MT6363_RG_VBUCK2_FCCM_BIT 1
+#define MT6363_RG_VBUCK3_FCCM_BIT 2
+#define MT6363_RG_VS2_FCCM_BIT 3
+#define MT6363_RG_BUCK0_1_FCCM_ADDR 0x1a82
+#define MT6363_RG_VBUCK4_FCCM_BIT 0
+#define MT6363_RG_VBUCK5_FCCM_BIT 1
+#define MT6363_RG_VBUCK6_FCCM_BIT 2
+#define MT6363_RG_VBUCK7_FCCM_BIT 3
+#define MT6363_RG_VCN13_VOSEL_ADDR 0x1b0f
+#define MT6363_RG_VCN13_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VEMC_VOSEL_ADDR 0x1b10
+#define MT6363_RG_VEMC_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VEMC_VOSEL_1_MASK GENMASK(7, 4)
+#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_ADDR 0x1b14
+#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_ADDR 0x1b15
+#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_ADDR 0x1b16
+#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_APU_VOSEL_ADDR 0x1b17
+#define MT6363_RG_LDO_VSRAM_APU_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_VEMC_VOCAL_ADDR 0x1b1b
+#define MT6363_RG_VEMC_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_LDO_VCN15_ADDR 0x1b57
+#define MT6363_RG_LDO_VCN15_EN_BIT 0
+#define MT6363_RG_LDO_VCN15_LP_BIT 1
+#define MT6363_LDO_VCN15_HW_LP_MODE 0x1b5b
+#define MT6363_LDO_VCN15_OP_EN0 0x1b5c
+#define MT6363_RG_LDO_VRF09_ADDR 0x1b65
+#define MT6363_RG_LDO_VRF09_EN_BIT 0
+#define MT6363_RG_LDO_VRF09_LP_BIT 1
+#define MT6363_LDO_VRF09_HW_LP_MODE 0x1b69
+#define MT6363_LDO_VRF09_OP_EN0 0x1b6a
+#define MT6363_RG_LDO_VRF12_ADDR 0x1b73
+#define MT6363_RG_LDO_VRF12_EN_BIT 0
+#define MT6363_RG_LDO_VRF12_LP_BIT 1
+#define MT6363_LDO_VRF12_HW_LP_MODE 0x1b77
+#define MT6363_LDO_VRF12_OP_EN0 0x1b78
+#define MT6363_RG_LDO_VRF13_ADDR 0x1b81
+#define MT6363_RG_LDO_VRF13_EN_BIT 0
+#define MT6363_RG_LDO_VRF13_LP_BIT 1
+#define MT6363_LDO_VRF13_HW_LP_MODE 0x1b85
+#define MT6363_LDO_VRF13_OP_EN0 0x1b86
+#define MT6363_RG_LDO_VRF18_ADDR 0x1b8f
+#define MT6363_RG_LDO_VRF18_EN_BIT 0
+#define MT6363_RG_LDO_VRF18_LP_BIT 1
+#define MT6363_LDO_VRF18_HW_LP_MODE 0x1b93
+#define MT6363_LDO_VRF18_OP_EN0 0x1b94
+#define MT6363_RG_LDO_VRFIO18_ADDR 0x1b9d
+#define MT6363_RG_LDO_VRFIO18_EN_BIT 0
+#define MT6363_RG_LDO_VRFIO18_LP_BIT 1
+#define MT6363_LDO_VRFIO18_HW_LP_MODE 0x1ba1
+#define MT6363_LDO_VRFIO18_OP_EN0 0x1ba2
+#define MT6363_RG_LDO_VTREF18_ADDR 0x1bd7
+#define MT6363_RG_LDO_VTREF18_EN_BIT 0
+#define MT6363_RG_LDO_VTREF18_LP_BIT 1
+#define MT6363_LDO_VTREF18_HW_LP_MODE 0x1bdb
+#define MT6363_LDO_VTREF18_OP_EN0 0x1bdc
+#define MT6363_RG_LDO_VAUX18_ADDR 0x1be5
+#define MT6363_RG_LDO_VAUX18_EN_BIT 0
+#define MT6363_RG_LDO_VAUX18_LP_BIT 1
+#define MT6363_LDO_VAUX18_HW_LP_MODE 0x1be9
+#define MT6363_LDO_VAUX18_OP_EN0 0x1bea
+#define MT6363_RG_LDO_VEMC_ADDR 0x1bf3
+#define MT6363_RG_LDO_VEMC_EN_BIT 0
+#define MT6363_RG_LDO_VEMC_LP_BIT 1
+#define MT6363_LDO_VEMC_HW_LP_MODE 0x1bf7
+#define MT6363_LDO_VEMC_OP_EN0 0x1bf8
+#define MT6363_RG_LDO_VUFS12_ADDR 0x1c01
+#define MT6363_RG_LDO_VUFS12_EN_BIT 0
+#define MT6363_RG_LDO_VUFS12_LP_BIT 1
+#define MT6363_LDO_VUFS12_HW_LP_MODE 0x1c05
+#define MT6363_LDO_VUFS12_OP_EN0 0x1c06
+#define MT6363_RG_LDO_VUFS18_ADDR 0x1c0f
+#define MT6363_RG_LDO_VUFS18_EN_BIT 0
+#define MT6363_RG_LDO_VUFS18_LP_BIT 1
+#define MT6363_LDO_VUFS18_HW_LP_MODE 0x1c13
+#define MT6363_LDO_VUFS18_OP_EN0 0x1c14
+#define MT6363_RG_LDO_VIO18_ADDR 0x1c1d
+#define MT6363_RG_LDO_VIO18_EN_BIT 0
+#define MT6363_RG_LDO_VIO18_LP_BIT 1
+#define MT6363_LDO_VIO18_HW_LP_MODE 0x1c21
+#define MT6363_LDO_VIO18_OP_EN0 0x1c22
+#define MT6363_RG_LDO_VIO075_ADDR 0x1c57
+#define MT6363_RG_LDO_VIO075_EN_BIT 0
+#define MT6363_RG_LDO_VIO075_LP_BIT 1
+#define MT6363_LDO_VIO075_HW_LP_MODE 0x1c5b
+#define MT6363_LDO_VIO075_OP_EN0 0x1c5c
+#define MT6363_RG_LDO_VA12_1_ADDR 0x1c65
+#define MT6363_RG_LDO_VA12_1_EN_BIT 0
+#define MT6363_RG_LDO_VA12_1_LP_BIT 1
+#define MT6363_LDO_VA12_1_HW_LP_MODE 0x1c69
+#define MT6363_LDO_VA12_1_OP_EN0 0x1c6a
+#define MT6363_RG_LDO_VA12_2_ADDR 0x1c73
+#define MT6363_RG_LDO_VA12_2_EN_BIT 0
+#define MT6363_RG_LDO_VA12_2_LP_BIT 1
+#define MT6363_LDO_VA12_2_HW_LP_MODE 0x1c77
+#define MT6363_LDO_VA12_2_OP_EN0 0x1c78
+#define MT6363_RG_LDO_VA15_ADDR 0x1c81
+#define MT6363_RG_LDO_VA15_EN_BIT 0
+#define MT6363_RG_LDO_VA15_LP_BIT 1
+#define MT6363_LDO_VA15_HW_LP_MODE 0x1c85
+#define MT6363_LDO_VA15_OP_EN0 0x1c86
+#define MT6363_RG_LDO_VM18_ADDR 0x1c8f
+#define MT6363_RG_LDO_VM18_EN_BIT 0
+#define MT6363_RG_LDO_VM18_LP_BIT 1
+#define MT6363_LDO_VM18_HW_LP_MODE 0x1c93
+#define MT6363_LDO_VM18_OP_EN0 0x1c94
+#define MT6363_RG_LDO_VCN13_ADDR 0x1cd7
+#define MT6363_RG_LDO_VCN13_EN_BIT 0
+#define MT6363_RG_LDO_VCN13_LP_BIT 1
+#define MT6363_LDO_VCN13_HW_LP_MODE 0x1cdb
+#define MT6363_LDO_VCN13_OP_EN0 0x1ce4
+#define MT6363_LDO_VSRAM_DIGRF_HW_LP_MODE 0x1cf1
+#define MT6363_LDO_VSRAM_DIGRF_OP_EN0 0x1cfa
+#define MT6363_LDO_VSRAM_MDFE_HW_LP_MODE 0x1d5b
+#define MT6363_LDO_VSRAM_MDFE_OP_EN0 0x1d64
+#define MT6363_LDO_VSRAM_MODEM_HW_LP_MODE 0x1d76
+#define MT6363_LDO_VSRAM_MODEM_OP_EN0 0x1d7f
+#define MT6363_RG_LDO_VSRAM_CPUB_ADDR 0x1dd7
+#define MT6363_RG_LDO_VSRAM_CPUB_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUB_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUB_HW_LP_MODE 0x1ddb
+#define MT6363_LDO_VSRAM_CPUB_OP_EN0 0x1de4
+#define MT6363_RG_LDO_VSRAM_CPUM_ADDR 0x1ded
+#define MT6363_RG_LDO_VSRAM_CPUM_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUM_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUM_HW_LP_MODE 0x1df1
+#define MT6363_LDO_VSRAM_CPUM_OP_EN0 0x1dfa
+#define MT6363_RG_LDO_VSRAM_CPUL_ADDR 0x1e57
+#define MT6363_RG_LDO_VSRAM_CPUL_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUL_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUL_HW_LP_MODE 0x1e5b
+#define MT6363_LDO_VSRAM_CPUL_OP_EN0 0x1e64
+#define MT6363_RG_LDO_VSRAM_APU_ADDR 0x1e6d
+#define MT6363_RG_LDO_VSRAM_APU_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_APU_LP_BIT 1
+#define MT6363_LDO_VSRAM_APU_HW_LP_MODE 0x1e71
+#define MT6363_LDO_VSRAM_APU_OP_EN0 0x1e7a
+#define MT6363_RG_VTREF18_VOCAL_ADDR 0x1ed8
+#define MT6363_RG_VTREF18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VTREF18_VOSEL_ADDR 0x1ed9
+#define MT6363_RG_VTREF18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VAUX18_VOCAL_ADDR 0x1edc
+#define MT6363_RG_VAUX18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VAUX18_VOSEL_ADDR 0x1edd
+#define MT6363_RG_VAUX18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VCN15_VOCAL_ADDR 0x1ee3
+#define MT6363_RG_VCN15_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VCN15_VOSEL_ADDR 0x1ee4
+#define MT6363_RG_VCN15_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS18_VOCAL_ADDR 0x1ee7
+#define MT6363_RG_VUFS18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS18_VOSEL_ADDR 0x1ee8
+#define MT6363_RG_VUFS18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO18_VOCAL_ADDR 0x1eeb
+#define MT6363_RG_VIO18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO18_VOSEL_ADDR 0x1eec
+#define MT6363_RG_VIO18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VM18_VOCAL_ADDR 0x1eef
+#define MT6363_RG_VM18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VM18_VOSEL_ADDR 0x1ef0
+#define MT6363_RG_VM18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA15_VOCAL_ADDR 0x1ef3
+#define MT6363_RG_VA15_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA15_VOSEL_ADDR 0x1ef4
+#define MT6363_RG_VA15_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF18_VOCAL_ADDR 0x1ef7
+#define MT6363_RG_VRF18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF18_VOSEL_ADDR 0x1ef8
+#define MT6363_RG_VRF18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRFIO18_VOCAL_ADDR 0x1efb
+#define MT6363_RG_VRFIO18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRFIO18_VOSEL_ADDR 0x1efc
+#define MT6363_RG_VRFIO18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO075_VOCFG_ADDR 0x1f01
+#define MT6363_RG_VIO075_VOCAL_ADDR MT6363_RG_VIO075_VOCFG_ADDR
+#define MT6363_RG_VIO075_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO075_VOSEL_ADDR MT6363_RG_VIO075_VOCFG_ADDR
+#define MT6363_RG_VIO075_VOSEL_MASK GENMASK(6, 4)
+#define MT6363_RG_VCN13_VOCAL_ADDR 0x1f58
+#define MT6363_RG_VCN13_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS12_VOCAL_ADDR 0x1f61
+#define MT6363_RG_VUFS12_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS12_VOSEL_ADDR 0x1f62
+#define MT6363_RG_VUFS12_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_1_VOCAL_ADDR 0x1f65
+#define MT6363_RG_VA12_1_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_1_VOSEL_ADDR 0x1f66
+#define MT6363_RG_VA12_1_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_2_VOCAL_ADDR 0x1f69
+#define MT6363_RG_VA12_2_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_2_VOSEL_ADDR 0x1f6a
+#define MT6363_RG_VA12_2_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF12_VOCAL_ADDR 0x1f6d
+#define MT6363_RG_VRF12_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF12_VOSEL_ADDR 0x1f6e
+#define MT6363_RG_VRF12_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF13_VOCAL_ADDR 0x1f71
+#define MT6363_RG_VRF13_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF13_VOSEL_ADDR 0x1f72
+#define MT6363_RG_VRF13_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF09_VOCAL_ADDR 0x1f78
+#define MT6363_RG_VRF09_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF09_VOSEL_ADDR 0x1f79
+#define MT6363_RG_VRF09_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_ISINK_EN_CTRL0 0x21db
+#define MT6363_ISINK_CTRL0_MASK GENMASK(7, 0)
+#define MT6363_ISINK_EN_CTRL1 0x21dc
+#define MT6363_ISINK_CTRL1_MASK GENMASK(7, 4)
+
+#endif /* __LINUX_REGULATOR_MT6363_H */
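
The MT6363 register map above is consumed through regmap. A hedged one-liner showing how a driver might program a buck selector with these masks; "map" and "sel" are assumed to come from the caller:

	#include <linux/regmap.h>
	#include <linux/regulator/mt6363-regulator.h>

	static int example_set_vbuck1(struct regmap *map, unsigned int sel)
	{
		return regmap_update_bits(map, MT6363_RG_BUCK_VBUCK1_VOSEL_ADDR,
					  MT6363_RG_BUCK_VBUCK1_VOSEL_MASK, sel);
	}
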
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index 243633c8dceb..0df8b3c48082 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -10,6 +10,7 @@ enum pca9450_chip_type {
PCA9450_TYPE_PCA9450A = 0,
PCA9450_TYPE_PCA9450BC,
PCA9450_TYPE_PCA9451A,
+ PCA9450_TYPE_PCA9452,
PCA9450_TYPE_AMOUNT,
};
@@ -34,6 +35,8 @@ enum {
PCA9450_DVS_LEVEL_MAX,
};
+#define PCA9450_RESTART_HANDLER_PRIORITY 130
+
#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
@@ -220,12 +223,44 @@ enum {
#define IRQ_THERM_105 0x02
#define IRQ_THERM_125 0x01
+/* PCA9450_REG_PWRCTRL bits */
+#define T_ON_DEB_MASK 0xC0
+#define T_ON_DEB_120US (0 << 6)
+#define T_ON_DEB_20MS (1 << 6)
+#define T_ON_DEB_100MS (2 << 6)
+#define T_ON_DEB_750MS (3 << 6)
+#define T_OFF_DEB_MASK 0x20
+#define T_OFF_DEB_120US (0 << 5)
+#define T_OFF_DEB_2MS (1 << 5)
+#define T_ON_STEP_MASK 0x18
+#define T_ON_STEP_1MS (0 << 3)
+#define T_ON_STEP_2MS (1 << 3)
+#define T_ON_STEP_4MS (2 << 3)
+#define T_ON_STEP_8MS (3 << 3)
+#define T_OFF_STEP_MASK 0x06
+#define T_OFF_STEP_2MS (0 << 1)
+#define T_OFF_STEP_4MS (1 << 1)
+#define T_OFF_STEP_8MS (2 << 1)
+#define T_OFF_STEP_16MS (3 << 1)
+#define T_RESTART_MASK 0x01
+#define T_RESTART_250MS 0
+#define T_RESTART_500MS 1
+
/* PCA9450_REG_RESET_CTRL bits */
#define WDOG_B_CFG_MASK 0xC0
#define WDOG_B_CFG_NONE 0x00
#define WDOG_B_CFG_WARM 0x40
#define WDOG_B_CFG_COLD_LDO12 0x80
#define WDOG_B_CFG_COLD 0xC0
+#define T_PMIC_RST_DEB_MASK 0x07
+#define T_PMIC_RST_DEB_10MS 0x00
+#define T_PMIC_RST_DEB_50MS 0x01
+#define T_PMIC_RST_DEB_100MS 0x02
+#define T_PMIC_RST_DEB_500MS 0x03
+#define T_PMIC_RST_DEB_1S 0x04
+#define T_PMIC_RST_DEB_2S 0x05
+#define T_PMIC_RST_DEB_4S 0x06
+#define T_PMIC_RST_DEB_8S 0x07
/* PCA9450_REG_CONFIG2 bits */
#define I2C_LT_MASK 0x03
@@ -234,4 +269,7 @@ enum {
#define I2C_LT_ON_RUN 0x02
#define I2C_LT_FORCE_ENABLE 0x03
+/* PCA9450_REG_SW_RST command */
+#define SW_RST_COMMAND 0x14
+
#endif /* __LINUX_REG_PCA9450_H__ */
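
A sketch of how the new PWRCTRL debounce/step fields compose; PCA9450_REG_PWRCTRL is assumed to be defined elsewhere in this header, and the 20 ms / 2 ms choices are arbitrary:

	#include <linux/regmap.h>
	#include <linux/regulator/pca9450.h>

	static int example_pwrctrl(struct regmap *map)
	{
		return regmap_update_bits(map, PCA9450_REG_PWRCTRL,
					  T_ON_DEB_MASK | T_ON_STEP_MASK,
					  T_ON_DEB_20MS | T_ON_STEP_2MS);
	}
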
diff --git a/include/linux/regulator/s2dos05.h b/include/linux/regulator/s2dos05.h
new file mode 100644
index 000000000000..2e89fcbce769
--- /dev/null
+++ b/include/linux/regulator/s2dos05.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// s2dos05.h
+//
+// Copyright (c) 2016 Samsung Electronics Co., Ltd
+// http://www.samsung.com
+// Copyright (C) 2024 Dzmitry Sankouski <dsankouski@gmail.com>
+
+#ifndef __LINUX_S2DOS05_H
+#define __LINUX_S2DOS05_H
+
+// S2DOS05 registers
+// Slave Addr : 0xC0
+enum S2DOS05_reg {
+ S2DOS05_REG_DEV_ID,
+ S2DOS05_REG_TOPSYS_STAT,
+ S2DOS05_REG_STAT,
+ S2DOS05_REG_EN,
+ S2DOS05_REG_LDO1_CFG,
+ S2DOS05_REG_LDO2_CFG,
+ S2DOS05_REG_LDO3_CFG,
+ S2DOS05_REG_LDO4_CFG,
+ S2DOS05_REG_BUCK_CFG,
+ S2DOS05_REG_BUCK_VOUT,
+ S2DOS05_REG_IRQ_MASK = 0x0D,
+ S2DOS05_REG_SSD_TSD = 0x0E,
+ S2DOS05_REG_OCL = 0x10,
+ S2DOS05_REG_IRQ = 0x11
+};
+
+// S2DOS05 regulator ids
+enum S2DOS05_regulators {
+ S2DOS05_LDO1,
+ S2DOS05_LDO2,
+ S2DOS05_LDO3,
+ S2DOS05_LDO4,
+ S2DOS05_BUCK1,
+ S2DOS05_REG_MAX,
+};
+
+#define S2DOS05_IRQ_PWRMT_MASK BIT(5)
+#define S2DOS05_IRQ_TSD_MASK BIT(4)
+#define S2DOS05_IRQ_SSD_MASK BIT(3)
+#define S2DOS05_IRQ_SCP_MASK BIT(2)
+#define S2DOS05_IRQ_UVLO_MASK BIT(1)
+#define S2DOS05_IRQ_OCD_MASK BIT(0)
+
+#define S2DOS05_BUCK_MIN1 506250
+#define S2DOS05_LDO_MIN1 1500000
+#define S2DOS05_LDO_MIN2 2700000
+#define S2DOS05_BUCK_STEP1 6250
+#define S2DOS05_LDO_STEP1 25000
+#define S2DOS05_LDO_VSEL_MASK 0x7F
+#define S2DOS05_LDO_FD_MASK 0x80
+#define S2DOS05_BUCK_VSEL_MASK 0xFF
+#define S2DOS05_BUCK_FD_MASK 0x08
+
+#define S2DOS05_ENABLE_MASK_L1 BIT(0)
+#define S2DOS05_ENABLE_MASK_L2 BIT(1)
+#define S2DOS05_ENABLE_MASK_L3 BIT(2)
+#define S2DOS05_ENABLE_MASK_L4 BIT(3)
+#define S2DOS05_ENABLE_MASK_B1 BIT(4)
+
+#define S2DOS05_RAMP_DELAY 12000
+
+#define S2DOS05_ENABLE_TIME_LDO 50
+#define S2DOS05_ENABLE_TIME_BUCK 350
+
+#define S2DOS05_LDO_N_VOLTAGES (S2DOS05_LDO_VSEL_MASK + 1)
+#define S2DOS05_BUCK_N_VOLTAGES (S2DOS05_BUCK_VSEL_MASK + 1)
+
+#define S2DOS05_REGULATOR_MAX (S2DOS05_REG_MAX)
+
+#endif // __LINUX_S2DOS05_H
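
These constants slot directly into a linear-range regulator_desc; with the values above, buck selector 32 resolves to 506250 + 32 * 6250 = 706250 uV. A hedged sketch with the ops table omitted and the name invented:

	#include <linux/regulator/driver.h>
	#include <linux/regulator/s2dos05.h>

	static const struct regulator_desc example_buck = {
		.name        = "s2dos05-buck1",
		.min_uV      = S2DOS05_BUCK_MIN1,
		.uV_step     = S2DOS05_BUCK_STEP1,
		.n_voltages  = S2DOS05_BUCK_N_VOLTAGES,
		.vsel_reg    = S2DOS05_REG_BUCK_VOUT,
		.vsel_mask   = S2DOS05_BUCK_VSEL_MASK,
		.enable_reg  = S2DOS05_REG_EN,
		.enable_mask = S2DOS05_ENABLE_MASK_B1,
		.ramp_delay  = S2DOS05_RAMP_DELAY,
		.enable_time = S2DOS05_ENABLE_TIME_BUCK,
	};
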
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 72b876dd5cb8..6772a7075840 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -29,6 +29,22 @@
#define RELAYFS_CHANNEL_VERSION 7
/*
+ * Relay buffer statistics
+ */
+enum {
+ RELAY_STATS_BUF_FULL = (1 << 0),
+ RELAY_STATS_WRT_BIG = (1 << 1),
+
+ RELAY_STATS_LAST = RELAY_STATS_WRT_BIG,
+};
+
+struct rchan_buf_stats
+{
+ unsigned int full_count; /* counter for buffer full */
+ unsigned int big_count; /* counter for too big to write */
+};
+
+/*
* Per-cpu relay channel buffer
*/
struct rchan_buf
@@ -43,11 +59,11 @@ struct rchan_buf
struct irq_work wakeup_work; /* reader wakeup */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
+ struct rchan_buf_stats stats; /* buffer stats */
struct page **page_array; /* array of current buffer pages */
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
size_t *padding; /* padding counts per sub-buffer */
- size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
size_t early_bytes; /* bytes consumed before VFS inited */
unsigned int cpu; /* this buf's cpu */
@@ -65,7 +81,6 @@ struct rchan
const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
- size_t last_toobig; /* tried to log event > subbuf size */
struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
@@ -84,7 +99,6 @@ struct rchan_callbacks
* @buf: the channel buffer containing the new sub-buffer
* @subbuf: the start of the new sub-buffer
* @prev_subbuf: the start of the previous sub-buffer
- * @prev_padding: unused space at the end of previous sub-buffer
*
* The client should return 1 to continue logging, 0 to stop
* logging.
@@ -100,8 +114,7 @@ struct rchan_callbacks
*/
int (*subbuf_start) (struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding);
+ void *prev_subbuf);
/*
* create_buf_file - create file to represent a relay channel buffer
@@ -159,11 +172,9 @@ struct rchan *relay_open(const char *base_filename,
size_t n_subbufs,
const struct rchan_callbacks *cb,
void *private_data);
-extern int relay_late_setup_files(struct rchan *chan,
- const char *base_filename,
- struct dentry *parent);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
+size_t relay_stats(struct rchan *chan, int flags);
extern void relay_subbufs_consumed(struct rchan *chan,
unsigned int cpu,
size_t consumed);
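
The subbuf_start() signature loses prev_padding (the per-sub-buffer padding array already carries that information), and the new relay_stats() replaces the ad-hoc last_toobig counter. A client-side sketch of the updated interface:

	#include <linux/printk.h>
	#include <linux/relay.h>

	static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
					void *prev_subbuf)
	{
		return !relay_buf_full(buf);	/* log while space remains */
	}

	static void example_report(struct rchan *chan)
	{
		pr_info("relay: %zu buffer-full events, %zu oversized writes\n",
			relay_stats(chan, RELAY_STATS_BUF_FULL),
			relay_stats(chan, RELAY_STATS_WRT_BIG));
	}
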
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index a365f67131ec..54701668b3df 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,9 +2,15 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H
+#include <linux/cacheinfo.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pid.h>
+#include <linux/resctrl_types.h>
+
+#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
+#include <asm/resctrl.h>
+#endif
/* CLOSID, RMID value used by the default control group */
#define RESCTRL_RESERVED_CLOSID 0
@@ -24,6 +30,34 @@ int proc_resctrl_show(struct seq_file *m,
/* max value for struct rdt_domain's mbps_val */
#define MBA_MAX_MBPS U32_MAX
+/* Walk all possible resources, with variants for only controls or monitors. */
+#define for_each_rdt_resource(_r) \
+ for ((_r) = resctrl_arch_get_resource(0); \
+ (_r) && (_r)->rid < RDT_NUM_RESOURCES; \
+ (_r) = resctrl_arch_get_resource((_r)->rid + 1))
+
+#define for_each_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->alloc_capable || (r)->mon_capable)
+
+#define for_each_alloc_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->alloc_capable)
+
+#define for_each_mon_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->mon_capable)
+
+enum resctrl_res_level {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L2,
+ RDT_RESOURCE_MBA,
+ RDT_RESOURCE_SMBA,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
/**
* enum resctrl_conf_type - The type of configuration.
* @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
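
The resource-walk macros above take the place of architecture-private list walkers. A minimal sketch of their use:

	#include <linux/printk.h>
	#include <linux/resctrl.h>

	static void example_walk_resources(void)
	{
		struct rdt_resource *r;

		for_each_mon_capable_rdt_resource(r)
			pr_info("resctrl: %s is monitor-capable\n", r->name);
	}
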
@@ -39,13 +73,42 @@ enum resctrl_conf_type {
#define CDP_NUM_TYPES (CDP_DATA + 1)
/*
- * Event IDs, the values match those used to program IA32_QM_EVTSEL before
- * reading IA32_QM_CTR on RDT systems.
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @s: Resctrl schema for the resource to which this
+ * pseudo-locked region belongs
+ * @closid: The closid that this pseudo-locked region uses
+ * @d: RDT domain to which this pseudo-locked region
+ * belongs
+ * @cbm: bitmask of the pseudo-locked region
+ * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
+ * completion
+ * @thread_done: variable used by waitqueue to test if pseudo-locking
+ * thread completed
+ * @cpu: core associated with the cache on which the setup code
+ * will be run
+ * @line_size: size of the cache lines
+ * @size: size of pseudo-locked region in bytes
+ * @kmem: the kernel memory associated with pseudo-locked region
+ * @minor: minor number of character device associated with this
+ * region
+ * @debugfs_dir: pointer to this region's directory in the debugfs
+ * filesystem
+ * @pm_reqs: Power management QoS requests related to this region
*/
-enum resctrl_event_id {
- QOS_L3_OCCUP_EVENT_ID = 0x01,
- QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
- QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+struct pseudo_lock_region {
+ struct resctrl_schema *s;
+ u32 closid;
+ struct rdt_ctrl_domain *d;
+ u32 cbm;
+ wait_queue_head_t lock_thread_wq;
+ int thread_done;
+ int cpu;
+ unsigned int line_size;
+ unsigned int size;
+ void *kmem;
+ unsigned int minor;
+ struct dentry *debugfs_dir;
+ struct list_head pm_reqs;
};
/**
@@ -58,38 +121,78 @@ struct resctrl_staged_config {
bool have_new_ctrl;
};
+enum resctrl_domain_type {
+ RESCTRL_CTRL_DOMAIN,
+ RESCTRL_MON_DOMAIN,
+};
+
/**
- * struct rdt_domain - group of CPUs sharing a resctrl resource
+ * struct rdt_domain_hdr - common header for different domain types
* @list: all instances of this resource
* @id: unique id for this instance
+ * @type: type of this instance
* @cpu_mask: which CPUs share this resource
- * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
- * @mbm_total: saved state for MBM total bandwidth
- * @mbm_local: saved state for MBM local bandwidth
- * @mbm_over: worker to periodically read MBM h/w counters
- * @cqm_limbo: worker to periodically read CQM h/w counters
- * @mbm_work_cpu: worker CPU for MBM h/w counters
- * @cqm_work_cpu: worker CPU for CQM h/w counters
+ */
+struct rdt_domain_hdr {
+ struct list_head list;
+ int id;
+ enum resctrl_domain_type type;
+ struct cpumask cpu_mask;
+};
+
+/**
+ * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
+ * @hdr: common header for different domain types
* @plr: pseudo-locked region (if any) associated with domain
* @staged_config: parsed configuration to be applied
* @mbps_val: When mba_sc is enabled, this holds the array of user
* specified control values for mba_sc in MBps, indexed
* by closid
*/
-struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
+struct rdt_ctrl_domain {
+ struct rdt_domain_hdr hdr;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct mbm_cntr_cfg - Assignable counter configuration.
+ * @evtid: MBM event to which the counter is assigned. Only valid
+ * if @rdtgroup is not NULL.
+ * @rdtgrp: resctrl group assigned to the counter. NULL if the
+ * counter is free.
+ */
+struct mbm_cntr_cfg {
+ enum resctrl_event_id evtid;
+ struct rdtgroup *rdtgrp;
+};
+
+/**
+ * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * @hdr: common header for different domain types
+ * @ci_id: cache info id for this domain
+ * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
+ * @mbm_states: Per-event pointer to the MBM event's saved state.
+ * An MBM event's state is an array of struct mbm_state
+ * indexed by RMID on x86 or combined CLOSID, RMID on Arm.
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu: worker CPU for MBM h/w counters
+ * @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @cntr_cfg: array of assignable counters' configuration (indexed
+ * by counter ID)
+ */
+struct rdt_mon_domain {
+ struct rdt_domain_hdr hdr;
+ unsigned int ci_id;
unsigned long *rmid_busy_llc;
- struct mbm_state *mbm_total;
- struct mbm_state *mbm_local;
+ struct mbm_state *mbm_states[QOS_NUM_L3_MBM_EVENTS];
struct delayed_work mbm_over;
struct delayed_work cqm_limbo;
int mbm_work_cpu;
int cqm_work_cpu;
- struct pseudo_lock_region *plr;
- struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
- u32 *mbps_val;
+ struct mbm_cntr_cfg *cntr_cfg;
};
/**
@@ -103,6 +206,8 @@ struct rdt_domain {
* @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
* @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
* level has CPU scope.
+ * @io_alloc_capable: True if portion of the cache can be configured
+ * for I/O traffic.
*/
struct resctrl_cache {
unsigned int cbm_len;
@@ -110,6 +215,7 @@ struct resctrl_cache {
unsigned int shareable_bits;
bool arch_has_sparse_bitmasks;
bool arch_has_per_cpu_cfg;
+ bool io_alloc_capable;
};
/**
@@ -129,6 +235,7 @@ enum membw_throttle_mode {
/**
* struct resctrl_membw - Memory bandwidth allocation related data
* @min_bw: Minimum memory bandwidth percentage user can request
+ * @max_bw: Maximum memory bandwidth value, used as the reset value
* @bw_gran: Granularity at which the memory bandwidth is allocated
* @delay_linear: True if memory B/W delay is in linear scale
* @arch_needs_linear: True if we can't configure non-linear resources
@@ -139,6 +246,7 @@ enum membw_throttle_mode {
*/
struct resctrl_membw {
u32 min_bw;
+ u32 max_bw;
u32 bw_gran;
u32 delay_linear;
bool arch_needs_linear;
@@ -147,54 +255,87 @@ struct resctrl_membw {
u32 *mb_map;
};
-struct rdt_parse_data;
struct resctrl_schema;
+enum resctrl_scope {
+ RESCTRL_L2_CACHE = 2,
+ RESCTRL_L3_CACHE = 3,
+ RESCTRL_L3_NODE,
+};
+
+/**
+ * enum resctrl_schema_fmt - The format user-space provides for a schema.
+ * @RESCTRL_SCHEMA_BITMAP: The schema is a bitmap in hex.
+ * @RESCTRL_SCHEMA_RANGE: The schema is a decimal number.
+ */
+enum resctrl_schema_fmt {
+ RESCTRL_SCHEMA_BITMAP,
+ RESCTRL_SCHEMA_RANGE,
+};
+
+/**
+ * struct resctrl_mon - Monitoring related data of a resctrl resource.
+ * @num_rmid: Number of RMIDs available.
+ * @mbm_cfg_mask: Memory transactions that can be tracked when bandwidth
+ * monitoring events can be configured.
+ * @num_mbm_cntrs: Number of assignable counters.
+ * @mbm_cntr_assignable: Is system capable of supporting counter assignment?
+ * @mbm_assign_on_mkdir: True if counters should automatically be assigned to MBM
REPLACED-BY-NEXT-EDIT
+ * events of monitor groups created via mkdir.
+ */
+struct resctrl_mon {
+ int num_rmid;
+ unsigned int mbm_cfg_mask;
+ int num_mbm_cntrs;
+ bool mbm_cntr_assignable;
+ bool mbm_assign_on_mkdir;
+};
+
/**
* struct rdt_resource - attributes of a resctrl resource
* @rid: The index of the resource
* @alloc_capable: Is allocation available on this machine
* @mon_capable: Is monitor feature available on this machine
- * @num_rmid: Number of RMIDs available
- * @cache_level: Which cache level defines scope of this resource
+ * @ctrl_scope: Scope of this resource for control functions
+ * @mon_scope: Scope of this resource for monitor functions
* @cache: Cache allocation related data
* @membw: If the component has bandwidth controls, their properties.
- * @domains: RCU list of all domains for this resource
+ * @mon: Monitoring related data.
+ * @ctrl_domains: RCU list of all control domains for this resource
+ * @mon_domains: RCU list of all monitor domains for this resource
* @name: Name to use in "schemata" file.
- * @data_width: Character width of data when displaying
- * @default_ctrl: Specifies default cache cbm or memory B/W percent.
- * @format_str: Per resource format string to show domain value
- * @parse_ctrlval: Per resource function pointer to parse control values
- * @evt_list: List of monitoring events
- * @fflags: flags to choose base and info files
+ * @schema_fmt: Which format string and parser is used for this schema.
* @cdp_capable: Is the CDP feature available on this resource
*/
struct rdt_resource {
int rid;
bool alloc_capable;
bool mon_capable;
- int num_rmid;
- int cache_level;
+ enum resctrl_scope ctrl_scope;
+ enum resctrl_scope mon_scope;
struct resctrl_cache cache;
struct resctrl_membw membw;
- struct list_head domains;
+ struct resctrl_mon mon;
+ struct list_head ctrl_domains;
+ struct list_head mon_domains;
char *name;
- int data_width;
- u32 default_ctrl;
- const char *format_str;
- int (*parse_ctrlval)(struct rdt_parse_data *data,
- struct resctrl_schema *s,
- struct rdt_domain *d);
- struct list_head evt_list;
- unsigned long fflags;
+ enum resctrl_schema_fmt schema_fmt;
bool cdp_capable;
};
+/*
+ * Get the resource that exists at this level. If the level is not supported
+ * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
+ * will return NULL.
+ */
+struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
+
/**
* struct resctrl_schema - configuration abilities of a resource presented to
* user-space
* @list: Member of resctrl_schema_all.
* @name: The name to use in the "schemata" file.
+ * @fmt_str: Format string to show domain value.
* @conf_type: Whether this schema is specific to code/data.
* @res: The resource structure exported by the architecture to describe
* the hardware that is configured by this schema.
@@ -205,26 +346,161 @@ struct rdt_resource {
struct resctrl_schema {
struct list_head list;
char name[8];
+ const char *fmt_str;
enum resctrl_conf_type conf_type;
struct rdt_resource *res;
u32 num_closid;
};
+struct resctrl_cpu_defaults {
+ u32 closid;
+ u32 rmid;
+};
+
+struct resctrl_mon_config_info {
+ struct rdt_resource *r;
+ struct rdt_mon_domain *d;
+ u32 evtid;
+ u32 mon_config;
+};
+
+/**
+ * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
+ * Call via IPI.
+ * @info: If non-NULL, a pointer to a struct resctrl_cpu_defaults
+ * specifying the new CLOSID and RMID for tasks in the default
+ * resctrl ctrl and mon group when running on this CPU. If NULL,
+ * this CPU is not re-assigned to a different default group.
+ *
+ * Propagates reassignment of CPUs and/or tasks to different resctrl groups
+ * when requested by the resctrl core code.
+ *
+ * This function records the per-cpu defaults specified by @info (if any),
+ * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
+ * execution based on @current, in the same way as during a task switch.
+ */
+void resctrl_arch_sync_cpu_closid_rmid(void *info);
+
+/**
+ * resctrl_get_default_ctrl() - Return the default control value for this
+ * resource.
+ * @r: The resource whose default control type is queried.
+ */
+static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
+{
+ switch (r->schema_fmt) {
+ case RESCTRL_SCHEMA_BITMAP:
+ return BIT_MASK(r->cache.cbm_len) - 1;
+ case RESCTRL_SCHEMA_RANGE:
+ return r->membw.max_bw;
+ }
+
+ return WARN_ON_ONCE(1);
+}
+
/* The number of closid supported by this resource regardless of CDP */
u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+u32 resctrl_arch_system_num_rmid_idx(void);
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+
+bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
+
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+
+static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
+{
+ return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
+
+/* Iterate over all memory bandwidth events */
+#define for_each_mbm_event_id(eventid) \
+ for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID; \
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)
+
+/* Iterate over memory bandwidth arrays in domain structures */
+#define for_each_mbm_idx(idx) \
+ for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
+
+/**
+ * resctrl_arch_mon_event_config_write() - Write the config for an event.
+ * @config_info: struct resctrl_mon_config_info describing the resource, domain
+ * and event.
+ *
+ * Reads resource, domain and eventid from @config_info and writes the
+ * event config_info->mon_config into hardware.
+ *
+ * Called via IPI to reach a CPU that is a member of the specified domain.
+ */
+void resctrl_arch_mon_event_config_write(void *config_info);
+
+/**
+ * resctrl_arch_mon_event_config_read() - Read the config for an event.
+ * @config_info: struct resctrl_mon_config_info describing the resource, domain
+ * and event.
+ *
+ * Reads resource, domain and eventid from @config_info and reads the
+ * hardware config value into config_info->mon_config.
+ *
+ * Called via IPI to reach a CPU that is a member of the specified domain.
+ */
+void resctrl_arch_mon_event_config_read(void *config_info);
+
+/* For use by arch code to remap resctrl's smaller CDP CLOSID range */
+static inline u32 resctrl_get_config_index(u32 closid,
+ enum resctrl_conf_type type)
+{
+ switch (type) {
+ default:
+ case CDP_NONE:
+ return closid;
+ case CDP_CODE:
+ return closid * 2 + 1;
+ case CDP_DATA:
+ return closid * 2;
+ }
+}
+
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
+ * mode is enabled.
+ * @r: Pointer to the resource structure.
+ *
+ * Return:
+ * true if the assignment mode is enabled, false otherwise.
+ */
+bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
+ * @r: Pointer to the resource structure.
+ * @enable: Set to true to enable, false to disable the assignment mode.
+ *
+ * Return:
+ * 0 on success, < 0 on error.
+ */
+int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
+
/*
* Update the ctrl_val and apply this config right now.
* Must be called on one of the domain's CPUs.
*/
-int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val);
-u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type);
-int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d);
-void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d);
+int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
void resctrl_online_cpu(unsigned int cpu);
void resctrl_offline_cpu(unsigned int cpu);
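
Taken together, resctrl_get_default_ctrl() and resctrl_arch_update_one() are enough to reset one CLOSID of a schema. A hedged sketch; the schema and domain pointers are assumed to come from the resctrl core:

	#include <linux/resctrl.h>

	static int example_reset_closid(struct resctrl_schema *s,
					struct rdt_ctrl_domain *d, u32 closid)
	{
		u32 def = resctrl_get_default_ctrl(s->res);

		/* Must run on one of the domain's CPUs, per the comment above. */
		return resctrl_arch_update_one(s->res, d, closid, s->conf_type, def);
	}
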
@@ -253,7 +529,7 @@ void resctrl_offline_cpu(unsigned int cpu);
* Return:
* 0 on success, or -EIO, -EINVAL etc on error.
*/
-int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 closid, u32 rmid, enum resctrl_event_id eventid,
u64 *val, void *arch_mon_ctx);
@@ -275,6 +551,20 @@ static inline void resctrl_arch_rmid_read_context_check(void)
}
/**
+ * resctrl_find_domain() - Search for a domain id in a resource domain list.
+ * @h: The domain list to search.
+ * @id: The domain id to search for.
+ * @pos: A pointer to position in the list id should be inserted.
+ *
+ * Search the domain list to find the domain id. If the domain id is
+ * found, return the domain. NULL otherwise. If the domain id is not
+ * found (and NULL returned) then the first domain with id bigger than
+ * the input id can be returned to the caller via @pos.
+ */
+struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
+ struct list_head **pos);
+
+/**
* resctrl_arch_reset_rmid() - Reset any private state associated with rmid
* and eventid.
* @r: The domain's resource.
@@ -286,7 +576,7 @@ static inline void resctrl_arch_rmid_read_context_check(void)
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
u32 closid, u32 rmid,
enum resctrl_event_id eventid);
@@ -299,9 +589,112 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
*
* This can be called from any CPU.
*/
-void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d);
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
+
+/**
+ * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
+ * default.
+ * @r: The resctrl resource to reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_config_cntr() - Configure the counter with its new RMID
+ * and event details.
+ * @r: Resource structure.
+ * @d: The domain in which counter with ID @cntr_id should be configured.
+ * @evtid: Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
+ * or QOS_L3_MBM_LOCAL_EVENT_ID).
+ * @rmid: RMID.
+ * @closid: CLOSID.
+ * @cntr_id: Counter ID to configure.
+ * @assign: True to assign the counter or update an existing assignment,
+ * false to unassign the counter.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ enum resctrl_event_id evtid, u32 rmid, u32 closid,
+ u32 cntr_id, bool assign);
+
+/**
+ * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
+ * assigned to the RMID, event pair for this resource
+ * and domain.
+ * @r: Resource that the counter should be read from.
+ * @d: Domain that the counter should be read from.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to read.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ * @val: Result of the counter read in bytes.
+ *
+ * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
+ * Called from a non-migrateable process context via smp_call_on_cpu() unless all
+ * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
+ * @r: The domain's resource.
+ * @d: The counter ID's domain.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to reset.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid);
+
+/**
+ * resctrl_arch_io_alloc_enable() - Enable/disable io_alloc feature.
+ * @r: The resctrl resource.
+ * @enable: Enable (true) or disable (false) io_alloc on resource @r.
+ *
+ * This can be called from any CPU.
+ *
+ * Return:
+ * 0 on success, <0 on error.
+ */
+int resctrl_arch_io_alloc_enable(struct rdt_resource *r, bool enable);
+
+/**
+ * resctrl_arch_get_io_alloc_enabled() - Get io_alloc feature state.
+ * @r: The resctrl resource.
+ *
+ * Return:
+ * true if io_alloc is enabled or false if disabled.
+ */
+bool resctrl_arch_get_io_alloc_enabled(struct rdt_resource *r);
extern unsigned int resctrl_rmid_realloc_threshold;
extern unsigned int resctrl_rmid_realloc_limit;
+int resctrl_init(void);
+void resctrl_exit(void);
+
+#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_plr);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
+#else
+static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
+static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
+#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
#endif /* _RESCTRL_H */
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
new file mode 100644
index 000000000000..acfe07860b34
--- /dev/null
+++ b/include/linux/resctrl_types.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Arm Ltd.
+ * Based on arch/x86/kernel/cpu/resctrl/internal.h
+ */
+
+#ifndef __LINUX_RESCTRL_TYPES_H
+#define __LINUX_RESCTRL_TYPES_H
+
+#define MAX_MBA_BW 100u
+#define MBM_OVERFLOW_INTERVAL 1000
+
+/* Reads to Local DRAM Memory */
+#define READS_TO_LOCAL_MEM BIT(0)
+
+/* Reads to Remote DRAM Memory */
+#define READS_TO_REMOTE_MEM BIT(1)
+
+/* Non-Temporal Writes to Local Memory */
+#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2)
+
+/* Non-Temporal Writes to Remote Memory */
+#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3)
+
+/* Reads to Local Memory the system identifies as "Slow Memory" */
+#define READS_TO_LOCAL_S_MEM BIT(4)
+
+/* Reads to Remote Memory the system identifies as "Slow Memory" */
+#define READS_TO_REMOTE_S_MEM BIT(5)
+
+/* Dirty Victims to All Types of Memory */
+#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6)
+
+/* Max event bits supported */
+#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
+
+/* Number of memory transactions that an MBM event can be configured with */
+#define NUM_MBM_TRANSACTIONS 7
+
+/* Event IDs */
+enum resctrl_event_id {
+ /* Must match value of first event below */
+ QOS_FIRST_EVENT = 0x01,
+
+ /*
+ * These values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+
+ /* Must be the last */
+ QOS_NUM_EVENTS,
+};
+
+#define QOS_NUM_L3_MBM_EVENTS (QOS_L3_MBM_LOCAL_EVENT_ID - QOS_L3_MBM_TOTAL_EVENT_ID + 1)
+#define MBM_STATE_IDX(evt) ((evt) - QOS_L3_MBM_TOTAL_EVENT_ID)
+
+#endif /* __LINUX_RESCTRL_TYPES_H */
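
QOS_NUM_L3_MBM_EVENTS sizes the per-domain mbm_states[] array seen earlier in resctrl.h, and MBM_STATE_IDX() maps an event ID onto a slot. A sketch of the lookup:

	#include <linux/resctrl.h>

	static struct mbm_state *example_mbm_state(struct rdt_mon_domain *d,
						   enum resctrl_event_id evt)
	{
		/* Valid only for the two MBM events, IDs 0x02 and 0x03. */
		return d->mbm_states[MBM_STATE_IDX(evt)];
	}
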
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 357df16ede32..46514cb1b9e0 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -27,31 +27,6 @@ struct device_node;
struct of_phandle_args;
/**
- * struct reset_control_lookup - represents a single lookup entry
- *
- * @list: internal list of all reset lookup entries
- * @provider: name of the reset controller device controlling this reset line
- * @index: ID of the reset controller in the reset controller device
- * @dev_id: name of the device associated with this reset line
- * @con_id: name of the reset line (can be NULL)
- */
-struct reset_control_lookup {
- struct list_head list;
- const char *provider;
- unsigned int index;
- const char *dev_id;
- const char *con_id;
-};
-
-#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \
- { \
- .provider = _provider, \
- .index = _index, \
- .dev_id = _dev_id, \
- .con_id = _con_id, \
- }
-
-/**
* struct reset_controller_dev - reset controller entity that might
* provide multiple reset controls
* @ops: a pointer to device specific struct reset_control_ops
@@ -90,9 +65,6 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev);
struct device;
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev);
-
-void reset_controller_add_lookup(struct reset_control_lookup *lookup,
- unsigned int num_entries);
#else
static inline int reset_controller_register(struct reset_controller_dev *rcdev)
{
@@ -108,11 +80,6 @@ static inline int devm_reset_controller_register(struct device *dev,
{
return 0;
}
-
-static inline void reset_controller_add_lookup(struct reset_control_lookup *lookup,
- unsigned int num_entries)
-{
-}
#endif
#endif
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 514ddf003efc..44f9e3415f92 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
+#include <linux/bits.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -25,6 +26,48 @@ struct reset_control_bulk_data {
struct reset_control *rstc;
};
+#define RESET_CONTROL_FLAGS_BIT_SHARED BIT(0) /* not exclusive */
+#define RESET_CONTROL_FLAGS_BIT_OPTIONAL BIT(1)
+#define RESET_CONTROL_FLAGS_BIT_ACQUIRED BIT(2) /* iff exclusive, not released */
+#define RESET_CONTROL_FLAGS_BIT_DEASSERTED BIT(3)
+
+/**
+ * enum reset_control_flags - Flags that can be passed to the reset_control_get functions
+ * to determine the type of reset control.
+ * These values cannot be OR'd.
+ *
+ * @RESET_CONTROL_EXCLUSIVE: exclusive, acquired,
+ * @RESET_CONTROL_EXCLUSIVE_DEASSERTED: exclusive, acquired, deasserted
+ * @RESET_CONTROL_EXCLUSIVE_RELEASED: exclusive, released,
+ * @RESET_CONTROL_SHARED: shared
+ * @RESET_CONTROL_SHARED_DEASSERTED: shared, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE: optional, exclusive, acquired
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED: optional, exclusive, acquired, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED: optional, exclusive, released
+ * @RESET_CONTROL_OPTIONAL_SHARED: optional, shared
+ * @RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED: optional, shared, deasserted
+ */
+enum reset_control_flags {
+ RESET_CONTROL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_EXCLUSIVE_RELEASED = 0,
+ RESET_CONTROL_SHARED = RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = RESET_CONTROL_FLAGS_BIT_OPTIONAL,
+ RESET_CONTROL_OPTIONAL_SHARED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+};
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
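
Each public getter now maps onto exactly one enum value instead of the old shared/optional/acquired bool triple, as the wrapper changes below show; consumers are unaffected. A hedged sketch with an assumed "core" reset line:

	#include <linux/err.h>
	#include <linux/reset.h>

	static int example_get_reset(struct device *dev)
	{
		/* Expands to __reset_control_get(dev, "core", 0,
		 * RESET_CONTROL_SHARED) after this change.
		 */
		struct reset_control *rstc = reset_control_get_shared(dev, "core");

		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		reset_control_put(rstc);
		return 0;
	}
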
@@ -42,30 +85,25 @@ int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rs
void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
struct reset_control *__of_reset_control_get(struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
- int index, bool shared,
- bool optional, bool acquired);
+ int index, enum reset_control_flags flags);
void reset_control_put(struct reset_control *rstc);
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
struct reset_control *devm_reset_control_array_get(struct device *dev,
- bool shared, bool optional);
-struct reset_control *of_reset_control_array_get(struct device_node *np,
- bool shared, bool optional,
- bool acquired);
+ enum reset_control_flags flags);
+struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags);
int reset_control_get_count(struct device *dev);
@@ -116,17 +154,19 @@ static inline int __device_reset(struct device *dev, bool optional)
static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired)
+ const char *id, int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -162,8 +202,10 @@ reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline int
__reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
@@ -174,30 +216,36 @@ reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline int
__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
static inline struct reset_control *
-devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
+devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *
-of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
- bool acquired)
+of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -236,7 +284,7 @@ static inline int device_reset_optional(struct device *dev)
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -253,7 +301,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -274,7 +322,7 @@ static inline struct reset_control *
__must_check reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -295,7 +343,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -316,7 +364,8 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -344,7 +393,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -361,7 +410,7 @@ static inline int __must_check
reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
}
/**
@@ -378,7 +427,7 @@ reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, true, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -398,7 +447,7 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -415,7 +464,7 @@ reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, true, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -435,7 +484,7 @@ static inline int __must_check
reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -451,7 +500,7 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, false, true);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -471,7 +520,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_optional_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, true, true);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -496,7 +545,7 @@ static inline struct reset_control *of_reset_control_get_optional_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, true, false, false);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -513,7 +562,7 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, false, false, true);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -541,7 +590,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, true, false, false);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED);
}
/**
@@ -560,7 +609,26 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_exclusive_deasserted - resource managed
+ * reset_control_get_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_exclusive() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_DEASSERTED);
}
/**
@@ -580,7 +648,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -599,7 +668,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -619,7 +688,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -638,7 +708,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -658,7 +728,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -673,7 +744,26 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int
static inline struct reset_control *devm_reset_control_get_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_shared_deasserted - resource managed
+ * reset_control_get_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_shared() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED_DEASSERTED);
}
/**
@@ -693,7 +783,29 @@ static inline int __must_check
devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_bulk_get_shared_deasserted - resource managed
+ * reset_control_bulk_get_shared() +
+ * reset_control_bulk_deassert()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared() + reset_control_bulk_deassert(). For
+ * reset controllers returned from this function, reset_control_bulk_assert() +
+ * reset_control_bulk_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared_deasserted(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_SHARED_DEASSERTED);
}
/**
@@ -711,7 +823,26 @@ devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_deasserted - resource managed
+ * reset_control_get_optional_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_exclusive() + reset_control_deassert().
+ * For reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED);
}
/**
@@ -731,7 +862,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -749,7 +881,26 @@ devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_optional_shared_deasserted - resource managed
+ * reset_control_get_optional_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_shared() + reset_control_deassert(). For
+ * reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_shared() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED);
}
/**
@@ -769,7 +920,7 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -787,7 +938,7 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *
devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, false, false, true);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -803,7 +954,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
static inline struct reset_control *
devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, true, false, false);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_SHARED);
}
/*
@@ -851,54 +1002,60 @@ static inline struct reset_control *devm_reset_control_get_by_index(
static inline struct reset_control *
devm_reset_control_array_get_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_exclusive_released(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
devm_reset_control_array_get_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive_released(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, false);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
of_reset_control_array_get_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_optional_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_optional_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED);
}
#endif
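The reset.h hunks above fold the old shared/optional/acquired booleans into a
single enum reset_control_flags argument and add *_deasserted variants that
bundle get + deassert under devres. A minimal consumer sketch, assuming a
hypothetical platform driver and a "bus" reset line (neither appears in this
diff):

	#include <linux/platform_device.h>
	#include <linux/reset.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct reset_control *rstc;

		/* shared line, held deasserted until driver detach */
		rstc = devm_reset_control_get_shared_deasserted(&pdev->dev, "bus");
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);

		/* devres re-asserts and puts the line on detach */
		return 0;
	}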
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 13f17676c5f4..36ddfa1ec301 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -26,7 +26,7 @@ struct restart_block {
unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
- /* For futex_wait and futex_wait_requeue_pi */
+ /* For futex_wait() */
struct {
u32 __user *uaddr;
u32 val;
@@ -43,7 +43,7 @@ struct restart_block {
struct __kernel_timespec __user *rmtp;
struct old_timespec32 __user *compat_rmtp;
};
- u64 expires;
+ ktime_t expires;
} nanosleep;
/* For poll */
struct {
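With nanosleep.expires typed as ktime_t, a restart callback can hand the
stored deadline straight to the hrtimer code without casting through u64. A
sketch of the shape, with a hypothetical callback name:

	static long foo_nanosleep_restart(struct restart_block *restart)
	{
		ktime_t expires = restart->nanosleep.expires;	/* no u64 cast */

		/* ... arm an absolute hrtimer at 'expires' and sleep ... */
		return 0;
	}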
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index e0135e0adae0..bf92227c78d0 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -59,7 +59,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
- rseq_handle_notify_resume(NULL, regs);
+ rseq_handle_slowpath(regs);
}
#endif /* LINUX_RESUME_USER_MODE_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 373003ace639..6816e4c5f3f0 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -147,7 +147,8 @@ void rfkill_destroy(struct rfkill *rfkill);
* Prefer to use rfkill_set_hw_state if you don't need any special reason.
*/
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
- bool blocked, unsigned long reason);
+ bool blocked,
+ enum rfkill_hard_block_reasons reason);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
@@ -240,7 +241,7 @@ bool rfkill_soft_blocked(struct rfkill *rfkill);
* rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that corresponds to the name.
+ * Returns: enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -280,7 +281,7 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
bool blocked,
- unsigned long reason)
+ enum rfkill_hard_block_reasons reason)
{
return blocked;
}
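Typing the reason argument as enum rfkill_hard_block_reasons lets the compiler
flag callers that pass an arbitrary unsigned long. A call-site sketch (the
surrounding driver context is hypothetical; the RFKILL_HARD_BLOCK_* values
come from the rfkill uAPI):

	/* hardware kill switch toggled: report the reason with the state */
	bool blocked = rfkill_set_hw_state_reason(rfkill, true,
						  RFKILL_HARD_BLOCK_SIGNAL);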
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 8463a128e2f4..08e664b21f5a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -122,7 +122,7 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
return hash & (tbl->size - 1);
}
-static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
const void *key, const struct rhashtable_params params,
unsigned int hash_rnd)
{
@@ -152,7 +152,7 @@ static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
return hash;
}
-static inline unsigned int rht_key_hashfn(
+static __always_inline unsigned int rht_key_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const void *key, const struct rhashtable_params params)
{
@@ -161,7 +161,7 @@ static inline unsigned int rht_key_hashfn(
return rht_bucket_index(tbl, hash);
}
-static inline unsigned int rht_head_hashfn(
+static __always_inline unsigned int rht_head_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const struct rhash_head *he, const struct rhashtable_params params)
{
@@ -272,13 +272,13 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_rcu(p, ht) \
- rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
+ rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_bucket(p, tbl, hash) \
rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_dereference_bucket_rcu(p, tbl, hash) \
- rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+ rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
@@ -355,12 +355,25 @@ static inline void rht_unlock(struct bucket_table *tbl,
local_irq_restore(flags);
}
-static inline struct rhash_head *__rht_ptr(
- struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+ RHT_LOOKUP_NORMAL,
+ RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
{
- return (struct rhash_head *)
- ((unsigned long)p & ~BIT(0) ?:
- (unsigned long)RHT_NULLS_MARKER(bkt));
+ unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+ if (freq == RHT_LOOKUP_LIKELY)
+ return (struct rhash_head *)
+ (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+ else
+ return (struct rhash_head *)
+ (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
@@ -370,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
+{
+ return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference(*bkt), bkt);
+ return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr(
@@ -381,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
struct bucket_table *tbl,
unsigned int hash)
{
- return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -497,7 +519,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
for (({barrier(); }), \
pos = head; \
!rht_is_a_nulls(pos); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
@@ -513,7 +535,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
for (({barrier(); }), \
pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
!rht_is_a_nulls(pos); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_entry_rcu_from - iterate over an rcu hash chain from a given head
@@ -560,7 +582,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
* list returned by rhltable_lookup.
*/
#define rhl_for_each_rcu(pos, list) \
- for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+ for (pos = list; pos; pos = rcu_dereference_all(pos->next))
/**
* rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
@@ -574,7 +596,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
*/
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
for (pos = list; pos && rht_entry(tpos, pos, member); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
@@ -586,9 +608,10 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
}
/* Internal function, do not use. */
-static inline struct rhash_head *__rhashtable_lookup(
+static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -599,12 +622,13 @@ static inline struct rhash_head *__rhashtable_lookup(
struct rhash_head *he;
unsigned int hash;
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -639,15 +663,26 @@ restart:
*
* Returns the first entry on which the compare function returned true.
*/
-static inline void *rhashtable_lookup(
+static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? rht_obj(ht, he) : NULL;
}
+static __always_inline void *rhashtable_lookup_likely(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
@@ -662,7 +697,7 @@ static inline void *rhashtable_lookup(
*
* Returns the first entry on which the compare function returned true.
*/
-static inline void *rhashtable_lookup_fast(
+static __always_inline void *rhashtable_lookup_fast(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -689,20 +724,31 @@ static inline void *rhashtable_lookup_fast(
*
* Returns the list of entries that match the given key.
*/
-static inline struct rhlist_head *rhltable_lookup(
+static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
/* Internal function, please use rhashtable_insert_fast() instead. This
* function returns the existing element already in the hash table if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
-static inline void *__rhashtable_insert_fast(
+static __always_inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
@@ -825,7 +871,7 @@ out_unlock:
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhashtable_insert_fast(
+static __always_inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -854,7 +900,7 @@ static inline int rhashtable_insert_fast(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhltable_insert_key(
+static __always_inline int rhltable_insert_key(
struct rhltable *hlt, const void *key, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -877,7 +923,7 @@ static inline int rhltable_insert_key(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhltable_insert(
+static __always_inline int rhltable_insert(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -902,7 +948,7 @@ static inline int rhltable_insert(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhashtable_lookup_insert_fast(
+static __always_inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -929,7 +975,7 @@ static inline int rhashtable_lookup_insert_fast(
* object if it exists, NULL if it did not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
-static inline void *rhashtable_lookup_get_insert_fast(
+static __always_inline void *rhashtable_lookup_get_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -956,7 +1002,7 @@ static inline void *rhashtable_lookup_get_insert_fast(
*
* Returns zero on success.
*/
-static inline int rhashtable_lookup_insert_key(
+static __always_inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -982,7 +1028,7 @@ static inline int rhashtable_lookup_insert_key(
* object if it exists, NULL if it does not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
-static inline void *rhashtable_lookup_get_insert_key(
+static __always_inline void *rhashtable_lookup_get_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -992,7 +1038,7 @@ static inline void *rhashtable_lookup_get_insert_key(
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast_one(
+static __always_inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
@@ -1074,7 +1120,7 @@ unlocked:
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static __always_inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
@@ -1115,7 +1161,7 @@ static inline int __rhashtable_remove_fast(
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
-static inline int rhashtable_remove_fast(
+static __always_inline int rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -1137,7 +1183,7 @@ static inline int rhashtable_remove_fast(
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
-static inline int rhltable_remove(
+static __always_inline int rhltable_remove(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -1145,7 +1191,7 @@ static inline int rhltable_remove(
}
/* Internal function, please use rhashtable_replace_fast() instead */
-static inline int __rhashtable_replace_fast(
+static __always_inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
@@ -1208,7 +1254,7 @@ unlocked:
* Returns zero on success, -ENOENT if the entry could not be found,
* -EINVAL if hash is not the same for the old and new objects.
*/
-static inline int rhashtable_replace_fast(
+static __always_inline int rhashtable_replace_fast(
struct rhashtable *ht, struct rhash_head *obj_old,
struct rhash_head *obj_new,
const struct rhashtable_params params)
@@ -1259,7 +1305,7 @@ static inline int rhashtable_replace_fast(
static inline void rhltable_walk_enter(struct rhltable *hlt,
struct rhashtable_iter *iter)
{
- return rhashtable_walk_enter(&hlt->ht, iter);
+ rhashtable_walk_enter(&hlt->ht, iter);
}
/**
@@ -1275,12 +1321,12 @@ static inline void rhltable_free_and_destroy(struct rhltable *hlt,
void *arg),
void *arg)
{
- return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+ rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}
static inline void rhltable_destroy(struct rhltable *hlt)
{
- return rhltable_free_and_destroy(hlt, NULL, NULL);
+ rhltable_free_and_destroy(hlt, NULL, NULL);
}
#endif /* _LINUX_RHASHTABLE_H */
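The rhashtable changes above force-inline the lookup/insert fast paths, switch
the RCU dereferences to the *_all_check() variants, and add *_likely() lookups
that bias the bucket-head test with likely(). A sketch of opting in on a hot
path, under an illustrative key layout (struct foo and foo_params are not from
this diff; the caller holds rcu_read_lock() as for any rhashtable lookup):

	struct foo {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params foo_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct foo, key),
		.head_offset	= offsetof(struct foo, node),
	};

	static struct foo *foo_find_hot(struct rhashtable *ht, u32 key)
	{
		/* emits the likely()-biased bucket dereference */
		return rhashtable_lookup_likely(ht, &key, foo_params);
	}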
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 96d2140b471e..876358cfe1b1 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -89,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ unsigned long scratch_size,
+ struct lock_class_key *key);
+
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);
+
/*
* Because the ring buffer is generic, if other users of the ring buffer get
* traced by ftrace, it can produce lockdep warnings. We need to keep each
@@ -100,6 +108,18 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_range((size), (flags), (order), (start), \
+ (range_size), (s_size), &__key); \
+})
+
typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
ring_buffer_cond_fn cond, void *data);
@@ -124,6 +144,9 @@ int ring_buffer_write(struct trace_buffer *buffer,
void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);
+DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
+ ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))
+
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
@@ -132,9 +155,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
-void ring_buffer_read_prepare_sync(void);
-void ring_buffer_read_start(struct ring_buffer_iter *iter);
+ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
@@ -172,6 +193,7 @@ void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
@@ -193,7 +215,6 @@ void ring_buffer_set_clock(struct trace_buffer *buffer,
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
struct buffer_data_read_page;
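The new DEFINE_GUARD() above makes ring buffer nesting usable with the
scope-based cleanup infrastructure. A sketch, assuming a hypothetical nested
writer (guard() comes from linux/cleanup.h):

	static void foo_write_nested(struct trace_buffer *buffer)
	{
		/* nest_start here, nest_end automatically at scope exit */
		guard(ring_buffer_nest)(buffer);

		/* ... ring_buffer_lock_reserve() / ring_buffer_unlock_commit() ... */
	}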
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 2cd637268b4f..2c29f21ba9e5 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -78,7 +78,7 @@
#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */
#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */
-extern struct bus_type rio_bus_type;
+extern const struct bus_type rio_bus_type;
extern struct class rio_mport_class;
struct rio_mport;
@@ -465,7 +465,7 @@ struct rio_driver {
struct device_driver driver;
};
-#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver)
+#define to_rio_driver(drv) container_of_const(drv, struct rio_driver, driver)
union rio_pw_msg {
struct {
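Constifying rio_bus_type and switching to_rio_driver() to container_of_const()
keeps const-ness intact through the bus callbacks. A sketch with a hypothetical
match function (the const parameter mirrors the constified driver-core
signatures):

	static int foo_rio_match(struct device *dev, const struct device_driver *drv)
	{
		const struct rio_driver *rdrv = to_rio_driver(drv);	/* stays const */

		/* ... compare rdrv->id_table against the device ... */
		return 0;
	}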
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index e49c32b0f394..dd8afe511242 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -391,13 +391,8 @@ struct rio_dev *rio_dev_get(struct rio_dev *);
void rio_dev_put(struct rio_dev *);
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
-extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport);
extern void rio_release_dma(struct dma_chan *dchan);
-extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
- struct rio_dev *rdev, struct dma_chan *dchan,
- struct rio_dma_data *data,
- enum dma_transfer_direction direction, unsigned long flags);
extern struct dma_async_tx_descriptor *rio_dma_prep_xfer(
struct dma_chan *dchan, u16 destid,
struct rio_dma_data *data,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 7229b9baf20d..daa92a58585d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -13,6 +13,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
+#include <linux/bit_spinlock.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -171,7 +172,215 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *folio_get_anon_vma(struct folio *folio);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);
+
+#ifdef CONFIG_MM_ID
+static __always_inline void folio_lock_large_mapcount(struct folio *folio)
+{
+ bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
+{
+ __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ return folio->_mm_id[idx] & MM_ID_MASK;
+}
+
+static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ folio->_mm_id[idx] &= ~MM_ID_MASK;
+ folio->_mm_id[idx] |= id;
+}
+
+static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio,
+ int diff, mm_id_t mm_id)
+{
+ VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio));
+ VM_WARN_ON_ONCE(diff <= 0);
+ VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX);
+
+ /*
+ * Make sure we can detect at least one complete PTE mapping of the
+ * folio in a single MM as "exclusively mapped". This is primarily
+ * a check on 32bit, where we currently reduce the size of the per-MM
+ * mapcount to a short.
+ */
+ VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio));
+ VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] < 0);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] < 0);
+ VM_WARN_ON_ONCE(!folio_mapped(folio) &&
+ test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids));
+}
+
+static __always_inline void folio_set_large_mapcount(struct folio *folio,
+ int mapcount, struct vm_area_struct *vma)
+{
+ __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY);
+
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+ folio->_mm_id_mapcount[0] = mapcount - 1;
+ folio_set_mm_id(folio, 0, vma->vm_mm->mm_id);
+}
+
+static __always_inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * If a folio is mapped more than once into an MM on 32bit, we
+ * can in theory overflow the per-MM mapcount (although only for
+ * fairly large folios), turning it negative. In that case, just
+ * free up the slot and mark the folio "mapped shared", otherwise
+ * we might be in trouble when unmapping pages later.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) {
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) {
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 0, mm_id);
+ folio->_mm_id_mapcount[0] = diff - 1;
+ /* We might have other mappings already. */
+ if (new_mapcount_val != diff - 1)
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 1, mm_id);
+ folio->_mm_id_mapcount[1] = diff - 1;
+ /* Slot 0 certainly has mappings as well. */
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_add_large_mapcount folio_add_return_large_mapcount
+
+static __always_inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * There are valid corner cases where we might underflow a per-MM
+ * mapcount (some mappings added when no slot was free, some mappings
+ * added once a slot was free), so we always set it to -1 once we go
+ * negative.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] -= diff;
+ if (folio->_mm_id_mapcount[0] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] -= diff;
+ if (folio->_mm_id_mapcount[1] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ }
+
+ /*
+ * If one MM slot owns all mappings, the folio is mapped exclusively.
+ * Note that if the folio is now unmapped (new_mapcount_val == -1), both
+ * slots must be free (mapcount == -1), and we'll also mark it as
+ * exclusive.
+ */
+ if (folio->_mm_id_mapcount[0] == new_mapcount_val ||
+ folio->_mm_id_mapcount[1] == new_mapcount_val)
+ folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT;
+out:
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_sub_large_mapcount folio_sub_return_large_mapcount
+#else /* !CONFIG_MM_ID */
+/*
+ * See __folio_rmap_sanity_checks(), we might map large folios even without
+ * CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now.
+ */
+static inline void folio_set_large_mapcount(struct folio *folio, int mapcount,
+ struct vm_area_struct *vma)
+{
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+}
+
+static inline void folio_add_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_add(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+
+static inline void folio_sub_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_sub(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_MM_ID */
+
+#define folio_inc_large_mapcount(folio, vma) \
+ folio_add_large_mapcount(folio, 1, vma)
+#define folio_inc_return_large_mapcount(folio, vma) \
+ folio_add_return_large_mapcount(folio, 1, vma)
+#define folio_dec_large_mapcount(folio, vma) \
+ folio_sub_large_mapcount(folio, 1, vma)
+#define folio_dec_return_large_mapcount(folio, vma) \
+ folio_sub_return_large_mapcount(folio, 1, vma)
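To make the slot handling above concrete, a short worked trace of the
CONFIG_MM_ID accounting (values illustrative; internal fields shown):

	/*
	 * folio_set_large_mapcount(folio, 4, vmaA)
	 *	_large_mapcount = 3, slot0 = {A, 3}, slot1 = {DUMMY, -1}
	 * folio_add_return_large_mapcount(folio, 4, vmaB) -> returns 8
	 *	_large_mapcount = 7, slot1 = {B, 3},
	 *	FOLIO_MM_IDS_SHARED_BIT set (two MMs map the folio)
	 * folio_sub_return_large_mapcount(folio, 4, vmaA) -> returns 4
	 *	slot0 underflows and is reset to {DUMMY, -1}; slot1's mapcount
	 *	(3) now equals the remaining _large_mapcount, so the shared bit
	 *	is cleared and the folio counts as exclusively mapped by B
	 */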
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -185,21 +394,15 @@ typedef int __bitwise rmap_t;
/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
-/*
- * Internally, we're using an enum to specify the granularity. We make the
- * compiler emit specialized code for each granularity.
- */
-enum rmap_level {
- RMAP_LEVEL_PTE = 0,
- RMAP_LEVEL_PMD,
-};
-
-static inline void __folio_rmap_sanity_checks(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+static __always_inline void __folio_rmap_sanity_checks(const struct folio *folio,
+ const struct page *page, int nr_pages, enum pgtable_level level)
{
/* hugetlb folios are handled separately. */
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+ /* When (un)mapping zeropages, we should never touch ref+mapcount. */
+ VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);
+
/*
* TODO: we get driver-allocated folios that have nothing to do with
* the rmap using vm_insert_page(); therefore, we cannot assume that
@@ -214,19 +417,49 @@ static inline void __folio_rmap_sanity_checks(struct folio *folio,
VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
break;
- case RMAP_LEVEL_PMD:
+ case PGTABLE_LEVEL_PMD:
/*
* We don't support folios larger than a single PMD yet. So
- * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
* a single "entire" mapping of the folio.
*/
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
break;
+ case PGTABLE_LEVEL_PUD:
+ /*
+ * Assume that we are creating a single "entire" mapping of the
+ * folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
+ break;
default:
- VM_WARN_ON_ONCE(true);
+ BUILD_BUG();
+ }
+
+ /*
+ * Anon folios must have an associated live anon_vma as long as they're
+ * mapped into userspace.
+ * Note that the atomic_read() mainly does two things:
+ *
+ * 1. In KASAN builds with CONFIG_SLUB_RCU_DEBUG, it causes KASAN to
+ * check that the associated anon_vma has not yet been freed (subject
+ * to KASAN's usual limitations). This check will pass if the
+ * anon_vma's refcount has already dropped to 0 but an RCU grace
+ * period hasn't passed since then.
+ * 2. If the anon_vma has not yet been freed, it checks that the
+ * anon_vma still has a nonzero refcount (as opposed to being in the
+ * middle of an RCU delay for getting freed).
+ */
+ if (folio_test_anon(folio) && !folio_test_ksm(folio)) {
+ unsigned long mapping = (unsigned long)folio->mapping;
+ struct anon_vma *anon_vma;
+
+ anon_vma = (void *)(mapping - FOLIO_MAPPING_ANON);
+ VM_WARN_ON_FOLIO(atomic_read(&anon_vma->refcount) == 0, folio);
}
}
@@ -241,19 +474,23 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
void folio_add_anon_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
- unsigned long address);
+ unsigned long address, rmap_t flags);
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *);
#define folio_add_file_rmap_pte(folio, page, vma) \
folio_add_file_rmap_ptes(folio, page, 1, vma)
void folio_add_file_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
+void folio_add_file_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
struct vm_area_struct *);
#define folio_remove_rmap_pte(folio, page, vma) \
folio_remove_rmap_ptes(folio, page, 1, vma)
void folio_remove_rmap_pmd(struct folio *, struct page *,
struct vm_area_struct *);
+void folio_remove_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
@@ -319,28 +556,34 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
}
static __always_inline void __folio_dup_file_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
if (!folio_test_large(folio)) {
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
- do {
- atomic_inc(&page->_mapcount);
- } while (page++, --nr_pages > 0);
- atomic_add(orig_nr_pages, &folio->_large_mapcount);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ }
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
- case RMAP_LEVEL_PMD:
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
atomic_inc(&folio->_entire_mapcount);
- atomic_inc(&folio->_large_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
break;
+ default:
+ BUILD_BUG();
}
}
@@ -349,45 +592,47 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + nr_pages)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
- struct page *page, int nr_pages)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
- struct page *page)
+ struct page *page, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
}
/**
* folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
* The caller needs to hold the page table lock.
*/
static inline void folio_dup_file_rmap_pmd(struct folio *folio,
- struct page *page)
+ struct page *page, struct vm_area_struct *dst_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
#else
WARN_ON_ONCE(true);
#endif
}
static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
- struct page *page, int nr_pages, struct vm_area_struct *src_vma,
- enum rmap_level level)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
bool maybe_pinned;
@@ -412,7 +657,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* copying if the folio maybe pinned.
*/
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
if (unlikely(maybe_pinned)) {
for (i = 0; i < nr_pages; i++)
if (PageAnonExclusive(page + i))
@@ -422,26 +667,30 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
if (!folio_test_large(folio)) {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ atomic_inc(&folio->_mapcount);
break;
}
do {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- atomic_inc(&page->_mapcount);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_inc(&page->_mapcount);
} while (page++, --nr_pages > 0);
- atomic_add(orig_nr_pages, &folio->_large_mapcount);
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
- case RMAP_LEVEL_PMD:
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
if (PageAnonExclusive(page)) {
if (unlikely(maybe_pinned))
return -EBUSY;
ClearPageAnonExclusive(page);
}
atomic_inc(&folio->_entire_mapcount);
- atomic_inc(&folio->_large_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
break;
+ default:
+ BUILD_BUG();
}
return 0;
}
@@ -452,6 +701,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* @folio: The folio to duplicate the mappings of
* @page: The first page to duplicate the mappings of
* @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
* @src_vma: The vm area from which the mappings are duplicated
*
* The page range of the folio is defined by [page, page + nr_pages)
@@ -470,17 +720,19 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
- struct page *page, int nr_pages, struct vm_area_struct *src_vma)
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
- return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
- RMAP_LEVEL_PTE);
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
+ src_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
- struct page *page, struct vm_area_struct *src_vma)
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
- return __folio_try_dup_anon_rmap(folio, page, 1, src_vma,
- RMAP_LEVEL_PTE);
+ return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
+ PGTABLE_LEVEL_PTE);
}
/**
@@ -488,6 +740,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
* of a folio
* @folio: The folio to duplicate the mapping of
* @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
* @src_vma: The vm area from which the mapping is duplicated
*
* The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
@@ -506,11 +759,12 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
* Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
*/
static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
- struct page *page, struct vm_area_struct *src_vma)
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
- RMAP_LEVEL_PMD);
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
+ src_vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
@@ -518,7 +772,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
}
static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+ struct page *page, int nr_pages, enum pgtable_level level)
{
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
@@ -613,7 +867,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
struct page *page)
{
- return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+ return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
}
/**
@@ -644,7 +898,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
- RMAP_LEVEL_PMD);
+ PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
@@ -655,20 +909,24 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
* Called from mm/vmscan.c to handle paging out
*/
int folio_referenced(struct folio *, int is_locked,
- struct mem_cgroup *memcg, unsigned long *vm_flags);
+ struct mem_cgroup *memcg, vm_flags_t *vm_flags);
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);
-int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, struct page **pages,
- void *arg);
+struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
+ void *owner, struct folio **foliop);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Result flags */
+
+/* The page is mapped across page table boundary */
+#define PVMW_PGTABLE_CROSSED (1 << 16)
+
struct page_vma_mapped_walk {
unsigned long pfn;
unsigned long nr_pages;
@@ -681,16 +939,6 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
-#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \
- struct page_vma_mapped_walk name = { \
- .pfn = page_to_pfn(_page), \
- .nr_pages = compound_nr(_page), \
- .pgoff = page_to_pgoff(_page), \
- .vma = _vma, \
- .address = _address, \
- .flags = _flags, \
- }
-
#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
struct page_vma_mapped_walk name = { \
.pfn = folio_pfn(_folio), \
@@ -710,12 +958,33 @@ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
spin_unlock(pvmw->ptl);
}
-bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
-
-/*
- * Used by swapoff to help locate where page is expected in vma.
+/**
+ * page_vma_mapped_walk_restart - Restart the page table walk.
+ * @pvmw: Pointer to struct page_vma_mapped_walk.
+ *
+ * It restarts the page table walk when changes occur in the page
+ * table, such as splitting a PMD. Ensures that the PTL held during
+ * the previous walk is released and resets the state to allow for
+ * a new walk starting at the current address stored in pvmw->address.
*/
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+static inline void
+page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
+{
+ WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);
+
+ if (likely(pvmw->ptl))
+ spin_unlock(pvmw->ptl);
+ else
+ WARN_ON_ONCE(1);
+
+ pvmw->ptl = NULL;
+ pvmw->pmd = NULL;
+ pvmw->pte = NULL;
+}
+
+bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *, const struct vm_area_struct *);
/*
* Cleans the PTEs of shared mappings.
@@ -725,12 +994,18 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*/
int folio_mkclean(struct folio *);
+int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
+ unsigned long pfn, unsigned long nr_pages);
+
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
struct vm_area_struct *vma);
-void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
+enum rmp_flags {
+ RMP_LOCKED = 1 << 0,
+ RMP_USE_SHARED_ZEROPAGE = 1 << 1,
+};
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
@@ -754,14 +1029,14 @@ struct rmap_walk_control {
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct folio *folio);
- struct anon_vma *(*anon_lock)(struct folio *folio,
+ struct anon_vma *(*anon_lock)(const struct folio *folio,
struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
@@ -771,7 +1046,7 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+ vm_flags_t *vm_flags)
{
*vm_flags = 0;
return 0;
@@ -787,8 +1062,4 @@ static inline int folio_mkclean(struct folio *folio)
}
#endif /* CONFIG_MMU */
-static inline int page_mkclean(struct page *page)
-{
- return folio_mkclean(page_folio(page));
-}
#endif /* _LINUX_RMAP_H */
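The page_vma_mapped_walk_restart() helper added above is easiest to read next to a walk loop. A minimal sketch, assuming the caller holds the folio lock; the actual page-table mutation is elided since its helpers vary by kernel version:

static void walk_folio_mappings(struct folio *folio, struct vm_area_struct *vma,
				unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);

	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte) {
			/*
			 * Mapped at PMD level: after mutating the page table
			 * (e.g. splitting the PMD), release the PTL and
			 * rescan from pvmw.address.
			 */
			page_vma_mapped_walk_restart(&pvmw);
			continue;
		}
		/* pvmw.pte points at one mapped PTE of the folio here */
	}
}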
diff --git a/include/linux/rolling_buffer.h b/include/linux/rolling_buffer.h
new file mode 100644
index 000000000000..ac15b1ffdd83
--- /dev/null
+++ b/include/linux/rolling_buffer.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Rolling buffer of folios
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _ROLLING_BUFFER_H
+#define _ROLLING_BUFFER_H
+
+#include <linux/folio_queue.h>
+#include <linux/uio.h>
+
+/*
+ * Rolling buffer. Whilst the buffer is live and in use, folios and folio
+ * queue segments can be added to one end by one thread and removed from the
+ * other end by another thread. The buffer isn't allowed to be empty; it must
+ * always have at least one folio_queue in it so that neither side has to
+ * modify both queue pointers.
+ *
+ * The iterator in the buffer is extended as buffers are inserted. It can be
+ * snapshotted to use a segment of the buffer.
+ */
+struct rolling_buffer {
+ struct folio_queue *head; /* Producer's insertion point */
+ struct folio_queue *tail; /* Consumer's removal point */
+ struct iov_iter iter; /* Iterator tracking what's left in the buffer */
+ u8 next_head_slot; /* Next slot in ->head */
+ u8 first_tail_slot; /* First slot in ->tail */
+};
+
+/*
+ * Snapshot of a rolling buffer.
+ */
+struct rolling_buffer_snapshot {
+ struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
+ unsigned char curr_slot; /* Folio currently being read */
+ unsigned char curr_order; /* Order of folio */
+};
+
+/* Marks to store per-folio in the internal folio_queue structs. */
+#define ROLLBUF_MARK_1 BIT(0)
+#define ROLLBUF_MARK_2 BIT(1)
+
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction);
+int rolling_buffer_make_space(struct rolling_buffer *roll);
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch);
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags);
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll);
+void rolling_buffer_clear(struct rolling_buffer *roll);
+
+static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount)
+{
+ iov_iter_advance(&roll->iter, amount);
+}
+
+#endif /* _ROLLING_BUFFER_H */
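A rough usage sketch of the API above (hedged: the direction argument is assumed to take the ITER_DEST/ITER_SOURCE values of the embedded iov_iter, and error handling is elided):

	struct rolling_buffer roll;
	struct folio_queue *spent;

	rolling_buffer_init(&roll, rreq_id, ITER_DEST);

	/* Producer end: append folios as they become available */
	rolling_buffer_append(&roll, folio, ROLLBUF_MARK_1);

	/* Consumer end: consume data through roll.iter, then advance */
	rolling_buffer_advance(&roll, consumed);

	/* Reap segments that both ends are done with */
	while ((spent = rolling_buffer_delete_spent(&roll))) {
		/* free/recycle the detached segment; the free routine
		 * depends on how the queue was allocated */
	}

	rolling_buffer_clear(&roll);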
diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h
new file mode 100644
index 000000000000..ed3f8e431eff
--- /dev/null
+++ b/include/linux/rpmb.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Intel Corp. All rights reserved
+ * Copyright (C) 2021-2022 Linaro Ltd
+ */
+#ifndef __RPMB_H__
+#define __RPMB_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * enum rpmb_type - type of underlying storage technology
+ *
+ * @RPMB_TYPE_EMMC : emmc (JESD84-B50.1)
+ * @RPMB_TYPE_UFS : UFS (JESD220)
+ * @RPMB_TYPE_NVME : NVM Express
+ */
+enum rpmb_type {
+ RPMB_TYPE_EMMC,
+ RPMB_TYPE_UFS,
+ RPMB_TYPE_NVME,
+};
+
+/**
+ * struct rpmb_descr - RPMB description provided by the underlying block device
+ *
+ * @type : block device type
+ * @route_frames : routes frames to and from the RPMB device
+ * @dev_id : unique device identifier read from the hardware
+ * @dev_id_len : length of unique device identifier
+ * @reliable_wr_count: number of sectors that can be written in one access
+ * @capacity : capacity of the device in units of 128K
+ *
+ * @dev_id is intended to be used as input when deriving the authentication key.
+ */
+struct rpmb_descr {
+ enum rpmb_type type;
+ int (*route_frames)(struct device *dev, u8 *req, unsigned int req_len,
+ u8 *resp, unsigned int resp_len);
+ u8 *dev_id;
+ size_t dev_id_len;
+ u16 reliable_wr_count;
+ u16 capacity;
+};
+
+/**
+ * struct rpmb_dev - device which can support RPMB partition
+ *
+ * @dev : device
+ * @id : device_id
+ * @list_node : linked list node
+ * @descr : RPMB description
+ */
+struct rpmb_dev {
+ struct device dev;
+ int id;
+ struct list_head list_node;
+ struct rpmb_descr descr;
+};
+
+#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev)
+
+/**
+ * struct rpmb_frame - RPMB frame structure for authenticated access
+ *
+ * @stuff : stuff bytes, a padding/reserved area of 196 bytes at the
+ * beginning of the RPMB frame. They don't carry meaningful
+ * data but are required to make the frame exactly 512 bytes.
+ * @key_mac : The authentication key or the message authentication
+ * code (MAC) depending on the request/response type.
+ * The MAC will be delivered in the last (or the only)
+ * block of data.
+ * @data : Data to be written or read by signed access.
+ * @nonce : Random number generated by the host for the requests
+ * and copied to the response by the RPMB engine.
+ * @write_counter: Counter value for the total amount of the successful
+ * authenticated data write requests made by the host.
+ * @addr : Address of the data to be programmed to or read
+ * from the RPMB. Address is the serial number of
+ * the accessed block (half sector 256B).
+ * @block_count : Number of blocks (half sectors, 256B) requested to be
+ * read/programmed.
+ * @result : Includes information about the status of the write counter
+ * (valid, expired) and result of the access made to the RPMB.
+ * @req_resp : Defines the type of request and response to/from the memory.
+ *
+ * The stuff bytes and big-endian properties are modeled to fit the spec.
+ */
+struct rpmb_frame {
+ u8 stuff[196];
+ u8 key_mac[32];
+ u8 data[256];
+ u8 nonce[16];
+ __be32 write_counter;
+ __be16 addr;
+ __be16 block_count;
+ __be16 result;
+ __be16 req_resp;
+};
+
+#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */
+#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */
+#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */
+#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
+
+#if IS_ENABLED(CONFIG_RPMB)
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev);
+void rpmb_dev_put(struct rpmb_dev *rdev);
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+ const struct rpmb_dev *start,
+ int (*match)(struct device *dev,
+ const void *data));
+int rpmb_interface_register(struct class_interface *intf);
+void rpmb_interface_unregister(struct class_interface *intf);
+struct rpmb_dev *rpmb_dev_register(struct device *dev,
+ struct rpmb_descr *descr);
+int rpmb_dev_unregister(struct rpmb_dev *rdev);
+
+int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp, unsigned int resp_len);
+
+#else
+static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+ return NULL;
+}
+
+static inline void rpmb_dev_put(struct rpmb_dev *rdev) { }
+
+static inline struct rpmb_dev *
+rpmb_dev_find_device(const void *data, const struct rpmb_dev *start,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
+static inline int rpmb_interface_register(struct class_interface *intf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void rpmb_interface_unregister(struct class_interface *intf)
+{
+}
+
+static inline struct rpmb_dev *
+rpmb_dev_register(struct device *dev, struct rpmb_descr *descr)
+{
+ return NULL;
+}
+
+static inline int rpmb_dev_unregister(struct rpmb_dev *dev)
+{
+ return 0;
+}
+
+static inline int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_RPMB */
+
+#endif /* __RPMB_H__ */
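To make the registration flow concrete, a sketch of a provider hooking into the API above (my_route_frames() and my_rpmb_register() are hypothetical; a real driver forwards the frames over its storage transport):

static int my_route_frames(struct device *dev, u8 *req, unsigned int req_len,
			   u8 *resp, unsigned int resp_len)
{
	/* Hand the raw RPMB frames to the underlying hardware queue */
	return 0;
}

static struct rpmb_dev *my_rpmb_register(struct device *dev, u8 *id, size_t id_len)
{
	struct rpmb_descr descr = {
		.type		   = RPMB_TYPE_EMMC,
		.route_frames	   = my_route_frames,
		.dev_id		   = id,
		.dev_id_len	   = id_len,
		.reliable_wr_count = 1,
		.capacity	   = 16,	/* 2 MiB in 128K units */
	};

	return rpmb_dev_register(dev, &descr);
}

A client then fills a struct rpmb_frame, for example with req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER) and a fresh nonce, and passes it through rpmb_route_frames().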
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 90d8e4475f80..fb7ab9165645 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -184,13 +184,9 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
@@ -271,15 +267,6 @@ static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
}
-static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-
- return -ENXIO;
-}
-
static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
/* This shouldn't be possible */
@@ -297,15 +284,6 @@ static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
return -ENXIO;
}
-static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-
- return -ENXIO;
-}
-
static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index bc8af3eb5598..2266f4dc77b6 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -3,129 +3,164 @@
#define _LINUX_RSEQ_H
#ifdef CONFIG_RSEQ
-
-#include <linux/preempt.h>
#include <linux/sched.h>
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
-
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+#include <uapi/linux/rseq.h>
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
-}
-
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
- rseq_handle_notify_resume(ksig, regs);
-}
+void __rseq_handle_slowpath(struct pt_regs *regs);
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
+/* Invoked from resume_user_mode_work() */
+static inline void rseq_handle_slowpath(struct pt_regs *regs)
{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
+ if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
+ if (current->rseq.event.slowpath)
+ __rseq_handle_slowpath(regs);
+ } else {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.sched_switch & current->rseq.event.has_rseq)
+ __rseq_handle_slowpath(regs);
+ }
}
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
+void __rseq_signal_deliver(int sig, struct pt_regs *regs);
/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
+ * Invoked from signal delivery to fixup based on the register context before
+ * switching to the signal delivery context.
*/
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs)
{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.has_rseq & current->rseq.event.user_irq)
+ __rseq_signal_deliver(ksig->sig, regs);
} else {
- t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
+ if (current->rseq.event.has_rseq)
+ __rseq_signal_deliver(ksig->sig, regs);
}
}
-static inline void rseq_execve(struct task_struct *t)
+static inline void rseq_raise_notify_resume(struct task_struct *t)
{
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
+ set_tsk_thread_flag(t, TIF_RSEQ);
}
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
+/* Invoked from context switch to force evaluation on exit to user */
+static __always_inline void rseq_sched_switch_event(struct task_struct *t)
{
+ struct rseq_event *ev = &t->rseq.event;
+
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /*
+ * Avoid a boat load of conditionals by using simple logic
+ * to determine whether NOTIFY_RESUME needs to be raised.
+ *
+ * It's required when the CPU or MM CID has changed or
+ * the entry was from user space.
+ */
+ bool raise = (ev->user_irq | ev->ids_changed) & ev->has_rseq;
+
+ if (raise) {
+ ev->sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
+ } else {
+ if (ev->has_rseq) {
+ t->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
+ }
}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
+
+/*
+ * Invoked from __set_task_cpu() when a task migrates or from
+ * mm_cid_schedin() when the CID changes to enforce an IDs update.
+ *
+ * This does not raise TIF_NOTIFY_RESUME as that happens in
+ * rseq_sched_switch_event().
+ */
+static __always_inline void rseq_sched_set_ids_changed(struct task_struct *t)
{
+ t->rseq.event.ids_changed = true;
}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
+
+/* Enforce a full update after RSEQ registration and when execve() failed */
+static inline void rseq_force_update(void)
{
+ if (current->rseq.event.has_rseq) {
+ current->rseq.event.ids_changed = true;
+ current->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(current);
+ }
}
-static inline void rseq_preempt(struct task_struct *t)
+
+/*
+ * KVM/HYPERV invoke resume_user_mode_work() before entering guest mode,
+ * which clears TIF_NOTIFY_RESUME on architectures that don't use the
+ * generic TIF bits and therefore can't provide a separate TIF_RSEQ flag.
+ *
+ * To avoid updating user space RSEQ in that case just to do it again
+ * before returning to user space, __rseq_handle_slowpath() does nothing
+ * when invoked with NULL register state.
+ *
+ * After returning from guest mode, before exiting to userspace, hypervisors
+ * must invoke this function to re-raise TIF_NOTIFY_RESUME if necessary.
+ */
+static inline void rseq_virt_userspace_exit(void)
{
+ /*
+ * The generic optimization for deferring RSEQ updates until the next
+ * exit relies on having a dedicated TIF_RSEQ.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_GENERIC_TIF_BITS) &&
+ current->rseq.event.sched_switch)
+ rseq_raise_notify_resume(current);
}
-static inline void rseq_migrate(struct task_struct *t)
+
+static inline void rseq_reset(struct task_struct *t)
{
+ memset(&t->rseq, 0, sizeof(t->rseq));
+ t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+
+static inline void rseq_execve(struct task_struct *t)
{
+ rseq_reset(t);
}
-static inline void rseq_execve(struct task_struct *t)
+
+/*
+ * If parent process has a registered restartable sequences area, the
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
+ *
+ * On fork, keep the IDs (CPU, MM CID) of the parent, which avoids a fault
+ * on the COW page on exit to user space, when the child stays on the same
+ * CPU as the parent. That's obviously not guaranteed, but in overcommit
+ * scenarios it is more likely and optimizes for the fork/exec case without
+ * taking the fault.
+ */
+static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
{
+ if (clone_flags & CLONE_VM)
+ rseq_reset(t);
+ else
+ t->rseq = current->rseq;
}
-#endif
+#else /* CONFIG_RSEQ */
+static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
+static inline void rseq_sched_switch_event(struct task_struct *t) { }
+static inline void rseq_sched_set_ids_changed(struct task_struct *t) { }
+static inline void rseq_force_update(void) { }
+static inline void rseq_virt_userspace_exit(void) { }
+static inline void rseq_fork(struct task_struct *t, u64 clone_flags) { }
+static inline void rseq_execve(struct task_struct *t) { }
+#endif /* !CONFIG_RSEQ */
#ifdef CONFIG_DEBUG_RSEQ
-
void rseq_syscall(struct pt_regs *regs);
-
-#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
-#endif
+#else /* CONFIG_DEBUG_RSEQ */
+static inline void rseq_syscall(struct pt_regs *regs) { }
+#endif /* !CONFIG_DEBUG_RSEQ */
#endif /* _LINUX_RSEQ_H */
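For the hypervisor case documented above, the expected call pattern is roughly the following (schematic; vcpu_run_guest() stands in for the real KVM/Hyper-V entry path):

	for (;;) {
		/*
		 * May call resume_user_mode_work(NULL), which clears
		 * TIF_NOTIFY_RESUME without touching user space rseq.
		 */
		ret = vcpu_run_guest(vcpu);
		if (ret <= 0)
			break;
	}
	/* Re-raise TIF_NOTIFY_RESUME if an rseq update is still pending */
	rseq_virt_userspace_exit();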
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
new file mode 100644
index 000000000000..c92167ff8a7f
--- /dev/null
+++ b/include/linux/rseq_entry.h
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_ENTRY_H
+#define _LINUX_RSEQ_ENTRY_H
+
+/* Must be outside the CONFIG_RSEQ guard to resolve the stubs */
+#ifdef CONFIG_RSEQ_STATS
+#include <linux/percpu.h>
+
+struct rseq_stats {
+ unsigned long exit;
+ unsigned long signal;
+ unsigned long slowpath;
+ unsigned long fastpath;
+ unsigned long ids;
+ unsigned long cs;
+ unsigned long clear;
+ unsigned long fixup;
+};
+
+DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
+
+/*
+ * Slow path has interrupts and preemption enabled, but the fast path
+ * runs with interrupts disabled so there is no point in having the
+ * preemption checks implied in __this_cpu_inc() for every operation.
+ */
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_stat_inc(which) this_cpu_inc((which))
+#else
+#define rseq_stat_inc(which) raw_cpu_inc((which))
+#endif
+
+#else /* CONFIG_RSEQ_STATS */
+#define rseq_stat_inc(x) do { } while (0)
+#endif /* !CONFIG_RSEQ_STATS */
+
+#ifdef CONFIG_RSEQ
+#include <linux/jump_label.h>
+#include <linux/rseq.h>
+#include <linux/uaccess.h>
+
+#include <linux/tracepoint-defs.h>
+
+#ifdef CONFIG_TRACEPOINTS
+DECLARE_TRACEPOINT(rseq_update);
+DECLARE_TRACEPOINT(rseq_ip_fixup);
+void __rseq_trace_update(struct task_struct *t);
+void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip);
+
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids)
+{
+ if (tracepoint_enabled(rseq_update) && ids)
+ __rseq_trace_update(t);
+}
+
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip)
+{
+ if (tracepoint_enabled(rseq_ip_fixup))
+ __rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+}
+
+#else /* CONFIG_TRACEPOINTS */
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids) { }
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip) { }
+#endif /* !CONFIG_TRACEPOINTS */
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_inline
+#else
+#define rseq_inline __always_inline
+#endif
+
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
+bool rseq_debug_validate_ids(struct task_struct *t);
+
+static __always_inline void rseq_note_user_irq_entry(void)
+{
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY))
+ current->rseq.event.user_irq = true;
+}
+
+/*
+ * Check whether there is a valid critical section and whether the
+ * instruction pointer in @regs is inside the critical section.
+ *
+ * - If the critical section is invalid, terminate the task.
+ *
+ * - If valid and the instruction pointer is inside, set it to the abort IP.
+ *
+ * - If valid and the instruction pointer is outside, clear the critical
+ * section address.
+ *
+ * Returns true, if the section was valid and either fixup or clear was
+ * done, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when an invalid
+ * section was found. It's clear when the failure was an unresolved page
+ * fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+/*
+ * The debug version is put out of line, but kept here so the code stays
+ * together.
+ *
+ * @csaddr has already been checked by the caller to be in user space
+ */
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs,
+ unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ u64 start_ip, abort_ip, offset, cs_end, head, tasksize = TASK_SIZE;
+ unsigned long ip = instruction_pointer(regs);
+ u64 __user *uc_head = (u64 __user *) ucs;
+ u32 usig, __user *uc_sig;
+
+ scoped_user_rw_access(ucs, efault) {
+ /*
+ * Evaluate the user pile and exit if one of the conditions
+ * is not fulfilled.
+ */
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ if (unlikely(start_ip >= tasksize))
+ goto die;
+ /* If outside, just clear the critical section. */
+ if (ip < start_ip)
+ goto clear;
+
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ cs_end = start_ip + offset;
+ /* Check for overflow and wraparound */
+ if (unlikely(cs_end >= tasksize || cs_end < start_ip))
+ goto die;
+
+ /* If not inside, clear it. */
+ if (ip >= cs_end)
+ goto clear;
+
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+ /* Ensure it's "valid" */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+ /* Validate that the abort IP is not in the critical section */
+ if (unlikely(abort_ip - start_ip < offset))
+ goto die;
+
+ /*
+ * Check version and flags for 0. No point in emitting
+ * deprecated warnings before dying. That could be done in
+ * the slow path eventually, but *shrug*.
+ */
+ unsafe_get_user(head, uc_head, efault);
+ if (unlikely(head))
+ goto die;
+
+ /* abort_ip - 4 is >= 0. See abort_ip check above */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* rseq_event.user_irq is only valid if CONFIG_GENERIC_IRQ_ENTRY=y */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* If not in interrupt from user context, let it die */
+ if (unlikely(!t->rseq.event.user_irq))
+ goto die;
+ }
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+/*
+ * When the debug static branch is enabled, validate that user space did
+ * not mess with the cached IDs.
+ */
+bool rseq_debug_validate_ids(struct task_struct *t)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+ u32 cpu_id, uval, node_id;
+
+ /*
+ * On the first exit after registering the rseq region CPU ID is
+ * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0!
+ */
+ node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ?
+ cpu_to_node(t->rseq.ids.cpu_id) : 0;
+
+ scoped_user_read_access(rseq, efault) {
+ unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault);
+ if (cpu_id != t->rseq.ids.cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->cpu_id, efault);
+ if (uval != cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->node_id, efault);
+ if (uval != node_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->mm_cid, efault);
+ if (uval != t->rseq.ids.mm_cid)
+ goto die;
+ }
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+#endif /* RSEQ_BUILD_SLOW_PATH */
+
+/*
+ * This only ensures that abort_ip is in the user address space and
+ * validates that it is preceded by the signature.
+ *
+ * No other sanity checks are done here, that's what the debug code is for.
+ */
+static rseq_inline bool
+rseq_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ unsigned long ip = instruction_pointer(regs);
+ unsigned long tasksize = TASK_SIZE;
+ u64 start_ip, abort_ip, offset;
+ u32 usig, __user *uc_sig;
+
+ rseq_stat_inc(rseq_stats.cs);
+
+ if (unlikely(csaddr >= tasksize)) {
+ t->rseq.event.fatal = true;
+ return false;
+ }
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ return rseq_debug_update_user_cs(t, regs, csaddr);
+
+ scoped_user_rw_access(ucs, efault) {
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+
+ /*
+ * No sanity checks. If user space screwed it up, it can
+ * keep the pieces. That's what debug code is for.
+ *
+ * If outside, just clear the critical section.
+ */
+ if (ip - start_ip >= offset)
+ goto clear;
+
+ /*
+ * Two requirements for @abort_ip:
+ * - Must be in user space as x86 IRET would happily return to
+ * the kernel.
+ * - The four bytes preceding the instruction at @abort_ip must
+ * contain the signature.
+ *
+ * The latter protects against the following attack vector:
+ *
+ * An attacker with limited abilities to write, creates a critical
+ * section descriptor, sets the abort IP to a library function or
+ * some other ROP gadget and stores the address of the descriptor
+ * in TLS::rseq::rseq_cs. An RSEQ abort would then evade ROP
+ * protection.
+ */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+
+ /* The address is guaranteed to be >= 0 and < TASK_SIZE */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* Invalidate the critical section */
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ /* Update the instruction pointer */
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+/*
+ * Updates CPU ID, Node ID and MM CID and reads the critical section
+ * address, when @csaddr != NULL. This allows putting the ID update and the
+ * read under the same uaccess region to spare a separate begin/end.
+ *
+ * As this is either invoked from a C wrapper with @csaddr = NULL or from
+ * the fast path code with a valid pointer, a clever compiler should be
+ * able to optimize the read out. Spares a duplicate implementation.
+ *
+ * Returns true, if the operation was successful, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when invalid data
+ * was found on debug kernels. It's clear when the failure was an
+ * unresolved page fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+static rseq_inline
+bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
+ u32 node_id, u64 *csaddr)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+
+ if (static_branch_unlikely(&rseq_debug_enabled)) {
+ if (!rseq_debug_validate_ids(t))
+ return false;
+ }
+
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault);
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault);
+ unsafe_put_user(node_id, &rseq->node_id, efault);
+ unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
+ if (csaddr)
+ unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+ }
+
+ /* Cache the new values */
+ t->rseq.ids.cpu_cid = ids->cpu_cid;
+ rseq_stat_inc(rseq_stats.ids);
+ rseq_trace_update(t, ids);
+ return true;
+efault:
+ return false;
+}
+
+/*
+ * Update user space with new IDs and conditionally check whether the task
+ * is in a critical section.
+ */
+static rseq_inline bool rseq_update_usr(struct task_struct *t, struct pt_regs *regs,
+ struct rseq_ids *ids, u32 node_id)
+{
+ u64 csaddr;
+
+ if (!rseq_set_ids_get_csaddr(t, ids, node_id, &csaddr))
+ return false;
+
+ /*
+ * On architectures which utilize the generic entry code this
+ * allows skipping the critical section check when the entry was not from
+ * a user space interrupt, unless debug mode is enabled.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ if (!static_branch_unlikely(&rseq_debug_enabled)) {
+ if (likely(!t->rseq.event.user_irq))
+ return true;
+ }
+ }
+ if (likely(!csaddr))
+ return true;
+ /* Sigh, this really needs to do work */
+ return rseq_update_user_cs(t, regs, csaddr);
+}
+
+/*
+ * If you want to use this then convert your architecture to the generic
+ * entry code. I'm tired of building workarounds for people who can't be
+ * bothered to make the maintenance of generic infrastructure less
+ * burdensome. Just sucking everything into the architecture code and
+ * thereby making others chase the horrible hacks and keep them working is
+ * neither acceptable nor sustainable.
+ */
+#ifdef CONFIG_GENERIC_ENTRY
+
+/*
+ * This is inlined into the exit path because:
+ *
+ * 1) It's a one time comparison in the fast path when there is no event to
+ * handle
+ *
+ * 2) The access to the user space rseq memory (TLS) is unlikely to fault
+ * so the straight inline operation is:
+ *
+ * - Four 32-bit stores only if CPU ID/ MM CID need to be updated
+ * - One 64-bit load to retrieve the critical section address
+ *
+ * 3) In the unlikely case that the critical section address is != NULL:
+ *
+ * - One 64-bit load to retrieve the start IP
+ * - One 64-bit load to retrieve the offset for calculating the end
+ * - One 64-bit load to retrieve the abort IP
+ * - One 64-bit load to retrieve the signature
+ * - One store to clear the critical section address
+ *
+ * The non-debug case implements only the minimal required checking. It
+ * provides protection against a rogue abort IP in kernel space, which
+ * would be exploitable at least on x86, and also against a rogue CS
+ * descriptor by checking the signature at the abort IP. Any fallout from
+ * invalid critical section descriptors is a user space problem. The debug
+ * case provides the full set of checks and terminates the task if a
+ * condition is not met.
+ *
+ * In case of a fault or an invalid value, this sets TIF_NOTIFY_RESUME and
+ * tells the caller to loop back into exit_to_user_mode_loop(). The rseq
+ * slow path there will handle the failure.
+ */
+static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct task_struct *t)
+{
+ /*
+ * Page faults need to be disabled as this is called with
+ * interrupts disabled
+ */
+ guard(pagefault)();
+ if (likely(!t->rseq.event.ids_changed)) {
+ struct rseq __user *rseq = t->rseq.usrptr;
+ /*
+ * If IDs have not changed rseq_event::user_irq must be true
+ * See rseq_sched_switch_event().
+ */
+ u64 csaddr;
+
+ if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
+ return false;
+
+ if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
+ if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
+ return false;
+ }
+ return true;
+ }
+
+ struct rseq_ids ids = {
+ .cpu_id = task_cpu(t),
+ .mm_cid = task_mm_cid(t),
+ };
+ u32 node_id = cpu_to_node(ids.cpu_id);
+
+ return rseq_update_usr(t, regs, &ids, node_id);
+}
+
+static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
+{
+ struct task_struct *t = current;
+
+ /*
+ * If the task did not go through schedule or got the flag enforced
+ * by the rseq syscall or execve, then nothing to do here.
+ *
+ * CPU ID and MM CID can only change when going through a context
+ * switch.
+ *
+ * rseq_sched_switch_event() sets the rseq_event::sched_switch bit
+ * only when rseq_event::has_rseq is true. That conditional is
+ * required to avoid setting the TIF bit if RSEQ is not registered
+ * for a task. rseq_event::sched_switch is cleared when RSEQ is
+ * unregistered by a task so it's sufficient to check for the
+ * sched_switch bit alone.
+ *
+ * A sane compiler requires three instructions for the nothing to do
+ * case including clearing the events, but your mileage might vary.
+ */
+ if (unlikely((t->rseq.event.sched_switch))) {
+ rseq_stat_inc(rseq_stats.fastpath);
+
+ if (unlikely(!rseq_exit_user_update(regs, t)))
+ return true;
+ }
+ /* Clear state so next entry starts from a clean slate */
+ t->rseq.event.events = 0;
+ return false;
+}
+
+/* Required to allow conversion to GENERIC_ENTRY w/o GENERIC_TIF_BITS */
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+static __always_inline bool test_tif_rseq(unsigned long ti_work)
+{
+ return ti_work & _TIF_RSEQ;
+}
+
+static __always_inline void clear_tif_rseq(void)
+{
+ static_assert(TIF_RSEQ != TIF_NOTIFY_RESUME);
+ clear_thread_flag(TIF_RSEQ);
+}
+#else
+static __always_inline bool test_tif_rseq(unsigned long ti_work) { return true; }
+static __always_inline void clear_tif_rseq(void) { }
+#endif
+
+static __always_inline bool
+rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ if (likely(!test_tif_rseq(ti_work)))
+ return false;
+
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+
+ clear_tif_rseq();
+ return false;
+}
+
+#else /* CONFIG_GENERIC_ENTRY */
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+#endif /* !CONFIG_GENERIC_ENTRY */
+
+static __always_inline void rseq_syscall_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ /* Needed to remove the store for the !lockdep case */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ WARN_ON_ONCE(ev->sched_switch);
+ ev->events = 0;
+ }
+}
+
+static __always_inline void rseq_irqentry_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ lockdep_assert_once(!ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing could not clear it.
+ */
+ ev->events = 0;
+}
+
+/* Required to keep ARM64 working */
+static __always_inline void rseq_exit_to_user_mode_legacy(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ WARN_ON_ONCE(ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing did not clear it.
+ */
+ ev->events = 0;
+}
+
+void __rseq_debug_syscall_return(struct pt_regs *regs);
+
+static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+{
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ __rseq_debug_syscall_return(regs);
+}
+#else /* CONFIG_RSEQ */
+static inline void rseq_note_user_irq_entry(void) { }
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+static inline void rseq_syscall_exit_to_user_mode(void) { }
+static inline void rseq_irqentry_exit_to_user_mode(void) { }
+static inline void rseq_exit_to_user_mode_legacy(void) { }
+static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+#endif /* !CONFIG_RSEQ */
+
+#endif /* _LINUX_RSEQ_ENTRY_H */
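The intended hook-up of rseq_exit_to_user_mode_restart() in the generic exit path is roughly the following (a sketch only; the real exit_to_user_mode_loop() handles many more work bits and the loop structure differs in detail):

	unsigned long ti_work;

	do {
		ti_work = read_thread_flags();
		if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
			ti_work = exit_to_user_mode_loop(regs, ti_work);
		/*
		 * Fast-path rseq fixup with interrupts disabled. On failure
		 * TIF_NOTIFY_RESUME has been raised and the loop runs again
		 * so the slow path can resolve the page fault.
		 */
	} while (rseq_exit_to_user_mode_restart(regs, ti_work));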
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
new file mode 100644
index 000000000000..332dc14b81c9
--- /dev/null
+++ b/include/linux/rseq_types.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_TYPES_H
+#define _LINUX_RSEQ_TYPES_H
+
+#include <linux/irq_work_types.h>
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+#ifdef CONFIG_RSEQ
+struct rseq;
+
+/**
+ * struct rseq_event - Storage for rseq related event management
+ * @all: Compound to initialize and clear the data efficiently
+ * @events: Compound to access events with a single load/store
+ * @sched_switch: True if the task was scheduled and needs update on
+ * exit to user
+ * @ids_changed: Indicator that IDs need to be updated
+ * @user_irq: True on interrupt entry from user mode
+ * @has_rseq: True if the task has a rseq pointer installed
+ * @error: Compound error code for the slow path to analyze
+ * @fatal: User space data corrupted or invalid
+ * @slowpath: Indicator that slow path processing via TIF_NOTIFY_RESUME
+ * is required
+ *
+ * @sched_switch and @ids_changed must be adjacent and the combo must be
+ * 16bit aligned to allow a single store, when both are set at the same
+ * time in the scheduler.
+ */
+struct rseq_event {
+ union {
+ u64 all;
+ struct {
+ union {
+ u32 events;
+ struct {
+ u8 sched_switch;
+ u8 ids_changed;
+ u8 user_irq;
+ };
+ };
+
+ u8 has_rseq;
+ u8 __pad;
+ union {
+ u16 error;
+ struct {
+ u8 fatal;
+ u8 slowpath;
+ };
+ };
+ };
+ };
+};
+
+/**
+ * struct rseq_ids - Cache for ids, which need to be updated
+ * @cpu_cid: Compound of @cpu_id and @mm_cid to make the
+ * compiler emit a single compare on 64-bit
+ * @cpu_id: The CPU ID which was written last to user space
+ * @mm_cid: The MM CID which was written last to user space
+ *
+ * @cpu_id and @mm_cid are updated when the data is written to user space.
+ */
+struct rseq_ids {
+ union {
+ u64 cpu_cid;
+ struct {
+ u32 cpu_id;
+ u32 mm_cid;
+ };
+ };
+};
+
+/**
+ * struct rseq_data - Storage for all rseq related data
+ * @usrptr: Pointer to the registered user space RSEQ memory
+ * @len: Length of the RSEQ region
+ * @sig: Signature of critical section abort IPs
+ * @event: Storage for event management
+ * @ids: Storage for cached CPU ID and MM CID
+ */
+struct rseq_data {
+ struct rseq __user *usrptr;
+ u32 len;
+ u32 sig;
+ struct rseq_event event;
+ struct rseq_ids ids;
+};
+
+#else /* CONFIG_RSEQ */
+struct rseq_data { };
+#endif /* !CONFIG_RSEQ */
+
+#ifdef CONFIG_SCHED_MM_CID
+
+#define MM_CID_UNSET BIT(31)
+#define MM_CID_ONCPU BIT(30)
+#define MM_CID_TRANSIT BIT(29)
+
+/**
+ * struct sched_mm_cid - Storage for per task MM CID data
+ * @active: MM CID is active for the task
+ * @cid: The CID associated to the task either permanently or
+ * borrowed from the CPU
+ */
+struct sched_mm_cid {
+ unsigned int active;
+ unsigned int cid;
+};
+
+/**
+ * struct mm_cid_pcpu - Storage for per CPU MM_CID data
+ * @cid: The CID associated to the CPU either permanently or
+ * while a task with a CID is running
+ */
+struct mm_cid_pcpu {
+ unsigned int cid;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct mm_mm_cid - Storage for per MM CID data
+ * @pcpu: Per CPU storage for CIDs associated to a CPU
+ * @percpu: Set, when CIDs are in per CPU mode
+ * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
+ * @max_cids: The exclusive maximum CID value for allocation and convergence
+ * @irq_work: irq_work to handle the affinity mode change case
+ * @work: Regular work to handle the affinity mode change case
+ * @lock: Spinlock to protect against affinity setting which can't take @mutex
+ * @mutex: Mutex to serialize forks and exits related to this mm
+ * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
+ * only ever grows.
+ * @users: The number of tasks sharing this MM. Separate from mm::mm_users
+ * as that is modified by mmget()/mmput() by other entities which
+ * do not actually share the MM.
+ * @pcpu_thrs: Threshold for switching back from per CPU mode
+ * @update_deferred: A deferred switch back to per task mode is pending.
+ */
+struct mm_mm_cid {
+ /* Hotpath read mostly members */
+ struct mm_cid_pcpu __percpu *pcpu;
+ unsigned int percpu;
+ unsigned int transit;
+ unsigned int max_cids;
+
+ /* Rarely used. Moves @lock and @mutex into the second cacheline */
+ struct irq_work irq_work;
+ struct work_struct work;
+
+ raw_spinlock_t lock;
+ struct mutex mutex;
+
+ /* Low frequency modified */
+ unsigned int nr_cpus_allowed;
+ unsigned int users;
+ unsigned int pcpu_thrs;
+ unsigned int update_deferred;
+} ____cacheline_aligned_in_smp;
+#else /* CONFIG_SCHED_MM_CID */
+struct mm_mm_cid { };
+struct sched_mm_cid { };
+#endif /* !CONFIG_SCHED_MM_CID */
+
+#endif
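The layout contract spelled out in the struct rseq_event kernel-doc (adjacent sched_switch/ids_changed at a 16-bit aligned offset, all events reachable through one 32-bit word) can be pinned down with compile-time checks; a sketch of such asserts (they are not part of the header itself):

	static_assert(offsetof(struct rseq_event, sched_switch) % 2 == 0);
	static_assert(offsetof(struct rseq_event, ids_changed) ==
		      offsetof(struct rseq_event, sched_switch) + 1);
	static_assert(sizeof(struct rseq_event) == sizeof(u64));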
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3f4d315aaec9..95da051fb155 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -170,6 +170,7 @@ struct rtc_device {
/* useful timestamps */
#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
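The new GPS epoch constant reduces GPS-to-UNIX time conversion to one addition; a sketch (the GPS-UTC leap second offset is deliberately ignored here):

	/* Seconds since the GPS epoch (1980-01-06) to UNIX time */
	static inline time64_t gps2unix(time64_t gps_secs)
	{
		return gps_secs + RTC_TIMESTAMP_EPOCH_GPS;
	}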
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 5a41c3bbcbe3..01da4582db6d 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -8,7 +8,7 @@
* include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
* write counter.
*
- * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2011-2014 Joshua Kinard <linux@kumba.dev>.
* Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
*
* References:
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
index 9465d5405fe2..373ba77071c6 100644
--- a/include/linux/rtc/m48t59.h
+++ b/include/linux/rtc/m48t59.h
@@ -56,6 +56,9 @@ struct m48t59_plat_data {
void __iomem *ioaddr;
/* offset to RTC registers, automatically set according to the type */
unsigned int offset;
+
+ /* YY digits (in RTC) are offset, i.e. year is 1900 + yy_offset + YY */
+ int yy_offset;
};
#endif /* _LINUX_RTC_M48T59_H_ */
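With the new yy_offset field, decoding the two-digit year register becomes a one-liner (sketch; assumes the register holds BCD as on M48T59 parts, with bcd2bin() from linux/bcd.h):

	int year = 1900 + pdata->yy_offset + bcd2bin(yy_reg);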
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 7d049883a08a..ede4c6bf6f22 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -18,7 +18,7 @@
#include <linux/rbtree_types.h>
#include <linux/spinlock_types_raw.h>
-extern int max_lock_depth; /* for sysctl */
+extern int max_lock_depth;
struct rt_mutex_base {
raw_spinlock_t wait_lock;
@@ -44,6 +44,16 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
return READ_ONCE(lock->owner) != NULL;
}
+#ifdef CONFIG_RT_MUTEXES
+#define RT_MUTEX_HAS_WAITERS 1UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
+}
+#endif
extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
/**
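rt_mutex_owner() masks off the RT_MUTEX_HAS_WAITERS bit kept in the low bit of the owner word, so callers get a clean task pointer. A sketch of a debug assertion built on it (the wrapper itself is hypothetical):

	static inline void rt_mutex_assert_owner(struct rt_mutex_base *lock)
	{
		WARN_ON_ONCE(rt_mutex_owner(lock) != current);
	}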
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index a7da7dfc06a2..ea39dd23a197 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -7,7 +7,6 @@
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/refcount.h>
-#include <linux/cleanup.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -44,16 +43,19 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+extern int rtnl_lock_interruptible(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
-DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock())
-
extern wait_queue_head_t netdev_unregistering_wq;
extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
+
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
@@ -77,7 +79,7 @@ static inline bool lockdep_rtnl_is_held(void)
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit
+ * Return: the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
@@ -95,6 +97,67 @@ static inline bool lockdep_rtnl_is_held(void)
#define rcu_replace_pointer_rtnl(rp, p) \
rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+void __rtnl_net_lock(struct net *net);
+void __rtnl_net_unlock(struct net *net);
+void rtnl_net_lock(struct net *net);
+void rtnl_net_unlock(struct net *net);
+int rtnl_net_trylock(struct net *net);
+int rtnl_net_lock_killable(struct net *net);
+int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b);
+
+bool rtnl_net_is_locked(struct net *net);
+
+#define ASSERT_RTNL_NET(net) \
+ WARN_ONCE(!rtnl_net_is_locked(net), \
+ "RTNL_NET: assertion failed at %s (%d)\n", \
+ __FILE__, __LINE__)
+
+bool lockdep_rtnl_net_is_held(struct net *net);
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_check(p, lockdep_rtnl_net_is_held(net))
+#define rtnl_net_dereference(net, p) \
+ rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net))
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net))
+#else
+static inline void __rtnl_net_lock(struct net *net) {}
+static inline void __rtnl_net_unlock(struct net *net) {}
+
+static inline void rtnl_net_lock(struct net *net)
+{
+ rtnl_lock();
+}
+
+static inline void rtnl_net_unlock(struct net *net)
+{
+ rtnl_unlock();
+}
+
+static inline int rtnl_net_trylock(struct net *net)
+{
+ return rtnl_trylock();
+}
+
+static inline int rtnl_net_lock_killable(struct net *net)
+{
+ return rtnl_lock_killable();
+}
+
+static inline void ASSERT_RTNL_NET(struct net *net)
+{
+ ASSERT_RTNL();
+}
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_rtnl(p)
+#define rtnl_net_dereference(net, p) \
+ rtnl_dereference(p)
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer_rtnl(rp, p)
+#endif
+
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -122,9 +185,11 @@ void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() \
- WARN_ONCE(!rtnl_is_locked(), \
- "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
+/* Shared by rtnl_fdb_dump() and various ndo_fdb_dump() helpers. */
+struct ndo_fdb_dump_context {
+ unsigned long ifindex;
+ unsigned long fdb_idx;
+};
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -175,6 +240,6 @@ rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
}
-void netdev_set_operstate(struct net_device *dev, int newstate);
+void netif_set_operstate(struct net_device *dev, int newstate);
#endif /* __LINUX_RTNETLINK_H */
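Converting a caller from plain rtnl_lock() to the per-netns variant added above is mechanical (sketch; dev is illustrative). Without CONFIG_DEBUG_NET_SMALL_RTNL the helpers collapse back to the global RTNL, so conversions can land incrementally:

	struct net *net = dev_net(dev);

	rtnl_net_lock(net);
	/* net-scoped state may be modified; ASSERT_RTNL_NET(net) holds */
	netif_set_operstate(dev, IF_OPER_UP);
	rtnl_net_unlock(net);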
diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h
index bf290ad14c57..da9c8c6b5d50 100644
--- a/include/linux/rtsx_common.h
+++ b/include/linux/rtsx_common.h
@@ -12,7 +12,6 @@
#define DRV_NAME_RTSX_PCI "rtsx_pci"
#define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc"
-#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms"
#define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val))
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 4612ef09a0c7..3c5689356004 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1160,6 +1160,8 @@ struct rtsx_cr_option {
bool ocp_en;
u8 sd_400mA_ocp_thd;
u8 sd_800mA_ocp_thd;
+ u8 sd_cd_reverse_en;
+ u8 sd_wp_reverse_en;
};
/*
@@ -1312,8 +1314,6 @@ void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
-int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read, int timeout);
int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read);
void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h
index 3247ed8e9ff0..276b509c03e3 100644
--- a/include/linux/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
@@ -12,6 +12,10 @@
#include <linux/usb.h>
+#define DRV_NAME_RTSX_USB "rtsx_usb"
+#define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc"
+#define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms"
+
/* related module names */
#define RTSX_USB_SD_CARD 0
#define RTSX_USB_MS_CARD 1
@@ -95,6 +99,17 @@ extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card);
#define CD_MASK (SD_CD | MS_CD | XD_CD)
#define SD_WP 0x08
+/* OCPCTL */
+#define MS_OCP_DETECT_EN 0x08
+#define MS_OCP_INT_EN 0x04
+#define MS_OCP_INT_CLR 0x02
+#define MS_OCP_CLEAR 0x01
+
+/* OCPSTAT */
+#define MS_OCP_DETECT 0x80
+#define MS_OCP_NOW 0x02
+#define MS_OCP_EVER 0x01
+
/* reader command field offset & parameters */
#define READ_REG_CMD 0
#define WRITE_REG_CMD 1
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 8883b41d88ec..92fd467547e7 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -7,9 +7,15 @@
#ifndef _LINUX_RV_H
#define _LINUX_RV_H
-#define MAX_DA_NAME_LEN 24
+#define MAX_DA_NAME_LEN 32
+#define MAX_DA_RETRY_RACING_EVENTS 3
#ifdef CONFIG_RV
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
/*
* Deterministic automaton per-object variables.
*/
@@ -18,27 +24,72 @@ struct da_monitor {
unsigned int curr_state;
};
+#ifdef CONFIG_RV_LTL_MONITOR
+
/*
- * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think about
- * adding more or developing a dynamic method. So far, none of
- * these are justified.
+ * In the future, if the number of atomic propositions or the size of the
+ * Buchi automaton grows larger, we can switch to dynamic allocation. For now,
+ * the code is simpler this way.
*/
-#define RV_PER_TASK_MONITORS 1
-#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+#define RV_MAX_LTL_ATOM 32
+#define RV_MAX_BA_STATES 32
-/*
- * Futher monitor types are expected, so make this a union.
+/**
+ * struct ltl_monitor - A linear temporal logic runtime verification monitor
+ * @states: States in the Buchi automaton. As Buchi automaton is a
+ * non-deterministic state machine, the monitor can be in multiple
+ * states simultaneously. This is a bitmask of all possible states.
+ * If this is zero, that means either:
+ * - The monitor has not started yet (e.g. because not all
+ * atomic propositions are known).
+ * - There is no possible state to be in. In other words, a
+ * violation of the LTL property is detected.
+ * @atoms: The values of atomic propositions.
+ * @unknown_atoms: Atomic propositions which are still unknown.
*/
+struct ltl_monitor {
+ DECLARE_BITMAP(states, RV_MAX_BA_STATES);
+ DECLARE_BITMAP(atoms, RV_MAX_LTL_ATOM);
+ DECLARE_BITMAP(unknown_atoms, RV_MAX_LTL_ATOM);
+};
+
+static inline bool rv_ltl_valid_state(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->states); ++i) {
+ if (mon->states[i])
+ return true;
+ }
+ return false;
+}
+
+static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->unknown_atoms); ++i) {
+ if (mon->unknown_atoms[i])
+ return false;
+ }
+ return true;
+}
+
+#else
+
+struct ltl_monitor {};
+
+#endif /* CONFIG_RV_LTL_MONITOR */
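To make the two helpers above concrete, here is a minimal sketch of how a monitor step could combine them; the state-advance step is elided because it is generated per monitor, and the pr_warn() reaction is only illustrative:

static void example_ltl_step(struct ltl_monitor *mon)
{
	/* monitoring cannot start while any atomic proposition is unknown */
	if (!rv_ltl_all_atoms_known(mon))
		return;

	/* ... advance mon->states from the current values in mon->atoms ... */

	/* an empty state set means no run of the Buchi automaton survives */
	if (!rv_ltl_valid_state(mon))
		pr_warn("rv: LTL property violation detected\n");
}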
+
+#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
+
union rv_task_monitor {
- struct da_monitor da_mon;
+ struct da_monitor da_mon;
+ struct ltl_monitor ltl_mon;
};
#ifdef CONFIG_RV_REACTORS
struct rv_reactor {
const char *name;
const char *description;
- void (*react)(char *msg);
+ __printf(1, 0) void (*react)(const char *msg, va_list args);
+ struct list_head list;
};
#endif
@@ -50,20 +101,30 @@ struct rv_monitor {
void (*disable)(void);
void (*reset)(void);
#ifdef CONFIG_RV_REACTORS
- void (*react)(char *msg);
+ struct rv_reactor *reactor;
+ __printf(1, 0) void (*react)(const char *msg, va_list args);
#endif
+ struct list_head list;
+ struct rv_monitor *parent;
+ struct dentry *root_d;
};
bool rv_monitoring_on(void);
int rv_unregister_monitor(struct rv_monitor *monitor);
-int rv_register_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent);
int rv_get_task_monitor_slot(void);
void rv_put_task_monitor_slot(int slot);
#ifdef CONFIG_RV_REACTORS
-bool rv_reacting_on(void);
int rv_unregister_reactor(struct rv_reactor *reactor);
int rv_register_reactor(struct rv_reactor *reactor);
+__printf(2, 3)
+void rv_react(struct rv_monitor *monitor, const char *msg, ...);
+#else
+__printf(2, 3)
+static inline void rv_react(struct rv_monitor *monitor, const char *msg, ...)
+{
+}
#endif /* CONFIG_RV_REACTORS */
#endif /* CONFIG_RV */
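The va_list-based signature lets a reactor pass the pre-formatted message straight to the printk family. A hypothetical reactor wired to the new interface might look as follows; this is a sketch, not a reactor shipped in-tree:

static __printf(1, 0) void example_react(const char *msg, va_list args)
{
	vprintk(msg, args);
}

static struct rv_reactor example_reactor = {
	.name		= "example",
	.description	= "print the violation message (illustrative only)",
	.react		= example_react,
};

/*
 * After rv_register_reactor(&example_reactor), a monitor triggers it via:
 *	rv_react(monitor, "rv: monitor detected a violation\n");
 */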
diff --git a/include/linux/rw_hint.h b/include/linux/rw_hint.h
index 309ca72f2dfb..adcc43042c90 100644
--- a/include/linux/rw_hint.h
+++ b/include/linux/rw_hint.h
@@ -14,6 +14,7 @@ enum rw_hint {
WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
+ WRITE_LIFE_HINT_NR,
} __packed;
/* Sparse ignores __packed annotations on enums, hence the #ifndef below. */
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index c0ef596f340b..5b87c6f4a243 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -2,7 +2,7 @@
#define __LINUX_RWLOCK_H
#ifndef __LINUX_INSIDE_SPINLOCK_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
/*
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index dceb0a59b692..31d3d1116323 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -2,7 +2,7 @@
#define __LINUX_RWLOCK_API_SMP_H
#ifndef __LINUX_SPINLOCK_API_SMP_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
/*
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 8544ff05e594..7d81fc6918ee 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -24,13 +24,13 @@ do { \
__rt_rwlock_init(rwl, #rwl, &__key); \
} while (0)
-extern void rt_read_lock(rwlock_t *rwlock);
+extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
-extern void rt_read_unlock(rwlock_t *rwlock);
-extern void rt_write_lock(rwlock_t *rwlock);
-extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
extern int rt_write_trylock(rwlock_t *rwlock);
-extern void rt_write_unlock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
static __always_inline void read_lock(rwlock_t *rwlock)
{
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index c8b543d428b0..f1aaf676a874 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -132,6 +132,18 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
+#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
+/*
+ * Return just the real task structure pointer of the owner
+ */
+extern struct task_struct *rwsem_owner(struct rw_semaphore *sem);
+
+/*
+ * Return true if the rwsem is owned by a reader.
+ */
+extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
+#endif
+
#else /* !CONFIG_PREEMPT_RT */
#include <linux/rwbase_rt.h>
@@ -240,10 +252,11 @@ extern void up_write(struct rw_semaphore *sem);
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
-DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
+DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
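These conditional guard classes pair with scoped_cond_guard() from <linux/cleanup.h>. A minimal sketch of the new killable write-lock form, assuming an ordinary rwsem-protected update:

static int example_update(struct rw_semaphore *sem)
{
	/* down_write_killable(); up_write() runs automatically at scope exit */
	scoped_cond_guard(rwsem_write_kill, return -EINTR, sem) {
		/* ... modify the data protected by @sem ... */
	}
	return 0;
}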
/*
* downgrade write lock to read lock
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d662cf136021..cc7ad189caa5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -36,6 +36,11 @@ struct sbitmap_word {
* @cleared: word holding cleared bits
*/
unsigned long cleared ____cacheline_aligned_in_smp;
+
+ /**
+ * @swap_lock: serializes simultaneous updates of ->word and ->cleared
+ */
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -70,7 +75,7 @@ struct sbitmap {
*/
struct sbitmap_word *map;
- /*
+ /**
* @alloc_hint: Cache of last successfully allocated or freed bit.
*
* This is per-cpu, which allows multiple users to stick to different
@@ -123,7 +128,7 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
- /*
+ /**
* @ws_active: count of currently active ws waitqueues
*/
atomic_t ws_active;
@@ -205,23 +210,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
int sbitmap_get(struct sbitmap *sb);
/**
- * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
- * limiting the depth used from each word.
- * @sb: Bitmap to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- *
- * This rather specific operation allows for having multiple users with
- * different allocation limits. E.g., there can be a high-priority class that
- * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
- * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
- * class can only allocate half of the total bits in the bitmap, preventing it
- * from starving out the high-priority class.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
-
-/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
* @sb: Bitmap to check.
*
@@ -473,7 +461,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
* @sbq: Bitmap queue to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * @shallow_depth: The maximum number of bits to allocate from the queue.
* See sbitmap_get_shallow().
*
* If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
@@ -559,6 +547,8 @@ static inline void sbq_index_atomic_inc(atomic_t *index)
* sbitmap_queue.
* @sbq: Bitmap queue to wait on.
* @wait_index: A counter per "user" of @sbq.
+ *
+ * Return: Next wait queue to be used
*/
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
atomic_t *wait_index)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 77df3d7b18a6..29f6ceb98d74 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -95,6 +95,28 @@ static inline bool sg_is_last(struct scatterlist *sg)
}
/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * Usually the next entry will be @sg + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ **/
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ if (sg_is_last(sg))
+ return NULL;
+
+ sg++;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
+
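With sg_next() now inline, a caller-side walk over a possibly chained list looks like the sketch below; for_each_sg() remains the usual idiom when the entry count is known:

static unsigned int example_sg_total(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int total = 0;

	/* sg_next() returns NULL after the entry marked via sg_mark_end() */
	for (sg = sgl; sg; sg = sg_next(sg))
		total += sg->length;
	return total;
}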
+/**
* sg_assign_page - Assign a given page to an SG entry
* @sg: SG entry
* @page: The page
@@ -136,6 +158,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
+ VM_WARN_ON_ONCE(!page_range_contiguous(page, ALIGN(len + offset, PAGE_SIZE) / PAGE_SIZE));
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
@@ -232,7 +255,7 @@ static inline void __sg_chain(struct scatterlist *chain_sg,
* @sgl: Second scatterlist
*
* Description:
- * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ * Links @prv and @sgl together, to form a longer scatterlist.
*
**/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
@@ -273,7 +296,7 @@ static inline void sg_unmark_end(struct scatterlist *sg)
}
/*
- * One 64-bit architectures there is a 4-byte padding in struct scatterlist
+ * On 64-bit architectures there is a 4-byte padding in struct scatterlist
* (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA
* flags bits to indicate when a specific dma address is a bus address or the
* buffer may have been bounced via SWIOTLB.
@@ -313,7 +336,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
}
/**
- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
* @sg: SG entry
*
* Description:
@@ -332,7 +355,7 @@ static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
* Description:
* Returns true if the scatterlist was marked for SWIOTLB bouncing. Not all
* elements may have been bounced, so the caller would have to check
- * individual SG entries with is_swiotlb_buffer().
+ * individual SG entries with swiotlb_find_pool().
*/
static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
{
@@ -418,7 +441,6 @@ static inline void sg_init_marker(struct scatterlist *sgl,
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
-struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
@@ -579,7 +601,7 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
*/
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
- return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+ return sg_page(piter->sg) + piter->sg_pgoffset;
}
/**
@@ -671,6 +693,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
+#define SG_MITER_LOCAL (1 << 3) /* use kmap_local */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61591ac6eab6..d395f2810fac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <linux/thread_info.h>
#include <linux/preempt.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/cache.h>
#include <linux/irqflags_types.h>
@@ -34,18 +34,24 @@
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
+#include <linux/spinlock.h>
#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
+#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers_types.h>
#include <linux/restart_block.h>
-#include <uapi/linux/rseq.h>
+#include <linux/rseq_types.h>
#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
-#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#ifndef COMPILE_OFFSETS
+#include <generated/rq-offsets.h>
+#endif
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -53,6 +59,7 @@ struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct bpf_run_ctx;
+struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
@@ -63,6 +70,7 @@ struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
+struct perf_ctx_data;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
@@ -80,6 +88,8 @@ struct task_group;
struct task_struct;
struct user_event_mm;
+#include <linux/sched/ext.h>
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
@@ -147,8 +157,9 @@ struct user_event_mm;
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
-#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
+ TASK_DEAD | TASK_FROZEN))
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define debug_normal_state_change(state_value) \
@@ -181,6 +192,12 @@ struct user_event_mm;
# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
+#define trace_set_current_state(state_value) \
+ do { \
+ if (tracepoint_enabled(sched_set_state_tp)) \
+ __trace_set_current_state(state_value); \
+ } while (0)
+
/*
* set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
@@ -221,12 +238,14 @@ struct user_event_mm;
#define __set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
} while (0)
#define set_current_state(state_value) \
do { \
debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
smp_store_mb(current->__state, (state_value)); \
} while (0)
@@ -242,6 +261,7 @@ struct user_event_mm;
\
raw_spin_lock_irqsave(&current->pi_lock, flags); \
debug_special_state_change((state_value)); \
+ trace_set_current_state(state_value); \
WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
@@ -277,6 +297,7 @@ struct user_event_mm;
raw_spin_lock(&current->pi_lock); \
current->saved_state = current->__state; \
debug_rtlock_wait_set_state(); \
+ trace_set_current_state(TASK_RTLOCK_WAIT); \
WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
raw_spin_unlock(&current->pi_lock); \
} while (0);
@@ -286,6 +307,7 @@ struct user_event_mm;
lockdep_assert_irqs_disabled(); \
raw_spin_lock(&current->pi_lock); \
debug_rtlock_wait_restore_state(); \
+ trace_set_current_state(current->saved_state); \
WRITE_ONCE(current->__state, current->saved_state); \
current->saved_state = TASK_RUNNING; \
raw_spin_unlock(&current->pi_lock); \
@@ -322,6 +344,12 @@ extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+/* wrapper functions to trace from this header file */
+DECLARE_TRACEPOINT(sched_set_state_tp);
+extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
+
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
@@ -374,10 +402,10 @@ enum uclamp_id {
UCLAMP_CNT
};
-#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
-#endif
+extern void sched_domains_mutex_lock(void);
+extern void sched_domains_mutex_unlock(void);
struct sched_param {
int sched_priority;
@@ -393,6 +421,12 @@ struct sched_info {
/* Time spent waiting on a runqueue: */
unsigned long long run_delay;
+ /* Max time spent waiting on a runqueue: */
+ unsigned long long max_run_delay;
+
+ /* Min time spent waiting on a runqueue: */
+ unsigned long long min_run_delay;
+
/* Timestamps: */
/* When did we last run on a CPU? */
@@ -539,15 +573,28 @@ struct sched_entity {
struct rb_node run_node;
u64 deadline;
u64 min_vruntime;
+ u64 min_slice;
struct list_head group_node;
- unsigned int on_rq;
+ unsigned char on_rq;
+ unsigned char sched_delayed;
+ unsigned char rel_deadline;
+ unsigned char custom_slice;
+ /* hole */
u64 exec_start;
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
- s64 vlag;
+ union {
+ /*
+ * When !@on_rq this field is vlag.
+ * When cfs_rq->curr == se (which implies @on_rq)
+ * this field is vprot. See protect_slice().
+ */
+ s64 vlag;
+ u64 vprot;
+ };
u64 slice;
u64 nr_migrations;
@@ -563,7 +610,6 @@ struct sched_entity {
unsigned long runnable_weight;
#endif
-#ifdef CONFIG_SMP
/*
* Per entity load average tracking.
*
@@ -571,7 +617,6 @@ struct sched_entity {
* collide with read-mostly values above.
*/
struct sched_avg avg;
-#endif
};
struct sched_rt_entity {
@@ -592,8 +637,8 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
-typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
-typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+struct rq_flags;
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
struct sched_dl_entity {
struct rb_node rb_node;
@@ -637,12 +682,36 @@ struct sched_dl_entity {
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
+ *
+ * @dl_server tells if this is a server entity.
+ *
+	 * @dl_server_active tells if the dlserver is active (started).
+	 * dlserver is started on the first cfs enqueue on an idle runqueue
+	 * and is stopped when a dequeue results in 0 cfs tasks on the
+	 * runqueue. In other words, dlserver is active only when the CPU's
+	 * runqueue has at least one cfs task.
+ *
+ * @dl_defer tells if this is a deferred or regular server. For
+	 * now only the deferred server exists.
+ *
+ * @dl_defer_armed tells if the deferrable server is waiting
+ * for the replenishment timer to activate it.
+ *
+ * @dl_defer_running tells if the deferrable server is actually
+ * running, skipping the defer phase.
+ *
+	 * @dl_defer_idle tracks the idle state of the deferrable server.
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
unsigned int dl_server : 1;
+ unsigned int dl_server_active : 1;
+ unsigned int dl_defer : 1;
+ unsigned int dl_defer_armed : 1;
+ unsigned int dl_defer_running : 1;
+ unsigned int dl_defer_idle : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -664,13 +733,9 @@ struct sched_dl_entity {
* dl_server_update().
*
* @rq the runqueue this server is for
- *
- * @server_has_tasks() returns true if @server_pick return a
- * runnable task.
*/
struct rq *rq;
- dl_server_has_tasks_f server_has_tasks;
- dl_server_pick_f server_pick;
+ dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -734,6 +799,12 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
struct wake_q_node {
struct wake_q_node *next;
};
@@ -774,7 +845,6 @@ struct task_struct {
struct alloc_tag *alloc_tag;
#endif
-#ifdef CONFIG_SMP
int on_cpu;
struct __call_single_node wake_entry;
unsigned int wakee_flips;
@@ -790,7 +860,6 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
-#endif
int on_rq;
int prio;
@@ -802,6 +871,9 @@ struct task_struct {
struct sched_rt_entity rt;
struct sched_dl_entity dl;
struct sched_dl_entity *dl_server;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct sched_ext_entity scx;
+#endif
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -812,6 +884,11 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
+#ifdef CONFIG_CFS_BANDWIDTH
+ struct callback_head sched_throttle_work;
+ struct list_head throttle_node;
+ bool throttled;
+#endif
#endif
@@ -846,9 +923,7 @@ struct task_struct {
cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
-#ifdef CONFIG_SMP
unsigned short migration_disabled;
-#endif
unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
@@ -880,10 +955,8 @@ struct task_struct {
struct sched_info sched_info;
struct list_head tasks;
-#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
-#endif
struct mm_struct *mm;
struct mm_struct *active_mm;
@@ -904,6 +977,7 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
+ unsigned sched_task_hot:1;
/* Force alignment to the next boundary: */
unsigned :0;
@@ -934,7 +1008,7 @@ struct task_struct {
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_MEMCG_V1
unsigned in_user_fault:1;
#endif
#ifdef CONFIG_LRU_GEN
@@ -968,14 +1042,17 @@ struct task_struct {
#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
-#ifdef CONFIG_CPU_SUP_INTEL
+#ifdef CONFIG_X86_BUS_LOCK_DETECT
unsigned reported_split_lock:1;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
/* delay due to memory thrashing */
unsigned in_thrashing:1;
#endif
-
+ unsigned in_nf_duplicate:1;
+#ifdef CONFIG_PREEMPT_RT
+ struct netdev_xmit net_xmit;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -1086,9 +1163,12 @@ struct task_struct {
/*
* executable name, excluding path.
*
- * - normally initialized setup_new_exec()
- * - access it with [gs]et_task_comm()
- * - lock it with task_lock()
+	 * - normally initialized in begin_new_exec()
+ * - set it with set_task_comm()
+ * - strscpy_pad() to ensure it is always NUL-terminated and
+ * zero-padded
+ * - task_lock() to ensure the operation is atomic and the name is
+ * fully updated.
*/
char comm[TASK_COMM_LEN];
@@ -1160,9 +1240,14 @@ struct task_struct {
struct rt_mutex_waiter *pi_blocked_on;
#endif
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Mutex deadlock detection: */
- struct mutex_waiter *blocked_on;
+ struct mutex *blocked_on; /* lock we're blocked on */
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ /*
+	 * Encoded address of the lock the task is blocked on (lower 2 bits
+	 * = type from <linux/hung_task.h>). Accessed via hung_task_*() helpers.
+ */
+ unsigned long blocker;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -1233,14 +1318,16 @@ struct task_struct {
/* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock: */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
-#endif
+#ifdef CONFIG_PREEMPT_RT
+ struct llist_node cg_dead_lnode;
+#endif /* CONFIG_PREEMPT_RT */
+#endif /* CONFIG_CGROUPS */
#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
@@ -1256,9 +1343,11 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
+ u8 perf_recursion[PERF_NR_CONTEXTS];
struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
+ struct perf_ctx_data __rcu *perf_ctx_data;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
@@ -1320,24 +1409,8 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_RSEQ
- struct rseq __user *rseq;
- u32 rseq_len;
- u32 rseq_sig;
- /*
- * RmW on rseq_event_mask must be performed atomically
- * with respect to preemption.
- */
- unsigned long rseq_event_mask;
-#endif
-
-#ifdef CONFIG_SCHED_MM_CID
- int mm_cid; /* Current cid in mm */
- int last_mm_cid; /* Most recent cid in mm */
- int migrate_from_cpu;
- int mm_cid_active; /* Whether cid bitmap is active */
- struct callback_head cid_work;
-#endif
+ struct rseq_data rseq;
+ struct sched_mm_cid mm_cid;
struct tlbflush_unmap_batch tlb_ubc;
@@ -1402,10 +1475,11 @@ struct task_struct {
int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
- struct ftrace_ret_stack *ret_stack;
+ unsigned long *ret_stack;
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
+ unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
@@ -1447,17 +1521,18 @@ struct task_struct {
unsigned int kcov_softirq;
#endif
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_MEMCG_V1
struct mem_cgroup *memcg_in_oom;
+#endif
+#ifdef CONFIG_MEMCG
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
/* Used by memcontrol for targeted memcg charge: */
struct mem_cgroup *active_memcg;
-#endif
-#ifdef CONFIG_MEMCG_KMEM
+ /* Cache for current->cgroups->memcg->objcg lookups: */
struct obj_cgroup *objcg;
#endif
@@ -1506,9 +1581,13 @@ struct task_struct {
/* Used for BPF run context */
struct bpf_run_ctx *bpf_ctx;
#endif
+ /* Used by BPF for per-TASK xdp storage */
+ struct bpf_net_context *bpf_net_context;
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
unsigned long lowest_stack;
+#endif
+#ifdef CONFIG_KSTACK_ERASE_METRICS
unsigned long prev_lowest_stack;
#endif
@@ -1542,34 +1621,42 @@ struct task_struct {
#ifdef CONFIG_RV
/*
- * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think
- * about adding more or developing a dynamic method. So far,
- * none of these are justified.
+ * Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS.
+ * If memory becomes a concern, we can think about a dynamic method.
*/
- union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+ union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
#endif
#ifdef CONFIG_USER_EVENTS
struct user_event_mm *user_event_mm;
#endif
- /*
- * New fields for task_struct should be added above here, so that
- * they are included in the randomized portion of task_struct.
- */
- randomized_struct_fields_end
+#ifdef CONFIG_UNWIND_USER
+ struct unwind_task_info unwind_info;
+#endif
/* CPU-specific state of this task: */
struct thread_struct thread;
/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
*/
-};
+ randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
+
+#ifdef CONFIG_SCHED_PROXY_EXEC
+DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
+static inline bool sched_proxy_exec(void)
+{
+ return static_branch_likely(&__sched_proxy_exec);
+}
+#else
+static inline bool sched_proxy_exec(void)
+{
+ return false;
+}
+#endif
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -1588,8 +1675,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* We're lying here, but rather than expose a completely new task state
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
+ * Report frozen tasks as uninterruptible.
*/
- if (tsk_state & TASK_RTLOCK_WAIT)
+ if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
state = TASK_UNINTERRUPTIBLE;
return fls(state);
@@ -1604,7 +1692,7 @@ static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
- BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
+ BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
return state_char[state];
}
@@ -1635,7 +1723,7 @@ extern struct pid *cad_pid;
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF__HOLE__00010000 0x00010000
+#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
@@ -1643,8 +1731,8 @@ extern struct pid *cad_pid;
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_MEMALLOC_NORECLAIM 0x00800000 /* All allocation requests will clear __GFP_DIRECT_RECLAIM */
-#define PF_MEMALLOC_NOWARN 0x01000000 /* All allocation requests will inherit __GFP_NOWARN */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
@@ -1684,12 +1772,8 @@ extern struct pid *cad_pid;
static __always_inline bool is_percpu_thread(void)
{
-#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
(current->nr_cpus_allowed == 1);
-#else
- return true;
-#endif
}
/* Per-process atomic flags. */
@@ -1754,10 +1838,9 @@ extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpu
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
-#ifdef CONFIG_SMP
-/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
-extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+/* set_cpus_allowed_force() - consider using set_cpus_allowed_ptr() instead */
+extern void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask);
/**
* set_cpus_allowed_ptr - set CPU affinity mask of a task
@@ -1772,32 +1855,6 @@ extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
-#else
-static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-}
-static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-{
- if (!cpumask_test_cpu(0, new_mask))
- return -EINVAL;
- return 0;
-}
-static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
-{
- if (src->user_cpus_ptr)
- return -EINVAL;
- return 0;
-}
-static inline void release_user_cpus_ptr(struct task_struct *p)
-{
- WARN_ON(p->user_cpus_ptr);
-}
-
-static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
-{
- return 0;
-}
-#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
@@ -1822,6 +1879,7 @@ extern int sched_setscheduler(struct task_struct *, int, const struct sched_para
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_fifo_secondary(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
@@ -1859,7 +1917,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task) (&(task)->thread_info)
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -1886,26 +1944,33 @@ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
-#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
-#else
-static inline void kick_process(struct task_struct *tsk) { }
-#endif
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
+#define set_task_comm(tsk, from) ({ \
+ BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \
+ __set_task_comm(tsk, from, false); \
+})
-static inline void set_task_comm(struct task_struct *tsk, const char *from)
-{
- __set_task_comm(tsk, from, false);
-}
-
-extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+/*
+ * - Why not use task_lock()?
+ * User space can randomly change their names anyway, so locking for readers
+ * doesn't make sense. For writers, locking is probably necessary, as a race
+ *   condition could otherwise leave the name permanently interleaved.
+ * The strscpy_pad() in __set_task_comm() can ensure that the task comm is
+ * always NUL-terminated and zero-padded. Therefore the race condition between
+ * reader and writer is not an issue.
+ *
+ * - BUILD_BUG_ON() can help prevent the buf from being truncated.
+ * Since the callers don't perform any return value checks, this safeguard is
+ * necessary.
+ */
#define get_task_comm(buf, tsk) ({ \
- BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
- __get_task_comm(buf, sizeof(buf), tsk); \
+ BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \
+ strscpy_pad(buf, (tsk)->comm); \
+ buf; \
})
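Call sites keep their old shape; the macro now rejects undersized buffers at compile time and copies without taking task_lock(). A typical (illustrative) use:

static void example_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];	/* anything smaller fails BUILD_BUG_ON() */

	pr_info("running: %s\n", get_task_comm(comm, tsk));
}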
-#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
/*
@@ -1915,9 +1980,6 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-#else
-static inline void scheduler_ipi(void) { }
-#endif
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
@@ -1958,12 +2020,16 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
+ if (tracepoint_enabled(sched_set_need_resched_tp) &&
+ !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+ __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
+ (atomic_long_t *)&task_thread_info(tsk)->flags);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
@@ -1971,6 +2037,13 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+static inline void set_need_resched_current(void)
+{
+ lockdep_assert_irqs_disabled();
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -1982,9 +2055,6 @@ extern int __cond_resched(void);
#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-void sched_dynamic_klp_enable(void);
-void sched_dynamic_klp_disable(void);
-
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
@@ -2005,7 +2075,6 @@ static __always_inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return __cond_resched();
}
@@ -2015,7 +2084,6 @@ static inline int _cond_resched(void)
static inline int _cond_resched(void)
{
- klp_sched_try_switch();
return 0;
}
@@ -2064,46 +2132,71 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-extern bool preempt_model_none(void);
-extern bool preempt_model_voluntary(void);
-extern bool preempt_model_full(void);
+#ifndef CONFIG_PREEMPT_RT
+static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
+{
+ struct mutex *m = p->blocked_on;
-#else
+ if (m)
+ lockdep_assert_held_once(&m->wait_lock);
+ return m;
+}
-static inline bool preempt_model_none(void)
+static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- return IS_ENABLED(CONFIG_PREEMPT_NONE);
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+ WARN_ON_ONCE(!m);
+ /* The task should only be setting itself as blocked */
+ WARN_ON_ONCE(p != current);
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+	 * Check to ensure we don't overwrite an existing mutex value
+	 * with a different mutex. Note that setting it to the same
+	 * lock repeatedly is OK.
+ */
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ WRITE_ONCE(p->blocked_on, m);
}
-static inline bool preempt_model_voluntary(void)
+
+static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __set_task_blocked_on(p, m);
}
-static inline bool preempt_model_full(void)
+
+static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
- return IS_ENABLED(CONFIG_PREEMPT);
+ if (m) {
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+ * There may be cases where we re-clear already cleared
+ * blocked_on relationships, but make sure we are not
+ * clearing the relationship with a different lock.
+ */
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ }
+ WRITE_ONCE(p->blocked_on, NULL);
}
-#endif
-
-static inline bool preempt_model_rt(void)
+static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __clear_task_blocked_on(p, m);
+}
+#else
+static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
- return IS_ENABLED(CONFIG_PREEMPT_RT);
}
-/*
- * Does the preemption model allow non-cooperative preemption?
- *
- * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
- * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
- * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
- * PREEMPT_NONE model.
- */
-static inline bool preempt_model_preemptible(void)
+static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
{
- return preempt_model_full() || preempt_model_rt();
}
+#endif /* !CONFIG_PREEMPT_RT */
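Roughly, the intended pairing in a mutex sleep path is the following; this is a simplified sketch, not the actual kernel/locking/mutex.c flow:

static void example_block_on(struct mutex *m)
{
	set_task_blocked_on(current, m);	/* takes m->wait_lock internally */

	/* ... sleep until the mutex is handed over to us ... */

	clear_task_blocked_on(current, m);	/* woken: drop the link */
}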
static __always_inline bool need_resched(void)
{
@@ -2135,12 +2228,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline bool task_is_runnable(struct task_struct *p)
+{
+ return p->on_rq && !p->se.sched_delayed;
+}
+
extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
-#include <linux/spinlock.h>
-
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2163,7 +2259,6 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-#ifdef CONFIG_SMP
static inline bool owner_on_cpu(struct task_struct *owner)
{
/*
@@ -2175,7 +2270,6 @@ static inline bool owner_on_cpu(struct task_struct *owner)
/* Returns effective CPU energy utilization, as seen by the scheduler */
unsigned long sched_cpu_util(int cpu);
-#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
@@ -2192,13 +2286,13 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
#ifdef CONFIG_MEM_ALLOC_PROFILING
-static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
+static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
swap(current->alloc_tag, tag);
return tag;
}
-static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
+static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
@@ -2210,4 +2304,140 @@ static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *ol
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+/* Avoids recursive inclusion hell */
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit(struct task_struct *t);
+static __always_inline int task_mm_cid(struct task_struct *t)
+{
+ return t->mm_cid.cid & ~(MM_CID_ONCPU | MM_CID_TRANSIT);
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit(struct task_struct *t) { }
+static __always_inline int task_mm_cid(struct task_struct *t)
+{
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+	 * in user-space, although it won't provide the memory usage benefits.
+ */
+ return task_cpu(t);
+}
+#endif
+
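The concurrency ID is intended as a compact index into per-mm arrays, whether or not the feature is compiled in. A hypothetical consumer (the counters array and its sizing are assumptions for illustration):

static inline unsigned long *example_cid_slot(struct task_struct *t,
					      unsigned long *counters)
{
	/* counters[] is assumed sized to the mm's maximum concurrency ID */
	return &counters[task_mm_cid(t)];
}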
+#ifndef MODULE
+#ifndef COMPILE_OFFSETS
+
+extern void ___migrate_enable(void);
+
+struct rq;
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+/*
+ * The "struct rq" is not available here, so we can't access the
+ * "runqueues" with this_cpu_ptr(), as the compilation will fail in
+ * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr():
+ * typeof((ptr) + 0)
+ *
+ * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
+ */
+#ifdef CONFIG_SMP
+#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
+#else
+#define this_rq_raw() PERCPU_PTR(&runqueues)
+#endif
+#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
+
+static inline void __migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
+#endif
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
+
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ guard(preempt)();
+ if (unlikely(p->cpus_ptr != &p->cpus_mask))
+ ___migrate_enable();
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+ * regular cpus_mask, otherwise things that race (eg.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq_pinned()--;
+}
+
+static inline void __migrate_disable(void)
+{
+ struct task_struct *p = current;
+
+ if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+	 * Warn about overflow half-way through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
+ p->migration_disabled++;
+ return;
+ }
+
+ guard(preempt)();
+ this_rq_pinned()++;
+ p->migration_disabled = 1;
+}
+#else /* !COMPILE_OFFSETS */
+static inline void __migrate_disable(void) { }
+static inline void __migrate_enable(void) { }
+#endif /* !COMPILE_OFFSETS */
+
+/*
+ * To avoid exporting the runqueues variable, migrate_enable() and
+ * migrate_disable() are also defined and exported in kernel/sched/core.c and
+ * used for the modules. The macro "INSTANTIATE_EXPORTED_MIGRATE_DISABLE" will
+ * be defined in kernel/sched/core.c.
+ */
+#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
+static __always_inline void migrate_disable(void)
+{
+ __migrate_disable();
+}
+
+static __always_inline void migrate_enable(void)
+{
+ __migrate_enable();
+}
+#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+
+#else /* MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* MODULE */
+
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
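The guard gives scope-based pairing of the two calls. A small sketch (relying on debug_smp_processor_id() tolerating disabled migration):

static unsigned int example_stay_put(void)
{
	guard(migrate)();		/* migrate_disable() here ... */

	/* the task cannot change CPUs anywhere within this scope */
	return smp_processor_id();	/* ... migrate_enable() on return */
}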
+
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index e62ff805cfc9..624fda17a785 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -8,11 +8,19 @@
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
-/* mm flags */
+static inline unsigned long __mm_flags_get_dumpable(const struct mm_struct *mm)
+{
+ /*
+ * By convention, dumpable bits are contained in first 32 bits of the
+ * bitmap, so we can simply access this first unsigned long directly.
+ */
+ return __mm_flags_get_word(mm);
+}
-/* for SUID_DUMP_* above */
-#define MMF_DUMPABLE_BITS 2
-#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
+static inline void __mm_flags_set_mask_dumpable(struct mm_struct *mm, int value)
+{
+ __mm_flags_set_mask_bits_word(mm, MMF_DUMPABLE_MASK, value);
+}
extern void set_dumpable(struct mm_struct *mm, int value);
/*
@@ -28,83 +36,9 @@ static inline int __get_dumpable(unsigned long mm_flags)
static inline int get_dumpable(struct mm_struct *mm)
{
- return __get_dumpable(mm->flags);
-}
-
-/* coredump filter bits */
-#define MMF_DUMP_ANON_PRIVATE 2
-#define MMF_DUMP_ANON_SHARED 3
-#define MMF_DUMP_MAPPED_PRIVATE 4
-#define MMF_DUMP_MAPPED_SHARED 5
-#define MMF_DUMP_ELF_HEADERS 6
-#define MMF_DUMP_HUGETLB_PRIVATE 7
-#define MMF_DUMP_HUGETLB_SHARED 8
-#define MMF_DUMP_DAX_PRIVATE 9
-#define MMF_DUMP_DAX_SHARED 10
-
-#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 9
-#define MMF_DUMP_FILTER_MASK \
- (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
-#define MMF_DUMP_FILTER_DEFAULT \
- ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
- (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
-
-#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
-# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
-#else
-# define MMF_DUMP_MASK_DEFAULT_ELF 0
-#endif
- /* leave room for more dump flags */
-#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
-#define MMF_VM_HUGEPAGE 17 /* set when mm is available for
- khugepaged */
-/*
- * This one-shot flag is dropped due to necessity of changing exe once again
- * on NFS restore
- */
-//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+ unsigned long flags = __mm_flags_get_dumpable(mm);
-#define MMF_HAS_UPROBES 19 /* has uprobes */
-#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
-#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
-#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
-#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
-#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
-#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
-#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
-/*
- * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
- * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
- * a counter on its own. We're aggresive on this bit for now: even if the
- * pinned pages were unpinned later on, we'll still keep this bit set for the
- * lifecycle of this mm, just for simplicity.
- */
-#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
-
-#define MMF_HAS_MDWE 28
-#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
-
-
-#define MMF_HAS_MDWE_NO_INHERIT 29
-
-#define MMF_VM_MERGE_ANY 30
-#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
-
-#define MMF_TOPDOWN 31 /* mm searches top down by default */
-#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN)
-
-#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
- MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
-
-static inline unsigned long mmf_init_flags(unsigned long flags)
-{
- if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
- flags &= ~((1UL << MMF_HAS_MDWE) |
- (1UL << MMF_HAS_MDWE_NO_INHERIT));
- return flags & MMF_INIT_MASK;
+ return __get_dumpable(flags);
}
#endif /* _LINUX_SCHED_COREDUMP_H */
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index df3aca89d4f5..c40115d4e34d 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -10,16 +10,16 @@
#include <linux/sched.h>
-#define MAX_DL_PRIO 0
-
-static inline int dl_prio(int prio)
+static inline bool dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_DL_PRIO);
}
-static inline int dl_task(struct task_struct *p)
+/*
+ * Returns true if a task has a priority that belongs to DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
+static inline bool dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
@@ -29,12 +29,12 @@ static inline bool dl_time_before(u64 a, u64 b)
return (s64)(a - b) < 0;
}
-#ifdef CONFIG_SMP
-
struct root_domain;
extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
+extern void dl_clear_root_domain_cpu(int cpu);
-#endif /* CONFIG_SMP */
+extern u64 dl_cookie;
+extern bool dl_bw_visited(int cpu, u64 cookie);
#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index b5035afa2396..35ed4577a6cc 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -35,12 +35,10 @@ extern void show_stack(struct task_struct *task, unsigned long *sp,
extern void sched_show_task(struct task_struct *p);
-#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
extern void proc_sched_show_task(struct task_struct *p,
struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
-#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __section(".sched.text")
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
index 000000000000..bcb962d5ee7d
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+#include <linux/llist.h>
+#include <linux/rhashtable-types.h>
+
+enum scx_public_consts {
+ SCX_OPS_NAME_LEN = 128,
+
+ /*
+ * %SCX_SLICE_DFL is used to refill slices when the BPF scheduler misses
+ * to set the slice for a task that is selected for execution.
+ * %SCX_EV_REFILL_SLICE_DFL counts the number of times the default slice
+ * refill has been triggered.
+ *
+ * %SCX_SLICE_BYPASS is used as the slice for all tasks in the bypass
+ * mode. As making forward progress for all tasks is the main goal of
+ * the bypass mode, a shorter slice is used.
+ */
+ SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_BYPASS = 5 * 1000000, /* 5ms */
+ SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
+};
+
+/*
+ * DSQ (dispatch queue) IDs are 64bit of the format:
+ *
+ * Bits: [63] [62 .. 0]
+ * [ B] [ ID ]
+ *
+ * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs
+ * ID: 63 bit ID
+ *
+ * Built-in IDs:
+ *
+ * Bits: [63] [62] [61..32] [31 .. 0]
+ * [ 1] [ L] [ R ] [ V ]
+ *
+ * 1: 1 for built-in DSQs.
+ * L: 1 for LOCAL_ON DSQ IDs, 0 for others
+ * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value.
+ */
+enum scx_dsq_id_flags {
+ SCX_DSQ_FLAG_BUILTIN = 1LLU << 63,
+ SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62,
+
+ SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
+ SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
+ SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_BYPASS = SCX_DSQ_FLAG_BUILTIN | 3,
+ SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
+ SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
+};
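A worked example of the encoding: targeting CPU 3's local DSQ uses (SCX_DSQ_LOCAL_ON | 3), and the CPU number can be recovered from the V field. The helper below is illustrative, not part of this header:

static inline s32 example_local_on_cpu(u64 dsq_id)
{
	/* only meaningful when both the B and L bits are set */
	return dsq_id & SCX_DSQ_LOCAL_CPU_MASK;
}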
+
+/*
+ * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
+ * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
+ * buffer between the scheduler core and the BPF scheduler. See the
+ * documentation for more details.
+ */
+struct scx_dispatch_q {
+ raw_spinlock_t lock;
+ struct task_struct __rcu *first_task; /* lockless peek at head */
+ struct list_head list; /* tasks in dispatch order */
+ struct rb_root priq; /* used to order by p->scx.dsq_vtime */
+ u32 nr;
+ u32 seq; /* used by BPF iter */
+ u64 id;
+ struct rhash_head hash_node;
+ struct llist_node free_node;
+ struct rcu_head rcu;
+};
+
+/* scx_entity.flags */
+enum scx_ent_flags {
+ SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
+ SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+
+ SCX_TASK_STATE_SHIFT = 8, /* bit 8 and 9 are used to carry scx_task_state */
+ SCX_TASK_STATE_BITS = 2,
+ SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
+
+ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
+};
+
+/* scx_entity.flags & SCX_TASK_STATE_MASK */
+enum scx_task_state {
+ SCX_TASK_NONE, /* ops.init_task() not called yet */
+ SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
+ SCX_TASK_READY, /* fully initialized, but not in sched_ext */
+ SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+
+ SCX_TASK_NR_STATES,
+};
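Bits 8 and 9 of scx_entity.flags carry one of the states above; a sketch of the decode that the flag layout implies:

static inline enum scx_task_state example_scx_state(u32 ent_flags)
{
	return (ent_flags & SCX_TASK_STATE_MASK) >> SCX_TASK_STATE_SHIFT;
}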
+
+/* scx_entity.dsq_flags */
+enum scx_ent_dsq_flags {
+ SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
+};
+
+/*
+ * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
+ * everywhere and the following bits track which kfunc sets are currently
+ * allowed for %current. This simple per-task tracking works because SCX ops
+ * nest in a limited way. BPF will likely implement a way to allow and disallow
+ * kfuncs depending on the calling context, which will replace this manual
+ * mechanism. See scx_kf_allow().
+ */
+enum scx_kf_mask {
+ SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
+ /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
+ SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
+ /*
+ * ops.dispatch() may release rq lock temporarily and thus ENQUEUE and
+ * SELECT_CPU may be nested inside. ops.dequeue (in REST) may also be
+ * nested inside DISPATCH.
+ */
+ SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
+ SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
+ SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
+ SCX_KF_REST = 1 << 4, /* other rq-locked operations */
+
+ __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
+ SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+ __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+};
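
A kfunc restricted to rq-locked contexts can then gate on %current's mask; an
illustrative reduction of the gating (the real check lives behind
scx_kf_allowed() in kernel/sched/ext.c):

	static bool sketch_kf_allowed(u32 required_mask)
	{
		/* e.g. required_mask == __SCX_KF_RQ_LOCKED for rq-locked kfuncs */
		if (unlikely(!(current->scx.kf_mask & required_mask)))
			return false;	/* called from a disallowed op */
		return true;
	}
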
+
+enum scx_dsq_lnode_flags {
+ SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
+
+	/* high 16 bits can be used for iter cursor flags */
+ __SCX_DSQ_LNODE_PRIV_SHIFT = 16,
+};
+
+struct scx_dsq_list_node {
+ struct list_head node;
+ u32 flags;
+ u32 priv; /* can be used by iter cursor */
+};
+
+#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+ (struct scx_dsq_list_node) { \
+ .node = LIST_HEAD_INIT((__node).node), \
+ .flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
+ .priv = (__priv), \
+ }
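
For instance, an iterator embedding a cursor node would (re)initialize it with
the macro; a hypothetical sketch:

	struct dsq_iter_sketch {
		struct scx_dsq_list_node cursor;	/* position in the DSQ list */
	};

	static void dsq_iter_sketch_init(struct dsq_iter_sketch *it)
	{
		/* flags/priv of 0: a plain cursor with no private state */
		it->cursor = INIT_DSQ_LIST_CURSOR(it->cursor, 0, 0);
	}
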
+
+/*
+ * The following is embedded in task_struct and contains all fields necessary
+ * for a task to be scheduled by SCX.
+ */
+struct sched_ext_entity {
+ struct scx_dispatch_q *dsq;
+ struct scx_dsq_list_node dsq_list; /* dispatch order */
+ struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
+ u32 dsq_seq;
+ u32 dsq_flags; /* protected by DSQ lock */
+ u32 flags; /* protected by rq lock */
+ u32 weight;
+ s32 sticky_cpu;
+ s32 holding_cpu;
+ s32 selected_cpu;
+ u32 kf_mask; /* see scx_kf_mask above */
+ struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
+ atomic_long_t ops_state;
+
+ struct list_head runnable_node; /* rq->scx.runnable_list */
+ unsigned long runnable_at;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_sched_at; /* see scx_prio_less() */
+#endif
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
+
+ /* BPF scheduler modifiable fields */
+
+ /*
+ * Runtime budget in nsecs. This is usually set through
+ * scx_bpf_dsq_insert() but can also be modified directly by the BPF
+ * scheduler. Automatically decreased by SCX as the task executes. On
+ * depletion, a scheduling event is triggered.
+ *
+ * This value is cleared to zero if the task is preempted by
+ * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+ * task ran. Use p->se.sum_exec_runtime instead.
+ */
+ u64 slice;
+
+ /*
+ * Used to order tasks when dispatching to the vtime-ordered priority
+ * queue of a dsq. This is usually set through
+ * scx_bpf_dsq_insert_vtime() but can also be modified directly by the
+ * BPF scheduler. Modifying it while a task is queued on a dsq may
+ * mangle the ordering and is not recommended.
+ */
+ u64 dsq_vtime;
+
+ /*
+ * If set, reject future sched_setscheduler(2) calls updating the policy
+ * to %SCHED_EXT with -%EACCES.
+ *
+ * Can be set from ops.init_task() while the BPF scheduler is being
+ * loaded (!scx_init_task_args->fork). If set and the task's policy is
+ * already %SCHED_EXT, the task's policy is rejected and forcefully
+	 * reverted to %SCHED_NORMAL. The number of such events is reported
+ * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+ * during fork is not allowed.
+ */
+ bool disallow; /* reject switching into SCX */
+
+ /* cold fields */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ struct cgroup *cgrp_moving_from;
+#endif
+ struct list_head tasks_node;
+};
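
Tying the modifiable fields together, a weighted-fair BPF scheduler would
typically insert with scx_bpf_dsq_insert_vtime(); a hedged sketch where
SHARED_DSQ is an assumed user-created DSQ and vtime_now a global the scheduler
maintains (wraparound handling elided):

	void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
	{
		u64 vtime = p->scx.dsq_vtime;

		/* cap the credit an idle task can accumulate to one slice */
		if (vtime < vtime_now - SCX_SLICE_DFL)
			vtime = vtime_now - SCX_SLICE_DFL;

		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
					 enq_flags);
	}
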
+
+void sched_ext_dead(struct task_struct *p);
+void print_scx_info(const char *log_lvl, struct task_struct *p);
+void scx_softlockup(u32 dur_s);
+bool scx_hardlockup(int cpu);
+bool scx_rcu_cpu_stall(void);
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_dead(struct task_struct *p) {}
+static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_hardlockup(int cpu) { return false; }
+static inline bool scx_rcu_cpu_stall(void) { return false; }
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
+struct scx_task_group {
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 flags; /* SCX_TG_* */
+ u32 weight;
+ u64 bw_period_us;
+ u64 bw_quota_us;
+ u64 bw_burst_us;
+ bool idle;
+#endif
+};
+
+#endif /* _LINUX_SCHED_EXT_H */
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 412cdaba33eb..17e04859b9a4 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -18,10 +18,6 @@ extern int sched_cpu_dying(unsigned int cpu);
# define sched_cpu_dying NULL
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-extern void idle_task_exit(void);
-#else
static inline void idle_task_exit(void) {}
-#endif
#endif /* _LINUX_SCHED_HOTPLUG_H */
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index e670ac282333..8465ff1f20d1 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -11,11 +11,7 @@ enum cpu_idle_type {
CPU_MAX_IDLE_TYPES
};
-#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
-#else
-static inline void wake_up_if_idle(int cpu) { }
-#endif
/*
* Idle thread specific functions to determine the need_resched
@@ -79,6 +75,21 @@ static __always_inline bool __must_check current_clr_polling_and_test(void)
return unlikely(tif_need_resched());
}
+static __always_inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb__after_atomic(); /* paired with resched_curr() */
+
+ preempt_fold_need_resched();
+}
+
#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }
@@ -91,21 +102,15 @@ static inline bool __must_check current_clr_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
-#endif
static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
- /*
- * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
- * Once the bit is cleared, we'll get IPIs with every new
- * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
- * fold.
- */
smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
+#endif
#endif /* _LINUX_SCHED_IDLE_H */
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 2b461129d1fa..d8501f4709b5 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -7,16 +7,21 @@
#include <linux/tick.h>
enum hk_type {
- HK_TYPE_TIMER,
- HK_TYPE_RCU,
- HK_TYPE_MISC,
- HK_TYPE_SCHED,
- HK_TYPE_TICK,
HK_TYPE_DOMAIN,
- HK_TYPE_WQ,
HK_TYPE_MANAGED_IRQ,
- HK_TYPE_KTHREAD,
- HK_TYPE_MAX
+ HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_MAX,
+
+ /*
+	 * The following housekeeping types are only set by the nohz_full
+	 * boot command line option, so they can share the same value.
+ */
+ HK_TYPE_TICK = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_TIMER = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_RCU = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_MISC = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_WQ = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_KTHREAD = HK_TYPE_KERNEL_NOISE
};
#ifdef CONFIG_CPU_ISOLATION
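
Callers keep their existing type names; on a nohz_full system the aliased
types now resolve to one shared mask (illustrative, using the existing
housekeeping_cpumask() accessor):

	/* Both return the same cpumask after this consolidation. */
	const struct cpumask *timer_cpus = housekeeping_cpumask(HK_TYPE_TIMER);
	const struct cpumask *noise_cpus = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
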
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 91546493c43d..0e1d73955fa5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
@@ -178,30 +178,22 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif
extern void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack);
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-
-unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
+ const struct rlimit *rlim_stack);
unsigned long
-arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t vm_flags);
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
-arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags, vm_flags_t);
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t);
-unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
- struct file *filp,
+unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+unsigned long mm_get_unmapped_area_vmflags(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
@@ -211,14 +203,14 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack) {}
+ const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
@@ -258,25 +250,16 @@ static inline gfp_t current_gfp_context(gfp_t flags)
{
unsigned int pflags = READ_ONCE(current->flags);
- if (unlikely(pflags & (PF_MEMALLOC_NOIO |
- PF_MEMALLOC_NOFS |
- PF_MEMALLOC_NORECLAIM |
- PF_MEMALLOC_NOWARN |
- PF_MEMALLOC_PIN))) {
+ if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
/*
- * Stronger flags before weaker flags:
- * NORECLAIM implies NOIO, which in turn implies NOFS
+		 * NOIO implies both NOIO and NOFS and it is a weaker context,
+		 * so always make sure it takes precedence.
*/
- if (pflags & PF_MEMALLOC_NORECLAIM)
- flags &= ~__GFP_DIRECT_RECLAIM;
- else if (pflags & PF_MEMALLOC_NOIO)
+ if (pflags & PF_MEMALLOC_NOIO)
flags &= ~(__GFP_IO | __GFP_FS);
else if (pflags & PF_MEMALLOC_NOFS)
flags &= ~__GFP_FS;
- if (pflags & PF_MEMALLOC_NOWARN)
- flags |= __GFP_NOWARN;
-
if (pflags & PF_MEMALLOC_PIN)
flags &= ~__GFP_MOVABLE;
}
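
The precedence handled above is what the memalloc scope APIs rely on; e.g. a
caller that must not recurse into I/O brackets its allocations like this
(standard pattern, shown for illustration):

	unsigned int noio_flags;

	noio_flags = memalloc_noio_save();
	/* any allocation here has __GFP_IO and __GFP_FS masked off */
	buf = kmalloc(size, GFP_KERNEL);
	memalloc_noio_restore(noio_flags);
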
@@ -334,6 +317,9 @@ static inline void might_alloc(gfp_t gfp_mask)
fs_reclaim_acquire(gfp_mask);
fs_reclaim_release(gfp_mask);
+ if (current->flags & PF_MEMALLOC)
+ return;
+
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
@@ -547,6 +533,13 @@ enum {
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
+ /*
+	 * The atomic_read() below prevents CSE (common subexpression
+	 * elimination). The following should help the compiler generate
+	 * more efficient code on architectures where
+	 * sync_core_before_usermode() is a no-op.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE))
+ return;
if (current->mm != mm)
return;
if (likely(!(atomic_read(&mm->membarrier_state) &
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 6d67e9a5af6b..0db7f67935fe 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -6,7 +6,7 @@
* This is the interface between the scheduler and nohz/dynticks:
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
extern void nohz_balance_enter_idle(int cpu);
extern int get_nohz_timer_target(void);
#else
@@ -23,7 +23,7 @@ static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_NO_HZ_COMMON
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6ab43b4f72f9 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
*/
#define MAX_RT_PRIO 100
+#define MAX_DL_PRIO 0
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index b2b9e6eb9683..4e3338103654 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -6,19 +6,40 @@
struct task_struct;
-static inline int rt_prio(int prio)
+static inline bool rt_prio(int prio)
{
- if (unlikely(prio < MAX_RT_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
}
-static inline int rt_task(struct task_struct *p)
+static inline bool rt_or_dl_prio(int prio)
+{
+ return unlikely(prio < MAX_RT_PRIO);
+}
+
+/*
+ * Returns true if a task has a priority that belongs to the RT class.
+ * PI-boosted tasks will return true. Use rt_policy() to ignore PI-boosted
+ * tasks.
+ */
+static inline bool rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to the RT or DL
+ * classes.
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline bool rt_or_dl_task(struct task_struct *p)
+{
+ return rt_or_dl_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to the RT or DL
+ * classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
int policy = tsk->policy;
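
With MAX_DL_PRIO == 0 and MAX_RT_PRIO == 100, the helpers divide the priority
range as follows (worked example, not code from the patch):

	rt_prio(-1);		/* false: deadline prio, below MAX_DL_PRIO */
	rt_prio(50);		/* true:  0 <= 50 < 100 */
	rt_or_dl_prio(-1);	/* true:  anything below MAX_RT_PRIO */
	rt_or_dl_prio(120);	/* false: normal (fair) priority range */
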
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index b04a5d04dee9..42839cfa2778 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -154,14 +154,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
/*
- * sched_groups of this level overlap
- *
- * SHARED_PARENT: Set for all NUMA levels above NODE.
- * NEEDS_GROUPS: Overlaps can only exist with more than one group.
- */
-SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
-
-/*
* Cross-node balancing
*
* SHARED_PARENT: Set for all NUMA levels above NODE.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0a0e23c45406..7d6449982822 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -136,8 +136,10 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
- unsigned int next_posix_timer_id;
- struct list_head posix_timers;
+ unsigned int timer_create_restore_ids:1;
+ atomic_t next_posix_timer_id;
+ struct hlist_head posix_timers;
+ struct hlist_head ignored_posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -224,6 +226,10 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
+#ifdef CONFIG_CGROUPS
+ struct rw_semaphore cgroup_threadgroup_rwsem;
+#endif
+
/*
* Thread is the potential origin of an oom condition; kill first on
* oom
@@ -276,8 +282,7 @@ static inline void signal_set_stop_flags(struct signal_struct *sig,
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
- kernel_siginfo_t *info, enum pid_type *type);
+extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
@@ -287,7 +292,7 @@ static inline int kernel_dequeue_signal(void)
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info, &__type);
+ ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
@@ -339,9 +344,6 @@ extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
-extern struct sigqueue *sigqueue_alloc(void);
-extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void clear_notify_signal(void)
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index fb1e295e7e63..166b19af956f 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void)
return static_branch_likely(&sched_smt_present);
}
#else
-static inline bool sched_smt_active(void) { return false; }
+static __always_inline bool sched_smt_active(void) { return false; }
#endif
void arch_smt_update(void);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index d362aacf9f89..525aa2a632b2 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -43,6 +43,7 @@ struct kernel_clone_args {
void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
+ unsigned int kill_seq;
};
/*
@@ -62,8 +63,9 @@ extern int lockdep_tasklist_lock_is_held(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
-extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern int sched_fork(u64 clone_flags, struct task_struct *p);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
@@ -107,11 +109,7 @@ int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
extern void sched_exec(void);
-#else
-#define sched_exec() {}
-#endif
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
@@ -119,6 +117,11 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
return t;
}
+static inline struct task_struct *tryget_task_struct(struct task_struct *t)
+{
+ return refcount_inc_not_zero(&t->usage) ? t : NULL;
+}
+
extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
@@ -128,24 +131,17 @@ static inline void put_task_struct(struct task_struct *t)
return;
/*
- * In !RT, it is always safe to call __put_task_struct().
- * Under RT, we can only call it in preemptible context.
- */
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
- static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
-
- lock_map_acquire_try(&put_task_map);
- __put_task_struct(t);
- lock_map_release(&put_task_map);
- return;
- }
-
- /*
- * under PREEMPT_RT, we can't call put_task_struct
+ * Under PREEMPT_RT, we can't call __put_task_struct
* in atomic context because it will indirectly
- * acquire sleeping locks.
+ * acquire sleeping locks. The same is true if the
+ * current process has a mutex enqueued (blocked on
+ * a PI chain).
+ *
+ * In !RT, it is always safe to call __put_task_struct().
+ * Though, in order to simplify the code, resort to the
+ * deferred call too.
*
- * call_rcu() will schedule delayed_put_task_struct_rcu()
+ * call_rcu() will schedule __put_task_struct_rcu_cb()
* to be called in process context.
*
* __put_task_struct() is called when
@@ -158,7 +154,7 @@ static inline void put_task_struct(struct task_struct *t)
*
* delayed_free_task() also uses ->rcu, but it is only called
* when it fails to fork a process. Therefore, there is no
- * way it can conflict with put_task_struct().
+ * way it can conflict with __put_task_struct().
*/
call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
@@ -214,9 +210,8 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
- * Nests both inside and outside of read_lock(&tasklist_lock).
- * It must not be nested with write_lock_irq(&tasklist_lock),
- * neither inside nor outside.
+ * Nests inside of read_lock(&tasklist_lock). It must not be nested with
+ * write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
{
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index ccd72b978e1f..1fab7e9043a3 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
#include <linux/refcount.h>
+#include <linux/kasan.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
@@ -33,7 +34,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas
#endif
}
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
#define task_stack_page(task) ((void *)(task)->stack)
@@ -52,7 +53,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
* When the stack grows up, this is the highest address.
* Beyond that position, we corrupt data on the next page.
*/
-static inline unsigned long *end_of_stack(struct task_struct *p)
+static inline unsigned long *end_of_stack(const struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
@@ -89,34 +90,22 @@ static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
+ obj = kasan_reset_tag(obj);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
- unsigned long *n = end_of_stack(p);
-
- do { /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
- n--;
-# else
- n++;
-# endif
- } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
- return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
- return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+ return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
-#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
/* Reliable end of stack detection:
@@ -124,6 +113,5 @@ static inline int kstack_end(void *addr)
*/
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
-#endif
#endif /* _LINUX_SCHED_TASK_STACK_H */
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 4237daa5ac7a..45c0022b91ce 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -9,7 +9,6 @@
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
@@ -25,43 +24,30 @@ enum {
};
#undef SD_FLAG
-#ifdef CONFIG_SCHED_DEBUG
-
struct sd_flag_debug {
unsigned int meta_flags;
char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];
-#endif
+struct sched_domain_topology_level;
#ifdef CONFIG_SCHED_SMT
-static inline int cpu_smt_flags(void)
-{
- return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
-}
+extern int cpu_smt_flags(void);
+extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_CLUSTER
-static inline int cpu_cluster_flags(void)
-{
- return SD_CLUSTER | SD_SHARE_LLC;
-}
+extern int cpu_cluster_flags(void);
+extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
#ifdef CONFIG_SCHED_MC
-static inline int cpu_core_flags(void)
-{
- return SD_SHARE_LLC;
-}
+extern int cpu_core_flags(void);
+extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
-#ifdef CONFIG_NUMA
-static inline int cpu_numa_flags(void)
-{
- return SD_NUMA;
-}
-#endif
+extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
extern int arch_asym_cpu_priority(int cpu);
@@ -106,6 +92,9 @@ struct sched_domain {
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
+ unsigned int newidle_call;
+ unsigned int newidle_success;
+ unsigned int newidle_ratio;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
@@ -114,7 +103,10 @@ struct sched_domain {
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
- unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES];
unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
@@ -140,9 +132,7 @@ struct sched_domain {
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
#endif
-#ifdef CONFIG_SCHED_DEBUG
char *name;
-#endif
union {
void *private; /* used during construction */
struct rcu_head rcu; /* used during destruction */
@@ -165,10 +155,6 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
return to_cpumask(sd->span);
}
-extern void partition_sched_domains_locked(int ndoms_new,
- cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new);
-
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);
@@ -180,11 +166,9 @@ bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);
-#define SDTL_OVERLAP 0x01
-
struct sd_data {
struct sched_domain *__percpu *sd;
struct sched_domain_shared *__percpu *sds;
@@ -195,54 +179,16 @@ struct sd_data {
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
- int flags;
int numa_level;
struct sd_data data;
-#ifdef CONFIG_SCHED_DEBUG
char *name;
-#endif
};
extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
+extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
-#ifdef CONFIG_SCHED_DEBUG
-# define SD_INIT_NAME(type) .name = #type
-#else
-# define SD_INIT_NAME(type)
-#endif
-
-#else /* CONFIG_SMP */
-
-struct sched_domain_attr;
-
-static inline void
-partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
-{
-}
-
-static inline void
-partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
-{
-}
-
-static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-static inline bool cpus_share_cache(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-static inline bool cpus_share_resources(int this_cpu, int that_cpu)
-{
- return true;
-}
-
-#endif /* !CONFIG_SMP */
+#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
+ { .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
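
With SDTL_OVERLAP and the flags field gone, architectures express their
topology purely through SDTL_INIT(); a sketch of what a default table built
from the declarations above could look like:

	static struct sched_domain_topology_level sketch_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
	#endif
	#ifdef CONFIG_SCHED_MC
		SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
	#endif
		SDTL_INIT(tl_pkg_mask, NULL, PKG),
		{ NULL, },
	};
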
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 06cd8fb2f409..0f28b4623ad4 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock(lock);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock_irq(lock);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+ struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock_irqrestore(lock, flags);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
#endif /* _LINUX_SCHED_WAKE_Q_H */
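
Typical usage collapses the unlock-then-wake pattern so the wakeups run with
preemption still disabled (illustrative):

	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(&lock, flags);
	/* ... collect tasks to wake while holding the lock ... */
	wake_q_add(&wake_q, task);
	raw_spin_unlock_irqrestore_wake(&lock, flags, &wake_q);
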
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
new file mode 100644
index 000000000000..27bd372cbfb1
--- /dev/null
+++ b/include/linux/scmi_imx_protocol.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SCMI Message Protocol driver NXP extension header
+ *
+ * Copyright 2024 NXP.
+ */
+
+#ifndef _LINUX_SCMI_NXP_PROTOCOL_H
+#define _LINUX_SCMI_NXP_PROTOCOL_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+
+#define SCMI_PROTOCOL_IMX_LMM 0x80
+#define SCMI_PROTOCOL_IMX_BBM 0x81
+#define SCMI_PROTOCOL_IMX_CPU 0x82
+#define SCMI_PROTOCOL_IMX_MISC 0x84
+
+#define SCMI_IMX_VENDOR "NXP"
+#define SCMI_IMX_SUBVENDOR "IMX"
+
+struct scmi_imx_bbm_proto_ops {
+ int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id,
+ uint64_t sec);
+ int (*rtc_time_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u64 *val);
+ int (*rtc_alarm_set)(const struct scmi_protocol_handle *ph, u32 id,
+ bool enable, u64 sec);
+ int (*button_get)(const struct scmi_protocol_handle *ph, u32 *state);
+};
+
+enum scmi_nxp_notification_events {
+ SCMI_EVENT_IMX_BBM_RTC = 0x0,
+ SCMI_EVENT_IMX_BBM_BUTTON = 0x1,
+ SCMI_EVENT_IMX_MISC_CONTROL = 0x0,
+};
+
+struct scmi_imx_bbm_notif_report {
+ bool is_rtc;
+ bool is_button;
+ ktime_t timestamp;
+ unsigned int rtc_id;
+ unsigned int rtc_evt;
+};
+
+struct scmi_imx_misc_ctrl_notify_report {
+ ktime_t timestamp;
+ unsigned int ctrl_id;
+ unsigned int flags;
+};
+
+struct scmi_imx_misc_proto_ops {
+ int (*misc_ctrl_set)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 num, u32 *val);
+ int (*misc_ctrl_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 *num, u32 *val);
+ int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 evt_id, u32 flags);
+};
+
+/* See LMM_ATTRIBUTES in imx95.rst */
+#define LMM_ID_DISCOVER 0xFFFFFFFFU
+#define LMM_MAX_NAME 16
+
+enum scmi_imx_lmm_state {
+ LMM_STATE_LM_OFF,
+ LMM_STATE_LM_ON,
+ LMM_STATE_LM_SUSPEND,
+ LMM_STATE_LM_POWERED,
+};
+
+struct scmi_imx_lmm_info {
+ u32 lmid;
+ enum scmi_imx_lmm_state state;
+ u32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_proto_ops {
+ int (*lmm_power_boot)(const struct scmi_protocol_handle *ph, u32 lmid,
+ bool boot);
+ int (*lmm_info)(const struct scmi_protocol_handle *ph, u32 lmid,
+ struct scmi_imx_lmm_info *info);
+ int (*lmm_reset_vector_set)(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector);
+ int (*lmm_shutdown)(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags);
+};
+
+struct scmi_imx_cpu_proto_ops {
+ int (*cpu_reset_vector_set)(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume);
+ int (*cpu_start)(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool start);
+ int (*cpu_started)(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started);
+};
+#endif
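
A consumer driver binds to one of the protocol IDs above and looks the ops up
through the generic SCMI handle; a hedged sketch (error handling trimmed,
devm_protocol_get() is the standard SCMI accessor):

	static int sketch_probe(struct scmi_device *sdev)
	{
		const struct scmi_imx_bbm_proto_ops *ops;
		struct scmi_protocol_handle *ph;
		u32 state;

		ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_IMX_BBM, &ph);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		return ops->button_get(ph, &state);
	}
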
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 3a9bb5b9a9e8..aafaac1496b0 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -153,7 +153,7 @@ struct scmi_perf_domain_info {
* for a given device
* @fast_switch_rate_limit: gets the minimum time (us) required between
* successive fast_switching requests
- * @power_scale_mw_get: indicates if the power values provided are in milliWatts
+ * @power_scale_get: indicates if the power values provided are in milliWatts
* or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
@@ -945,7 +945,7 @@ struct scmi_device {
struct scmi_handle *handle;
};
-#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
+#define to_scmi_dev(d) container_of_const(d, struct scmi_device, dev)
struct scmi_device_id {
u8 protocol_id;
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 75303c126285..1690706206e8 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -12,6 +12,7 @@
#define SCREEN_INFO_MAX_RESOURCES 3
struct pci_dev;
+struct pixel_format;
struct resource;
static inline bool __screen_info_has_lfb(unsigned int type)
@@ -49,6 +50,16 @@ static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned
return lfb_size;
}
+static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si)
+{
+ /*
+	 * VESA modes typically run on VGA hardware. A set bit 5 signals that
+	 * this is not the case, in which case drivers cannot make use of VGA
+	 * resources. See Sec 4.4 of the VBE 2.0 spec.
+ */
+ return si->vesa_attributes & BIT(5);
+}
+
static inline unsigned int __screen_info_video_type(unsigned int type)
{
switch (type) {
@@ -116,8 +127,18 @@ static inline unsigned int screen_info_video_type(const struct screen_info *si)
return VIDEO_TYPE_CGA;
}
+static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
+{
+ if (si->vesapm_seg < 0xc000)
+ return 0;
+ return (si->vesapm_seg << 4) + si->vesapm_off;
+}
+
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+int screen_info_pixel_format(const struct screen_info *si, struct pixel_format *f);
+
#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 836a7e200f39..6719949135c9 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -222,7 +222,6 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- /* __u8 payload[]; */
};
struct sctp_data_chunk {
@@ -239,7 +238,6 @@ struct sctp_idatahdr {
__u32 ppid;
__be32 fsn;
};
- __u8 payload[0];
};
struct sctp_idata_chunk {
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 709ad84809e1..9b959972bf4a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -22,12 +22,13 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
+extern int __secure_computing(void);
+
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
if (unlikely(test_syscall_work(SECCOMP)))
- return __secure_computing(NULL);
+ return __secure_computing();
return 0;
}
#else
@@ -50,10 +51,10 @@ struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
+static inline int __secure_computing(void) { return 0; }
static inline long prctl_get_seccomp(void)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index 21cf70346b33..83a646d72f6f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,6 +34,10 @@
#include <linux/sockptr.h>
#include <linux/bpf.h>
#include <uapi/linux/lsm.h>
+#include <linux/lsm/selinux.h>
+#include <linux/lsm/smack.h>
+#include <linux/lsm/apparmor.h>
+#include <linux/lsm/bpf.h>
struct linux_binprm;
struct cred;
@@ -81,6 +85,19 @@ struct timezone;
enum lsm_event {
LSM_POLICY_CHANGE,
+ LSM_STARTED_ALL,
+};
+
+struct dm_verity_digest {
+ const char *alg;
+ const u8 *digest;
+ size_t digest_len;
+};
+
+enum lsm_integrity_type {
+ LSM_INT_DMVERITY_SIG_VALID,
+ LSM_INT_DMVERITY_ROOTHASH,
+ LSM_INT_FSVERITY_BUILTINSIG_VALID,
};
/*
@@ -140,9 +157,17 @@ enum lockdown_reason {
LOCKDOWN_CONFIDENTIALITY_MAX,
};
+/*
+ * Data exported by the security modules
+ */
+struct lsm_prop {
+ struct lsm_prop_selinux selinux;
+ struct lsm_prop_smack smack;
+ struct lsm_prop_apparmor apparmor;
+ struct lsm_prop_bpf bpf;
+};
+
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
-extern u32 lsm_active_cnt;
-extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -167,8 +192,6 @@ int cap_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name, void **buffer,
bool alloc);
extern int cap_mmap_addr(unsigned long addr);
-extern int cap_mmap_file(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -200,6 +223,18 @@ extern unsigned long dac_mmap_min_addr;
#endif
/*
+ * A "security context" is the text representation of
+ * the information used by LSMs.
+ * This structure contains the string, its length, and which LSM
+ * it is useful for.
+ */
+struct lsm_context {
+ char *context; /* Provided by the module */
+ u32 len;
+ int id; /* Identifies the module */
+};
+
+/*
* Values used in the task_security_ops calls
*/
/* setuid or setgid, id0 == uid or gid */
@@ -228,7 +263,7 @@ struct request_sock;
#define LSM_UNSAFE_NO_NEW_PRIVS 4
#ifdef CONFIG_MMU
-extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+extern int mmap_min_addr_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
@@ -257,8 +292,32 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
return kernel_load_data_str[id];
}
+/**
+ * lsmprop_init - initialize an lsm_prop structure
+ * @prop: Pointer to the data to initialize
+ *
+ * Set the LSM data for all modules to zero.
+ */
+static inline void lsmprop_init(struct lsm_prop *prop)
+{
+ memset(prop, 0, sizeof(*prop));
+}
+
#ifdef CONFIG_SECURITY
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ const struct lsm_prop empty = {};
+
+ return !!memcmp(prop, &empty, sizeof(*prop));
+}
+
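
The pair replaces the old bare-secid round trips; callers now gather and
render per-LSM data roughly like this (sketch; LSM_ID_UNDEF leaves the module
choice to the LSM infrastructure):

	struct lsm_prop prop;
	struct lsm_context ctx;

	security_current_getlsmprop_subj(&prop);
	if (lsmprop_is_set(&prop) &&
	    security_lsmprop_to_secctx(&prop, &ctx, LSM_ID_UNDEF) >= 0) {
		/* ctx.context / ctx.len hold the rendered label */
		security_release_secctx(&ctx);
	}
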
int call_blocking_lsm_notifier(enum lsm_event event, void *data);
int register_blocking_lsm_notifier(struct notifier_block *nb);
int unregister_blocking_lsm_notifier(struct notifier_block *nb);
@@ -328,15 +387,15 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name,
- const char **xattr_name, void **ctx,
- u32 *ctxlen);
+ const char **xattr_name,
+ struct lsm_context *lsmcxt);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
- struct qstr *name,
+ const struct qstr *name,
const struct cred *old,
struct cred *new);
int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type);
-int security_inode_alloc(struct inode *inode);
+int security_inode_alloc(struct inode *inode, gfp_t gfp);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
@@ -389,6 +448,10 @@ int security_inode_listxattr(struct dentry *dentry);
int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name);
void security_inode_post_removexattr(struct dentry *dentry, const char *name);
+int security_inode_file_setattr(struct dentry *dentry,
+ struct file_kattr *fa);
+int security_inode_file_getattr(struct dentry *dentry,
+ struct file_kattr *fa);
int security_inode_need_killpriv(struct dentry *dentry);
int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
int security_inode_getsecurity(struct mnt_idmap *idmap,
@@ -396,9 +459,12 @@ int security_inode_getsecurity(struct mnt_idmap *idmap,
void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
-void security_inode_getsecid(struct inode *inode, u32 *secid);
+void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(struct dentry *src, const char *name);
+int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
@@ -422,13 +488,14 @@ int security_file_receive(struct file *file);
int security_file_open(struct file *file);
int security_file_post_open(struct file *file, int mask);
int security_file_truncate(struct file *file);
-int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
+int security_task_alloc(struct task_struct *task, u64 clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
void security_cred_getsecid(const struct cred *c, u32 *secid);
+void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
@@ -448,8 +515,8 @@ int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_current_getsecid_subj(u32 *secid);
-void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
+void security_current_getlsmprop_subj(struct lsm_prop *prop);
+void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -467,7 +534,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
-void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
+void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
int security_msg_queue_alloc(struct kern_ipc_perm *msq);
@@ -497,20 +564,37 @@ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
-int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
-int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
+int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+ int lsmid);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
-void security_release_secctx(char *secdata, u32 seclen);
+void security_release_secctx(struct lsm_context *cp);
void security_inode_invalidate_secctx(struct inode *inode);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
-int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
+int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp);
int security_locked_down(enum lockdown_reason what);
int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
void *val, size_t val_len, u64 id, u64 flags);
+int security_bdev_alloc(struct block_device *bdev);
+void security_bdev_free(struct block_device *bdev);
+int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
#else /* CONFIG_SECURITY */
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ return false;
+}
+
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return 0;
@@ -634,7 +718,7 @@ static inline int security_settime64(const struct timespec64 *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
+ return __vm_enough_memory(mm, pages, !cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -769,7 +853,7 @@ static inline int security_path_notify(const struct path *path, u64 mask,
return 0;
}
-static inline int security_inode_alloc(struct inode *inode)
+static inline int security_inode_alloc(struct inode *inode, gfp_t gfp)
{
return 0;
}
@@ -781,14 +865,13 @@ static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
const char **xattr_name,
- void **ctx,
- u32 *ctxlen)
+ struct lsm_context *lsmcxt)
{
return -EOPNOTSUPP;
}
static inline int security_dentry_create_files_as(struct dentry *dentry,
- int mode, struct qstr *name,
+ int mode, const struct qstr *name,
const struct cred *old,
struct cred *new)
{
@@ -971,6 +1054,18 @@ static inline void security_inode_post_removexattr(struct dentry *dentry,
const char *name)
{ }
+static inline int security_inode_file_setattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return 0;
+}
+
+static inline int security_inode_file_getattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return 0;
+}
+
static inline int security_inode_need_killpriv(struct dentry *dentry)
{
return cap_inode_need_killpriv(dentry);
@@ -1000,9 +1095,10 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer,
return 0;
}
-static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
+static inline void security_inode_getlsmprop(struct inode *inode,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
@@ -1010,6 +1106,13 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
@@ -1112,7 +1215,7 @@ static inline int security_file_truncate(struct file *file)
}
static inline int security_task_alloc(struct task_struct *task,
- unsigned long clone_flags)
+ u64 clone_flags)
{
return 0;
}
@@ -1145,6 +1248,10 @@ static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
*secid = 0;
}
+static inline void security_cred_getlsmprop(const struct cred *c,
+ struct lsm_prop *prop)
+{ }
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1222,14 +1329,15 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_current_getsecid_subj(u32 *secid)
+static inline void security_current_getlsmprop_subj(struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
-static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
+static inline void security_task_getlsmprop_obj(struct task_struct *p,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_task_setnice(struct task_struct *p, int nice)
@@ -1305,9 +1413,10 @@ static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
return 0;
}
-static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+static inline void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_msg_msg_alloc(struct msg_msg *msg)
@@ -1431,17 +1540,19 @@ static inline int security_setprocattr(int lsmid, char *name, void *value,
return -EINVAL;
}
-static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+static inline int security_ismaclabel(const char *name)
{
return 0;
}
-static inline int security_ismaclabel(const char *name)
+static inline int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+static inline int security_lsmprop_to_secctx(struct lsm_prop *prop,
+ struct lsm_context *cp,
+ int lsmid)
{
return -EOPNOTSUPP;
}
@@ -1453,7 +1564,7 @@ static inline int security_secctx_to_secid(const char *secdata,
return -EOPNOTSUPP;
}
-static inline void security_release_secctx(char *secdata, u32 seclen)
+static inline void security_release_secctx(struct lsm_context *cp)
{
}
@@ -1469,7 +1580,8 @@ static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32
{
return -EOPNOTSUPP;
}
-static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+static inline int security_inode_getsecctx(struct inode *inode,
+ struct lsm_context *cp)
{
return -EOPNOTSUPP;
}
@@ -1483,6 +1595,23 @@ static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
{
return -EOPNOTSUPP;
}
+
+static inline int security_bdev_alloc(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline void security_bdev_free(struct block_device *bdev)
+{
+}
+
+static inline int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -1509,6 +1638,7 @@ static inline int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
+int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
@@ -1564,6 +1694,11 @@ int security_sctp_assoc_established(struct sctp_association *asoc,
int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
@@ -2048,15 +2183,17 @@ static inline void security_key_post_create_or_update(struct key *keyring,
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule);
void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return 0;
}
@@ -2066,8 +2203,8 @@ static inline int security_audit_rule_known(struct audit_krule *krule)
return 0;
}
-static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule)
+static inline int security_audit_rule_match(struct lsm_prop *prop, u32 field,
+ u32 op, void *lsmrule)
{
return 0;
}
@@ -2126,23 +2263,23 @@ struct bpf_map;
struct bpf_prog;
struct bpf_token;
#ifdef CONFIG_SECURITY
-extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token);
+ struct bpf_token *token, bool kernel);
extern void security_bpf_map_free(struct bpf_map *map);
extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token);
+ struct bpf_token *token, bool kernel);
extern void security_bpf_prog_free(struct bpf_prog *prog);
extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path);
+ const struct path *path);
extern void security_bpf_token_free(struct bpf_token *token);
extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
- unsigned int size)
+ unsigned int size, bool kernel)
{
return 0;
}
@@ -2158,7 +2295,7 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
}
static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
return 0;
}
@@ -2167,7 +2304,7 @@ static inline void security_bpf_map_free(struct bpf_map *map)
{ }
static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
- struct bpf_token *token)
+ struct bpf_token *token, bool kernel)
{
return 0;
}
@@ -2176,7 +2313,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
- struct path *path)
+ const struct path *path)
{
return 0;
}
@@ -2201,14 +2338,13 @@ struct perf_event_attr;
struct perf_event;
#ifdef CONFIG_SECURITY
-extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_open(int type);
extern int security_perf_event_alloc(struct perf_event *event);
extern void security_perf_event_free(struct perf_event *event);
extern int security_perf_event_read(struct perf_event *event);
extern int security_perf_event_write(struct perf_event *event);
#else
-static inline int security_perf_event_open(struct perf_event_attr *attr,
- int type)
+static inline int security_perf_event_open(int type)
{
return 0;
}
@@ -2239,6 +2375,7 @@ static inline int security_perf_event_write(struct perf_event *event)
extern int security_uring_override_creds(const struct cred *new);
extern int security_uring_sqpoll(void);
extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
+extern int security_uring_allowed(void);
#else
static inline int security_uring_override_creds(const struct cred *new)
{
@@ -2252,7 +2389,19 @@ static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
{
return 0;
}
+static inline int security_uring_allowed(void)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
+#ifdef CONFIG_SECURITY
+extern void security_initramfs_populated(void);
+#else
+static inline void security_initramfs_populated(void)
+{
+}
+#endif /* CONFIG_SECURITY */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 2ac50822554e..80f33a93f944 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -52,6 +52,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_GET_GEOMETRY:
case IOC_OPAL_DISCOVERY:
case IOC_OPAL_REVERT_LSP:
+ case IOC_OPAL_SET_SID_PW:
return true;
}
return false;
diff --git a/include/linux/sem.h b/include/linux/sem.h
index c4deefe42aeb..275269ce2ec8 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -9,12 +9,12 @@ struct task_struct;
#ifdef CONFIG_SYSVIPC
-extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
+extern int copy_semundo(u64 clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
-static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+static inline int copy_semundo(u64 clone_flags, struct task_struct *tsk)
{
return 0;
}
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 04655faadc2d..89706157e622 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -16,13 +16,25 @@ struct semaphore {
raw_spinlock_t lock;
unsigned int count;
struct list_head wait_list;
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ unsigned long last_holder;
+#endif
};
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+#define __LAST_HOLDER_SEMAPHORE_INITIALIZER \
+ , .last_holder = 0UL
+#else
+#define __LAST_HOLDER_SEMAPHORE_INITIALIZER
+#endif
+
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .wait_list = LIST_HEAD_INIT((name).wait_list) \
+ __LAST_HOLDER_SEMAPHORE_INITIALIZER \
}
/*
@@ -47,5 +59,6 @@ extern int __must_check down_killable(struct semaphore *sem);
extern int __must_check down_trylock(struct semaphore *sem);
extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
extern void up(struct semaphore *sem);
+extern unsigned long sem_last_holder(struct semaphore *sem);
#endif /* __LINUX_SEMAPHORE_H */
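A minimal usage sketch (not part of the patch), assuming a kernel context; demo_sem and demo_report() are hypothetical names:

#include <linux/semaphore.h>
#include <linux/printk.h>

/*
 * The initializer expands to include .last_holder = 0UL only when
 * CONFIG_DETECT_HUNG_TASK_BLOCKER is set, via
 * __LAST_HOLDER_SEMAPHORE_INITIALIZER.
 */
static struct semaphore demo_sem = __SEMAPHORE_INITIALIZER(demo_sem, 1);

static void demo_report(void)
{
	/* 0 when tracking is disabled or no holder was recorded. */
	pr_info("demo_sem last holder: %lx\n", sem_last_holder(&demo_sem));
}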
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fe41da005970..9f2839e73f8a 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -149,6 +149,23 @@ static inline void seq_buf_commit(struct seq_buf *s, int num)
}
}
+/**
+ * seq_buf_pop - pop off the last written character
+ * @s: the seq_buf handle
+ *
+ * Removes the last character written to the seq_buf @s.
+ *
+ * Returns the last character, or -1 if the buffer is empty.
+ */
+static inline int seq_buf_pop(struct seq_buf *s)
+{
+ if (!s->len)
+ return -1;
+
+ s->len--;
+ return (unsigned char)s->buffer[s->len];
+}
+
extern __printf(2, 3)
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
extern __printf(2, 0)
@@ -167,8 +184,8 @@ extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
-extern int
-seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+__printf(2, 0)
+int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
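A usage sketch for the new seq_buf_pop() helper (not part of the patch); demo_show_bits() is a hypothetical caller:

#include <linux/seq_buf.h>
#include <linux/bitops.h>

static void demo_show_bits(struct seq_buf *sb, unsigned long mask)
{
	int bit;

	for_each_set_bit(bit, &mask, BITS_PER_LONG)
		seq_buf_printf(sb, "%d,", bit);

	/*
	 * Drop the trailing ',' left by the loop; seq_buf_pop() returns
	 * -1 and does nothing when the buffer is empty.
	 */
	seq_buf_pop(sb);
}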
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 8bd4fda6e027..d6ebf0596510 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,7 +7,6 @@
#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
-#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/fs.h>
#include <linux/cred.h>
@@ -182,6 +181,7 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
+__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index d90d8ee29d81..221123660e71 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
@@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
@@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
- return READ_ONCE(s->sequence);
+ return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -263,24 +263,16 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
({ \
unsigned __seq; \
\
- while ((__seq = seqprop_sequence(s)) & 1) \
+ while (unlikely((__seq = seqprop_sequence(s)) & 1)) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
@@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define raw_read_seqcount_begin(s) \
-({ \
- unsigned _seq = __read_seqcount_begin(s); \
- \
- smp_rmb(); \
- _seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -328,12 +314,34 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
({ \
unsigned __seq = seqprop_sequence(s); \
\
- smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
/**
+ * raw_seqcount_try_begin() - begin a seqcount_t read critical section
+ * w/o lockdep and w/o counter stabilization
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count to be passed to read_seqcount_retry()
+ *
+ * Similar to raw_seqcount_begin(), except it enables eliding the critical
+ * section entirely if odd, instead of doing the speculation knowing it will
+ * fail.
+ *
+ * Useful when counter stabilization is more or less equivalent to taking
+ * the lock and there is a slowpath that does that.
+ *
+ * If true, start will be set to the (even) sequence count read.
+ *
+ * Return: true when a read critical section is started.
+ */
+#define raw_seqcount_try_begin(s, start) \
+({ \
+ start = raw_read_seqcount(s); \
+ !(start & 1); \
+})
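A sketch of the intended raw_seqcount_try_begin() pattern (not part of the patch), assuming a hypothetical struct demo_stats whose writer holds a companion spinlock: elide the lockless section when the count is odd and take the slowpath lock instead of spinning for stabilization.

#include <linux/seqlock.h>
#include <linux/spinlock.h>

struct demo_stats {
	seqcount_t	seq;
	spinlock_t	lock;	/* writer-side lock */
	u64		value;
};

static u64 demo_stats_read(struct demo_stats *s)
{
	unsigned int seq;
	u64 val;

	do {
		if (!raw_seqcount_try_begin(&s->seq, seq)) {
			/* Writer active: stabilizing would cost as much
			 * as locking, so take the slowpath directly.
			 */
			spin_lock(&s->lock);
			val = s->value;
			spin_unlock(&s->lock);
			return val;
		}
		val = s->value;
	} while (read_seqcount_retry(&s->seq, seq));

	return val;
}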
+
+/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
@@ -637,6 +645,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *
}
/**
+ * read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
+ *
+ * See write_seqcount_latch() for details and a full reader/writer usage
+ * example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
+{
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
+ return raw_read_seqcount_latch(s);
+}
+
+/**
* raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
@@ -651,8 +676,33 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
}
/**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static __always_inline int
+read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+{
+ kcsan_atomic_next(0);
+ return raw_read_seqcount_latch_retry(s, start);
+}
+
+/**
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
* @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+{
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
+}
+
+/**
+ * write_seqcount_latch_begin() - redirect latch readers to odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -680,17 +730,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
*
* void latch_modify(struct latch_struct *latch, ...)
* {
- * smp_wmb(); // Ensure that the last data[1] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch_begin(&latch->seq);
* modify(latch->data[0], ...);
- *
- * smp_wmb(); // Ensure that the data[0] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch(&latch->seq);
* modify(latch->data[1], ...);
+ * write_seqcount_latch_end(&latch->seq);
* }
*
* The query will have a form like::
@@ -701,13 +745,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* unsigned seq, idx;
*
* do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * seq = read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
- * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
+ * } while (read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -731,11 +775,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->seqcount.sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ kcsan_nestable_atomic_begin();
+ raw_write_seqcount_latch(s);
+}
+
+/**
+ * write_seqcount_latch() - redirect latch readers to even copy
+ * @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
+{
+ raw_write_seqcount_latch(s);
+}
+
+/**
+ * write_seqcount_latch_end() - end a seqcount_latch_t write section
+ * @s: Pointer to seqcount_latch_t
+ *
+ * Marks the end of a seqcount_latch_t writer section, after all copies of the
+ * latch-protected data have been updated.
+ */
+static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
+{
+ kcsan_nestable_atomic_end();
}
#define __SEQLOCK_UNLOCKED(lockname) \
@@ -769,11 +833,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = read_seqcount_begin(&sl->seqcount);
-
- kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
- kcsan_flat_atomic_begin();
- return ret;
+ return read_seqcount_begin(&sl->seqcount);
}
/**
@@ -789,12 +849,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
- /*
- * Assume not nested: read_seqretry() may be called multiple times when
- * completing read critical section.
- */
- kcsan_flat_atomic_end();
-
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -1155,4 +1209,118 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
}
+
+enum ss_state {
+ ss_done = 0,
+ ss_lock,
+ ss_lock_irqsave,
+ ss_lockless,
+};
+
+struct ss_tmp {
+ enum ss_state state;
+ unsigned long data;
+ spinlock_t *lock;
+ spinlock_t *lock_irqsave;
+};
+
+static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+ if (sst->lock)
+ spin_unlock(sst->lock);
+ if (sst->lock_irqsave)
+ spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000) || defined(CONFIG_KASAN)
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ *
+ * Similarly KASAN is known to confuse compilers enough to break this. But we
+ * don't care about code quality for KASAN builds anyway.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static __always_inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+ switch (sst->state) {
+ case ss_done:
+ __scoped_seqlock_bug();
+ return;
+
+ case ss_lock:
+ case ss_lock_irqsave:
+ sst->state = ss_done;
+ return;
+
+ case ss_lockless:
+ if (!read_seqretry(lock, sst->data)) {
+ sst->state = ss_done;
+ return;
+ }
+ break;
+ }
+
+ switch (target) {
+ case ss_done:
+ __scoped_seqlock_invalid_target();
+ return;
+
+ case ss_lock:
+ sst->lock = &lock->lock;
+ spin_lock(sst->lock);
+ sst->state = ss_lock;
+ return;
+
+ case ss_lock_irqsave:
+ sst->lock_irqsave = &lock->lock;
+ spin_lock_irqsave(sst->lock_irqsave, sst->data);
+ sst->state = ss_lock_irqsave;
+ return;
+
+ case ss_lockless:
+ sst->data = read_seqbegin(lock);
+ return;
+ }
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s) \
+ for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ _s.state != ss_done; \
+ __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ * section without manual sequence
+ * counter handling or calls to other
+ * helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ * the type of critical read section
+ *
+ * Example:
+ *
+ * scoped_seqlock_read (&lock, ss_lock) {
+ * // read-side critical section
+ * }
+ *
+ * Starts with a lockless pass. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target) \
+ __scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
#endif /* __LINUX_SEQLOCK_H */
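A usage sketch of the scope-based reader (not part of the patch), assuming a hypothetical struct demo_clock protected by a seqlock_t. The first pass runs lockless; if read_seqretry() detects a concurrent writer, the scope is re-entered with the spinlock held, guaranteeing forward progress:

#include <linux/seqlock.h>

struct demo_clock {
	seqlock_t	lock;
	u64		sec;
	u32		nsec;
};

static void demo_clock_get(struct demo_clock *c, u64 *sec, u32 *nsec)
{
	scoped_seqlock_read (&c->lock, ss_lock) {
		*sec = c->sec;
		*nsec = c->nsec;
	}
}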
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index ff78efc1f60d..34562eb99931 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -84,7 +84,6 @@ enum serdev_parity {
struct serdev_controller_ops {
ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
- int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
@@ -212,7 +211,6 @@ int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
-int serdev_device_write_room(struct serdev_device *);
/*
* serdev device driver functions
@@ -273,10 +271,6 @@ static inline ssize_t serdev_device_write(struct serdev_device *sdev,
return -ENODEV;
}
static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
-static inline int serdev_device_write_room(struct serdev_device *sdev)
-{
- return 0;
-}
#define serdev_device_driver_register(x)
#define serdev_device_driver_unregister(x)
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index fd59ed2cca53..01efdce0fda0 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -46,8 +46,8 @@ struct plat_serial8250_port {
unsigned int type; /* If UPF_FIXED_TYPE */
upf_t flags; /* UPF_* flags */
u16 bugs; /* port bugs */
- unsigned int (*serial_in)(struct uart_port *, int);
- void (*serial_out)(struct uart_port *, int, int);
+ u32 (*serial_in)(struct uart_port *, unsigned int offset);
+ void (*serial_out)(struct uart_port *, unsigned int offset, u32 val);
u32 (*dl_read)(struct uart_8250_port *up);
void (*dl_write)(struct uart_8250_port *up, u32 value);
void (*set_termios)(struct uart_port *,
@@ -161,8 +161,8 @@ struct uart_8250_port {
void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
- void (*rs485_start_tx)(struct uart_8250_port *);
- void (*rs485_stop_tx)(struct uart_8250_port *);
+ void (*rs485_start_tx)(struct uart_8250_port *up, bool toggle_ier);
+ void (*rs485_stop_tx)(struct uart_8250_port *up, bool toggle_ier);
/* Serial port overrun backoff */
struct delayed_work overrun_backoff;
@@ -193,7 +193,7 @@ void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac);
+ unsigned int quot);
int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 8cb65f50e830..666430b47899 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -11,6 +11,8 @@
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -425,12 +427,24 @@ struct uart_icount {
typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
+enum uart_iotype {
+ UPIO_UNKNOWN = -1,
+ UPIO_PORT = SERIAL_IO_PORT, /* 8b I/O port access */
+ UPIO_HUB6 = SERIAL_IO_HUB6, /* Hub6 ISA card */
+ UPIO_MEM = SERIAL_IO_MEM, /* driver-specific */
+ UPIO_MEM32 = SERIAL_IO_MEM32, /* 32b little endian */
+ UPIO_AU = SERIAL_IO_AU, /* Au1x00 and RT288x type IO */
+ UPIO_TSI = SERIAL_IO_TSI, /* Tsi108/109 type IO */
+ UPIO_MEM32BE = SERIAL_IO_MEM32BE, /* 32b big endian */
+ UPIO_MEM16 = SERIAL_IO_MEM16, /* 16b little endian */
+};
+
struct uart_port {
spinlock_t lock; /* port lock */
unsigned long iobase; /* in/out[bwl] */
unsigned char __iomem *membase; /* read/write[bwl] */
- unsigned int (*serial_in)(struct uart_port *, int);
- void (*serial_out)(struct uart_port *, int, int);
+ u32 (*serial_in)(struct uart_port *, unsigned int offset);
+ void (*serial_out)(struct uart_port *, unsigned int offset, u32 val);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
const struct ktermios *old);
@@ -467,23 +481,13 @@ struct uart_port {
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
- unsigned char iotype; /* io access style */
-
-#define UPIO_UNKNOWN ((unsigned char)~0U) /* UCHAR_MAX */
-#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
-#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
-#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */
-#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */
-#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */
-#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */
-#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
-#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
-
unsigned char quirks; /* internal quirks */
/* internal quirks must be updated while holding port mutex */
#define UPQ_NO_TXEN_TEST BIT(0)
+ enum uart_iotype iotype; /* io access style */
+
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
@@ -503,7 +507,11 @@ struct uart_port {
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
+#ifdef CONFIG_HAS_IOPORT
#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#else
+#define UPF_FOURPORT 0
+#endif
#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
@@ -590,6 +598,95 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+/**
+ * uart_port_set_cons - Safely set the @cons field for a uart
+ * @up: The uart port to set
+ * @con: The new console to set to
+ *
+ * This function must be used to set @up->cons. It uses the port lock to
+ * synchronize with the port lock wrappers in order to ensure that the console
+ * cannot change or disappear while another context is holding the port lock.
+ */
+static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
+{
+ unsigned long flags;
+
+ __uart_port_lock_irqsave(up, &flags);
+ up->cons = con;
+ __uart_port_unlock_irqrestore(up, flags);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_using_nbcon(struct uart_port *up)
+{
+ lockdep_assert_held_once(&up->lock);
+
+ if (likely(!uart_console(up)))
+ return false;
+
+ /*
+ * @up->cons is only modified under the port lock. Therefore it is
+ * certain that it cannot disappear here.
+ *
+ * @up->cons->node is added/removed from the console list under the
+ * port lock. Therefore it is certain that the registration status
+ * cannot change here, thus @up->cons->flags can be read directly.
+ */
+ if (hlist_unhashed_lockless(&up->cons->node) ||
+ !(up->cons->flags & CON_NBCON) ||
+ !up->cons->write_atomic) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return true;
+
+ return nbcon_device_try_acquire(up->cons);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ while (!nbcon_device_try_acquire(up->cons))
+ cpu_relax();
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_release(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ nbcon_device_release(up->cons);
+}
+
/**
* uart_port_lock - Lock the UART port
* @up: Pointer to UART port structure
@@ -597,6 +694,7 @@ struct uart_port {
static inline void uart_port_lock(struct uart_port *up)
{
spin_lock(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -606,6 +704,7 @@ static inline void uart_port_lock(struct uart_port *up)
static inline void uart_port_lock_irq(struct uart_port *up)
{
spin_lock_irq(&up->lock);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -616,6 +715,7 @@ static inline void uart_port_lock_irq(struct uart_port *up)
static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
{
spin_lock_irqsave(&up->lock, *flags);
+ __uart_port_nbcon_acquire(up);
}
/**
@@ -626,7 +726,15 @@ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *f
*/
static inline bool uart_port_trylock(struct uart_port *up)
{
- return spin_trylock(&up->lock);
+ if (!spin_trylock(&up->lock))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock(&up->lock);
+ return false;
+ }
+
+ return true;
}
/**
@@ -638,7 +746,15 @@ static inline bool uart_port_trylock(struct uart_port *up)
*/
static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
{
- return spin_trylock_irqsave(&up->lock, *flags);
+ if (!spin_trylock_irqsave(&up->lock, *flags))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock_irqrestore(&up->lock, *flags);
+ return false;
+ }
+
+ return true;
}
/**
@@ -647,6 +763,7 @@ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long
*/
static inline void uart_port_unlock(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock(&up->lock);
}
@@ -656,6 +773,7 @@ static inline void uart_port_unlock(struct uart_port *up)
*/
static inline void uart_port_unlock_irq(struct uart_port *up)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irq(&up->lock);
}
@@ -666,9 +784,23 @@ static inline void uart_port_unlock_irq(struct uart_port *up)
*/
static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
{
+ __uart_port_nbcon_release(up);
spin_unlock_irqrestore(&up->lock, flags);
}
+DEFINE_GUARD(uart_port_lock, struct uart_port *, uart_port_lock(_T), uart_port_unlock(_T));
+DEFINE_GUARD_COND(uart_port_lock, _try, uart_port_trylock(_T));
+
+DEFINE_GUARD(uart_port_lock_irq, struct uart_port *, uart_port_lock_irq(_T),
+ uart_port_unlock_irq(_T));
+
+DEFINE_LOCK_GUARD_1(uart_port_lock_irqsave, struct uart_port,
+ uart_port_lock_irqsave(_T->lock, &_T->flags),
+ uart_port_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags);
+DEFINE_LOCK_GUARD_1_COND(uart_port_lock_irqsave, _try,
+ uart_port_trylock_irqsave(_T->lock, &_T->flags));
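A sketch of driver-side use of the new guards (not part of the patch), assuming a hypothetical demo driver; the port lock, together with any nbcon console ownership taken by the wrapper, is released automatically on scope exit:

#include <linux/serial_core.h>
#include <linux/cleanup.h>

static void demo_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* Interrupt-safe scoped lock via DEFINE_LOCK_GUARD_1 above. */
	guard(uart_port_lock_irqsave)(port);
	port->ops->set_mctrl(port, mctrl);
}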
+
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
@@ -811,8 +943,7 @@ enum UART_TX_FLAGS {
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \
- __port->ops->tx_empty(__port)) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
@@ -852,6 +983,24 @@ enum UART_TX_FLAGS {
})
/**
+ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting and flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
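A caller sketch (not part of the patch), assuming hypothetical demo_tx_ready()/demo_tx_put() helpers; %UART_TX_NOSTOP keeps the helper from invoking ->stop_tx() once the pending characters are drained, and ({ }) passes an empty tx_done expression:

static void demo_transmit(struct uart_8250_port *up)
{
	u8 ch;

	uart_port_tx_limited_flags(&up->port, ch, UART_TX_NOSTOP,
				   up->tx_loadsz,
				   demo_tx_ready(up),
				   demo_tx_put(up, ch),
				   ({ }));
}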
+
+/**
* uart_port_tx -- transmit helper for uart_port
* @port: uart port
* @ch: variable to store a character to be written to the HW
@@ -965,10 +1114,8 @@ static inline bool uart_console_registered(struct uart_port *port)
return uart_console(port) && console_is_registered(port->cons);
}
-struct uart_port *uart_get_console(struct uart_port *ports, int nr,
- struct console *c);
-int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
- char **options);
+int uart_parse_earlycon(char *p, enum uart_iotype *iotype,
+ resource_size_t *addr, char **options);
void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index 1672cf0810ef..102aa33d956c 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -246,24 +246,28 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
-#define APPLE_S5L_UCON_RXTO_ENA 9
-#define APPLE_S5L_UCON_RXTHRESH_ENA 12
-#define APPLE_S5L_UCON_TXTHRESH_ENA 13
-#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
-#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
-#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA 11
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_LEGACY_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_TXTHRESH_ENA)
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK | \
APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
-#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
-#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
-#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
-#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+#define APPLE_S5L_UTRSTAT_RXTO_LEGACY BIT(3)
+#define APPLE_S5L_UTRSTAT_RXTHRESH BIT(4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH BIT(5)
+#define APPLE_S5L_UTRSTAT_RXTO BIT(9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS GENMASK(9, 3)
#ifndef __ASSEMBLY__
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1c89611e0e06..0f2f50b8d28e 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -37,6 +37,7 @@ enum {
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
SCIx_RZ_SCIFA_REGTYPE,
+ SCIx_RZV2H_SCIF_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 7ca41af93b37..69a47674af65 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -6,6 +6,7 @@
#define _SERIO_H
+#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
@@ -80,7 +81,7 @@ struct serio_driver {
struct device_driver driver;
};
-#define to_serio_driver(d) container_of(d, struct serio_driver, driver)
+#define to_serio_driver(d) container_of_const(d, struct serio_driver, driver)
int serio_open(struct serio *serio, struct serio_driver *drv);
void serio_close(struct serio *serio);
@@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
+DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T))
+
#endif
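A sketch of the new scoped guard (not part of the patch); rx delivery is paused for the scope and resumed automatically, serializing against the serio interrupt handler:

#include <linux/serio.h>
#include <linux/cleanup.h>

static void demo_reset_rx_state(struct serio *serio, unsigned int *pkt_len)
{
	guard(serio_pause_rx)(serio);
	*pkt_len = 0;	/* state shared with the rx callback */
}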
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 95ac8398ee72..3030d9245f5a 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifndef set_memory_rox
@@ -34,6 +34,12 @@ static inline int set_direct_map_default_noflush(struct page *page)
return 0;
}
+static inline int set_direct_map_valid_noflush(struct page *page,
+ unsigned nr, bool valid)
+{
+ return 0;
+}
+
static inline bool kernel_page_present(struct page *page)
{
return true;
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index a45da7eef9a2..5c71945a5e4d 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -284,6 +284,12 @@ enum {
SFF8024_ID_QSFP_8438 = 0x0c,
SFF8024_ID_QSFP_8436_8636 = 0x0d,
SFF8024_ID_QSFP28_8636 = 0x11,
+ SFF8024_ID_QSFP_DD = 0x18,
+ SFF8024_ID_OSFP = 0x19,
+ SFF8024_ID_DSFP = 0x1B,
+ SFF8024_ID_QSFP_PLUS_CMIS = 0x1E,
+ SFF8024_ID_SFP_DD_CMIS = 0x1F,
+ SFF8024_ID_SFP_PLUS_CMIS = 0x20,
SFF8024_ENCODING_UNSPEC = 0x00,
SFF8024_ENCODING_8B10B = 0x01,
@@ -516,6 +522,28 @@ struct ethtool_modinfo;
struct sfp_bus;
/**
+ * struct sfp_module_caps - sfp module capabilities
+ * @interfaces: bitmap of interfaces that the module may support
+ * @link_modes: bitmap of ethtool link modes that the module may support
+ */
+struct sfp_module_caps {
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_modes);
+ /**
+ * @may_have_phy: indicate whether the module may have an ethernet PHY.
+ * There is no way to be sure that a module has a PHY as the EEPROM
+ * doesn't contain this information. When set, this does not mean that
+ * the module definitely has a PHY.
+ */
+ bool may_have_phy;
+ /**
+ * @port: one of ethtool %PORT_* definitions, parsed from the module
+ * EEPROM, or %PORT_OTHER if the port type is not known.
+ */
+ u8 port;
+};
+
+/**
* struct sfp_upstream_ops - upstream operations structure
* @attach: called when the sfp socket driver is bound to the upstream
* (mandatory).
@@ -544,15 +572,11 @@ struct sfp_upstream_ops {
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
- void (*disconnect_phy)(void *priv);
+ void (*disconnect_phy)(void *priv, struct phy_device *);
};
#if IS_ENABLED(CONFIG_SFP)
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support, unsigned long *interfaces);
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
const unsigned long *link_modes);
@@ -570,25 +594,12 @@ struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_bus_del_upstream(struct sfp_bus *bus);
+const char *sfp_get_name(struct sfp_bus *bus);
#else
-static inline int sfp_parse_port(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support)
-{
- return PORT_OTHER;
-}
-
-static inline bool sfp_may_have_phy(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- return false;
-}
-
-static inline void sfp_parse_support(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support,
- unsigned long *interfaces)
+static inline const struct sfp_module_caps *
+sfp_get_module_caps(struct sfp_bus *bus)
{
+ return NULL;
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
@@ -648,6 +659,11 @@ static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
+
+static inline const char *sfp_get_name(struct sfp_bus *bus)
+{
+ return NULL;
+}
#endif
#endif
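A sketch of an upstream consumer migrating from the removed sfp_parse_*() helpers to sfp_get_module_caps() (not part of the patch); struct demo_priv and demo_module_insert() are hypothetical:

#include <linux/sfp.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>

struct demo_priv {
	struct sfp_bus *sfp_bus;
	u8 port;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
};

static int demo_module_insert(void *priv_, const struct sfp_eeprom_id *id)
{
	struct demo_priv *priv = priv_;
	const struct sfp_module_caps *caps;

	caps = sfp_get_module_caps(priv->sfp_bus);
	if (!caps)	/* stubbed out when CONFIG_SFP is disabled */
		return -EOPNOTSUPP;

	priv->port = caps->port;
	linkmode_copy(priv->supported, caps->link_modes);
	return 0;
}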
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 6dfd05ef5c2d..03ba4dab2ef7 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@ struct shdma_ops {
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
- void (*setup_xfer)(struct shdma_chan *, int);
+ int (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 3fb18f7eb73e..e2069b3179c4 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -10,6 +10,9 @@
#include <linux/xattr.h>
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
+#include <linux/bits.h>
+
+struct swap_iocb;
/* inode in-kernel data */
@@ -17,6 +20,19 @@
#define SHMEM_MAXQUOTAS 2
#endif
+/* Suppress pre-accounting of the entire object size. */
+#define SHMEM_F_NORESERVE BIT(0)
+/* Disallow swapping. */
+#define SHMEM_F_LOCKED BIT(1)
+/*
+ * Disallow growing, shrinking, or hole punching in the inode. Combined with
+ * folio pinning, makes sure the inode's mapping stays fixed.
+ *
+ * In some ways similar to F_SEAL_GROW | F_SEAL_SHRINK, but can be removed and
+ * isn't directly visible to userspace.
+ */
+#define SHMEM_F_MAPPING_FROZEN BIT(2)
+
struct shmem_inode_info {
spinlock_t lock;
unsigned int seals; /* shmem seals */
@@ -42,10 +58,10 @@ struct shmem_inode_info {
struct inode vfs_inode;
};
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
#define SHMEM_FL_USER_MODIFIABLE \
- (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
-#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
struct shmem_quota_limits {
qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
@@ -92,30 +108,41 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
const char *name, loff_t size, unsigned long flags);
-extern int shmem_zero_setup(struct vm_area_struct *);
+int shmem_zero_setup(struct vm_area_struct *vma);
+int shmem_zero_setup_desc(struct vm_area_desc *desc);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
+bool shmem_mapping(const struct address_space *mapping);
#else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(const struct address_space *mapping)
{
return false;
}
#endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags);
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force);
+bool shmem_hpage_pmd_enabled(void);
#else
-static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
- struct mm_struct *mm, unsigned long vm_flags)
+static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force)
+{
+ return 0;
+}
+
+static inline bool shmem_hpage_pmd_enabled(void)
{
return false;
}
@@ -123,11 +150,16 @@ static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bo
#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+extern void shmem_uncharge(struct inode *inode, long pages);
#else
static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
return 0;
}
+
+static inline void shmem_uncharge(struct inode *inode, long pages)
+{
+}
#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
@@ -141,8 +173,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
- enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp);
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp);
@@ -168,6 +200,15 @@ static inline bool shmem_file(struct file *file)
return shmem_mapping(file->f_mapping);
}
+/* Must be called with inode lock taken exclusive. */
+static inline void shmem_freeze(struct inode *inode, bool freeze)
+{
+ if (freeze)
+ SHMEM_I(inode)->flags |= SHMEM_F_MAPPING_FROZEN;
+ else
+ SHMEM_I(inode)->flags &= ~SHMEM_F_MAPPING_FROZEN;
+}
+
/*
* If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
* beyond i_size's notion of EOF, which fallocate has committed to reserving:
@@ -181,7 +222,6 @@ static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
}
extern bool shmem_charge(struct inode *inode, long pages);
-extern void shmem_uncharge(struct inode *inode, long pages);
#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index c3a00b967d18..f1f1a055b047 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -23,17 +23,25 @@
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
+#define SZ_24K 0x00006000
#define SZ_32K 0x00008000
#define SZ_64K 0x00010000
#define SZ_128K 0x00020000
+#define SZ_192K 0x00030000
#define SZ_256K 0x00040000
+#define SZ_384K 0x00060000
#define SZ_512K 0x00080000
#define SZ_1M 0x00100000
#define SZ_2M 0x00200000
+#define SZ_3M 0x00300000
#define SZ_4M 0x00400000
+#define SZ_6M 0x00600000
#define SZ_8M 0x00800000
+#define SZ_12M 0x00c00000
#define SZ_16M 0x01000000
+#define SZ_18M 0x01200000
+#define SZ_24M 0x01800000
#define SZ_32M 0x02000000
#define SZ_64M 0x04000000
#define SZ_128M 0x08000000
@@ -59,5 +67,6 @@
#define SZ_16T _AC(0x100000000000, ULL)
#define SZ_32T _AC(0x200000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
+#define SZ_128T _AC(0x800000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 926496c9cc9c..bf178238a308 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -199,17 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
-static inline int skb_array_resize_multiple_noprof(struct skb_array **rings,
- int nrings, unsigned int size,
- gfp_t gfp)
+static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
+ int nrings,
+ unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
- return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings,
- nrings, size, gfp,
- __skb_array_destroy_skb);
+ return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
+ nrings, size, gfp,
+ __skb_array_destroy_skb);
}
-#define skb_array_resize_multiple(...) \
- alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__))
+#define skb_array_resize_multiple_bh(...) \
+ alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void skb_array_cleanup(struct skb_array *a)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1c2902eaebd3..86737076101d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -31,6 +31,7 @@
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
+#include <linux/page_frag_cache.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
@@ -273,7 +274,6 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -469,7 +469,7 @@ struct skb_shared_hwtstamps {
/* Definitions for tx_flags in struct skb_shared_info */
enum {
/* generate hardware time stamp */
- SKBTX_HW_TSTAMP = 1 << 0,
+ SKBTX_HW_TSTAMP_NOBPF = 1 << 0,
/* generate software time stamp when queueing packet to NIC */
SKBTX_SW_TSTAMP = 1 << 1,
@@ -477,23 +477,26 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* generate hardware time stamp based on cycles if supported */
- SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
-
- /* generate wifi status information (where possible) */
- SKBTX_WIFI_STATUS = 1 << 4,
+ /* generate software time stamp on packet tx completion */
+ SKBTX_COMPLETION_TSTAMP = 1 << 3,
/* determine hardware time stamp based on time or cycles */
SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
+
+ /* used for bpf extension when a bpf program is loaded */
+ SKBTX_BPF = 1 << 7,
};
+#define SKBTX_HW_TSTAMP (SKBTX_HW_TSTAMP_NOBPF | SKBTX_BPF)
+
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP)
+ SKBTX_SCHED_TSTAMP | \
+ SKBTX_BPF | \
+ SKBTX_COMPLETION_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
- SKBTX_HW_TSTAMP_USE_CYCLES | \
SKBTX_ANY_SW_TSTAMP)
/* Definitions for flags in struct skb_shared_info */
@@ -607,11 +610,19 @@ struct skb_shared_info {
* Warning : all fields before dataref are cleared in __alloc_skb()
*/
atomic_t dataref;
- unsigned int xdp_frags_size;
- /* Intermediate layers must ensure that destructor_arg
- * remains valid until skb destructor */
- void * destructor_arg;
+ union {
+ struct {
+ u32 xdp_frags_size;
+ u32 xdp_frags_truesize;
+ };
+
+ /*
+ * Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor.
+ */
+ void *destructor_arg;
+ };
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
@@ -663,7 +674,7 @@ enum {
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 2,
- SKB_GSO_TCP_FIXEDID = 1 << 3,
+ __SKB_GSO_TCP_FIXEDID = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
@@ -694,6 +705,14 @@ enum {
SKB_GSO_UDP_L4 = 1 << 17,
SKB_GSO_FRAGLIST = 1 << 18,
+
+ SKB_GSO_TCP_ACCECN = 1 << 19,
+
+ /* These indirectly map onto the same netdev feature.
+ * If NETIF_F_TSO_MANGLEID is set it may mangle both inner and outer IDs.
+ */
+ SKB_GSO_TCP_FIXEDID = 1 << 30,
+ SKB_GSO_TCP_FIXEDID_INNER = 1 << 31,
};
#if BITS_PER_LONG > 32
@@ -706,6 +725,13 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
+enum skb_tstamp_type {
+ SKB_CLOCK_REALTIME,
+ SKB_CLOCK_MONOTONIC,
+ SKB_CLOCK_TAI,
+ __SKB_CLOCK_MAX = SKB_CLOCK_TAI,
+};
+
/**
* DOC: Basic sk_buff geometry
*
@@ -820,13 +846,13 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @unreadable: indicates that at least 1 of the fragments in this skb is
+ * unreadable.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
- * @mono_delivery_time: When set, skb->tstamp has the
- * delivery_time in mono clock base (i.e. EDT). Otherwise, the
- * skb->tstamp has the (rcv) timestamp at ingress and
- * delivery_time at egress.
+ * @tstamp_type: clock base of skb->tstamp; one of enum
+ * skb_tstamp_type (real-time, monotonic delivery time, or TAI).
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
* @alloc_cpu: CPU which did the skb allocation.
@@ -954,7 +980,7 @@ struct sk_buff {
/* private: */
__u8 __mono_tc_offset[0];
/* public: */
- __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
+ __u8 tstamp_type:2; /* See skb_tstamp_type */
#ifdef CONFIG_NET_XGRESS
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
__u8 tc_skip_classify:1;
@@ -1003,7 +1029,7 @@ struct sk_buff {
#if IS_ENABLED(CONFIG_IP_SCTP)
__u8 csum_not_inet:1;
#endif
-
+ __u8 unreadable:1;
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
@@ -1084,15 +1110,16 @@ struct sk_buff {
#endif
#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
-/* if you move tc_at_ingress or mono_delivery_time
+/* if you move tc_at_ingress or tstamp_type
* around, you also must adapt these constants.
*/
#ifdef __BIG_ENDIAN_BITFIELD
-#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7)
-#define TC_AT_INGRESS_MASK (1 << 6)
+#define SKB_TSTAMP_TYPE_MASK (3 << 6)
+#define SKB_TSTAMP_TYPE_RSHIFT (6)
+#define TC_AT_INGRESS_MASK (1 << 5)
#else
-#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0)
-#define TC_AT_INGRESS_MASK (1 << 1)
+#define SKB_TSTAMP_TYPE_MASK (3)
+#define TC_AT_INGRESS_MASK (1 << 2)
#endif
#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
@@ -1125,7 +1152,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
* skb_dst - returns skb dst_entry
* @skb: buffer
*
- * Returns skb dst_entry, regardless of reference taken or not.
+ * Returns: skb dst_entry, regardless of reference taken or not.
*/
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
@@ -1138,6 +1165,45 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
+static inline void skb_dst_check_unset(struct sk_buff *skb)
+{
+ DEBUG_NET_WARN_ON_ONCE((skb->_skb_refdst & SKB_DST_PTRMASK) &&
+ !(skb->_skb_refdst & SKB_DST_NOREF));
+}
+
+/**
+ * skb_dstref_steal() - return current dst_entry value and clear it
+ * @skb: buffer
+ *
+ * Resets skb dst_entry without adjusting its reference count. Useful in
+ * cases where dst_entry needs to be temporarily reset and restored.
+ * Note that the returned value cannot be used directly because it
+ * might contain the SKB_DST_NOREF bit.
+ *
+ * When in doubt, prefer skb_dst_drop() over skb_dstref_steal() to correctly
+ * handle dst_entry reference counting.
+ *
+ * Returns: original skb dst_entry.
+ */
+static inline unsigned long skb_dstref_steal(struct sk_buff *skb)
+{
+ unsigned long refdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0;
+ return refdst;
+}
+
+/**
+ * skb_dstref_restore() - restore skb dst_entry removed via skb_dstref_steal()
+ * @skb: buffer
+ * @refdst: dst entry from a call to skb_dstref_steal()
+ */
+static inline void skb_dstref_restore(struct sk_buff *skb, unsigned long refdst)
+{
+ skb_dst_check_unset(skb);
+ skb->_skb_refdst = refdst;
+}
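A sketch of the steal/restore pairing (not part of the patch); demo_process_dstless() is a hypothetical helper that must not see the skb's dst, and the reference count is left untouched across the pair:

#include <linux/skbuff.h>

static void demo_process_dstless(struct sk_buff *skb);	/* hypothetical */

static void demo_requeue(struct sk_buff *skb)
{
	/* Hide the dst (including any SKB_DST_NOREF bit) without
	 * dropping the reference, then put it back exactly as it was.
	 */
	unsigned long refdst = skb_dstref_steal(skb);

	demo_process_dstless(skb);
	skb_dstref_restore(skb, refdst);
}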
+
/**
* skb_dst_set - sets skb dst
* @skb: buffer
@@ -1148,6 +1214,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -1164,6 +1231,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
@@ -1213,13 +1281,13 @@ static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
- * Returns true if we can free the skb.
+ * Returns: true if we can free the skb.
*/
static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
return false;
- if (likely(refcount_read(&skb->users) == 1))
+ if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1))
smp_rmb();
else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
@@ -1245,8 +1313,14 @@ static inline bool skb_data_unref(const struct sk_buff *skb,
return true;
}
-void __fix_address
-kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason reason);
+
+static inline void
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+{
+ sk_skb_reason_drop(NULL, skb, reason);
+}
/**
* kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
@@ -1292,6 +1366,7 @@ struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
void skb_attempt_defer_free(struct sk_buff *skb);
+u32 napi_skb_cache_get_bulk(void **skbs, u32 n);
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
struct sk_buff *slab_build_skb(void *data);
@@ -1329,7 +1404,7 @@ struct sk_buff_fclones {
* @sk: socket
* @skb: buffer
*
- * Returns true if skb is a fast clone, and its clone is not freed.
+ * Returns: true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
* so we also check that didn't happen.
*/
@@ -1421,6 +1496,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
@@ -1492,20 +1568,20 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
__skb_set_hash(skb, hash, true, is_l4);
}
-void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
- const struct flow_keys_basic *keys, int hlen);
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- const void *data, int hlen_proto);
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
- int thoff, u8 ip_proto)
+static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+ return __skb_get_hash_symmetric_net(NULL, skb);
}
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
+ const struct flow_keys_basic *keys, int hlen);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ const void *data, int hlen_proto);
+
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
@@ -1572,10 +1648,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
+static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash_net(net, skb);
+
+ return skb->hash;
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
- __skb_get_hash(skb);
+ __skb_get_hash_net(NULL, skb);
return skb->hash;
}
@@ -1669,23 +1753,31 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
+ struct ubuf_info *uarg, bool devmem);
void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+struct net_devmem_dmabuf_binding;
+
int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, struct iov_iter *from,
- size_t length);
+ size_t length,
+ struct net_devmem_dmabuf_binding *binding);
+
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
struct msghdr *msg, int len)
{
- return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
+ return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len,
+ NULL);
}
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
- struct ubuf_info *uarg);
+ struct ubuf_info *uarg,
+ struct net_devmem_dmabuf_binding *binding);
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -1794,6 +1886,12 @@ static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
__skb_zcopy_downgrade_managed(skb);
}
+/* Return true if frags in this skb are readable by the host. */
+static inline bool skb_frags_readable(const struct sk_buff *skb)
+{
+ return !skb->unreadable;
+}
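(Hypothetical guard, not from this patch: payload held in unreadable frags, e.g. device memory, must not be touched by the CPU, so callers check before linearizing or copying.)

static int example_access_payload(struct sk_buff *skb)
{
	if (!skb_frags_readable(skb))
		return -EFAULT;	/* frags live in device memory */

	return skb_linearize(skb);
}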
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
@@ -2510,10 +2608,17 @@ static inline void skb_len_add(struct sk_buff *skb, int delta)
static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
netmem_ref netmem, int off, int size)
{
- struct page *page = netmem_to_page(netmem);
+ struct page *page;
__skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
+ if (netmem_is_net_iov(netmem)) {
+ skb->unreadable = true;
+ return;
+ }
+
+ page = netmem_to_page(netmem);
+
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
@@ -2636,6 +2741,12 @@ static inline void skb_assert_len(struct sk_buff *skb)
#endif /* CONFIG_DEBUG_NET */
}
+#if defined(CONFIG_FAIL_SKB_REALLOC)
+void skb_might_realloc(struct sk_buff *skb);
+#else
+static inline void skb_might_realloc(struct sk_buff *skb) {}
+#endif
+
/*
* Add data to an sk_buff
*/
@@ -2736,6 +2847,7 @@ static inline enum skb_drop_reason
pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_might_realloc(skb);
if (likely(len <= skb_headlen(skb)))
return SKB_NOT_DROPPED_YET;
@@ -2864,9 +2976,19 @@ static inline void skb_reset_inner_headers(struct sk_buff *skb)
skb->inner_transport_header = skb->transport_header;
}
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
- skb->mac_len = skb->network_header - skb->mac_header;
+ if (!skb_mac_header_was_set(skb)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ skb->mac_len = 0;
+ } else {
+ skb->mac_len = skb->network_header - skb->mac_header;
+ }
}
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
@@ -2882,7 +3004,10 @@ static inline int skb_inner_transport_offset(const struct sk_buff *skb)
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
- skb->inner_transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset);
+ skb->inner_transport_header = offset;
}
static inline void skb_set_inner_transport_header(struct sk_buff *skb,
@@ -2899,7 +3024,10 @@ static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
- skb->inner_network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset);
+ skb->inner_network_header = offset;
}
static inline void skb_set_inner_network_header(struct sk_buff *skb,
@@ -2921,7 +3049,10 @@ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
- skb->inner_mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset);
+ skb->inner_mac_header = offset;
}
static inline void skb_set_inner_mac_header(struct sk_buff *skb,
@@ -2943,7 +3074,33 @@ static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
- skb->transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset);
+ skb->transport_header = offset;
+}
+
+/**
+ * skb_reset_transport_header_careful - conditionally reset transport header
+ * @skb: buffer to alter
+ *
+ * Hardened version of skb_reset_transport_header().
+ *
+ * Returns: true if the operation was a success.
+ */
+static inline bool __must_check
+skb_reset_transport_header_careful(struct sk_buff *skb)
+{
+ long offset = skb->data - skb->head;
+
+ if (unlikely(offset != (typeof(skb->transport_header))offset))
+ return false;
+
+ if (unlikely(offset == (typeof(skb->transport_header))~0U))
+ return false;
+
+ skb->transport_header = offset;
+ return true;
}
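(Hypothetical decap step showing the intended use of the hardened variant; because of __must_check, failure has to be handled rather than ignored.)

static int example_decap(struct sk_buff *skb)
{
	if (!skb_reset_transport_header_careful(skb))
		return -EINVAL;	/* offset would not fit the header field */

	return 0;
}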
static inline void skb_set_transport_header(struct sk_buff *skb,
@@ -2960,7 +3117,10 @@ static inline unsigned char *skb_network_header(const struct sk_buff *skb)
static inline void skb_reset_network_header(struct sk_buff *skb)
{
- skb->network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset);
+ skb->network_header = offset;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
@@ -2969,11 +3129,6 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
skb->network_header += offset;
}
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
-{
- return skb->mac_header != (typeof(skb->mac_header))~0U;
-}
-
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
@@ -2998,7 +3153,10 @@ static inline void skb_unset_mac_header(struct sk_buff *skb)
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
- skb->mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset);
+ skb->mac_header = offset;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
@@ -3085,9 +3243,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb)
return skb_inner_network_header(skb) - skb->data;
}
+static inline enum skb_drop_reason
+pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull_reason(skb, skb_network_offset(skb) + len);
+}
+
static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
- return pskb_may_pull(skb, skb_network_offset(skb) + len);
+ return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
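(Hypothetical caller of the new _reason variant, assuming <linux/ip.h> for struct iphdr; the returned reason can feed kfree_skb_reason() for precise drop accounting.)

static enum skb_drop_reason example_check_iph(struct sk_buff *skb)
{
	return pskb_network_may_pull_reason(skb, sizeof(struct iphdr));
}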
/*
@@ -3165,6 +3329,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
@@ -3400,6 +3565,10 @@ static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
}
#define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
/**
@@ -3416,6 +3585,10 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
}
#define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
#define dev_alloc_page() dev_alloc_pages(0)
/**
@@ -3425,7 +3598,7 @@ static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
* A page shouldn't be considered for reusing/recycling if it was allocated
* under memory pressure or at a distant memory node.
*
- * Returns false if this page should be returned to page allocator, true
+ * Returns: false if this page should be returned to page allocator, true
* otherwise.
*/
static inline bool dev_page_is_reusable(const struct page *page)
@@ -3486,30 +3659,70 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto,
fragto->offset = fragfrom->offset;
}
+/* Return: true if the skb_frag contains a net_iov. */
+static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
+{
+ return netmem_is_net_iov(frag->netmem);
+}
+
+/**
+ * skb_frag_net_iov - retrieve the net_iov referred to by a fragment
+ * @frag: the fragment
+ *
+ * Return: the &struct net_iov associated with @frag. Returns NULL if this
+ * frag has no associated net_iov.
+ */
+static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
+{
+ if (!skb_frag_is_net_iov(frag))
+ return NULL;
+
+ return netmem_to_net_iov(frag->netmem);
+}
+
/**
* skb_frag_page - retrieve the page referred to by a paged fragment
* @frag: the paged fragment
*
- * Returns the &struct page associated with @frag.
+ * Return: the &struct page associated with @frag. Returns NULL if this frag
+ * has no associated page.
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
+ if (skb_frag_is_net_iov(frag))
+ return NULL;
+
return netmem_to_page(frag->netmem);
}
+/**
+ * skb_frag_netmem - retrieve the netmem referred to by a fragment
+ * @frag: the fragment
+ *
+ * Return: the &netmem_ref associated with @frag.
+ */
+static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
+{
+ return frag->netmem;
+}
+
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
+
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. The page must already
+ * Returns: the address of the data within @frag. The page must already
* be mapped.
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
{
+ if (!skb_frag_page(frag))
+ return NULL;
+
return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
@@ -3517,12 +3730,18 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
* skb_frag_address_safe - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. Checks that the page
+ * Returns: the address of the data within @frag. Checks that the page
* is mapped and returns %NULL otherwise.
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
- void *ptr = page_address(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+ void *ptr;
+
+ if (!page)
+ return NULL;
+
+ ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
@@ -3543,7 +3762,7 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto,
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
- * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * __skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
* @frag: the paged fragment to map
* @offset: the offset within the fragment (starting at the
@@ -3553,15 +3772,40 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
*
* Maps the page associated with @frag to @dev.
*/
-static inline dma_addr_t skb_frag_dma_map(struct device *dev,
- const skb_frag_t *frag,
- size_t offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
{
+ if (skb_frag_is_net_iov(frag)) {
+ return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+ offset + frag->offset;
+ }
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
}
+#define skb_frag_dma_map(dev, frag, ...) \
+ CONCATENATE(_skb_frag_dma_map, \
+ COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__)
+
+#define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \
+ const skb_frag_t *uf = (frag); \
+ size_t uo = (offset); \
+ \
+ __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \
+ DMA_TO_DEVICE); \
+})
+#define _skb_frag_dma_map1(dev, frag, offset) \
+ __skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \
+ __UNIQUE_ID(offset_))
+#define _skb_frag_dma_map0(dev, frag) \
+ _skb_frag_dma_map1(dev, frag, 0)
+#define _skb_frag_dma_map2(dev, frag, offset, size) \
+ __skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE)
+#define _skb_frag_dma_map3(dev, frag, offset, size, dir) \
+ __skb_frag_dma_map(dev, frag, offset, size, dir)
+
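(The argument-count dispatch can be illustrated with a hypothetical call site; omitted arguments default to offset 0, the remaining frag size, and DMA_TO_DEVICE.)

static void example_map(struct device *dev, const skb_frag_t *frag)
{
	/* 2 args: whole frag from offset 0, DMA_TO_DEVICE */
	dma_addr_t a = skb_frag_dma_map(dev, frag);
	/* 3 args: rest of the frag from offset 16, DMA_TO_DEVICE */
	dma_addr_t b = skb_frag_dma_map(dev, frag, 16);
	/* 4 args: explicit offset and size, DMA_TO_DEVICE */
	dma_addr_t c = skb_frag_dma_map(dev, frag, 0, 128);
	/* 5 args: fully explicit, same as calling __skb_frag_dma_map() */
	dma_addr_t d = skb_frag_dma_map(dev, frag, 0, 128, DMA_FROM_DEVICE);

	(void)a; (void)b; (void)c; (void)d;
}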
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask)
{
@@ -3705,39 +3949,26 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
__must_check;
-static inline int skb_add_data(struct sk_buff *skb,
- struct iov_iter *from, int copy)
-{
- const int off = skb->len;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- __wsum csum = 0;
- if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
- &csum, from)) {
- skb->csum = csum_block_add(skb->csum, csum, off);
- return 0;
- }
- } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
- return 0;
-
- __skb_trim(skb, off);
- return -EFAULT;
-}
-
-static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
- const struct page *page, int off)
+static inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == skb_frag_page(frag) &&
+ return netmem == skb_frag_netmem(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+ const struct page *page, int off)
+{
+ return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
+}
+
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
@@ -3759,7 +3990,7 @@ static inline int skb_linearize(struct sk_buff *skb)
* skb_has_shared_frag - can any frag be overwritten
* @skb: buffer to test
*
- * Return true if the skb has at least one frag that might be modified
+ * Return: true if the skb has at least one frag that might be modified
* by an external entity (as in vmsplice()/sendfile())
*/
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
@@ -3871,6 +4102,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
if (likely(len >= skb->len))
return 0;
return pskb_trim_rcsum_slow(skb, len);
@@ -3960,8 +4192,7 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
-struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
- struct sk_buff_head *queue,
+struct sk_buff *__skb_try_recv_from_queue(struct sk_buff_head *queue,
unsigned int flags,
int *off, int *err,
struct sk_buff **last);
@@ -3973,6 +4204,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait,
+ struct sk_buff_head *rcv_queue);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
@@ -3984,11 +4218,12 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
-int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len,
- struct ahash_request *hash);
+int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, u32 *crcp);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
+int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -4001,6 +4236,8 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
+ int offset, int len, int flags);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
@@ -4040,17 +4277,9 @@ static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
-struct skb_checksum_ops {
- __wsum (*update)(const void *mem, int len, __wsum wsum);
- __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
-};
-
-extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
-
-__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
- __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
+u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc);
static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
@@ -4179,7 +4408,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
- skb->mono_delivery_time = 0;
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline ktime_t net_timedelta(ktime_t t)
@@ -4188,10 +4417,36 @@ static inline ktime_t net_timedelta(ktime_t t)
}
static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
- bool mono)
+ u8 tstamp_type)
{
skb->tstamp = kt;
- skb->mono_delivery_time = kt && mono;
+
+ if (kt)
+ skb->tstamp_type = tstamp_type;
+ else
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+}
+
+static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
+ ktime_t kt, clockid_t clockid)
+{
+ u8 tstamp_type = SKB_CLOCK_REALTIME;
+
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ break;
+ case CLOCK_MONOTONIC:
+ tstamp_type = SKB_CLOCK_MONOTONIC;
+ break;
+ case CLOCK_TAI:
+ tstamp_type = SKB_CLOCK_TAI;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ kt = 0;
+ }
+
+ skb_set_delivery_time(skb, kt, tstamp_type);
}
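(Hypothetical TX-path sketch: stamping a TAI delivery time, e.g. for SO_TXTIME; an unsupported clockid warns once and clears the timestamp instead.)

static void example_stamp_tai(struct sk_buff *skb)
{
	skb_set_delivery_type_by_clockid(skb, ktime_get_clocktai(), CLOCK_TAI);
}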
DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4201,8 +4456,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
*/
static inline void skb_clear_delivery_time(struct sk_buff *skb)
{
- if (skb->mono_delivery_time) {
- skb->mono_delivery_time = 0;
+ if (skb->tstamp_type) {
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
if (static_branch_unlikely(&netstamp_needed_key))
skb->tstamp = ktime_get_real();
else
@@ -4212,7 +4467,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb)
static inline void skb_clear_tstamp(struct sk_buff *skb)
{
- if (skb->mono_delivery_time)
+ if (skb->tstamp_type)
return;
skb->tstamp = 0;
@@ -4220,7 +4475,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb)
static inline ktime_t skb_tstamp(const struct sk_buff *skb)
{
- if (skb->mono_delivery_time)
+ if (skb->tstamp_type)
return 0;
return skb->tstamp;
@@ -4228,7 +4483,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb)
static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
{
- if (!skb->mono_delivery_time && skb->tstamp)
+ if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
return skb->tstamp;
if (static_branch_unlikely(&netstamp_needed_key) || cond)
@@ -4309,6 +4564,81 @@ static inline void skb_metadata_clear(struct sk_buff *skb)
skb_metadata_set(skb, 0);
}
+/**
+ * skb_data_move - Move packet data and metadata after skb_push() or skb_pull().
+ * @skb: packet to operate on
+ * @len: number of bytes pushed or pulled from &sk_buff->data
+ * @n: number of bytes to memmove() from pre-push/pull &sk_buff->data
+ *
+ * Moves @n bytes of packet data (which can be zero) and all bytes of skb metadata.
+ *
+ * Assumes metadata is located immediately before &sk_buff->data prior to the
+ * push/pull, and that sufficient headroom exists to hold it after an
+ * skb_push(). Otherwise, metadata is cleared and a one-time warning is issued.
+ *
+ * Prefer skb_postpull_data_move() or skb_postpush_data_move() to calling this
+ * helper directly.
+ */
+static inline void skb_data_move(struct sk_buff *skb, const int len,
+ const unsigned int n)
+{
+ const u8 meta_len = skb_metadata_len(skb);
+ u8 *meta, *meta_end;
+
+ if (!len || (!n && !meta_len))
+ return;
+
+ if (!meta_len)
+ goto no_metadata;
+
+ meta_end = skb_metadata_end(skb);
+ meta = meta_end - meta_len;
+
+ if (WARN_ON_ONCE(meta_end + len != skb->data ||
+ meta_len > skb_headroom(skb))) {
+ skb_metadata_clear(skb);
+ goto no_metadata;
+ }
+
+ memmove(meta + len, meta, meta_len + n);
+ return;
+
+no_metadata:
+ memmove(skb->data, skb->data - len, n);
+}
+
+/**
+ * skb_postpull_data_move - Move packet data and metadata after skb_pull().
+ * @skb: packet to operate on
+ * @len: number of bytes pulled from &sk_buff->data
+ * @n: number of bytes to memmove() from pre-pull &sk_buff->data
+ *
+ * See skb_data_move() for details.
+ */
+static inline void skb_postpull_data_move(struct sk_buff *skb,
+ const unsigned int len,
+ const unsigned int n)
+{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_data_move(skb, len, n);
+}
+
+/**
+ * skb_postpush_data_move - Move packet data and metadata after skb_push().
+ * @skb: packet to operate on
+ * @len: number of bytes pushed onto &sk_buff->data
+ * @n: number of bytes to memmove() from pre-push &sk_buff->data
+ *
+ * See skb_data_move() for details.
+ */
+static inline void skb_postpush_data_move(struct sk_buff *skb,
+ const unsigned int len,
+ const unsigned int n)
+{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_data_move(skb, -len, n);
+}
+
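(Hypothetical sketch of the intended pairing: pull an outer header, then keep @n bytes of inner data plus any skb metadata contiguous in front of the new skb->data.)

static void example_strip_header(struct sk_buff *skb, unsigned int hdr_len,
				 unsigned int n)
{
	skb_pull(skb, hdr_len);
	skb_postpull_data_move(skb, hdr_len, n);
}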
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
@@ -4377,7 +4707,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
- if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
+ if (skb_shinfo(skb)->tx_flags & (SKBTX_SW_TSTAMP | SKBTX_BPF))
skb_tstamp_tx(skb, NULL);
}
@@ -4454,7 +4784,7 @@ static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
/* Check if we need to perform checksum complete validation.
*
- * Returns true if checksum complete is needed, false otherwise
+ * Returns: true if checksum complete is needed, false otherwise
* (either checksum is unnecessary or zero checksum is allowed).
*/
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
@@ -4656,6 +4986,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
SKB_EXT_MCTP,
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ SKB_EXT_PSP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -5085,7 +5418,7 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb)
}
ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
- ssize_t maxsize, gfp_t gfp);
+ ssize_t maxsize);
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
index 11f0a4063403..9e49372ef1a0 100644
--- a/include/linux/skbuff_ref.h
+++ b/include/linux/skbuff_ref.h
@@ -17,7 +17,7 @@
*/
static inline void __skb_frag_ref(skb_frag_t *frag)
{
- get_page(skb_frag_page(frag));
+ get_netmem(skb_frag_netmem(frag));
}
/**
@@ -32,16 +32,15 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
-bool napi_pp_put_page(struct page *page);
+bool napi_pp_put_page(netmem_ref netmem);
-static inline void
-skb_page_unref(struct page *page, bool recycle)
+static inline void skb_page_unref(netmem_ref netmem, bool recycle)
{
#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page))
+ if (recycle && napi_pp_put_page(netmem))
return;
#endif
- put_page(page);
+ put_netmem(netmem);
}
/**
@@ -54,7 +53,7 @@ skb_page_unref(struct page *page, bool recycle)
*/
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- skb_page_unref(skb_frag_page(frag), recycle);
+ skb_page_unref(skb_frag_netmem(frag), recycle);
}
/**
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index c9efda9df285..49847888c287 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -91,6 +91,8 @@ struct sk_psock {
struct sk_psock_progs progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
struct strparser strp;
+ u32 copied_seq;
+ u32 ingress_bytes;
#endif
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
@@ -313,21 +315,26 @@ static inline bool sk_psock_test_state(const struct sk_psock *psock,
static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
kfree_skb(skb);
}
-static inline void sk_psock_queue_msg(struct sk_psock *psock,
+static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
+ bool ret;
+
spin_lock_bh(&psock->ingress_lock);
- if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
list_add_tail(&msg->list, &psock->ingress_msg);
- else {
+ ret = true;
+ } else {
sk_msg_free(psock->sk, msg);
kfree(msg);
+ ret = false;
}
spin_unlock_bh(&psock->ingress_lock);
+ return ret;
}
static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
@@ -414,6 +421,11 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg);
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce type safety.
+ */
#define sk_psock_init_link() \
((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \
GFP_ATOMIC | __GFP_NOWARN))
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7247e217e21b..cf443f064a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -16,6 +16,7 @@
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
@@ -41,7 +42,7 @@ enum _slab_flag_bits {
#ifdef CONFIG_FAILSLAB
_SLAB_FAILSLAB,
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
@@ -77,7 +78,17 @@ enum _slab_flag_bits {
#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
-/* Align objs on cache lines */
+/**
+ * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * Sufficiently large objects are aligned on a cache line boundary. For
+ * objects smaller than half a cache line, the alignment is half the cache
+ * line size. In general, if the object size is smaller than 1/2^n of the
+ * cache line size, the alignment is adjusted to 1/2^n of the cache line size.
+ *
+ * If explicit alignment is also requested by the respective
+ * &struct kmem_cache_args field, the greater of the two alignments is applied.
+ */
#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
@@ -87,8 +98,8 @@ enum _slab_flag_bits {
#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
-/*
- * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
+/**
+ * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -99,20 +110,22 @@ enum _slab_flag_bits {
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
- * begin:
- * rcu_read_lock();
- * obj = lockless_lookup(key);
- * if (obj) {
- * if (!try_get_ref(obj)) // might fail for free objects
- * rcu_read_unlock();
- * goto begin;
- *
- * if (obj->key != key) { // not the object we expected
- * put_ref(obj);
- * rcu_read_unlock();
- * goto begin;
- * }
- * }
+ * ::
+ *
+ * begin:
+ * rcu_read_lock();
+ * obj = lockless_lookup(key);
+ * if (obj) {
+ * if (!try_get_ref(obj)) // might fail for free objects
+ * rcu_read_unlock();
+ * goto begin;
+ *
+ * if (obj->key != key) { // not the object we expected
+ * put_ref(obj);
+ * rcu_read_unlock();
+ * goto begin;
+ * }
+ * }
* rcu_read_unlock();
*
* This is useful if we need to approach a kernel structure obliquely,
@@ -124,6 +137,15 @@ enum _slab_flag_bits {
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
+ * Note that the object identity check has to be done *after* acquiring a
+ * reference; therefore, the user has to ensure proper ordering for loads.
+ * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
+ * the newly allocated object has to be fully initialized *before* its
+ * refcount gets initialized and proper ordering for stores is required.
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
+ * designed with the proper fences required for reference counting objects
+ * allocated with SLAB_TYPESAFE_BY_RCU.
+ *
* Note that it is not possible to acquire a lock within a structure
* allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
* as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
@@ -137,7 +159,6 @@ enum _slab_flag_bits {
*
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
@@ -170,8 +191,13 @@ enum _slab_flag_bits {
#else
# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
-/* Account to memcg */
-#ifdef CONFIG_MEMCG_KMEM
+/**
+ * define SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * All object allocations from this cache will be memcg accounted, regardless of
+ * __GFP_ACCOUNT being or not being passed to individual allocations.
+ */
+#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
@@ -197,7 +223,13 @@ enum _slab_flag_bits {
#endif
/* The following flags affect the page allocator grouping pages by mobility */
-/* Objects are reclaimable */
+/**
+ * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * Use this flag for caches that have an associated shrinker. As a result, slab
+ * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
+ * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
+ */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
@@ -234,14 +266,204 @@ struct mem_cgroup;
*/
bool slab_is_available(void);
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, slab_flags_t flags,
- void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *));
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must be also set to %true in order to interpret the field
+ * as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
+ */
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, @use_freeptr_offset must be set to %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free the object in the
+ * same state as after calling the constructor, or to deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+ /**
+ * @sheaf_capacity: Enable sheaves of given capacity for the cache.
+ *
+ * With a non-zero value, allocations from the cache go through caching
+ * arrays called sheaves. Each cpu has a main sheaf that's always
+ * present, and a spare sheaf that may not be present. When both become
+ * empty, there's an attempt to replace an empty sheaf with a full sheaf
+ * from the per-node barn.
+ *
+ * When no full sheaf is available, and gfp flags allow blocking, a
+ * sheaf is allocated and filled from slab(s) using bulk allocation.
+ * Otherwise the allocation falls back to the normal operation
+ * allocating a single object from a slab.
+ *
+ * Analogously, when freeing while both percpu sheaves are full, the barn
+ * may replace a full sheaf with an empty one, unless the barn is over
+ * capacity. In that case a sheaf is bulk-freed to slab pages.
+ *
+ * The sheaves do not enforce NUMA placement of objects, so allocations
+ * via kmem_cache_alloc_node() with a node specified other than
+ * NUMA_NO_NODE will bypass them.
+ *
+ * Bulk allocation and free operations also try to use the cpu sheaves
+ * and barn, but fall back to using slab pages directly.
+ *
+ * When slub_debug is enabled for the cache, the sheaf_capacity argument
+ * is ignored.
+ *
+ * %0 means no sheaves will be created.
+ */
+ unsigned int sheaf_capacity;
+};
+
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags);
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ slab_flags_t flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/**
+ * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
+ * for copying to userspace.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects, or %NULL.
+ *
+ * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY()
+ * if whitelisting a single field is sufficient, or kmem_cache_create() with
+ * the necessary parameters passed via the args parameter (see
+ * &struct kmem_cache_args).
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+static inline struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ .useroffset = useroffset,
+ .usersize = usersize,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ /* Make sure we don't get passed garbage. */
+ if (WARN_ON_ONCE(args))
+ return ERR_PTR(-EINVAL);
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+/**
+ * kmem_cache_create - Create a kmem cache.
+ * @__name: A string which is used in /proc/slabinfo to identify this cache.
+ * @__object_size: The size of objects to be created in this cache.
+ * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
+ * means defaults will be used for all the arguments.
+ *
+ * This is currently implemented as a macro using ``_Generic()`` to call
+ * either the new variant of the function, or a legacy one.
+ *
+ * The new variant has 4 parameters:
+ * ``kmem_cache_create(name, object_size, args, flags)``
+ *
+ * See __kmem_cache_create_args() which implements this.
+ *
+ * The legacy variant has 5 parameters:
+ * ``kmem_cache_create(name, object_size, align, flags, ctor)``
+ *
+ * The align and ctor parameters map to the respective fields of
+ * &struct kmem_cache_args.
+ *
+ * Context: Cannot be called within an interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
+
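(Two hypothetical caches showing both calling conventions the _Generic() wrapper accepts; struct foo is made up for illustration.)

struct foo { int a; };

/* New 4-argument form: a &struct kmem_cache_args pointer (or NULL). */
static struct kmem_cache *foo_cache(void)
{
	struct kmem_cache_args args = {
		.align = __alignof__(struct foo),
	};

	return kmem_cache_create("foo", sizeof(struct foo), &args,
				 SLAB_ACCOUNT);
}

/* Legacy 5-argument form still compiles, routed to __kmem_cache_create(). */
static struct kmem_cache *foo_cache_legacy(void)
{
	return kmem_cache_create("foo", sizeof(struct foo),
				 __alignof__(struct foo), SLAB_ACCOUNT, NULL);
}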
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
@@ -253,33 +475,42 @@ int kmem_cache_shrink(struct kmem_cache *s);
* f.e. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
-#define KMEM_CACHE(__struct, __flags) \
- kmem_cache_create(#__struct, sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ }, (__flags))
/*
* To whitelist a single field for copying to/from usercopy, use this
* macro instead for KMEM_CACHE() above.
*/
-#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
- kmem_cache_create_usercopy(#__struct, \
- sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), \
- offsetof(struct __struct, __field), \
- sizeof_field(struct __struct, __field), NULL)
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ .useroffset = offsetof(struct __struct, __field), \
+ .usersize = sizeof_field(struct __struct, __field), \
+ }, (__flags))
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc_noprof(const void *objp, size_t new_size,
- gfp_t flags) __realloc_size(2);
-#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
+void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
+ unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
+#define krealloc_node_align(...) alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
+#define krealloc_node(_o, _s, _f, _n) krealloc_node_align(_o, _s, 1, _f, _n)
+#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
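(The chain relies on the variadic default-argument trick: krealloc() keeps its historical 3-argument form by appending NUMA_NO_NODE, and every spelling funnels into krealloc_node_align_noprof() with align = 1. A hypothetical caller stays source-compatible:)

static int *example_grow(int *p, size_t n)
{
	/* expands to krealloc_node(p, bytes, GFP_KERNEL, NUMA_NO_NODE) */
	return krealloc(p, n * sizeof(*p), GFP_KERNEL);
}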
void kfree(const void *objp);
+void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
/**
* ksize - Report actual allocation size of associated object
@@ -407,7 +638,7 @@ enum kmalloc_cache_type {
#ifndef CONFIG_ZONE_DMA
KMALLOC_DMA = KMALLOC_NORMAL,
#endif
-#ifndef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_MEMCG
KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
KMALLOC_RANDOM_START = KMALLOC_NORMAL,
@@ -420,14 +651,15 @@ enum kmalloc_cache_type {
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
KMALLOC_CGROUP,
#endif
NR_KMALLOC_TYPES
};
-extern struct kmem_cache *
-kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
+
+extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
/*
* Define gfp bits that should not be set for KMALLOC_NORMAL.
@@ -435,7 +667,7 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
#define KMALLOC_NOT_NORMAL_BITS \
(__GFP_RECLAIMABLE | \
(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
- (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
+ (IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
extern unsigned long random_kmalloc_seed;
@@ -463,7 +695,7 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigne
*/
if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
return KMALLOC_DMA;
- if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
+ if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
return KMALLOC_RECLAIM;
else
return KMALLOC_CGROUP;
@@ -528,9 +760,6 @@ static_assert(PAGE_SHIFT <= 20);
#include <linux/alloc_tag.h>
-void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-#define __kmalloc(...) alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
-
/**
* kmem_cache_alloc - Allocate an object
* @cachep: The cache to allocate from.
@@ -549,8 +778,41 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+/**
+ * kmem_cache_charge - memcg charge already allocated slab memory
+ * @objp: address of the slab object to memcg charge
+ * @gfpflags: describe the allocation context
+ *
+ * kmem_cache_charge allows charging a slab object to the current memcg,
+ * primarily in cases where charging at allocation time might not be possible
+ * because the target memcg is not known (e.g. softirq context).
+ *
+ * The objp should be a pointer returned by slab allocator functions like
+ * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg
+ * charge behavior can be controlled through the gfpflags parameter, which
+ * affects how the necessary internal metadata can be allocated. Including
+ * __GFP_NOFAIL denotes that overcharging is requested instead of failure,
+ * but it is not applied to the internal metadata allocation.
+ *
+ * There are several cases where it will return true even if the charging was
+ * not done. More specifically:
+ *
+ * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
+ * 2. Already charged slab objects.
+ * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
+ * without __GFP_ACCOUNT.
+ * 4. Allocating the internal metadata has failed.
+ *
+ * Return: true if the charge was successful, otherwise false.
+ */
+bool kmem_cache_charge(void *objp, gfp_t gfpflags);
void kmem_cache_free(struct kmem_cache *s, void *objp);
+kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *));
+
/*
* Bulk allocation and freeing operations. These are accelerated in an
* allocator specific way to avoid taking locks repeatedly or building
@@ -568,31 +830,65 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
- __alloc_size(1);
-#define __kmalloc_node(...) alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
-
void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
- __assume_kmalloc_alignment __alloc_size(3);
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
- int node, size_t size) __assume_kmalloc_alignment
- __alloc_size(4);
-#define kmalloc_trace(...) alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
-#define kmalloc_node_trace(...) alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+ struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+ alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
-void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
- __alloc_size(1);
-#define kmalloc_large(...) alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
+unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
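(Hedged sketch of the sheaf lifecycle; the error-return conventions are assumed here, not taken from this hunk: prefill from a sleepable context, then allocate where blocking or failure is unacceptable.)

static void *example_prefill_then_alloc(struct kmem_cache *s)
{
	struct slab_sheaf *sheaf;
	void *obj;

	sheaf = kmem_cache_prefill_sheaf(s, GFP_KERNEL, 16);
	if (!sheaf)	/* assumption: NULL on failure */
		return NULL;

	obj = kmem_cache_alloc_from_sheaf(s, GFP_ATOMIC, sheaf);
	kmem_cache_return_sheaf(s, GFP_KERNEL, sheaf);
	return obj;
}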
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
- __alloc_size(1);
-#define kmalloc_large_node(...) alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
+/*
+ * These macros allow declaring a kmem_buckets * parameter alongside size, which
+ * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
+ * sites don't have to pass NULL.
+ */
+#ifdef CONFIG_SLAB_BUCKETS
+#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
+#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
+#define PASS_BUCKET_PARAM(_b) (_b)
+#else
+#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
+#define PASS_BUCKET_PARAMS(_size, _b) (_size)
+#define PASS_BUCKET_PARAM(_b) NULL
+#endif
+
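(A hypothetical internal helper showing how the bucket parameter compiles away: with CONFIG_SLAB_BUCKETS=n the 'b' argument never exists, so call sites never pass NULL.)

static inline void *example_bucket_alloc(DECL_BUCKET_PARAMS(size, b),
					 gfp_t flags)
{
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), flags,
				     NUMA_NO_NODE);
}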
+/*
+ * The following functions are not to be used directly and are intended only
+ * for internal use from kmalloc() and kmalloc_node(),
+ * with the exception of KUnit tests.
+ */
+
+void *__kmalloc_noprof(size_t size, gfp_t flags)
+ __assume_kmalloc_alignment __alloc_size(1);
+
+void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
+ __assume_kmalloc_alignment __alloc_size(1);
+
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
+
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
+ __assume_kmalloc_alignment __alloc_size(4);
+
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
+ __assume_page_alignment __alloc_size(1);
+
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+ __assume_page_alignment __alloc_size(1);
/**
* kmalloc - allocate kernel memory
@@ -604,7 +900,8 @@ void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_pag
*
* The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
* bytes. For @size of power of two bytes, the alignment is also guaranteed
- * to be at least to the size.
+ * to be at least the size. For other sizes, the alignment is guaranteed to
+ * be at least the largest power-of-two divisor of @size.
*
* The @flags argument may be one of the GFP flags defined at
* include/linux/gfp_types.h and described at
@@ -654,10 +951,10 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
unsigned int index;
if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_noprof(size, flags);
+ return __kmalloc_large_noprof(size, flags);
index = kmalloc_index(size);
- return kmalloc_trace_noprof(
+ return __kmalloc_cache_noprof(
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, size);
}
@@ -665,20 +962,29 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
}
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
+#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+
+#define kmem_buckets_alloc(_b, _size, _flags) \
+ alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
+
+#define kmem_buckets_alloc_track_caller(_b, _size, _flags) \
+ alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
+
static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) && size) {
unsigned int index;
if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large_node_noprof(size, flags, node);
+ return __kmalloc_large_node_noprof(size, flags, node);
index = kmalloc_index(size);
- return kmalloc_node_trace_noprof(
+ return __kmalloc_cache_node_noprof(
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, node, size);
}
- return __kmalloc_node_noprof(size, flags, node);
+ return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
@@ -694,8 +1000,6 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc_noprof(bytes, flags);
return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
@@ -706,6 +1010,16 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * See krealloc_noprof() for further details.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
*/
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
size_t new_n,
@@ -729,8 +1043,10 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
*/
#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
- unsigned long caller) __alloc_size(1);
+void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
+ __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...) \
alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
@@ -756,7 +1072,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc_node_noprof(bytes, flags, node);
- return __kmalloc_node_noprof(bytes, flags, node);
+ return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
@@ -780,15 +1096,21 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
-extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
-#define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
-
-#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
-#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
+ gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node_align_noprof(_size, _align, _flags, _node) \
+ __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
+#define kvmalloc_node_align(...) \
+ alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
+#define kvmalloc_node(_s, _f, _n) kvmalloc_node_align(_s, 1, _f, _n)
+#define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+#define kmem_buckets_valloc(_b, _size, _flags) \
+ alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
+
static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
@@ -797,7 +1119,7 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return kvmalloc_node_noprof(bytes, flags, node);
+ return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}
#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
@@ -808,9 +1130,12 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
-extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
- __realloc_size(3);
-#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
+void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define kvrealloc_node_align(...) \
+ alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
+#define kvrealloc_node(_p, _s, _f, _n) kvrealloc_node_align(_p, _s, 1, _f, _n)
+#define kvrealloc(...) kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
@@ -819,6 +1144,19 @@ extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
+#ifndef CONFIG_KVFREE_RCU_BATCHED
+static inline void kvfree_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+
+static inline void kfree_rcu_scheduler_running(void) { }
+#else
+void kvfree_rcu_barrier(void);
+
+void kfree_rcu_scheduler_running(void);
+#endif
+
/**
* kmalloc_size_roundup - Report allocation bucket size for the given size
*
@@ -836,5 +1174,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s);
size_t kmalloc_size_roundup(size_t size);
void __init kmem_cache_init_late(void);
+void __init kvfree_rcu_init(void);
#endif /* _LINUX_SLAB_H */
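
As a usage sketch for the reworked kvmalloc API above (hypothetical caller; `foo_entry`, `nr_entries`, and the 64-byte alignment are illustrative, not from the patch):

    struct foo_entry *tbl, *bigger;

    /* Alignment-aware allocation that may fall back to vmalloc. */
    tbl = kvmalloc_node_align(nr_entries * sizeof(*tbl), 64,
                              GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE);
    if (!tbl)
        return -ENOMEM;

    /* Resize while keeping alignment and node placement explicit. */
    bigger = kvrealloc_node_align(tbl, 2 * nr_entries * sizeof(*tbl), 64,
                                  GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE);
    if (!bigger) {
        kvfree(tbl);
        return -ENOMEM;
    }
    tbl = bigger;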
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
index 3042385b7b40..a4608d9a9684 100644
--- a/include/linux/slimbus.h
+++ b/include/linux/slimbus.h
@@ -91,7 +91,7 @@ struct slim_driver {
struct device_driver driver;
const struct slim_device_id *id_table;
};
-#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+#define to_slim_driver(d) container_of_const(d, struct slim_driver, driver)
/**
* struct slim_val_inf - Slimbus value or information element
diff --git a/include/linux/sm501.h b/include/linux/sm501.h
index 2f3488b2875d..bcda27a46e7a 100644
--- a/include/linux/sm501.h
+++ b/include/linux/sm501.h
@@ -12,9 +12,6 @@ extern int sm501_unit_power(struct device *dev,
extern unsigned long sm501_set_clock(struct device *dev,
int clksrc, unsigned long freq);
-extern unsigned long sm501_find_clock(struct device *dev,
- int clksrc, unsigned long req_freq);
-
/* sm501_misc_control
*
* Modify the SM501's MISC_CONTROL register
diff --git a/include/linux/smp.h b/include/linux/smp.h
index fcd61dfe2af3..91d0ecf3b8d3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -109,7 +109,7 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
* Architecture specific boot CPU setup. Defined as empty weak function in
* init/main.c. Architectures can override it.
*/
-void smp_prepare_boot_cpu(void);
+void __init smp_prepare_boot_cpu(void);
#ifdef CONFIG_SMP
@@ -168,6 +168,7 @@ int smp_call_function_any(const struct cpumask *mask,
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
+bool cpus_peek_for_pending_ipi(const struct cpumask *mask);
/*
* Generic and arch helpers
@@ -216,12 +217,16 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
+{
+ return false;
+}
#define setup_max_cpus 0
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
-static inline void smp_init(void) { up_late_init(); }
+static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif
@@ -234,7 +239,7 @@ static inline int get_boot_cpu_id(void)
#endif /* !SMP */
/**
- * raw_processor_id() - get the current (unstable) CPU id
+ * raw_smp_processor_id() - get the current (unstable) CPU id
*
 * For when you know what you are doing and need an unstable
* CPU id.
@@ -294,4 +299,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
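
A hedged sketch of the additions above; the caller and the exact semantics of the peek helper are assumptions, not taken from an in-tree user:

    /* Assumed: returns true when some CPU in @mask has an IPI pending. */
    if (cpus_peek_for_pending_ipi(cpu_online_mask))
        pr_debug("smp: peers still draining IPIs, skipping extra kick\n");
    else
        kick_all_cpus_sync();

    /* Stall checkers can ask whether a CSD lock looks wedged. */
    if (csd_lock_is_stuck())
        pr_warn("smp: CSD lock appears stuck\n");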
diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h
new file mode 100644
index 000000000000..4d23cbb7d407
--- /dev/null
+++ b/include/linux/soc/airoha/airoha_offload.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#ifndef AIROHA_OFFLOAD_H
+#define AIROHA_OFFLOAD_H
+
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+struct airoha_ppe_dev {
+ struct {
+ int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
+ void *type_data);
+ void (*check_skb)(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan);
+ } ops;
+
+ void *priv;
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA) || IS_MODULE(CONFIG_NET_AIROHA))
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev);
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev);
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
+{
+ return dev->ops.setup_tc_block_cb(dev, type_data);
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ dev->ops.check_skb(dev, skb, hash, rx_wlan);
+}
+#else
+static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+}
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+						    void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan)
+{
+}
+#endif
+
+#define NPU_NUM_CORES 8
+#define NPU_NUM_IRQ 6
+#define NPU_RX0_DESC_NUM 512
+#define NPU_RX1_DESC_NUM 512
+
+/* CTRL */
+#define NPU_RX_DMA_DESC_LAST_MASK BIT(29)
+#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15)
+#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1)
+#define NPU_RX_DMA_DESC_DONE_MASK BIT(0)
+/* INFO */
+#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28)
+#define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26)
+#define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21)
+#define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16)
+#define NPU_RX_DMA_FOE_ID_MASK GENMASK(15, 0)
+/* DATA */
+#define NPU_RX_DMA_SID_MASK GENMASK(31, 16)
+#define NPU_RX_DMA_FRAG_TYPE_MASK GENMASK(15, 14)
+#define NPU_RX_DMA_PRIORITY_MASK GENMASK(13, 10)
+#define NPU_RX_DMA_RADIO_ID_MASK GENMASK(9, 6)
+#define NPU_RX_DMA_VAP_ID_MASK GENMASK(5, 2)
+#define NPU_RX_DMA_FRAME_TYPE_MASK GENMASK(1, 0)
+
+struct airoha_npu_rx_dma_desc {
+ u32 ctrl;
+ u32 info;
+ u32 data;
+ u32 addr;
+ u64 rsv;
+} __packed;
+
+/* CTRL */
+#define NPU_TX_DMA_DESC_SCHED_MASK BIT(31)
+#define NPU_TX_DMA_DESC_LEN_MASK GENMASK(30, 18)
+#define NPU_TX_DMA_DESC_VEND_LEN_MASK GENMASK(17, 1)
+#define NPU_TX_DMA_DESC_DONE_MASK BIT(0)
+
+#define NPU_TXWI_LEN 192
+
+struct airoha_npu_tx_dma_desc {
+ u32 ctrl;
+ u32 addr;
+ u64 rsv;
+ u8 txwi[NPU_TXWI_LEN];
+} __packed;
+
+enum airoha_npu_wlan_set_cmd {
+ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_DESC,
+ WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
+ WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
+ WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
+ WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
+ WLAN_FUNC_SET_WAIT_DEL_STA,
+ WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
+ WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
+ WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
+ WLAN_FUNC_SET_WAIT_PCIE_STATE,
+ WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
+ WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
+ WLAN_FUNC_SET_WAIT_BAR_INFO,
+ WLAN_FUNC_SET_WAIT_FAST_FLAG,
+ WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
+ WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
+ WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
+ WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
+ WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
+ WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
+ WLAN_FUNC_SET_WAIT_HWNAT_INIT,
+ WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
+ WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
+ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
+};
+
+enum airoha_npu_wlan_get_cmd {
+ WLAN_FUNC_GET_WAIT_NPU_INFO,
+ WLAN_FUNC_GET_WAIT_LAST_RATE,
+ WLAN_FUNC_GET_WAIT_COUNTER,
+ WLAN_FUNC_GET_WAIT_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_RXDESC_BASE,
+ WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_DMA_ADDR,
+ WLAN_FUNC_GET_WAIT_RING_SIZE,
+ WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
+ WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
+ WLAN_FUNC_GET_WAIT_NPU_VERSION,
+};
+
+struct airoha_npu {
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ int irqs[NPU_NUM_IRQ];
+
+ struct airoha_foe_stats __iomem *stats;
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_init_stats)(struct airoha_npu *npu,
+ dma_addr_t addr, u32 num_stats_entries);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
+ int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
+ bool xmit);
+ void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
+ u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
+ void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
+ void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
+ } ops;
+#endif
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return npu->ops.wlan_init_reserved_memory(npu);
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_send_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_get_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+ npu->ops.wlan_set_irq_status(npu, val);
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
+{
+ return npu->ops.wlan_get_irq_status(npu, q);
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_enable_irq(npu, q);
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_disable_irq(npu, q);
+}
+#else
+static inline struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_npu_put(struct airoha_npu *npu)
+{
+}
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu,
+ int q)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+}
+#endif
+
+#endif /* AIROHA_OFFLOAD_H */
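
On the consumer side, a hypothetical Ethernet/WLAN driver would pair get/put around use and can decode RX descriptors with the declared masks; FIELD_GET() comes from <linux/bitfield.h>, and all surrounding names here are illustrative:

    struct airoha_ppe_dev *ppe = airoha_ppe_get_dev(eth->dev);

    if (ppe) {
        airoha_ppe_dev_check_skb(ppe, skb, hash, false);
        airoha_ppe_put_dev(ppe);
    }

    /* Parsing one completed RX DMA descriptor: */
    if (FIELD_GET(NPU_RX_DMA_DESC_DONE_MASK, desc->ctrl)) {
        unsigned long len  = FIELD_GET(NPU_RX_DMA_DESC_CUR_LEN_MASK, desc->ctrl);
        unsigned long crsn = FIELD_GET(NPU_RX_DMA_CRSN_MASK, desc->info);

        if (crsn == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
            pr_debug("unbound flow, len=%lu\n", len);
    }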
diff --git a/include/linux/soc/amd/isp4_misc.h b/include/linux/soc/amd/isp4_misc.h
new file mode 100644
index 000000000000..6738796986a7
--- /dev/null
+++ b/include/linux/soc/amd/isp4_misc.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __SOC_ISP4_MISC_H
+#define __SOC_ISP4_MISC_H
+
+#define AMDISP_I2C_ADAP_NAME "AMDISP DesignWare I2C adapter"
+
+#endif
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index 8c9ca857ccf6..736f53018017 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -56,7 +56,7 @@ struct apple_rtkit_shmem {
* context.
*/
struct apple_rtkit_ops {
- void (*crashed)(void *cookie);
+ void (*crashed)(void *cookie, const void *crashlog, size_t crashlog_size);
void (*recv_message)(void *cookie, u8 endpoint, u64 message);
bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
@@ -69,7 +69,7 @@ struct apple_rtkit;
* Initializes the internal state required to handle RTKit. This
* should usually be called within _probe.
*
- * @dev: Pointer to the device node this coprocessor is assocated with
+ * @dev: Pointer to the device node this coprocessor is associated with
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
* @mbox_name: mailbox name used to communicate with the co-processor
* @mbox_idx: mailbox index to be used if mbox_name is NULL
@@ -83,7 +83,7 @@ struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
* Non-devm version of devm_apple_rtkit_init. Must be freed with
* apple_rtkit_free.
*
- * @dev: Pointer to the device node this coprocessor is assocated with
+ * @dev: Pointer to the device node this coprocessor is associated with
* @cookie: opaque cookie passed to all functions defined in rtkit_ops
* @mbox_name: mailbox name used to communicate with the co-processor
* @mbox_idx: mailbox index to be used if mbox_name is NULL
diff --git a/include/linux/soc/cirrus/ep93xx.h b/include/linux/soc/cirrus/ep93xx.h
index 56fbe2dc59b1..3e6cf2b25a97 100644
--- a/include/linux/soc/cirrus/ep93xx.h
+++ b/include/linux/soc/cirrus/ep93xx.h
@@ -2,7 +2,18 @@
#ifndef _SOC_EP93XX_H
#define _SOC_EP93XX_H
-struct platform_device;
+struct regmap;
+
+enum ep93xx_soc_model {
+ EP93XX_9301_SOC,
+ EP93XX_9307_SOC,
+ EP93XX_9312_SOC,
+};
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
#define EP93XX_CHIP_REV_D0 3
#define EP93XX_CHIP_REV_D1 4
@@ -10,28 +21,18 @@ struct platform_device;
#define EP93XX_CHIP_REV_E1 6
#define EP93XX_CHIP_REV_E2 7
-#ifdef CONFIG_ARCH_EP93XX
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
-unsigned int ep93xx_chip_revision(void);
+struct ep93xx_regmap_adev {
+ struct auxiliary_device adev;
+ struct regmap *map;
+ void __iomem *base;
+ spinlock_t *lock;
+ void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
+ unsigned int val);
+ void (*update_bits)(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask, unsigned int val);
+};
-#else
-static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_i2s_acquire(void) { return 0; }
-static inline void ep93xx_i2s_release(void) {}
-static inline unsigned int ep93xx_chip_revision(void) { return 0; }
-
-#endif
+#define to_ep93xx_regmap_adev(_adev) \
+ container_of((_adev), struct ep93xx_regmap_adev, adev)
#endif
diff --git a/include/linux/soc/marvell/silicons.h b/include/linux/soc/marvell/silicons.h
new file mode 100644
index 000000000000..66bb9bfaf17d
--- /dev/null
+++ b/include/linux/soc/marvell/silicons.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef __SOC_SILICON_H
+#define __SOC_SILICON_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_ARM64)
+
+#define CN20K_CHIPID 0x20
+/*
+ * Silicon check for CN20K family
+ */
+static inline bool is_cn20k(struct pci_dev *pdev)
+{
+ return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID;
+}
+#else
+#define is_cn20k(pdev) ((void)(pdev), 0)
+#endif
+
+#endif /* __SOC_SILICON_H */
diff --git a/include/linux/soc/mediatek/dvfsrc.h b/include/linux/soc/mediatek/dvfsrc.h
new file mode 100644
index 000000000000..1498b3ed396b
--- /dev/null
+++ b/include/linux/soc/mediatek/dvfsrc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MEDIATEK_DVFSRC_H
+#define __MEDIATEK_DVFSRC_H
+
+enum mtk_dvfsrc_cmd {
+ MTK_DVFSRC_CMD_BW,
+ MTK_DVFSRC_CMD_HRT_BW,
+ MTK_DVFSRC_CMD_PEAK_BW,
+ MTK_DVFSRC_CMD_OPP,
+ MTK_DVFSRC_CMD_VCORE_LEVEL,
+ MTK_DVFSRC_CMD_VSCP_LEVEL,
+ MTK_DVFSRC_CMD_MAX,
+};
+
+#if IS_ENABLED(CONFIG_MTK_DVFSRC)
+
+int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data);
+int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data);
+
+#else
+
+static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data)
+{ return -ENODEV; }
+
+static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data)
+{ return -ENODEV; }
+
+#endif /* CONFIG_MTK_DVFSRC */
+
+#endif
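
A sketch of the consumer flow; the `dvfsrc` device pointer and the level value are assumptions:

    int level, ret;

    ret = mtk_dvfsrc_send_request(dvfsrc, MTK_DVFSRC_CMD_VCORE_LEVEL, 2);
    if (ret)
        return ret;

    ret = mtk_dvfsrc_query_info(dvfsrc, MTK_DVFSRC_CMD_VCORE_LEVEL, &level);
    if (!ret)
        dev_dbg(dvfsrc, "vcore level now %d\n", level);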
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 6c6cccc848f4..9956e18c5ffa 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -434,6 +434,11 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_CONN (BIT(2) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_MD1 (BIT(24) | BIT(25) | \
+ BIT(26) | BIT(27) | \
+ BIT(28))
+
#define INFRA_TOPAXI_PROTECTEN 0x0220
#define INFRA_TOPAXI_PROTECTSTA1 0x0228
#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index d4a8e34505e6..0c3906e8ad19 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -25,6 +25,31 @@
struct cmdq_pkt;
+enum cmdq_logic_op {
+ CMDQ_LOGIC_ASSIGN = 0,
+ CMDQ_LOGIC_ADD = 1,
+ CMDQ_LOGIC_SUBTRACT = 2,
+ CMDQ_LOGIC_MULTIPLY = 3,
+ CMDQ_LOGIC_XOR = 8,
+ CMDQ_LOGIC_NOT = 9,
+ CMDQ_LOGIC_OR = 10,
+ CMDQ_LOGIC_AND = 11,
+ CMDQ_LOGIC_LEFT_SHIFT = 12,
+ CMDQ_LOGIC_RIGHT_SHIFT = 13,
+ CMDQ_LOGIC_MAX,
+};
+
+struct cmdq_operand {
+ /* register type */
+ bool reg;
+ union {
+ /* index */
+ u16 idx;
+ /* value */
+ u16 value;
+ };
+};
+
struct cmdq_client_reg {
u8 subsys;
u16 offset;
@@ -273,6 +298,23 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
/**
+ * cmdq_pkt_logic_command() - Append a logic command to the CMDQ packet, asking
+ * GCE to execute an instruction that stores the result of the logic operation
+ * on the left and right operands into result_reg_idx.
+ * @pkt: the CMDQ packet
+ * @result_reg_idx: SPR index that store operation result of left_operand and right_operand
+ * @left_operand: left operand
+ * @s_op: the logic operator enum
+ * @right_operand: right operand
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
+ struct cmdq_operand *left_operand,
+ enum cmdq_logic_op s_op,
+ struct cmdq_operand *right_operand);
+
+/**
* cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
 * to execute an instruction that sets a constant value into
 * an internal register for use as a value, mask or address in
@@ -349,14 +391,6 @@ int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
*/
int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
-/**
- * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
- * @pkt: the CMDQ packet
- *
- * Return: 0 for success; else the error code is returned
- */
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
-
#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
static inline int cmdq_dev_get_client_reg(struct device *dev,
@@ -477,11 +511,6 @@ static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
return -EINVAL;
}
-static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
-{
- return -EINVAL;
-}
-
#endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */
#endif /* __MTK_CMDQ_H__ */
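
Assuming `pkt` is a live packet and SPR indices 1 and 2 are free scratch registers (both assumptions), computing SPR2 = SPR1 & 0xff with the new helper looks like:

    struct cmdq_operand lhs = { .reg = true,  .idx = 1 };
    struct cmdq_operand rhs = { .reg = false, .value = 0xff };
    int ret;

    ret = cmdq_pkt_logic_command(pkt, 2, &lhs, CMDQ_LOGIC_AND, &rhs);
    if (ret)
        return ret;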
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 0761128b4354..abe24a73ee19 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,6 +22,9 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* DVFSRC SMC calls */
+#define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506)
+
/* IOMMU related SMC call */
#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index a476648858a6..3fa93bd65004 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -147,13 +147,14 @@ struct mtk_wed_device {
u32 wpdma_tx;
u32 wpdma_txfree;
u32 wpdma_rx_glo;
- u32 wpdma_rx;
+ u32 wpdma_rx[MTK_WED_RX_QUEUES];
u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
u32 wpdma_rx_pg;
bool wcid_512;
bool hw_rro;
bool msi;
+ bool hif2;
u16 token_start;
unsigned int nbuf;
@@ -192,7 +193,7 @@ struct mtk_wed_device {
};
struct mtk_wed_ops {
- int (*attach)(struct mtk_wed_device *dev);
+ int (*attach)(struct mtk_wed_device *dev) __releases(RCU);
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs, bool reset);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 7161a3183eda..a532d1e4b1f4 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -162,7 +162,7 @@ struct apr_driver {
};
typedef struct apr_driver gpr_driver_t;
-#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+#define to_apr_driver(d) container_of_const(d, struct apr_driver, driver)
/*
* use a macro to avoid include chaining to get THIS_MODULE
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 0f038a1a0330..0a984e2579fe 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _LINUX_QCOM_GENI_SE
@@ -36,6 +37,7 @@ enum geni_se_protocol_type {
GENI_SE_I2C,
GENI_SE_I3C,
GENI_SE_SPI_SLAVE,
+ GENI_SE_INVALID_PROTO = 255,
};
struct geni_wrapper;
@@ -88,11 +90,15 @@ struct geni_se {
#define SE_GENI_M_IRQ_STATUS 0x610
#define SE_GENI_M_IRQ_EN 0x614
#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_M_IRQ_EN_SET 0x61c
+#define SE_GENI_M_IRQ_EN_CLEAR 0x620
#define SE_GENI_S_CMD0 0x630
#define SE_GENI_S_CMD_CTRL_REG 0x634
#define SE_GENI_S_IRQ_STATUS 0x640
#define SE_GENI_S_IRQ_EN 0x644
#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_S_IRQ_EN_SET 0x64c
+#define SE_GENI_S_IRQ_EN_CLEAR 0x650
#define SE_GENI_TX_FIFOn 0x700
#define SE_GENI_RX_FIFOn 0x780
#define SE_GENI_TX_FIFO_STATUS 0x800
@@ -101,6 +107,8 @@ struct geni_se {
#define SE_GENI_RX_WATERMARK_REG 0x810
#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
#define SE_GENI_IOS 0x908
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
#define SE_DMA_TX_IRQ_STAT 0xc40
#define SE_DMA_TX_IRQ_CLR 0xc44
#define SE_DMA_TX_FSM_RST 0xc58
@@ -234,6 +242,9 @@ struct geni_se {
#define IO2_DATA_IN BIT(1)
#define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */
+#define GP_LENGTH GENMASK(31, 0)
+
/* SE_DMA_TX_IRQ_STAT Register fields */
#define TX_DMA_DONE BIT(0)
#define TX_EOT BIT(1)
@@ -249,8 +260,8 @@ struct geni_se {
#define RX_DMA_PARITY_ERR BIT(5)
#define RX_DMA_BREAK GENMASK(8, 7)
#define RX_GENI_GP_IRQ GENMASK(10, 5)
-#define RX_GENI_CANCEL_IRQ BIT(11)
#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
+#define RX_GENI_CANCEL_IRQ BIT(14)
/* SE_HW_PARAM_0 fields */
#define TX_FIFO_WIDTH_MSK GENMASK(29, 24)
@@ -522,5 +533,7 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
int geni_icc_enable(struct geni_se *se);
int geni_icc_disable(struct geni_se *se);
+
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol);
#endif
#endif
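
A hedged example of the new firmware-load entry point; the UART protocol choice and the `se->dev` access are illustrative:

    int ret = geni_load_se_firmware(se, GENI_SE_UART);

    if (ret)
        dev_err(se->dev, "SE firmware load failed: %d\n", ret);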
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 1a886666bbb6..0287f9182c4d 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -24,6 +24,7 @@
#define LLCC_CMPTDMA 15
#define LLCC_DISP 16
#define LLCC_VIDFW 17
+#define LLCC_CAMFW 18
#define LLCC_MDMHPFX 20
#define LLCC_MDMPNG 21
#define LLCC_AUDHW 22
@@ -54,7 +55,33 @@
#define LLCC_CAMEXP4 52
#define LLCC_DISP_WB 53
#define LLCC_DISP_1 54
+#define LLCC_VIEYE 57
+#define LLCC_VIDPTH 58
+#define LLCC_GPUMV 59
+#define LLCC_EVA_LEFT 60
+#define LLCC_EVA_RIGHT 61
+#define LLCC_EVAGAIN 62
+#define LLCC_VIPTH 63
#define LLCC_VIDVSP 64
+#define LLCC_DISP_LEFT 65
+#define LLCC_DISP_RIGHT 66
+#define LLCC_EVCS_LEFT 67
+#define LLCC_EVCS_RIGHT 68
+#define LLCC_SPAD 69
+#define LLCC_VIDDEC 70
+#define LLCC_CAMOFE 71
+#define LLCC_CAMRTIP 72
+#define LLCC_CAMSRTIP 73
+#define LLCC_CAMRTRF 74
+#define LLCC_CAMSRTRF 75
+#define LLCC_VIDEO_APV 83
+#define LLCC_COMPUTE1 87
+#define LLCC_CPUSS_OPP 88
+#define LLCC_CPUSSMPAM 89
+#define LLCC_CAM_IPE_STROV 92
+#define LLCC_CAM_OFE_STROV 93
+#define LLCC_CPUSS_HEU 94
+#define LLCC_MDM_PNG_FIXED 100
/**
* struct llcc_slice_desc - Cache slice descriptor
@@ -115,7 +142,8 @@ struct llcc_edac_reg_offset {
/**
* struct llcc_drv_data - Data associated with the llcc driver
* @regmaps: regmaps associated with the llcc device
- * @bcast_regmap: regmap associated with llcc broadcast offset
+ * @bcast_regmap: regmap associated with llcc broadcast OR offset
+ * @bcast_and_regmap: regmap associated with llcc broadcast AND offset
* @cfg: pointer to the data structure for slice configuration
* @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
@@ -124,11 +152,13 @@ struct llcc_edac_reg_offset {
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
* @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @ecc_irq_configured: 'True' if firmware has already configured the irq propagation
* @version: Indicates the LLCC version
*/
struct llcc_drv_data {
struct regmap **regmaps;
struct regmap *bcast_regmap;
+ struct regmap *bcast_and_regmap;
const struct llcc_slice_config *cfg;
const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
@@ -137,6 +167,7 @@ struct llcc_drv_data {
u32 num_banks;
unsigned long *bitmap;
int ecc_irq;
+ bool ecc_irq_configured;
u32 version;
};
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 9e8e60421192..8ea8230579a2 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -24,7 +24,7 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
phys_addr_t *reloc_base);
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
+ const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
@@ -54,9 +54,8 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
- const char *fw_name, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
- size_t mem_size,
+ const char *fw_name, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
return -ENODEV;
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
index fd124aa18c81..7cddf1027752 100644
--- a/include/linux/soc/qcom/pmic_glink.h
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -23,10 +23,11 @@ struct pmic_glink_hdr {
int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
-struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
- unsigned int id,
- void (*cb)(const void *, size_t, void *),
- void (*pdr)(void *, int),
- void *priv);
+struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+void pmic_glink_client_register(struct pmic_glink_client *client);
#endif
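
The alloc/register split lets a client finish its own setup before callbacks can fire; the intended probe-time order is roughly as follows (owner id and callback names are illustrative):

    struct pmic_glink_client *client;

    client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_BATTMGR,
                                          my_cb, my_pdr, priv);
    if (IS_ERR(client))
        return PTR_ERR(client);

    /* ...initialize everything the callbacks may touch... */

    pmic_glink_client_register(client);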
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index 469e02d2aa0d..291cdc7ef49c 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -24,9 +24,9 @@ struct socket;
*/
struct qmi_header {
u8 type;
- u16 txn_id;
- u16 msg_id;
- u16 msg_len;
+ __le16 txn_id;
+ __le16 msg_id;
+ __le16 msg_len;
} __packed;
#define QMI_REQUEST 0
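
With the header fields now typed __le16, both sides must convert explicitly; a minimal sketch (`txn`, `msg`, `len` are hypothetical local u16s):

    struct qmi_header hdr = {
        .type    = QMI_REQUEST,
        .txn_id  = cpu_to_le16(txn),
        .msg_id  = cpu_to_le16(msg),
        .msg_len = cpu_to_le16(len),
    };

    u16 wire_len = le16_to_cpu(hdr.msg_len);    /* decoding side */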
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index a36a3b9d4929..f946e3beca21 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -13,5 +13,8 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
int qcom_smem_get_soc_id(u32 *id);
+int qcom_smem_get_feature_code(u32 *code);
+
+int qcom_smem_bust_hwspin_lock_by_host(unsigned int host);
#endif
diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h
index e78777bb0f4a..ba823a0013c5 100644
--- a/include/linux/soc/qcom/socinfo.h
+++ b/include/linux/soc/qcom/socinfo.h
@@ -3,6 +3,8 @@
#ifndef __QCOM_SOCINFO_H__
#define __QCOM_SOCINFO_H__
+#include <linux/types.h>
+
/*
* SMEM item id, used to acquire handles to respective
* SMEM region.
@@ -12,6 +14,14 @@
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
/* Socinfo SMEM item structure */
struct socinfo {
__le32 fmt;
@@ -72,6 +82,34 @@ struct socinfo {
__le32 num_func_clusters;
__le32 boot_cluster;
__le32 boot_core;
+ /* Version 20 */
+ __le32 raw_package_type;
+ /* Version 21, 22, 23 */
+ __le32 reserve1[4];
};
+/* SoC feature codes */
+enum qcom_socinfo_feature_code {
+ /* External feature codes */
+ SOCINFO_FC_UNKNOWN = 0x0,
+ SOCINFO_FC_AA,
+ SOCINFO_FC_AB,
+ SOCINFO_FC_AC,
+ SOCINFO_FC_AD,
+ SOCINFO_FC_AE,
+ SOCINFO_FC_AF,
+ SOCINFO_FC_AG,
+ SOCINFO_FC_AH,
+};
+
+/* Internal feature codes */
+/* Valid values: 0 <= n <= 0xf */
+#define SOCINFO_FC_Yn(n) (0xf1 + (n))
+#define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf)
+
+/* Product codes */
+#define SOCINFO_PC_UNKNOWN 0
+#define SOCINFO_PCn(n) ((n) + 1)
+#define SOCINFO_PC_RESERVE (BIT(31) - 1)
+
#endif
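
The packed-version macros round-trip as expected, e.g.:

    u32 ver = SOCINFO_VERSION(2, 1);    /* 0x00020001 */

    pr_info("SoC v%u.%u\n", SOCINFO_MAJOR(ver), SOCINFO_MINOR(ver));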
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
new file mode 100644
index 000000000000..0a4edfe3d96d
--- /dev/null
+++ b/include/linux/soc/qcom/ubwc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __QCOM_UBWC_H__
+#define __QCOM_UBWC_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct qcom_ubwc_cfg_data {
+ u32 ubwc_enc_version;
+ /* Can be read from MDSS_BASE + 0x58 */
+ u32 ubwc_dec_version;
+
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ */
+ u32 ubwc_swizzle;
+#define UBWC_SWIZZLE_ENABLE_LVL1 BIT(0)
+#define UBWC_SWIZZLE_ENABLE_LVL2 BIT(1)
+#define UBWC_SWIZZLE_ENABLE_LVL3 BIT(2)
+
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
+ int highest_bank_bit;
+ bool ubwc_bank_spread;
+
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ bool macrotile_mode;
+};
+
+#define UBWC_1_0 0x10000000
+#define UBWC_2_0 0x20000000
+#define UBWC_3_0 0x30000000
+#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
+#define UBWC_5_0 0x50000000
+#define UBWC_6_0 0x60000000
+
+#if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG)
+const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void);
+#else
+static inline const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg)
+{
+ bool ret = cfg->ubwc_enc_version == UBWC_1_0;
+
+ if (ret && !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL1))
+ pr_err("UBWC config discrepancy - level 1 swizzling disabled on UBWC 1.0\n");
+
+ return ret;
+}
+
+#endif /* __QCOM_UBWC_H__ */
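
A consumer sketch (hypothetical probe code; the version/swizzle check is illustrative):

    const struct qcom_ubwc_cfg_data *cfg = qcom_ubwc_config_get_data();

    if (IS_ERR(cfg))
        return PTR_ERR(cfg);

    if (cfg->ubwc_enc_version >= UBWC_4_0 &&
        !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2))
        dev_dbg(dev, "level 2 bank swizzling disabled\n");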
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index aa840ed043e1..532c6c2d1195 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -55,6 +55,8 @@
#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+/* USB PHY enable bit, valid for Exynos7870 */
+#define EXYNOS7870_USB2PHY_ENABLE (1 << 1)
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
@@ -185,6 +187,9 @@
/* Only for S5Pv210 */
#define S5PV210_EINT_WAKEUP_MASK 0xC004
+/* Only for Exynos2200 */
+#define EXYNOS2200_PHY_CTRL_USB20 0x72C
+
/* Only for Exynos4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -657,4 +662,357 @@
#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268)
#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8)
+/* For Exynos990 */
+#define EXYNOS990_PHY_CTRL_USB20 (0x72C)
+
+/* For Exynos7870 */
+#define EXYNOS7870_MIPI_PHY_CONTROL0 (0x070c)
+#define EXYNOS7870_MIPI_PHY_CONTROL1 (0x0714)
+#define EXYNOS7870_MIPI_PHY_CONTROL2 (0x0734)
+
+/* For Tensor GS101 */
+/* PMU ALIVE */
+#define GS101_OM_STAT 0x0000
+#define GS101_VERSION 0x0004
+#define GS101_PORESET_CHECK 0x0008
+#define GS101_OTP_STATUS 0x000c
+#define GS101_SYSTEM_INFO 0x0010
+#define GS101_IDLE_IP(n) (0x03e0 + ((n) & 3) * 4)
+#define GS101_IDLE_IP_MASK(n) (0x03f0 + ((n) & 3) * 4)
+#define GS101_SLC_CH_OFFSET(ch) (0x0400 + ((ch) & 3) * 0x10)
+#define GS101_DATARAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x00)
+#define GS101_TAGRAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x04)
+#define GS101_LRURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x08)
+#define GS101_PPMPURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x0c)
+#define GS101_DATARAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x40)
+#define GS101_TAGRAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x44)
+#define GS101_LRURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x48)
+#define GS101_PPMPURAM_INFORM_SCL_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x4c)
+#define GS101_INFORM0 0x0800
+#define GS101_INFORM1 0x0804
+#define GS101_INFORM2 0x0808
+#define GS101_INFORM3 0x080c
+#define GS101_SYSIP_DAT(n) (0x0810 + ((n) & 3) * 4)
+#define GS101_PWR_HOLD_HW_TRIP 0x0820
+#define GS101_PWR_HOLD_SW_TRIP 0x0824
+#define GS101_GSA_INFORM(n) (0x0830 + ((n) & 1) * 4)
+#define GS101_INFORM4 0x0840
+#define GS101_INFORM5 0x0844
+#define GS101_INFORM6 0x0848
+#define GS101_INFORM7 0x084c
+#define GS101_INFORM8 0x0850
+#define GS101_INFORM9 0x0854
+#define GS101_INFORM10 0x0858
+#define GS101_INFORM11 0x085c
+#define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4)
+#define GS101_IROM_INFORM 0x0880
+#define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4)
+#define GS101_PMU_SPARE(n) (0x0900 + ((n) & 3) * 4)
+#define GS101_IROM_DATA_REG(n) (0x0980 + ((n) & 3) * 4)
+#define GS101_IROM_PWRMODE 0x0990
+#define GS101_DREX_CALIBRATION(n) (0x09a0 + ((n) & 7) * 4)
+
+#define GS101_CLUSTER0_OFFSET 0x1000
+#define GS101_CLUSTER1_OFFSET 0x1300
+#define GS101_CLUSTER2_OFFSET 0x1500
+#define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80))
+#define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00)
+#define GS101_CLUSTER_CPU_STATUS(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x04)
+#define GS101_CLUSTER_CPU_STATES(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08)
+#define GS101_CLUSTER_CPU_OPTION(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c)
+#define GS101_CLUSTER_CPU_OUT(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20)
+#define GS101_CLUSTER_CPU_IN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24)
+#define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40)
+#define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44)
+#define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48)
+#define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c)
+
+#define GS101_CLUSTER_NONCPU_OFFSET(cl) (0x1200 + ((cl) * 0x200))
+#define GS101_CLUSTER_NONCPU_CONFIGURATION(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x00)
+#define GS101_CLUSTER_NONCPU_STATUS(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x04)
+#define GS101_CLUSTER_NONCPU_STATES(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x08)
+#define GS101_CLUSTER_NONCPU_OPTION(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x0c)
+#define GS101_CLUSTER_NONCPU_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x20)
+#define GS101_CLUSTER_NONCPU_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x24)
+#define GS101_CLUSTER_NONCPU_INT_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x40)
+#define GS101_CLUSTER_NONCPU_INT_EN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x44)
+#define GS101_CLUSTER_NONCPU_INT_TYPE(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x48)
+#define GS101_CLUSTER_NONCPU_INT_DIR(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x4c)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x60)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x64)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x6c)
+#define GS101_CLUSTER0_NONCPU_DSU_PCH \
+ (GS101_CLUSTER_NONCPU_OFFSET(0) + 0x80)
+
+#define GS101_SUBBBLK_OFFSET_ALIVE 0x1800
+#define GS101_SUBBBLK_OFFSET_AOC 0x1880
+#define GS101_SUBBBLK_OFFSET_APM 0x1900
+#define GS101_SUBBBLK_OFFSET_CMU 0x1980
+#define GS101_SUBBBLK_OFFSET_BUS0 0x1a00
+#define GS101_SUBBBLK_OFFSET_BUS1 0x1a80
+#define GS101_SUBBBLK_OFFSET_BUS2 0x1b00
+#define GS101_SUBBBLK_OFFSET_CORE 0x1b80
+#define GS101_SUBBBLK_OFFSET_EH 0x1c00
+#define GS101_SUBBBLK_OFFSET_CPUCL0 0x1c80
+#define GS101_SUBBBLK_OFFSET_CPUCL1 0x1d00
+#define GS101_SUBBBLK_OFFSET_CPUCL2 0x1d80
+#define GS101_SUBBBLK_OFFSET_G3D 0x1e00
+#define GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0 0x1e80
+#define GS101_SUBBBLK_OFFSET_EMBEDDED_G3D 0x2000
+#define GS101_SUBBBLK_OFFSET_HSI0 0x2080
+#define GS101_SUBBBLK_OFFSET_HSI1 0x2100
+#define GS101_SUBBBLK_OFFSET_HSI2 0x2180
+#define GS101_SUBBBLK_OFFSET_DPU 0x2200
+#define GS101_SUBBBLK_OFFSET_DISP 0x2280
+#define GS101_SUBBBLK_OFFSET_G2D 0x2300
+#define GS101_SUBBBLK_OFFSET_MFC 0x2380
+#define GS101_SUBBBLK_OFFSET_CSIS 0x2400
+#define GS101_SUBBBLK_OFFSET_PDP 0x2480
+#define GS101_SUBBBLK_OFFSET_DNS 0x2500
+#define GS101_SUBBBLK_OFFSET_G3AA 0x2580
+#define GS101_SUBBBLK_OFFSET_IPP 0x2600
+#define GS101_SUBBBLK_OFFSET_ITP 0x2680
+#define GS101_SUBBBLK_OFFSET_MCSC 0x2700
+#define GS101_SUBBBLK_OFFSET_GDC 0x2780
+#define GS101_SUBBBLK_OFFSET_TNR 0x2800
+#define GS101_SUBBBLK_OFFSET_BO 0x2880
+#define GS101_SUBBBLK_OFFSET_TPU 0x2900
+#define GS101_SUBBBLK_OFFSET_MIF0 0x2980
+#define GS101_SUBBBLK_OFFSET_MIF1 0x2a00
+#define GS101_SUBBBLK_OFFSET_MIF2 0x2a80
+#define GS101_SUBBBLK_OFFSET_MIF3 0x2b00
+#define GS101_SUBBBLK_OFFSET_MISC 0x2b80
+#define GS101_SUBBBLK_OFFSET_PERIC0 0x2c00
+#define GS101_SUBBBLK_OFFSET_PERIC1 0x2c80
+#define GS101_SUBBBLK_OFFSET_S2D 0x2d00
+#define GS101_SUBBLK_CONFIGURATION(blk) ((blk) + 0x00)
+#define GS101_SUBBLK_STATUS(blk) ((blk) + 0x04)
+#define GS101_SUBBLK_STATES(blk) ((blk) + 0x08)
+#define GS101_SUBBLK_OPTION(blk) ((blk) + 0x0c)
+#define GS101_SUBBLK_CTRL(blk) ((blk) + 0x10)
+#define GS101_SUBBLK_OUT(blk) ((blk) + 0x20)
+#define GS101_SUBBLK_IN(blk) ((blk) + 0x24)
+#define GS101_SUBBLK_INT_IN(blk) ((blk) + 0x40)
+#define GS101_SUBBLK_INT_EN(blk) ((blk) + 0x44)
+#define GS101_SUBBLK_INT_TYPE(blk) ((blk) + 0x48)
+#define GS101_SUBBLK_INT_DIR(blk) ((blk) + 0x4c)
+#define GS101_SUBBLK_MEMORY_OUT(blk) ((blk) + 0x60)
+#define GS101_SUBBLK_MEMORY_IN(blk) ((blk) + 0x64)
+
+#define GS101_SUBBBLK_CPU_OFFSET_APM 0x3000
+#define GS101_SUBBBLK_CPU_OFFSET_DBGCORE 0x3080
+#define GS101_SUBBBLK_CPU_OFFSET_SSS 0x3100
+#define GS101_SUBBLK_CPU_CONFIGURATION(blk) ((blk) + 0x00)
+#define GS101_SUBBLK_CPU_STATUS(blk) ((blk) + 0x04)
+#define GS101_SUBBLK_CPU_STATES(blk) ((blk) + 0x08)
+#define GS101_SUBBLK_CPU_OPTION(blk) ((blk) + 0x0c)
+#define GS101_SUBBLK_CPU_OUT(blk) ((blk) + 0x20)
+#define GS101_SUBBLK_CPU_IN(blk) ((blk) + 0x24)
+#define GS101_SUBBLK_CPU_INT_IN(blk) ((blk) + 0x40)
+#define GS101_SUBBLK_CPU_INT_EN(blk) ((blk) + 0x44)
+#define GS101_SUBBLK_CPU_INT_TYPE(blk) ((blk) + 0x48)
+#define GS101_SUBBLK_CPU_INT_DIR(blk) ((blk) + 0x4c)
+
+#define GS101_MIF_CONFIGURATION 0x3800
+#define GS101_MIF_STATUS 0x3804
+#define GS101_MIF_STATES 0x3808
+#define GS101_MIF_OPTION 0x380c
+#define GS101_MIF_CTRL 0x3810
+#define GS101_MIF_OUT 0x3820
+#define GS101_MIF_IN 0x3824
+#define GS101_MIF_INT_IN 0x3840
+#define GS101_MIF_INT_EN 0x3844
+#define GS101_MIF_INT_TYPE 0x3848
+#define GS101_MIF_INT_DIR 0x384c
+#define GS101_TOP_CONFIGURATION 0x3900
+#define GS101_TOP_STATUS 0x3904
+#define GS101_TOP_STATES 0x3908
+#define GS101_TOP_OPTION 0x390c
+#define GS101_TOP_OUT 0x3920
+#define GS101_TOP_IN 0x3924
+#define GS101_TOP_INT_IN 0x3940
+#define GS101_TOP_INT_EN 0x3944
+#define GS101_TOP_INT_TYPE 0x3948
+#define GS101_TOP_INT_DIR 0x394c
+#define GS101_WAKEUP_STAT 0x3950
+#define GS101_WAKEUP2_STAT 0x3954
+#define GS101_WAKEUP2_INT_IN 0x3960
+#define GS101_WAKEUP2_INT_EN 0x3964
+#define GS101_WAKEUP2_INT_TYPE 0x3968
+#define GS101_WAKEUP2_INT_DIR 0x396c
+#define GS101_SYSTEM_CONFIGURATION 0x3a00
+#define GS101_SYSTEM_STATUS 0x3a04
+#define GS101_SYSTEM_STATES 0x3a08
+#define GS101_SYSTEM_OPTION 0x3a0c
+#define GS101_SYSTEM_CTRL 0x3a10
+#define GS101_SPARE_CTRL 0x3a14
+#define GS101_USER_DEFINED_OUT 0x3a18
+#define GS101_SYSTEM_OUT 0x3a20
+#define GS101_SYSTEM_IN 0x3a24
+#define GS101_SYSTEM_INT_IN 0x3a40
+#define GS101_SYSTEM_INT_EN 0x3a44
+#define GS101_SYSTEM_INT_TYPE 0x3a48
+#define GS101_SYSTEM_INT_DIR 0x3a4c
+#define GS101_EINT_INT_IN 0x3a50
+#define GS101_EINT_INT_EN 0x3a54
+#define GS101_EINT_INT_TYPE 0x3a58
+#define GS101_EINT_INT_DIR 0x3a5c
+#define GS101_EINT2_INT_IN 0x3a60
+#define GS101_EINT2_INT_EN 0x3a64
+#define GS101_EINT2_INT_TYPE 0x3a68
+#define GS101_EINT2_INT_DIR 0x3a6c
+#define GS101_EINT3_INT_IN 0x3a70
+#define GS101_EINT3_INT_EN 0x3a74
+#define GS101_EINT3_INT_TYPE 0x3a78
+#define GS101_EINT3_INT_DIR 0x3a7c
+#define GS101_EINT_WAKEUP_MASK 0x3a80
+#define GS101_EINT_WAKEUP_MASK2 0x3a84
+#define GS101_EINT_WAKEUP_MASK3 0x3a88
+#define GS101_USER_DEFINED_INT_IN 0x3a90
+#define GS101_USER_DEFINED_INT_EN 0x3a94
+#define GS101_USER_DEFINED_INT_TYPE 0x3a98
+#define GS101_USER_DEFINED_INT_DIR 0x3a9c
+#define GS101_SCAN2DRAM_INT_IN 0x3aa0
+#define GS101_SCAN2DRAM_INT_EN 0x3aa4
+#define GS101_SCAN2DRAM_INT_TYPE 0x3aa8
+#define GS101_SCAN2DRAM_INT_DIR 0x3aac
+#define GS101_HCU_START 0x3ab0
+#define GS101_CUSTOM_OUT 0x3ac0
+#define GS101_CUSTOM_IN 0x3ac4
+#define GS101_CUSTOM_INT_IN 0x3ad0
+#define GS101_CUSTOM_INT_EN 0x3ad4
+#define GS101_CUSTOM_INT_TYPE 0x3ad8
+#define GS101_CUSTOM_INT_DIR 0x3adc
+#define GS101_ACK_LAST_CPU 0x3afc
+#define GS101_HCU_R(n) (0x3b00 + ((n) & 3) * 4)
+#define GS101_HCU_SP 0x3b14
+#define GS101_HCU_PC 0x3b18
+#define GS101_PMU_RAM_CTRL 0x3b20
+#define GS101_APM_HCU_CTRL 0x3b24
+#define GS101_APM_NMI_ENABLE 0x3b30
+#define GS101_DBGCORE_NMI_ENABLE 0x3b34
+#define GS101_HCU_NMI_ENABLE 0x3b38
+#define GS101_PWR_HOLD_WDT_ENABLE 0x3b3c
+#define GS101_NMI_SRC_IN 0x3b40
+#define GS101_RST_STAT 0x3b44
+#define GS101_RST_STAT_PMU 0x3b48
+#define GS101_HPM_INT_IN 0x3b60
+#define GS101_HPM_INT_EN 0x3b64
+#define GS101_HPM_INT_TYPE 0x3b68
+#define GS101_HPM_INT_DIR 0x3b6c
+#define GS101_S2D_AUTH 0x3b70
+#define GS101_BOOT_STAT 0x3b74
+#define GS101_PMLINK_OUT 0x3c00
+#define GS101_PMLINK_AOC_OUT 0x3c04
+#define GS101_PMLINK_AOC_CTRL 0x3c08
+#define GS101_TCXO_BUF_CTRL 0x3c10
+#define GS101_ADD_CTRL 0x3c14
+#define GS101_HCU_TIMEOUT_RESET 0x3c20
+#define GS101_HCU_TIMEOUT_SCAN2DRAM 0x3c24
+#define GS101_TIMER(n) (0x3c80 + ((n) & 3) * 4)
+#define GS101_PPC_MIF(n) (0x3c90 + ((n) & 3) * 4)
+#define GS101_PPC_CORE 0x3ca0
+#define GS101_PPC_EH 0x3ca4
+#define GS101_PPC_CPUCL1_0 0x3ca8
+#define GS101_PPC_CPUCL1_1 0x3cac
+#define GS101_EXT_REGULATOR_MIF_DURATION 0x3cb0
+#define GS101_EXT_REGULATOR_TOP_DURATION 0x3cb4
+#define GS101_EXT_REGULATOR_CPUCL2_DURATION 0x3cb8
+#define GS101_EXT_REGULATOR_CPUCL1_DURATION 0x3cbc
+#define GS101_EXT_REGULATOR_G3D_DURATION 0x3cc0
+#define GS101_EXT_REGULATOR_TPU_DURATION 0x3cc4
+#define GS101_TCXO_DURATION 0x3cc8
+#define GS101_BURNIN_CTRL 0x3cd0
+#define GS101_JTAG_DBG_DET 0x3cd4
+#define GS101_MMC_CONWKUP_CTRL 0x3cd8
+#define GS101_USBDPPHY0_USBDP_WAKEUP 0x3cdc
+#define GS101_TMU_TOP_TRIP 0x3ce0
+#define GS101_TMU_SUB_TRIP 0x3ce4
+#define GS101_MEMORY_CEN 0x3d00
+#define GS101_MEMORY_PGEN 0x3d04
+#define GS101_MEMORY_RET 0x3d08
+#define GS101_MEMORY_PGEN_FEEDBACK 0x3d0c
+#define GS101_MEMORY_SMX 0x3d10
+#define GS101_MEMORY_SMX_FEEDBACK 0x3d14
+#define GS101_SLC_PCH_CHANNEL 0x3d20
+#define GS101_SLC_PCH_CB 0x3d24
+#define GS101_FORCE_NOMC 0x3d3c
+#define GS101_FORCE_BOOST 0x3d4c
+#define GS101_PMLINK_SLC_REQ 0x3d50
+#define GS101_PMLINK_SLC_ACK 0x3d54
+#define GS101_PMLINK_SLC_BUSY 0x3d58
+#define GS101_BOOTSYNC_OUT 0x3d80
+#define GS101_BOOTSYNC_IN 0x3d84
+#define GS101_SCAN_READY_OUT 0x3d88
+#define GS101_SCAN_READY_IN 0x3d8c
+#define GS101_GSA_RESTORE 0x3d90
+#define GS101_ALIVE_OTP_LATCH 0x3d94
+#define GS101_DEBUG_OVERRIDE 0x3d98
+#define GS101_WDT_OPTION 0x3d9c
+#define GS101_AOC_WDT_CFG 0x3da0
+#define GS101_CTRL_SECJTAG_ALIVE 0x3da4
+#define GS101_CTRL_DIV_PLL_ALV_DIVLOW 0x3e00
+#define GS101_CTRL_MUX_CLK_APM_REFSRC_AUTORESTORE 0x3e04
+#define GS101_CTRL_MUX_CLK_APM_REFSRC 0x3e08
+#define GS101_CTRL_MUX_CLK_APM_REF 0x3e0c
+#define GS101_CTRL_MUX_PLL_ALV_DIV4 0x3e10
+#define GS101_CTRL_PLL_ALV_DIV4 0x3e14
+#define GS101_CTRL_OSCCLK_APMGSA 0x3e18
+#define GS101_CTRL_BLK_AOC_CLKS 0x3e1c
+#define GS101_CTRL_PLL_ALV_LOCK 0x3e20
+#define GS101_CTRL_CLKDIV__CLKRTC 0x3e24
+#define GS101_CTRL_SOC32K 0x3e30
+#define GS101_CTRL_STM_PMU 0x3e34
+#define GS101_CTRL_PMU_DEBUG 0x3e38
+#define GS101_CTRL_DEBUG_UART 0x3e3c
+#define GS101_CTRL_TCK 0x3e40
+#define GS101_CTRL_SBU_SW_EN 0x3e44
+#define GS101_PAD_CTRL_CLKOUT0 0x3e80
+#define GS101_PAD_CTRL_CLKOUT1 0x3e84
+#define GS101_PAD_CTRL_APM_24MOUT_0 0x3e88
+#define GS101_PAD_CTRL_APM_24MOUT_1 0x3e8c
+#define GS101_PAD_CTRL_IO_FORCE_RETENTION 0x3e90
+#define GS101_PAD_CTRL_APACTIVE_n 0x3e94
+#define GS101_PAD_CTRL_TCXO_ON 0x3e98
+#define GS101_PAD_CTRL_PWR_HOLD 0x3e9c
+#define GS101_PAD_CTRL_RESETO_n 0x3ea0
+#define GS101_PAD_CTRL_WRESETO_n 0x3ea4
+#define GS101_PHY_CTRL_USB20 0x3eb0
+#define GS101_PHY_CTRL_USBDP 0x3eb4
+#define GS101_PHY_CTRL_MIPI_DCPHY_M4M4 0x3eb8
+#define GS101_PHY_CTRL_MIPI_DCPHY_S4S4S4S4 0x3ebc
+#define GS101_PHY_CTRL_PCIE_GEN4_0 0x3ec0
+#define GS101_PHY_CTRL_PCIE_GEN4_1 0x3ec4
+#define GS101_PHY_CTRL_UFS 0x3ec8
+
+/* PMU INTR GEN */
+#define GS101_GRP1_INTR_BID_UPEND (0x0108)
+#define GS101_GRP1_INTR_BID_CLEAR (0x010c)
+#define GS101_GRP2_INTR_BID_ENABLE (0x0200)
+#define GS101_GRP2_INTR_BID_UPEND (0x0208)
+#define GS101_GRP2_INTR_BID_CLEAR (0x020c)
+
#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index bd0d11af76c5..fd104b666836 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -195,6 +195,35 @@ struct ti_sci_clk_ops {
u64 *current_freq);
};
+/* TISCI LPM IO isolation control values */
+#define TISCI_MSG_VALUE_IO_ENABLE 1
+#define TISCI_MSG_VALUE_IO_DISABLE 0
+
+/* TISCI LPM constraint state values */
+#define TISCI_MSG_CONSTRAINT_SET 1
+#define TISCI_MSG_CONSTRAINT_CLR 0
+
+/**
+ * struct ti_sci_pm_ops - Low Power Mode (LPM) control operations
+ * @lpm_wake_reason: Get the wake up source that woke the SoC from LPM
+ * - source: The wake up source that woke the SoC from LPM.
+ * - timestamp: Timestamp at which the SoC woke.
+ * @set_device_constraint: Set LPM constraint on behalf of a device
+ * - id: Device Identifier
+ * - state: The desired state of device constraint: set or clear.
+ * @set_latency_constraint: Set LPM resume latency constraint
+ * - latency: maximum acceptable latency to wake up from low power mode
+ * - state: The desired state of latency constraint: set or clear.
+ */
+struct ti_sci_pm_ops {
+ int (*lpm_wake_reason)(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode);
+ int (*set_device_constraint)(const struct ti_sci_handle *handle,
+ u32 id, u8 state);
+ int (*set_latency_constraint)(const struct ti_sci_handle *handle,
+ u16 latency, u8 state);
+};
+
/**
* struct ti_sci_resource_desc - Description of TI SCI resource instance range.
* @start: Start index of the first resource range.
@@ -539,6 +568,7 @@ struct ti_sci_ops {
struct ti_sci_core_ops core_ops;
struct ti_sci_dev_ops dev_ops;
struct ti_sci_clk_ops clk_ops;
+ struct ti_sci_pm_ops pm_ops;
struct ti_sci_rm_core_ops rm_core_ops;
struct ti_sci_rm_irq_ops rm_irq_ops;
struct ti_sci_rm_ringacc_ops rm_ring_ops;
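
A hedged sketch of the new PM hooks; handle acquisition, `dev_id`, and the 10 ms latency figure are illustrative:

    const struct ti_sci_pm_ops *pm = &handle->ops.pm_ops;
    u32 source;
    u64 ts;
    u8 pin, mode;

    pm->set_device_constraint(handle, dev_id, TISCI_MSG_CONSTRAINT_SET);
    pm->set_latency_constraint(handle, 10, TISCI_MSG_CONSTRAINT_SET);

    if (!pm->lpm_wake_reason(handle, &source, &ts, &pin, &mode))
        dev_dbg(dev, "LPM wake: source %u at %llu\n", source, ts);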
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 89d16b90370b..ec715ad4bf25 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -32,12 +32,27 @@ typedef __kernel_sa_family_t sa_family_t;
* 1003.1g requires sa_family_t and that sa_data is char.
*/
+/* Deprecated for in-kernel use. Use struct sockaddr_unsized instead. */
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
- union {
- char sa_data_min[14]; /* Minimum 14 bytes of protocol address */
- DECLARE_FLEX_ARRAY(char, sa_data);
- };
+ char sa_data[14]; /* 14 bytes of protocol address */
+};
+
+/**
+ * struct sockaddr_unsized - Unspecified size sockaddr for callbacks
+ * @sa_family: Address family (AF_UNIX, AF_INET, AF_INET6, etc.)
+ * @sa_data: Flexible array for address data
+ *
+ * This structure is designed for callback interfaces where the
+ * total size is known via the sockaddr_len parameter. Unlike struct
+ * sockaddr which has a fixed 14-byte sa_data limit or struct
+ * sockaddr_storage which is capped at 128 bytes total, this
+ * structure can accommodate addresses of any size, but must be used
+ * carefully.
+ */
+struct sockaddr_unsized {
+ __kernel_sa_family_t sa_family; /* address family, AF_xxx */
+ char sa_data[]; /* flexible address data */
};
struct linger {
@@ -76,7 +91,7 @@ struct msghdr {
__kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
struct ubuf_info *msg_ubuf;
- int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb,
+ int (*sg_from_iter)(struct sk_buff *skb,
struct iov_iter *from, size_t length);
};
@@ -168,7 +183,7 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
-static inline size_t msg_data_left(struct msghdr *msg)
+static inline size_t msg_data_left(const struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}
@@ -327,6 +342,7 @@ struct ucred {
* plain text and require encryption
*/
+#define MSG_SOCK_DEVMEM 0x2000000 /* Receive devmem skbs as cmsg */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
@@ -391,6 +407,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data);
struct timespec64;
struct __kernel_timespec;
@@ -442,15 +460,18 @@ extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
extern int __sys_socket(int family, int type, int protocol);
extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
+ int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
+extern int __sys_listen_socket(struct socket *sock, int backlog);
+extern int do_getsockname(struct socket *sock, int peer,
+ struct sockaddr __user *usockaddr, int __user *usockaddr_len);
extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len);
-extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len);
+ int __user *usockaddr_len, int peer);
extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
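
How a callback might consume struct sockaddr_unsized, with the size carried out of band; the callback shape is illustrative, not a specific in-tree hook:

    static int my_addr_cb(struct sockaddr_unsized *addr, int addr_len)
    {
        if (addr_len < (int)sizeof(addr->sa_family))
            return -EINVAL;

        if (addr->sa_family == AF_INET) {
            struct sockaddr_in *sin = (struct sockaddr_in *)addr;

            if (addr_len < (int)sizeof(*sin))
                return -EINVAL;
            /* ...sin->sin_port / sin->sin_addr are now in bounds... */
        }
        return 0;
    }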
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index fc5a206c4043..3e6c8e9d67ae 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -53,6 +53,8 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
/* Deprecated.
* This is unsafe, unless caller checked user provided optlen.
* Prefer copy_safe_from_sockptr() instead.
+ *
+ * Returns 0 for success, or number of bytes not copied on error.
*/
static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
@@ -77,7 +79,9 @@ static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
{
if (optlen < ksize)
return -EINVAL;
- return copy_from_sockptr(dst, optval, ksize);
+ if (copy_from_sockptr(dst, optval, ksize))
+ return -EFAULT;
+ return 0;
}
static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
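
After the fix above, copy_safe_from_sockptr() yields a proper errno on a partial copy; a typical setsockopt-style caller:

    struct my_opts opts;    /* hypothetical option struct */
    int err;

    err = copy_safe_from_sockptr(&opts, sizeof(opts), optval, optlen);
    if (err)
        return err;         /* -EINVAL (short optlen) or -EFAULT */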
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
deleted file mode 100644
index 1e3c92feea6e..000000000000
--- a/include/linux/sony-laptop.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SONYLAPTOP_H_
-#define _SONYLAPTOP_H_
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-
-/* used only for communication between v4l and sony-laptop */
-
-#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERA 2
-#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4
-#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6
-#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAHUE 8
-#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACOLOR 10
-#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12
-#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14
-#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAAGC 16
-#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */
-
-#if IS_ENABLED(CONFIG_SONY_LAPTOP)
-int sony_pic_camera_command(int command, u8 value);
-#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _SONYLAPTOP_H_ */
diff --git a/include/linux/sort.h b/include/linux/sort.h
index e163287ac6c1..c01ef804a0eb 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -4,6 +4,16 @@
#include <linux/types.h>
+/**
+ * cmp_int - perform a three-way comparison of the arguments
+ * @l: the left argument
+ * @r: the right argument
+ *
+ * Return: 1 if the left argument is greater than the right one; 0 if the
+ * arguments are equal; -1 if the left argument is less than the right one.
+ */
+#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))
+
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
swap_r_func_t swap_func,
@@ -13,4 +23,15 @@ void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func);
+/* Versions that periodically call cond_resched(): */
+
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
+
#endif
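
/*
 * Illustrative sketch (not part of the patch): cmp_int() can serve as
 * the body of a cmp_func_t, and sort_nonatomic() behaves like sort()
 * except that it periodically calls cond_resched(), which suits large
 * arrays sorted in process context. Names below are hypothetical.
 */
static int cmp_u32(const void *a, const void *b)
{
	return cmp_int(*(const u32 *)a, *(const u32 *)b);
}

static void example_sort_big_array(u32 *vals, size_t n)
{
	/* a NULL swap_func selects the default byte-wise swap */
	sort_nonatomic(vals, n, sizeof(*vals), cmp_u32, NULL);
}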
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 13e96d8b7423..e6a3476bcef1 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -4,12 +4,22 @@
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
+#include <linux/bitfield.h>
#include <linux/bug.h>
-#include <linux/lockdep_types.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/lockdep_types.h>
#include <linux/mod_devicetable.h>
-#include <linux/bitfield.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <sound/sdca.h>
+
+struct dentry;
+struct fwnode_handle;
+struct device_node;
struct sdw_bus;
struct sdw_slave;
@@ -42,10 +52,13 @@ struct sdw_slave;
#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
+#define SDW_FW_MAX_DEVICES 16
#define SDW_MAX_PORTS 15
#define SDW_VALID_PORT_RANGE(n) ((n) < SDW_MAX_PORTS && (n) >= 1)
+#define SDW_MAX_LANES 8
+
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -140,12 +153,14 @@ enum sdw_dpn_pkg_mode {
*
* @SDW_STREAM_PCM: PCM data stream
* @SDW_STREAM_PDM: PDM data stream
+ * @SDW_STREAM_BPT: BPT data stream
*
 * spec doesn't define this, but it is used in implementations
*/
enum sdw_stream_type {
SDW_STREAM_PCM = 0,
SDW_STREAM_PDM = 1,
+ SDW_STREAM_BPT = 2,
};
/**
@@ -226,64 +241,36 @@ enum sdw_clk_stop_mode {
/**
* struct sdw_dp0_prop - DP0 properties
+ * @words: wordlengths supported
* @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
* (inclusive)
* @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
* (inclusive)
* @num_words: number of wordlengths supported
- * @words: wordlengths supported
+ * @ch_prep_timeout: Port-specific timeout value, in milliseconds
* @BRA_flow_controlled: Slave implementation results in an OK_NotReady
* response
* @simple_ch_prep_sm: If channel prepare sequence is required
- * @ch_prep_timeout: Port-specific timeout value, in milliseconds
* @imp_def_interrupts: If set, each bit corresponds to support for
* implementation-defined interrupts
+ * @num_lanes: array size of @lane_list
+ * @lane_list: indicates which Lanes can be used by DP0
*
* The wordlengths are specified by Spec as max, min AND number of
* discrete values, implementation can define based on the wordlengths they
* support
*/
struct sdw_dp0_prop {
+ u32 *words;
u32 max_word;
u32 min_word;
u32 num_words;
- u32 *words;
+ u32 ch_prep_timeout;
bool BRA_flow_controlled;
bool simple_ch_prep_sm;
- u32 ch_prep_timeout;
bool imp_def_interrupts;
-};
-
-/**
- * struct sdw_dpn_audio_mode - Audio mode properties for DPn
- * @bus_min_freq: Minimum bus frequency, in Hz
- * @bus_max_freq: Maximum bus frequency, in Hz
- * @bus_num_freq: Number of discrete frequencies supported
- * @bus_freq: Discrete bus frequencies, in Hz
- * @min_freq: Minimum sampling frequency, in Hz
- * @max_freq: Maximum sampling bus frequency, in Hz
- * @num_freq: Number of discrete sampling frequency supported
- * @freq: Discrete sampling frequencies, in Hz
- * @prep_ch_behave: Specifies the dependencies between Channel Prepare
- * sequence and bus clock configuration
- * If 0, Channel Prepare can happen at any Bus clock rate
- * If 1, Channel Prepare sequence shall happen only after Bus clock is
- * changed to a frequency supported by this mode or compatible modes
- * described by the next field
- * @glitchless: Bitmap describing possible glitchless transitions from this
- * Audio Mode to other Audio Modes
- */
-struct sdw_dpn_audio_mode {
- u32 bus_min_freq;
- u32 bus_max_freq;
- u32 bus_num_freq;
- u32 *bus_freq;
- u32 max_freq;
- u32 min_freq;
- u32 num_freq;
- u32 *freq;
- u32 prep_ch_behave;
- u32 glitchless;
+ int num_lanes;
+ u32 *lane_list;
};
/**
@@ -298,24 +285,25 @@ struct sdw_dpn_audio_mode {
* @type: Data port type. Full, Simplified or Reduced
* @max_grouping: Maximum number of samples that can be grouped together for
* a full data port
- * @simple_ch_prep_sm: If the port supports simplified channel prepare state
- * machine
* @ch_prep_timeout: Port-specific timeout value, in milliseconds
* @imp_def_interrupts: If set, each bit corresponds to support for
* implementation-defined interrupts
* @max_ch: Maximum channels supported
* @min_ch: Minimum channels supported
* @num_channels: Number of discrete channels supported
- * @channels: Discrete channels supported
* @num_ch_combinations: Number of channel combinations supported
+ * @channels: Discrete channels supported
* @ch_combinations: Channel combinations supported
+ * @lane_list: indicates which Lanes can be used by DPn
+ * @num_lanes: array size of @lane_list
* @modes: SDW mode supported
* @max_async_buffer: Number of samples that this port can buffer in
* asynchronous modes
+ * @port_encoding: Payload Channel Sample encoding schemes supported
* @block_pack_mode: Type of block port mode supported
* @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register
- * @port_encoding: Payload Channel Sample encoding schemes supported
- * @audio_modes: Audio modes supported
+ * @simple_ch_prep_sm: If the port supports simplified channel prepare state
+ * machine
*/
struct sdw_dpn_prop {
u32 num;
@@ -325,25 +313,29 @@ struct sdw_dpn_prop {
u32 *words;
enum sdw_dpn_type type;
u32 max_grouping;
- bool simple_ch_prep_sm;
u32 ch_prep_timeout;
u32 imp_def_interrupts;
u32 max_ch;
u32 min_ch;
u32 num_channels;
- u32 *channels;
u32 num_ch_combinations;
+ u32 *channels;
u32 *ch_combinations;
+ u32 *lane_list;
+ int num_lanes;
u32 modes;
u32 max_async_buffer;
+ u32 port_encoding;
bool block_pack_mode;
bool read_only_wordlength;
- u32 port_encoding;
- struct sdw_dpn_audio_mode *audio_modes;
+ bool simple_ch_prep_sm;
};
/**
* struct sdw_slave_prop - SoundWire Slave properties
+ * @dp0_prop: Data Port 0 properties
+ * @src_dpn_prop: Source Data Port N properties
+ * @sink_dpn_prop: Sink Data Port N properties
* @mipi_revision: Spec version of the implementation
* @wake_capable: Wake-up events are supported
* @test_mode_capable: If test mode is supported
@@ -360,23 +352,27 @@ struct sdw_dpn_prop {
* SCP_AddrPage2
* @bank_delay_support: Slave implements bank delay/bridge support registers
* SCP_BankDelay and SCP_NextFrame
+ * @lane_control_support: Slave supports lane control
* @p15_behave: Slave behavior when the Master attempts a read to the Port15
* alias
- * @lane_control_support: Slave supports lane control
* @master_count: Number of Masters present on this Slave
* @source_ports: Bitmap identifying source ports
* @sink_ports: Bitmap identifying sink ports
- * @dp0_prop: Data Port 0 properties
- * @src_dpn_prop: Source Data Port N properties
- * @sink_dpn_prop: Sink Data Port N properties
- * @scp_int1_mask: SCP_INT1_MASK desired settings
* @quirks: bitmask identifying deltas from the MIPI specification
+ * @sdca_interrupt_register_list: indicates which sets of SDCA interrupt status
+ * and masks are supported
+ * @commit_register_supported: is PCP_Commit register supported
+ * @scp_int1_mask: SCP_INT1_MASK desired settings
+ * @lane_maps: Lane mapping for the slave, only valid if lane_control_support is set
* @clock_reg_supported: the Peripheral implements the clock base and scale
* registers introduced with the SoundWire 1.2 specification. SDCA devices
* do not need to set this boolean property as the registers are required.
* @use_domain_irq: call actual IRQ handler on slave, as well as callback
*/
struct sdw_slave_prop {
+ struct sdw_dp0_prop *dp0_prop;
+ struct sdw_dpn_prop *src_dpn_prop;
+ struct sdw_dpn_prop *sink_dpn_prop;
u32 mipi_revision;
bool wake_capable;
bool test_mode_capable;
@@ -388,16 +384,16 @@ struct sdw_slave_prop {
bool high_PHY_capable;
bool paging_support;
bool bank_delay_support;
- enum sdw_p15_behave p15_behave;
bool lane_control_support;
+ enum sdw_p15_behave p15_behave;
u32 master_count;
u32 source_ports;
u32 sink_ports;
- struct sdw_dp0_prop *dp0_prop;
- struct sdw_dpn_prop *src_dpn_prop;
- struct sdw_dpn_prop *sink_dpn_prop;
- u8 scp_int1_mask;
u32 quirks;
+ u32 sdca_interrupt_register_list;
+ u8 commit_register_supported;
+ u8 scp_int1_mask;
+ u8 lane_maps[SDW_MAX_LANES];
bool clock_reg_supported;
bool use_domain_irq;
};
@@ -406,13 +402,14 @@ struct sdw_slave_prop {
/**
* struct sdw_master_prop - Master properties
+ * @clk_gears: Clock gears supported
+ * @clk_freq: Clock frequencies supported, in Hz
+ * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
* @revision: MIPI spec version of the implementation
* @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported
* @max_clk_freq: Maximum Bus clock frequency, in Hz
* @num_clk_gears: Number of clock gears supported
- * @clk_gears: Clock gears supported
* @num_clk_freq: Number of clock frequencies supported, in Hz
- * @clk_freq: Clock frequencies supported, in Hz
* @default_frame_rate: Controller default Frame rate, in Hz
* @default_row: Number of rows
* @default_col: Number of columns
@@ -421,24 +418,23 @@ struct sdw_slave_prop {
* command
* @mclk_freq: clock reference passed to SoundWire Master, in Hz.
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux
- * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
*/
struct sdw_master_prop {
+ u32 *clk_gears;
+ u32 *clk_freq;
+ u64 quirks;
u32 revision;
u32 clk_stop_modes;
u32 max_clk_freq;
u32 num_clk_gears;
- u32 *clk_gears;
u32 num_clk_freq;
- u32 *clk_freq;
u32 default_frame_rate;
u32 default_row;
u32 default_col;
- bool dynamic_frame;
u32 err_threshold;
u32 mclk_freq;
+ bool dynamic_frame;
bool hw_disabled;
- u64 quirks;
};
/* Definitions for Master quirks */
@@ -463,6 +459,7 @@ struct sdw_master_prop {
int sdw_master_read_prop(struct sdw_bus *bus);
int sdw_slave_read_prop(struct sdw_slave *slave);
+int sdw_slave_read_lane_mapping(struct sdw_slave *slave);
/*
* SDW Slave Structures and APIs
@@ -488,9 +485,9 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
-struct sdw_extended_slave_id {
- int link_id;
- struct sdw_slave_id id;
+struct sdw_peripherals {
+ int num_peripherals;
+ struct sdw_slave *array[];
};
/*
@@ -630,13 +627,13 @@ struct sdw_slave_ops {
int (*clk_stop)(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type);
-
};
/**
* struct sdw_slave - SoundWire Slave
* @id: MIPI device ID
* @dev: Linux device
+ * @index: internal ID for this slave
* @irq: IRQ number
* @status: Status reported by the Slave
* @bus: Bus handle
@@ -663,10 +660,12 @@ struct sdw_slave_ops {
* @is_mockup_device: status flag used to squelch errors in the command/control
* protocol for SoundWire mockup devices
* @sdw_dev_lock: mutex used to protect callbacks/remove races
+ * @sdca_data: structure containing all device data for SDCA helpers
*/
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
+ int index;
int irq;
enum sdw_slave_status status;
struct sdw_bus *bus;
@@ -686,6 +685,7 @@ struct sdw_slave {
bool first_interrupt_done;
bool is_mockup_device;
struct mutex sdw_dev_lock; /* protect callbacks/remove races */
+ struct sdca_device_data sdca_data;
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
@@ -704,10 +704,7 @@ struct sdw_master_device {
container_of(d, struct sdw_master_device, dev)
struct sdw_driver {
- const char *name;
-
- int (*probe)(struct sdw_slave *sdw,
- const struct sdw_device_id *id);
+ int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id);
int (*remove)(struct sdw_slave *sdw);
void (*shutdown)(struct sdw_slave *sdw);
@@ -726,7 +723,7 @@ struct sdw_driver {
SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data))
int sdw_handle_slave_status(struct sdw_bus *bus,
- enum sdw_slave_status status[]);
+ enum sdw_slave_status status[]);
/*
* SDW master structures and APIs
@@ -808,31 +805,39 @@ struct sdw_enable_ch {
*/
struct sdw_master_port_ops {
int (*dpn_set_port_params)(struct sdw_bus *bus,
- struct sdw_port_params *port_params,
- unsigned int bank);
+ struct sdw_port_params *port_params,
+ unsigned int bank);
int (*dpn_set_port_transport_params)(struct sdw_bus *bus,
- struct sdw_transport_params *transport_params,
- enum sdw_reg_bank bank);
- int (*dpn_port_prep)(struct sdw_bus *bus,
- struct sdw_prepare_ch *prepare_ch);
+ struct sdw_transport_params *transport_params,
+ enum sdw_reg_bank bank);
+ int (*dpn_port_prep)(struct sdw_bus *bus, struct sdw_prepare_ch *prepare_ch);
int (*dpn_port_enable_ch)(struct sdw_bus *bus,
- struct sdw_enable_ch *enable_ch, unsigned int bank);
+ struct sdw_enable_ch *enable_ch, unsigned int bank);
};
struct sdw_msg;
/**
- * struct sdw_defer - SDW deffered message
- * @length: message length
+ * struct sdw_defer - SDW deferred message
* @complete: message completion
* @msg: SDW message
+ * @length: message length
*/
struct sdw_defer {
+ struct sdw_msg *msg;
int length;
struct completion complete;
- struct sdw_msg *msg;
};
+/*
+ * Add a practical limit to BPT transfer sizes. BPT is typically used
+ * to transfer firmware, and larger firmware transfers will increase
+ * the cold latency beyond typical OS or user requirements.
+ */
+#define SDW_BPT_MSG_MAX_BYTES (1024 * 1024)
+
+struct sdw_bpt_msg;
+
/**
* struct sdw_master_ops - Master driver ops
* @read_prop: Read Master properties
@@ -848,17 +853,18 @@ struct sdw_defer {
* @get_device_num: Callback for vendor-specific device_number allocation
* @put_device_num: Callback for vendor-specific device_number release
* @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
+ * @bpt_send_async: reserve resources for BPT stream and send message
+ *	using BPT protocol
+ * @bpt_wait: wait for message completion using BPT protocol
+ * and release resources
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
- u64 (*override_adr)
- (struct sdw_bus *bus, u64 addr);
- enum sdw_command_response (*xfer_msg)
- (struct sdw_bus *bus, struct sdw_msg *msg);
- enum sdw_command_response (*xfer_msg_defer)
- (struct sdw_bus *bus);
+ u64 (*override_adr)(struct sdw_bus *bus, u64 addr);
+ enum sdw_command_response (*xfer_msg)(struct sdw_bus *bus, struct sdw_msg *msg);
+ enum sdw_command_response (*xfer_msg_defer)(struct sdw_bus *bus);
int (*set_bus_conf)(struct sdw_bus *bus,
- struct sdw_bus_params *params);
+ struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
@@ -867,72 +873,9 @@ struct sdw_master_ops {
void (*new_peripheral_assigned)(struct sdw_bus *bus,
struct sdw_slave *slave,
int dev_num);
-};
-
-/**
- * struct sdw_bus - SoundWire bus
- * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
- * @md: Master device
- * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
- * @link_id: Link id number, can be 0 to N, unique for each Controller
- * @id: bus system-wide unique id
- * @slaves: list of Slaves on this bus
- * @assigned: Bitmap for Slave device numbers.
- * Bit set implies used number, bit clear implies unused number.
- * @bus_lock: bus lock
- * @msg_lock: message lock
- * @compute_params: points to Bus resource management implementation
- * @ops: Master callback ops
- * @port_ops: Master port callback ops
- * @params: Current bus parameters
- * @prop: Master properties
- * @vendor_specific_prop: pointer to non-standard properties
- * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
- * is used to compute and program bus bandwidth, clock, frame shape,
- * transport and port parameters
- * @debugfs: Bus debugfs
- * @domain: IRQ domain
- * @defer_msg: Defer message
- * @clk_stop_timeout: Clock stop timeout computed
- * @bank_switch_timeout: Bank switch timeout computed
- * @multi_link: Store bus property that indicates if multi links
- * are supported. This flag is populated by drivers after reading
- * appropriate firmware (ACPI/DT).
- * @hw_sync_min_links: Number of links used by a stream above which
- * hardware-based synchronization is required. This value is only
- * meaningful if multi_link is set. If set to 1, hardware-based
- * synchronization will be used even if a stream only uses a single
- * SoundWire segment.
- */
-struct sdw_bus {
- struct device *dev;
- struct sdw_master_device *md;
- int controller_id;
- unsigned int link_id;
- int id;
- struct list_head slaves;
- DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
- struct mutex bus_lock;
- struct lock_class_key bus_lock_key;
- struct mutex msg_lock;
- struct lock_class_key msg_lock_key;
- int (*compute_params)(struct sdw_bus *bus);
- const struct sdw_master_ops *ops;
- const struct sdw_master_port_ops *port_ops;
- struct sdw_bus_params params;
- struct sdw_master_prop prop;
- void *vendor_specific_prop;
- struct list_head m_rt_list;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
- struct irq_chip irq_chip;
- struct irq_domain *domain;
- struct sdw_defer defer_msg;
- unsigned int clk_stop_timeout;
- u32 bank_switch_timeout;
- bool multi_link;
- int hw_sync_min_links;
+ int (*bpt_send_async)(struct sdw_bus *bus, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
@@ -959,7 +902,7 @@ struct sdw_port_config {
* @ch_count: Channel count of the stream
* @bps: Number of bits per audio sample
* @direction: Data direction
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
*/
struct sdw_stream_config {
unsigned int frame_rate;
@@ -1009,33 +952,113 @@ struct sdw_stream_params {
* @name: SoundWire stream name
* @params: Stream parameters
* @state: Current state of the stream
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
+ * @m_rt_count: Count of Master runtime(s) in this stream
* @master_list: List of Master runtime(s) in this stream.
* master_list can contain only one m_rt per Master instance
* for a stream
- * @m_rt_count: Count of Master runtime(s) in this stream
*/
struct sdw_stream_runtime {
const char *name;
struct sdw_stream_params params;
enum sdw_stream_state state;
enum sdw_stream_type type;
- struct list_head master_list;
int m_rt_count;
+ struct list_head master_list;
};
-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
+/**
+ * struct sdw_bus - SoundWire bus
+ * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
+ * @md: Master device
+ * @bus_lock_key: bus lock key associated to @bus_lock
+ * @bus_lock: bus lock
+ * @slave_ida: IDA for allocating internal slave IDs
+ * @slaves: list of Slaves on this bus
+ * @msg_lock_key: message lock key associated to @msg_lock
+ * @msg_lock: message lock
+ * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
+ * is used to compute and program bus bandwidth, clock, frame shape,
+ * transport and port parameters
+ * @defer_msg: Defer message
+ * @params: Current bus parameters
+ * @stream_refcount: number of streams currently using this bus
+ * @bpt_stream_refcount: number of BPT streams currently using this bus (should
+ *	be zero or one, multiple streams per link are not supported).
+ * @bpt_stream: pointer stored to handle BPT streams.
+ * @ops: Master callback ops
+ * @port_ops: Master port callback ops
+ * @prop: Master properties
+ * @vendor_specific_prop: pointer to non-standard properties
+ * @hw_sync_min_links: Number of links used by a stream above which
+ * hardware-based synchronization is required. This value is only
+ * meaningful if multi_link is set. If set to 1, hardware-based
+ * synchronization will be used even if a stream only uses a single
+ * SoundWire segment.
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
+ * @id: bus system-wide unique id
+ * @compute_params: points to Bus resource management implementation
+ * @assigned: Bitmap for Slave device numbers.
+ * Bit set implies used number, bit clear implies unused number.
+ * @clk_stop_timeout: Clock stop timeout computed
+ * @bank_switch_timeout: Bank switch timeout computed
+ * @domain: IRQ domain
+ * @irq_chip: IRQ chip
+ * @debugfs: Bus debugfs (optional)
+ * @multi_link: Store bus property that indicates if multi links
+ * are supported. This flag is populated by drivers after reading
+ * appropriate firmware (ACPI/DT).
+ * @lane_used_bandwidth: how much bandwidth in bits per second is used by each lane
+ */
+struct sdw_bus {
+ struct device *dev;
+ struct sdw_master_device *md;
+ struct lock_class_key bus_lock_key;
+ struct mutex bus_lock;
+ struct ida slave_ida;
+ struct list_head slaves;
+ struct lock_class_key msg_lock_key;
+ struct mutex msg_lock;
+ struct list_head m_rt_list;
+ struct sdw_defer defer_msg;
+ struct sdw_bus_params params;
+ int stream_refcount;
+ int bpt_stream_refcount;
+ struct sdw_stream_runtime *bpt_stream;
+ const struct sdw_master_ops *ops;
+ const struct sdw_master_port_ops *port_ops;
+ struct sdw_master_prop prop;
+ void *vendor_specific_prop;
+ int hw_sync_min_links;
+ int controller_id;
+ unsigned int link_id;
+ int id;
+ int (*compute_params)(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
+ DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
+ unsigned int clk_stop_timeout;
+ u32 bank_switch_timeout;
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+ bool multi_link;
+ unsigned int lane_used_bandwidth[SDW_MAX_LANES];
+};
+
+struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type);
void sdw_release_stream(struct sdw_stream_runtime *stream);
-int sdw_compute_params(struct sdw_bus *bus);
+int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
int sdw_stream_add_master(struct sdw_bus *bus,
- struct sdw_stream_config *stream_config,
- const struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream);
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
- struct sdw_stream_runtime *stream);
+ struct sdw_stream_runtime *stream);
int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
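
/*
 * Illustrative sketch (not part of the patch): sdw_alloc_stream() now
 * takes the stream type, and the reworked sdw_compute_params() is
 * reached through the stream setup below. Error handling is minimal
 * and the function name is hypothetical; configuration comes from the
 * caller.
 */
static int example_start_pcm(struct sdw_bus *bus,
			     struct sdw_stream_config *scfg,
			     const struct sdw_port_config *pcfg)
{
	struct sdw_stream_runtime *stream;
	int ret;

	stream = sdw_alloc_stream("example-pcm", SDW_STREAM_PCM);
	if (!stream)
		return -ENOMEM;

	ret = sdw_stream_add_master(bus, scfg, pcfg, 1, stream);
	if (ret)
		goto err_free;

	ret = sdw_prepare_stream(stream);
	if (ret)
		goto err_remove;

	return sdw_enable_stream(stream);

err_remove:
	sdw_stream_remove_master(bus, stream);
err_free:
	sdw_release_stream(stream);
	return ret;
}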
@@ -1048,6 +1071,11 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave);
+
+int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
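
/*
 * Illustrative sketch (not part of the patch): the async/wait pair
 * allows other initialization to proceed while a BPT transfer (for
 * example a firmware download) is in flight; sdw_bpt_send_sync()
 * simply combines the two. The message is assumed to have been
 * prepared by bus-specific code, since struct sdw_bpt_msg is only
 * forward-declared in this header.
 */
static int example_bpt_download(struct sdw_bus *bus, struct sdw_slave *slave,
				struct sdw_bpt_msg *msg)
{
	int ret;

	ret = sdw_bpt_send_async(bus, slave, msg);
	if (ret)
		return ret;

	/* ... other setup work while the transfer proceeds ... */

	return sdw_bpt_wait(bus, slave, msg);
}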
#if IS_ENABLED(CONFIG_SOUNDWIRE)
@@ -1059,6 +1087,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
int sdw_stream_remove_slave(struct sdw_slave *slave,
struct sdw_stream_runtime *stream);
+struct device *of_sdw_find_device_by_node(struct device_node *np);
+
+int sdw_slave_get_current_bank(struct sdw_slave *sdev);
+
+int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base);
+
/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
@@ -1090,6 +1124,18 @@ static inline int sdw_stream_remove_slave(struct sdw_slave *slave,
return -EINVAL;
}
+static inline struct device *of_sdw_find_device_by_node(struct device_node *np)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return NULL;
+}
+
+static inline int sdw_slave_get_current_bank(struct sdw_slave *sdev)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
/* messaging and data APIs */
static inline int sdw_read(struct sdw_slave *slave, u32 addr)
{
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
index 28a4eb77717f..fe31773d5210 100644
--- a/include/linux/soundwire/sdw_amd.h
+++ b/include/linux/soundwire/sdw_amd.h
@@ -27,9 +27,14 @@
#define ACP_SDW0 0
#define ACP_SDW1 1
#define AMD_SDW_MAX_MANAGER_COUNT 2
+#define ACP63_PCI_REV_ID 0x63
+#define ACP70_PCI_REV_ID 0x70
+#define ACP71_PCI_REV_ID 0x71
+#define ACP72_PCI_REV_ID 0x72
struct acp_sdw_pdata {
u16 instance;
+ u32 acp_rev;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
};
@@ -66,6 +71,7 @@ struct sdw_amd_dai_runtime {
* @instance: SoundWire manager instance
* @quirks: SoundWire manager quirks
* @wake_en_mask: wake enable mask per SoundWire manager
+ * @acp_rev: acp pci device revision id
* @clk_stopped: flag set to true when clock is stopped
* @power_mode_mask: flag interprets amd SoundWire manager power mode
* @dai_runtime_array: dai runtime array
@@ -94,6 +100,7 @@ struct amd_sdw_manager {
u32 quirks;
u32 wake_en_mask;
u32 power_mode_mask;
+ u32 acp_rev;
bool clk_stopped;
struct sdw_amd_dai_runtime **dai_runtime_array;
@@ -115,25 +122,23 @@ struct sdw_amd_acpi_info {
* struct sdw_amd_ctx - context allocated by the controller driver probe
*
* @count: link count
- * @num_slaves: total number of devices exposed across all enabled links
* @link_mask: bit-wise mask listing SoundWire links reported by the
* Controller
- * @ids: array of slave_id, representing Slaves exposed across all enabled
- * links
* @pdev: platform device structure
+ * @peripherals: array representing Peripherals exposed across all enabled links
*/
struct sdw_amd_ctx {
int count;
- int num_slaves;
u32 link_mask;
- struct sdw_extended_slave_id *ids;
struct platform_device *pdev[AMD_SDW_MAX_MANAGER_COUNT];
+ struct sdw_peripherals *peripherals;
};
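
/*
 * Illustrative sketch (not part of the patch): the flexible-array
 * struct sdw_peripherals replaces the old ids/num_slaves pair, so a
 * consumer of the context iterates the slave pointers directly. The
 * function name is hypothetical.
 */
static void example_list_peripherals(struct sdw_amd_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->peripherals->num_peripherals; i++) {
		struct sdw_slave *slave = ctx->peripherals->array[i];

		dev_dbg(&slave->dev, "link %u mfg 0x%x part 0x%x\n",
			slave->bus->link_id, slave->id.mfg_id,
			slave->id.part_id);
	}
}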
/**
* struct sdw_amd_res - Soundwire AMD global resource structure,
* typically populated by the DSP driver/Legacy driver
*
+ * @acp_rev: acp pci device revision id
* @addr: acp pci device resource start address
* @reg_range: ACP register range
* @link_mask: bit-wise mask listing links selected by the DSP driver/
@@ -146,6 +151,7 @@ struct sdw_amd_ctx {
* @acp_lock: mutex protecting acp common registers access
*/
struct sdw_amd_res {
+ u32 acp_rev;
u32 addr;
u32 reg_range;
u32 link_mask;
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 8e78417156e3..9c9435009537 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -4,6 +4,7 @@
#ifndef __SDW_INTEL_H
#define __SDW_INTEL_H
+#include <linux/acpi.h>
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
@@ -182,6 +183,14 @@
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS BIT(6)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS GENMASK(11, 7)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2 GENMASK(13, 12)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2 BIT(14)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE BIT(15)
+
+/* ACE3+ Mic privacy control and status register */
+#define SDW_SHIM2_INTEL_VS_PVCCS 0x10
/**
* struct sdw_intel_stream_params_data: configuration passed during
@@ -222,7 +231,7 @@ struct sdw_intel_ops {
/**
* struct sdw_intel_acpi_info - Soundwire Intel information found in ACPI tables
* @handle: ACPI controller handle
- * @count: link count found with "sdw-master-count" property
+ * @count: link count found with "sdw-master-count" or "sdw-manager-list" property
* @link_mask: bit-wise mask listing links enabled by BIOS menu
*
* this structure could be expanded to e.g. provide all the _ADR
@@ -281,31 +290,28 @@ struct hdac_bus;
* hardware capabilities after all power dependencies are settled.
* @link_mask: bit-wise mask listing SoundWire links reported by the
* Controller
- * @num_slaves: total number of devices exposed across all enabled links
* @handle: ACPI parent handle
* @ldev: information for each link (controller-specific and kept
* opaque here)
- * @ids: array of slave_id, representing Slaves exposed across all enabled
- * links
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
* @shim_mask: flags to track initialization of SHIM shared registers
* @shim_base: sdw shim base.
* @alh_base: sdw alh base.
+ * @peripherals: array representing Peripherals exposed across all enabled links
*/
struct sdw_intel_ctx {
int count;
void __iomem *mmio_base;
u32 link_mask;
- int num_slaves;
acpi_handle handle;
struct sdw_intel_link_dev **ldev;
- struct sdw_extended_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
u32 shim_mask;
u32 shim_base;
u32 alh_base;
+ struct sdw_peripherals *peripherals;
};
/**
@@ -328,6 +334,7 @@ struct sdw_intel_ctx {
* @shim_base: sdw shim base.
* @alh_base: sdw alh base.
* @ext: extended HDaudio link support
+ * @mic_privacy: ACE version supports microphone privacy
* @hbus: hdac_bus pointer, needed for power management
* @eml_lock: mutex protecting shared registers in the HDaudio multi-link
* space
@@ -346,6 +353,7 @@ struct sdw_intel_res {
u32 shim_base;
u32 alh_base;
bool ext;
+ bool mic_privacy;
struct hdac_bus *hbus;
struct mutex *eml_lock;
};
@@ -362,7 +370,7 @@ struct sdw_intel_res {
* on e.g. which machine driver to select (I2S mode, HDaudio or
* SoundWire).
*/
-int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+int sdw_intel_acpi_scan(acpi_handle parent_handle,
struct sdw_intel_acpi_info *info);
void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx);
@@ -383,6 +391,7 @@ struct sdw_intel;
/* struct intel_sdw_hw_ops - SoundWire ops for Intel platforms.
* @debugfs_init: initialize all debugfs capabilities
* @debugfs_exit: close and cleanup debugfs capabilities
+ * @get_link_count: fetch link count from hardware registers
* @register_dai: read all PDI information and register DAIs
* @check_clock_stop: throw error message if clock is not stopped.
* @start_bus: normal start
@@ -407,6 +416,8 @@ struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
void (*debugfs_exit)(struct sdw_intel *sdw);
+ int (*get_link_count)(struct sdw_intel *sdw);
+
int (*register_dai)(struct sdw_intel *sdw);
void (*check_clock_stop)(struct sdw_intel *sdw);
@@ -430,6 +441,10 @@ struct sdw_intel_hw_ops {
bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
+
+ int (*bpt_send_async)(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_intel *sdw, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
};
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
@@ -442,4 +457,9 @@ extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+/*
+ * Max number of links supported in hardware
+ */
+#define SDW_INTEL_MAX_LINKS 5
+
#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index 658b10fa5b20..cae8a0a5a9b0 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -4,6 +4,9 @@
#ifndef __SDW_REGISTERS_H
#define __SDW_REGISTERS_H
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/*
* SDW registers as defined by MIPI 1.2 Spec
*/
@@ -329,16 +332,29 @@
* 2:0 Control Number[2:0]
*/
-#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
- (((fun) & 0x7) << 22) | \
- (((ent) & 0x40) << 15) | \
- (((ent) & 0x3f) << 7) | \
- (((ctl) & 0x30) << 15) | \
- (((ctl) & 0x0f) << 3) | \
- (((ch) & 0x38) << 12) | \
- ((ch) & 0x07))
+#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
+ (((fun) & GENMASK(2, 0)) << 22) | \
+ (((ent) & BIT(6)) << 15) | \
+ (((ent) & GENMASK(5, 0)) << 7) | \
+ (((ctl) & GENMASK(5, 4)) << 15) | \
+ (((ctl) & GENMASK(3, 0)) << 3) | \
+ (((ch) & GENMASK(5, 3)) << 12) | \
+ ((ch) & GENMASK(2, 0)))
+
+#define SDW_SDCA_CTL_FUNC(reg) FIELD_GET(GENMASK(24, 22), (reg))
+#define SDW_SDCA_CTL_ENT(reg) ((FIELD_GET(BIT(21), (reg)) << 6) | \
+ FIELD_GET(GENMASK(12, 7), (reg)))
+#define SDW_SDCA_CTL_CSEL(reg) ((FIELD_GET(GENMASK(20, 19), (reg)) << 4) | \
+ FIELD_GET(GENMASK(6, 3), (reg)))
+#define SDW_SDCA_CTL_CNUM(reg) ((FIELD_GET(GENMASK(17, 15), (reg)) << 3) | \
+ FIELD_GET(GENMASK(2, 0), (reg)))
#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13))
#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14))
+/* Check the reserved and fixed bits in address */
+#define SDW_SDCA_VALID_CTL(reg) (((reg) & (GENMASK(31, 25) | BIT(18) | BIT(13))) == BIT(30))
+
+#define SDW_SDCA_MAX_REGISTER 0x47FFFFFF
+
#endif /* __SDW_REGISTERS_H */
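
/*
 * Illustrative sketch (not part of the patch): the FIELD_GET-based
 * helpers invert SDW_SDCA_CTL(), so an address can be validated with
 * SDW_SDCA_VALID_CTL() and decomposed back into its fields. The field
 * values below are arbitrary.
 */
static void example_sdca_roundtrip(void)
{
	/* function 1, entity 2, control 3, channel 0 */
	u32 reg = SDW_SDCA_CTL(1, 2, 3, 0);

	if (!SDW_SDCA_VALID_CTL(reg))
		return;

	pr_debug("func %u ent %u ctl %u ch %u\n",
		 (unsigned int)SDW_SDCA_CTL_FUNC(reg),
		 (unsigned int)SDW_SDCA_CTL_ENT(reg),
		 (unsigned int)SDW_SDCA_CTL_CSEL(reg),
		 (unsigned int)SDW_SDCA_CTL_CNUM(reg));
}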
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 693320b4f5c2..d405935a45fe 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -13,7 +13,7 @@ static inline int is_sdw_slave(const struct device *dev)
return dev->type == &sdw_slave_type;
}
-#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
+#define drv_to_sdw_driver(_drv) container_of_const(_drv, struct sdw_driver, driver)
#define sdw_register_driver(drv) \
__sdw_register_driver(drv, THIS_MODULE)
diff --git a/include/linux/spi/offload/consumer.h b/include/linux/spi/offload/consumer.h
new file mode 100644
index 000000000000..cd7d5daa21e6
--- /dev/null
+++ b/include/linux/spi/offload/consumer.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_CONSUMER_H
+#define __LINUX_SPI_OFFLOAD_CONSUMER_H
+
+#include <linux/module.h>
+#include <linux/spi/offload/types.h>
+#include <linux/types.h>
+
+MODULE_IMPORT_NS("SPI_OFFLOAD");
+
+struct device;
+struct spi_device;
+
+struct spi_offload *devm_spi_offload_get(struct device *dev, struct spi_device *spi,
+ const struct spi_offload_config *config);
+
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type);
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger);
+
+struct dma_chan *devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload);
+struct dma_chan *devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload);
+
+#endif /* __LINUX_SPI_OFFLOAD_CONSUMER_H */
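
/*
 * Illustrative sketch (not part of the patch): a consumer driver
 * requesting an offload with an RX stream and a 1 MHz periodic
 * trigger. The capability mask and rate are arbitrary, and the
 * getters are assumed to return ERR_PTR() values on failure.
 */
static int example_get_offload(struct device *dev, struct spi_device *spi,
			       struct spi_offload **out)
{
	static const struct spi_offload_config cfg = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger_config tcfg = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = { .frequency_hz = 1000000 },
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;

	offload = devm_spi_offload_get(dev, spi, &cfg);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	*out = offload;
	return spi_offload_trigger_enable(offload, trigger, &tcfg);
}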
diff --git a/include/linux/spi/offload/provider.h b/include/linux/spi/offload/provider.h
new file mode 100644
index 000000000000..76c7cf651092
--- /dev/null
+++ b/include/linux/spi/offload/provider.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_PROVIDER_H
+#define __LINUX_SPI_OFFLOAD_PROVIDER_H
+
+#include <linux/module.h>
+#include <linux/spi/offload/types.h>
+#include <linux/types.h>
+
+MODULE_IMPORT_NS("SPI_OFFLOAD");
+
+struct device;
+struct spi_offload_trigger;
+
+struct spi_offload *devm_spi_offload_alloc(struct device *dev, size_t priv_size);
+
+struct spi_offload_trigger_ops {
+ bool (*match)(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type, u64 *args, u32 nargs);
+ int (*request)(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type, u64 *args, u32 nargs);
+ void (*release)(struct spi_offload_trigger *trigger);
+ int (*validate)(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+ int (*enable)(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+ void (*disable)(struct spi_offload_trigger *trigger);
+};
+
+struct spi_offload_trigger_info {
+ /** @fwnode: Provider fwnode, used to match to consumer. */
+ struct fwnode_handle *fwnode;
+ /** @ops: Provider-specific callbacks. */
+ const struct spi_offload_trigger_ops *ops;
+	/** @priv: Provider-specific state to be used in callbacks. */
+ void *priv;
+};
+
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info);
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger);
+
+#endif /* __LINUX_SPI_OFFLOAD_PROVIDER_H */
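
/*
 * Illustrative sketch (not part of the patch): a minimal provider
 * fills struct spi_offload_trigger_info and registers it; only the
 * ops it implements need to be non-NULL. All names are hypothetical.
 */
static bool example_match(struct spi_offload_trigger *trigger,
			  enum spi_offload_trigger_type type,
			  u64 *args, u32 nargs)
{
	return type == SPI_OFFLOAD_TRIGGER_DATA_READY && nargs == 0;
}

static const struct spi_offload_trigger_ops example_trigger_ops = {
	.match = example_match,
};

static int example_provider_probe(struct device *dev)
{
	struct spi_offload_trigger_info info = {
		.fwnode = dev_fwnode(dev),
		.ops = &example_trigger_ops,
	};

	return devm_spi_offload_trigger_register(dev, &info);
}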
diff --git a/include/linux/spi/offload/types.h b/include/linux/spi/offload/types.h
new file mode 100644
index 000000000000..cd61f8adb7a5
--- /dev/null
+++ b/include/linux/spi/offload/types.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_TYPES_H
+#define __LINUX_SPI_OFFLOAD_TYPES_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device;
+
+/* This is a write xfer, but TX uses an external data stream rather than tx_buf. */
+#define SPI_OFFLOAD_XFER_TX_STREAM	BIT(0)
+/* This is a read xfer, but RX uses an external data stream rather than rx_buf. */
+#define SPI_OFFLOAD_XFER_RX_STREAM	BIT(1)
+
+/* Offload can be triggered by external hardware event. */
+#define SPI_OFFLOAD_CAP_TRIGGER BIT(0)
+/* Offload can record and then play back TX data when triggered. */
+#define SPI_OFFLOAD_CAP_TX_STATIC_DATA BIT(1)
+/* Offload can get TX data from an external stream source. */
+#define SPI_OFFLOAD_CAP_TX_STREAM_DMA BIT(2)
+/* Offload can send RX data to an external stream sink. */
+#define SPI_OFFLOAD_CAP_RX_STREAM_DMA BIT(3)
+
+/**
+ * struct spi_offload_config - offload configuration
+ *
+ * This is used to request an offload with specific configuration.
+ */
+struct spi_offload_config {
+ /** @capability_flags: required capabilities. See %SPI_OFFLOAD_CAP_* */
+ u32 capability_flags;
+};
+
+/**
+ * struct spi_offload - offload instance
+ */
+struct spi_offload {
+ /** @provider_dev: for get/put reference counting */
+ struct device *provider_dev;
+ /** @priv: provider driver private data */
+ void *priv;
+ /** @ops: callbacks for offload support */
+ const struct spi_offload_ops *ops;
+ /** @xfer_flags: %SPI_OFFLOAD_XFER_* flags supported by provider */
+ u32 xfer_flags;
+};
+
+enum spi_offload_trigger_type {
+	/* Indication from SPI peripheral that data is ready to read. */
+ SPI_OFFLOAD_TRIGGER_DATA_READY,
+ /* Trigger comes from a periodic source such as a clock. */
+ SPI_OFFLOAD_TRIGGER_PERIODIC,
+};
+
+/**
+ * spi_offload_trigger_periodic - configuration parameters for periodic triggers
+ * @frequency_hz: The rate that the trigger should fire in Hz.
+ * @offset_ns: A delay in nanoseconds between when this trigger fires and
+ *	when another trigger fires. This requires specialized hardware
+ *	that supports such synchronization with a delay between two or
+ *	more triggers. Set to 0 when not needed.
+ */
+struct spi_offload_trigger_periodic {
+ u64 frequency_hz;
+ u64 offset_ns;
+};
+
+struct spi_offload_trigger_config {
+ /** @type: type discriminator for union */
+ enum spi_offload_trigger_type type;
+ union {
+ struct spi_offload_trigger_periodic periodic;
+ };
+};
+
+/**
+ * struct spi_offload_ops - callbacks implemented by offload providers
+ */
+struct spi_offload_ops {
+ /**
+ * @trigger_enable: Optional callback to enable the trigger for the
+ * given offload instance.
+ */
+ int (*trigger_enable)(struct spi_offload *offload);
+ /**
+ * @trigger_disable: Optional callback to disable the trigger for the
+ * given offload instance.
+ */
+ void (*trigger_disable)(struct spi_offload *offload);
+ /**
+ * @tx_stream_request_dma_chan: Optional callback for controllers that
+ * have an offload where the TX data stream is connected directly to a
+ * DMA channel.
+ */
+ struct dma_chan *(*tx_stream_request_dma_chan)(struct spi_offload *offload);
+ /**
+ * @rx_stream_request_dma_chan: Optional callback for controllers that
+ * have an offload where the RX data stream is connected directly to a
+ * DMA channel.
+ */
+ struct dma_chan *(*rx_stream_request_dma_chan)(struct spi_offload *offload);
+};
+
+#endif /* __LINUX_SPI_OFFLOAD_TYPES_H */
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index f950d280461b..9fbef3fd4056 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -2,6 +2,131 @@
#ifndef __SPI_SH_MSIOF_H__
#define __SPI_SH_MSIOF_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_PULSE 0U /* Frame start sync pulse */
+#define SIMDR1_SYNCMD_SPI 2U /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR 3U /* L/R mode */
+#define SIMDR1_SYNCAC BIT(25) /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB BIT(24) /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL GENMASK(22, 20) /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL GENMASK(18, 16) /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH GENMASK(27, 26) /* Sync Signal Channel Select */
+ /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_GRP GENMASK(31, 30) /* Group Count */
+#define SIMDR2_BITLEN1 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1	GENMASK(23, 16)	/* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK GENMASK(3, 0) /* Group Output Mask 1-4 (SH, A1) */
+
+/* SITMDR3 and SIRMDR3 */
+#define SIMDR3_BITLEN2 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR3_WDLEN2	GENMASK(23, 16)	/* Word Count (1-64/256 (SH, A1)) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRDV GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+
+/* SICTR */
+#define SICTR_TSCKIZ GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL BIT(30) /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL BIT(28) /* Receive Clock Polarity */
+#define SICTR_TEDG BIT(27) /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG BIT(26) /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW 0U /* 0 */
+#define SICTR_TXDIZ_HIGH 1U /* 1 */
+#define SICTR_TXDIZ_HIZ 2U /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 0U /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 1U /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 2U /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 3U /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 4U /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 5U /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 6U /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 7U /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA GENMASK(28, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_RFWM GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1	0U	/* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 1U /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 2U /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 3U /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 4U /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 5U /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 6U /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 7U /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+
enum {
MSIOF_SPI_HOST,
MSIOF_SPI_TARGET,
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index f866d5c8ed32..82390712794c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -15,16 +15,32 @@
#define SPI_MEM_OP_CMD(__opcode, __buswidth) \
{ \
+ .nbytes = 1, \
.buswidth = __buswidth, \
.opcode = __opcode, \
+ }
+
+#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \
+ { \
.nbytes = 1, \
+ .opcode = __opcode, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
{ \
.nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .val = __val, \
+ }
+
+#define SPI_MEM_DTR_OP_ADDR(__nbytes, __val, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
.val = __val, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_ADDR { }
@@ -35,22 +51,47 @@
.buswidth = __buswidth, \
}
+#define SPI_MEM_DTR_OP_DUMMY(__nbytes, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_NO_DUMMY { }
#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_IN, \
+ .nbytes = __nbytes, \
+ .buf.in = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_IN(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_IN, \
.nbytes = __nbytes, \
.buf.in = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_OUT, \
+ .nbytes = __nbytes, \
+ .buf.out = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_OUT, \
.nbytes = __nbytes, \
.buf.out = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_DATA { }
@@ -68,6 +109,9 @@ enum spi_mem_data_dir {
SPI_MEM_DATA_OUT,
};
+#define SPI_MEM_OP_MAX_FREQ(__freq) \
+ .max_freq = __freq
+
/**
* struct spi_mem_op - describes a SPI memory operation
* @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
@@ -90,11 +134,16 @@ enum spi_mem_data_dir {
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dtr: whether the data should be sent in DTR mode or not
* @data.ecc: whether error correction is required or not
+ * @data.swap16: whether the byte order of 16-bit words is swapped when read
+ * or written in Octal DTR mode compared to STR mode.
* @data.dir: direction of the transfer
* @data.nbytes: number of data bytes to send/receive. Can be zero if the
* operation does not involve transferring data
* @data.buf.in: input buffer (must be DMA-able)
* @data.buf.out: output buffer (must be DMA-able)
+ * @max_freq: frequency limitation wrt this operation. 0 means there is no
+ * specific constraint and the highest achievable frequency can be
+ * attempted.
*/
struct spi_mem_op {
struct {
@@ -124,7 +173,8 @@ struct spi_mem_op {
u8 buswidth;
u8 dtr : 1;
u8 ecc : 1;
- u8 __pad : 6;
+ u8 swap16 : 1;
+ u8 __pad : 5;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
@@ -132,14 +182,17 @@ struct spi_mem_op {
const void *out;
} buf;
} data;
+
+ unsigned int max_freq;
};
-#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data, ...) \
{ \
.cmd = __cmd, \
.addr = __addr, \
.dummy = __dummy, \
.data = __data, \
+ __VA_ARGS__ \
}
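
/*
 * Illustrative sketch (not part of the patch): the variadic form lets
 * callers append SPI_MEM_OP_MAX_FREQ(), and the DTR variants mark each
 * phase for double transfer rate. The 0xee opcode, dummy length and
 * 66 MHz cap are arbitrary example values, not from a real datasheet.
 */
static int example_dtr_read(struct spi_mem *mem, u64 addr,
			    void *buf, unsigned int len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_DTR_OP_CMD(0xee, 8),
			   SPI_MEM_DTR_OP_ADDR(4, addr, 8),
			   SPI_MEM_DTR_OP_DUMMY(16, 8),
			   SPI_MEM_DTR_OP_DATA_IN(len, buf, 8),
			   SPI_MEM_OP_MAX_FREQ(66000000));

	/* clamp op.max_freq to what the controller supports */
	spi_mem_adjust_op_freq(mem, &op);

	if (!spi_mem_supports_op(mem, &op))
		return -EOPNOTSUPP;

	return spi_mem_exec_op(mem, &op);
}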
/**
@@ -297,10 +350,15 @@ struct spi_controller_mem_ops {
* struct spi_controller_mem_caps - SPI memory controller capabilities
* @dtr: Supports DTR operations
* @ecc: Supports operations with error correction
+ * @swap16: Supports swapping bytes on a 16 bit boundary when configured in
+ * Octal DTR
+ * @per_op_freq: Supports per operation frequency switching
*/
struct spi_controller_mem_caps {
bool dtr;
bool ecc;
+ bool swap16;
+ bool per_op_freq;
};
#define spi_mem_controller_is_capable(ctlr, cap) \
@@ -365,6 +423,8 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
+void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op);
+u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e8e1e798924f..cb2c2df31089 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -21,7 +21,7 @@
#include <uapi/linux/spi/spi.h>
/* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 16
+#define SPI_DEVICE_CS_CNT_MAX 4
struct dma_chan;
struct software_node;
@@ -31,9 +31,11 @@ struct spi_transfer;
struct spi_controller_mem_ops;
struct spi_controller_mem_caps;
struct spi_message;
+struct spi_offload;
+struct spi_offload_config;
/*
- * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * INTERFACES between SPI controller-side drivers and SPI target protocol handlers,
* and SPI infrastructure.
*/
extern const struct bus_type spi_bus_type;
@@ -128,19 +130,12 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
struct spi_transfer *xfer);
/**
- * struct spi_device - Controller side proxy for an SPI slave device
+ * struct spi_device - Controller side proxy for an SPI target device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
- * the corresponding physical CS for logical CS i.
- * @mode: The spi mode defines how data is clocked out and in.
- * This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden
- * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
- * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -148,6 +143,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* default (0) indicating protocol words are eight bit bytes.
* The spi_transfer.bits_per_word can override this for each transfer.
* @rt: Make the pump thread real time priority.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @irq: Negative, or the number passed to request_irq() to receive
* interrupts from this device.
* @controller_state: Controller's runtime state
@@ -160,8 +160,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
- * (optional, NULL when not using a GPIO line)
+ * @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -169,10 +168,14 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @cs_inactive: delay to be introduced by the controller after CS is
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
- * @pcpu_statistics: statistics for the spi_device
+ * @chip_select: Array of physical chipselect, spi->chipselect[i] gives
+ * the corresponding physical CS for logical CS i.
+ * @num_chipselect: Number of physical chipselects used.
* @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
*
- * A @spi_device is used to interchange data between an SPI slave
+ * A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
*
* In @dev, the platform_data is used to hold information about this
@@ -185,7 +188,6 @@ struct spi_device {
struct device dev;
struct spi_controller *controller;
u32 max_speed_hz;
- u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -216,22 +218,29 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
+
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
struct spi_delay word_delay; /* Inter-word delay */
+
/* CS delays */
struct spi_delay cs_setup;
struct spi_delay cs_hold;
struct spi_delay cs_inactive;
- /* The statistics */
- struct spi_statistics __percpu *pcpu_statistics;
+ u8 chip_select[SPI_DEVICE_CS_CNT_MAX];
+ u8 num_chipselect;
- /* Bit mask of the chipselect(s) that the driver need to use from
- * the chipselect array.When the controller is capable to handle
+ /*
+ * Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
 * multiple chip selects & memories are connected in parallel
 * then more than one bit needs to be set in cs_index_mask.
*/
- u32 cs_index_mask : SPI_CS_CNT_MAX;
+ u32 cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
+
+ struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */
/*
* Likely need more hooks for more protocol options affecting how
@@ -247,10 +256,7 @@ struct spi_device {
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
"SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(const struct device *dev)
-{
- return dev ? container_of(dev, struct spi_device, dev) : NULL;
-}
+#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
@@ -311,7 +317,7 @@ static inline bool spi_is_csgpiod(struct spi_device *spi)
{
u8 idx;
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
if (spi_get_csgpiod(spi, idx))
return true;
}
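A hypothetical sketch (not from the patch) of how the reworked per-device fields relate, for a memory wired to two parallel chip selects. In real drivers the SPI core fills these from firmware; direct assignment is shown only to make the relationships concrete:

	spi->chip_select[0] = 0;		/* logical CS 0 -> physical CS 0 */
	spi->chip_select[1] = 1;		/* logical CS 1 -> physical CS 1 */
	spi->num_chipselect = 2;		/* spi_is_csgpiod() scans only these */
	spi->cs_index_mask = BIT(0) | BIT(1);	/* assert both CS lines together */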
@@ -351,10 +357,8 @@ struct spi_driver {
struct device_driver driver;
};
-static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct spi_driver, driver) : NULL;
-}
+#define to_spi_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct spi_driver, driver) : NULL )
extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
@@ -388,15 +392,15 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
spi_unregister_driver)
/**
- * struct spi_controller - interface to SPI master or slave controller
+ * struct spi_controller - interface to SPI host or target controller
* @dev: device interface to this driver
* @list: link with the global spi_controller list
* @bus_num: board-specific (and often SOC-specific) identifier for a
* given SPI controller.
* @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
+ * SPI targets, and are numbered from zero to num_chipselects.
+ * each target has a chipselect signal, but it's common that not
+ * every chipselect is connected to a target.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
* @buswidth_override_bits: flags to override for this controller driver
@@ -425,9 +429,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
- * @set_cs_timing: optional hook for SPI devices to request SPI master
+ * @set_cs_timing: optional hook for SPI devices to request SPI
* controller for configuring specific CS setup time, hold time and inactive
- * delay interms of clock counts
+ * delay in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
@@ -447,7 +451,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @cur_msg_need_completion: Flag used internally to opportunistically skip
* the @cur_msg_completion. This flag is used to signal the context that
* is running spi_finalize_current_message() that it needs to complete()
- * @cur_msg_mapped: message has been mapped for DMA
* @fallback: fallback to PIO if DMA transfer return failure with
* SPI_TRANS_FAIL_NO_START.
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
@@ -499,9 +502,14 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
+ * @get_offload: callback for controllers with offload support to get a matching
+ * offload instance. Implementations should return -ENODEV if no match is
+ * found.
+ * @put_offload: release the offload instance acquired by @get_offload.
* @mem_caps: controller capabilities for the handling of memory operations.
+ * @dtr_caps: true if the controller has DTR (single/dual transfer rate) capability.
+ *	QSPI-based controllers should fill this based on the controller's capability.
* @unprepare_message: undo any work done by prepare_message().
- * @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @target_abort: abort the ongoing transfer request on an SPI target controller
* @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
@@ -533,6 +541,9 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
* @must_async: disable all fast paths in the core
+ * @defer_optimize_message: set to true if the controller cannot pre-optimize messages
+ * and needs to defer the optimization step until the message is actually
+ * being transferred
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -542,7 +553,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
*
* The driver for an SPI controller manages access to those devices through
* a queue of spi_message transactions, copying data between CPU memory and
- * an SPI slave device. For each such message it queues, it calls the
+ * an SPI target device. For each such message it queues, it calls the
* message's completion function when the transaction completes.
*/
struct spi_controller {
@@ -592,7 +603,7 @@ struct spi_controller {
#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
-#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select target device */
#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
/*
* The spi-controller has multi chip select capability and can
@@ -659,7 +670,7 @@ struct spi_controller {
* + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
- * selecting a chip (for masters), then transferring data
+ * selecting a chip (for controllers), then transferring data
* + If there are multiple spi_device children, the i/o queue
* arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
@@ -708,11 +719,10 @@ struct spi_controller {
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_mapped;
bool fallback;
bool last_cs_mode_high;
- s8 last_cs[SPI_CS_CNT_MAX];
- u32 last_cs_index_mask : SPI_CS_CNT_MAX;
+ s8 last_cs[SPI_DEVICE_CS_CNT_MAX];
+ u32 last_cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
struct completion xfer_completion;
size_t max_dma_len;
@@ -726,10 +736,7 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- union {
- int (*slave_abort)(struct spi_controller *ctlr);
- int (*target_abort)(struct spi_controller *ctlr);
- };
+ int (*target_abort)(struct spi_controller *ctlr);
/*
* These hooks are for drivers that use a generic implementation
@@ -745,6 +752,13 @@ struct spi_controller {
const struct spi_controller_mem_ops *mem_ops;
const struct spi_controller_mem_caps *mem_caps;
+ /* An SPI or QSPI controller can set this to true if it supports SDR/DDR transfer rates */
+ bool dtr_caps;
+
+ struct spi_offload *(*get_offload)(struct spi_device *spi,
+ const struct spi_offload_config *config);
+ void (*put_offload)(struct spi_offload *offload);
+
/* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
@@ -776,6 +790,7 @@ struct spi_controller {
/* Flag for enabling opportunistic skipping of the queue in spi_sync */
bool queue_empty;
bool must_async;
+ bool defer_optimize_message;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -802,11 +817,6 @@ static inline void spi_controller_put(struct spi_controller *ctlr)
put_device(&ctlr->dev);
}
-static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
-{
- return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
-}
-
static inline bool spi_controller_is_target(struct spi_controller *ctlr)
{
return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
@@ -831,22 +841,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
- unsigned int size, bool slave);
-
-static inline struct spi_controller *spi_alloc_master(struct device *host,
- unsigned int size)
-{
- return __spi_alloc_controller(host, size, false);
-}
-
-static inline struct spi_controller *spi_alloc_slave(struct device *host,
- unsigned int size)
-{
- if (!IS_ENABLED(CONFIG_SPI_SLAVE))
- return NULL;
-
- return __spi_alloc_controller(host, size, true);
-}
+ unsigned int size, bool target);
static inline struct spi_controller *spi_alloc_host(struct device *dev,
unsigned int size)
@@ -865,22 +860,7 @@ static inline struct spi_controller *spi_alloc_target(struct device *dev,
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave);
-
-static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
- unsigned int size)
-{
- return __devm_spi_alloc_controller(dev, size, false);
-}
-
-static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
- unsigned int size)
-{
- if (!IS_ENABLED(CONFIG_SPI_SLAVE))
- return NULL;
-
- return __devm_spi_alloc_controller(dev, size, true);
-}
+ bool target);
static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
unsigned int size)
@@ -902,12 +882,29 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
-#if IS_ENABLED(CONFIG_ACPI)
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index);
int acpi_spi_count_resources(struct acpi_device *adev);
+#else
+static inline struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
+{
+ return NULL;
+}
+
+static inline struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ return 0;
+}
#endif
/*
@@ -981,8 +978,12 @@ struct spi_res {
* transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @offload_flags: Flags that are only applicable to specialized SPI offload
+ * transfers. See %SPI_OFFLOAD_XFER_* in spi-offload.h.
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
* within @tx_buf for which the SPI device is requesting that the time
* snapshot for this transfer begins. Upon completing the SPI transfer,
@@ -997,15 +998,16 @@ struct spi_res {
* purposefully (instead of setting to spi_transfer->len - 1) to denote
* that a transfer-level snapshot taken from within the driver may still
* be of higher quality.
- * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * @ptp_sts: Pointer to a memory location held by the SPI target device where a
* PTP system timestamp structure may lie. If drivers use PIO or their
* hardware has some sort of assist for retrieving exact transfer timing,
* they can (and should) assert @ptp_sts_supported and populate this
* structure using the ptp_read_system_*ts helper functions.
- * The timestamp must represent the time at which the SPI slave device has
+ * The timestamp must represent the time at which the SPI target device has
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
+ * @dtr_mode: true if double transfer rate is supported.
* @timestamped: true if the transfer has been timestamped
* @error: Error status logged by SPI controller driver.
*
@@ -1057,6 +1059,9 @@ struct spi_res {
* two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
* SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
*
+ * Users may also set dtr_mode to true to use dual transfer rate mode if
+ * desired. If not, single transfer rate mode is used by default.
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -1077,20 +1082,25 @@ struct spi_transfer {
#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
- dma_addr_t tx_dma;
- dma_addr_t rx_dma;
+ bool tx_sg_mapped;
+ bool rx_sg_mapped;
+
struct sg_table tx_sg;
struct sg_table rx_sg;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
unsigned dummy_data:1;
unsigned cs_off:1;
unsigned cs_change:1;
- unsigned tx_nbits:3;
- unsigned rx_nbits:3;
+ unsigned tx_nbits:4;
+ unsigned rx_nbits:4;
unsigned timestamped:1;
+ bool dtr_mode;
#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
+#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -1099,6 +1109,9 @@ struct spi_transfer {
u32 effective_speed_hz;
+ /* Use %SPI_OFFLOAD_XFER_* from spi-offload.h */
+ unsigned int offload_flags;
+
unsigned int ptp_sts_word_pre;
unsigned int ptp_sts_word_post;
@@ -1124,6 +1137,7 @@ struct spi_transfer {
* @state: for use by whichever driver currently owns the message
* @opt_state: for use by whichever driver currently owns the message
* @resources: for resource management when the SPI message is processed
+ * @offload: (optional) offload instance used by this message
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1184,6 +1198,12 @@ struct spi_message {
*/
void *opt_state;
+ /*
+ * Optional offload instance used by this message. This must be set
+ * by the peripheral driver before calling spi_optimize_message().
+ */
+ struct spi_offload *offload;
+
/* List of spi_res resources when the SPI message is processed */
struct list_head resources;
};
@@ -1268,10 +1288,11 @@ static inline void spi_message_free(struct spi_message *m)
extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
extern void spi_unoptimize_message(struct spi_message *msg);
+extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg);
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_slave_abort(struct spi_device *spi);
extern int spi_target_abort(struct spi_device *spi);
static inline size_t
@@ -1319,6 +1340,32 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
}
/**
+ * spi_bpw_to_bytes - Convert bits per word to bytes
+ * @bpw: Bits per word
+ *
+ * This function converts the given @bpw to bytes. The result is always
+ * a power of two, e.g.,
+ *
+ * =============== =================
+ * Input (in bits) Output (in bytes)
+ * =============== =================
+ * 5 1
+ * 9 2
+ * 21 4
+ * 37 8
+ * =============== =================
+ *
+ * It will return 0 for an input of 0.
+ *
+ * Returns:
+ * Bytes for the given @bpw.
+ */
+static inline u32 spi_bpw_to_bytes(u32 bpw)
+{
+ return roundup_pow_of_two(BITS_TO_BYTES(bpw));
+}
+
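A minimal usage sketch, assuming a caller that needs a DMA-safe buffer for n_words words of 21 bits each:

	u32 wsize = spi_bpw_to_bytes(21);		/* -> 4, per the table above */
	void *buf = kcalloc(n_words, wsize, GFP_KERNEL);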
+/**
* spi_controller_xfer_timeout - Compute a suitable timeout value
* @ctlr: SPI device
* @xfer: Transfer descriptor
@@ -1615,7 +1662,7 @@ struct spi_board_info {
* bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
- * chip_select reflects how this chip is wired to that master;
+ * chip_select reflects how this chip is wired to that controller;
* it's less than num_chipselect.
*/
u16 bus_num;
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b930eca2ef7b..c92cd43a47f4 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -4,6 +4,8 @@
#include <linux/workqueue.h>
+typedef u32 (*spi_bb_txrx_word_fn)(struct spi_device *, unsigned int, u32, u8, unsigned int);
+
struct spi_bitbang {
struct mutex lock;
u8 busy;
@@ -22,15 +24,15 @@ struct spi_bitbang {
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
+ void (*set_mosi_idle)(struct spi_device *spi);
/* txrx_bufs() may handle dma mapping for transfers that don't
* already have one (transfer.{tx,rx}_dma is zero), or use PIO
*/
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
- u32 (*txrx_word[4])(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits, unsigned flags);
+ spi_bb_txrx_word_fn txrx_word[SPI_MODE_X_MASK + 1];
+
int (*set_line_direction)(struct spi_device *spi, bool output);
};
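A brief sketch of the new typedef in use; my_mode0_txrx and my_mode3_txrx are hypothetical callbacks matching the spi_bb_txrx_word_fn signature above:

	bitbang->txrx_word[SPI_MODE_0] = my_mode0_txrx;
	bitbang->txrx_word[SPI_MODE_3] = my_mode3_txrx;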
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3fcd20de6ca8..d3561c4a080e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -462,11 +462,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
-#ifdef CONFIG_PREEMPTION
+ if (!preempt_model_preemptible())
+ return 0;
+
return spin_is_contended(lock);
-#else
- return 0;
-#endif
}
/*
@@ -479,11 +478,10 @@ static inline int spin_needbreak(spinlock_t *lock)
*/
static inline int rwlock_needbreak(rwlock_t *lock)
{
-#ifdef CONFIG_PREEMPTION
+ if (!preempt_model_preemptible())
+ return 0;
+
return rwlock_is_contended(lock);
-#else
- return 0;
-#endif
}
/*
@@ -550,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
+ raw_spin_lock_bh(_T->lock),
+ raw_spin_unlock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
@@ -571,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
+ spin_lock_bh(_T->lock),
+ spin_unlock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
+ spin_trylock_bh(_T->lock))
+
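As an illustration (not part of the patch), the new BH guard classes allow scoped softirq-safe critical sections without explicit unlock paths; struct my_stats is hypothetical:

	static void stats_inc(struct my_stats *st)
	{
		guard(spinlock_bh)(&st->lock);	/* spin_lock_bh(); unlocked at scope exit */
		st->count++;
	}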
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 89eb6f4c659c..9ecb0ab504e3 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -2,7 +2,7 @@
#define __LINUX_SPINLOCK_API_SMP_H
#ifndef __LINUX_INSIDE_SPINLOCK_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
/*
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 61c49b16f69a..f6499c37157d 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
}
#endif
-#define spin_lock_init(slock) \
+#define __spin_lock_init(slock, name, key, percpu) \
do { \
- static struct lock_class_key __key; \
- \
rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, false); \
+ __rt_spin_lock_init(slock, name, key, percpu); \
} while (0)
-#define local_spin_lock_init(slock) \
+#define _spin_lock_init(slock, percpu) \
do { \
static struct lock_class_key __key; \
- \
- rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, true); \
+ __spin_lock_init(slock, #slock, &__key, percpu); \
} while (0)
-extern void rt_spin_lock(spinlock_t *lock);
-extern void rt_spin_lock_nested(spinlock_t *lock, int subclass);
-extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
-extern void rt_spin_unlock(spinlock_t *lock);
+#define spin_lock_init(slock) _spin_lock_init(slock, false)
+#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
+
+extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
+extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
@@ -132,7 +131,7 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
#define spin_trylock_irq(lock) \
__cond_lock(lock, rt_spin_trylock(lock))
-#define __spin_trylock_irqsave(lock, flags) \
+#define spin_trylock_irqsave(lock, flags) \
({ \
int __locked; \
\
@@ -142,9 +141,6 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
__locked; \
})
-#define spin_trylock_irqsave(lock, flags) \
- __cond_lock(lock, __spin_trylock_irqsave(lock, flags))
-
#define spin_is_contended(lock) (((void)(lock), 0))
static inline int spin_is_locked(spinlock_t *lock)
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 7f86a2016ac5..fc4e2d017c20 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -2,7 +2,7 @@
#define __LINUX_SPINLOCK_TYPES_UP_H
#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
/*
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index c87204247592..1e84e71ca495 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -2,7 +2,7 @@
#define __LINUX_SPINLOCK_UP_H
#ifndef __LINUX_INSIDE_SPINLOCK_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
#include <asm/processor.h> /* for cpu_relax() */
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
index 33dcbec71925..f06f7b785091 100644
--- a/include/linux/sprintf.h
+++ b/include/linux/sprintf.h
@@ -4,6 +4,7 @@
#include <linux/compiler_attributes.h>
#include <linux/types.h>
+#include <linux/stdarg.h>
int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
@@ -22,6 +23,9 @@ __scanf(2, 0) int vsscanf(const char *, const char *, va_list);
/* These are for specific cases, do not use without real need */
extern bool no_hash_pointers;
-int no_hash_pointers_enable(char *str);
+void hash_pointers_finalize(bool slub_debug);
+
+/* Used for Rust formatting ('%pA') */
+char *rust_fmt_argument(char *buf, char *end, const void *ptr);
#endif /* _LINUX_KERNEL_SPRINTF_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 236610e4a8fa..344ad51c8f6c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -25,8 +25,12 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
- struct lock_class_key *key);
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+#ifndef CONFIG_TINY_SRCU
+int __init_srcu_struct_fast(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+int __init_srcu_struct_fast_updown(struct srcu_struct *ssp, const char *name,
+ struct lock_class_key *key);
+#endif // #ifndef CONFIG_TINY_SRCU
#define init_srcu_struct(ssp) \
({ \
@@ -35,14 +39,46 @@ int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
+#define init_srcu_struct_fast(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast((ssp), #ssp, &__srcu_key); \
+})
+
+#define init_srcu_struct_fast_updown(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast_updown((ssp), #ssp, &__srcu_key); \
+})
+
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *ssp);
+#ifndef CONFIG_TINY_SRCU
+int init_srcu_struct_fast(struct srcu_struct *ssp);
+int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
+#endif // #ifndef CONFIG_TINY_SRCU
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
+#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
+#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
+// SRCU-lite (formerly 0x4) is no longer with us; its value is reused below.
+#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast().
+#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8 // srcu_read_lock_fast_updown().
+#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
+ SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
+ // All of the above.
+#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
+ // Flavors requiring synchronize_rcu()
+ // instead of smp_mb().
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
@@ -54,13 +90,46 @@ int init_srcu_struct(struct srcu_struct *ssp);
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+
+#define SRCU_GET_STATE_COMPLETED 0x1
+
+/**
+ * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
+ *
+ * Returns a value that poll_state_synchronize_srcu() will always treat
+ * as a cookie whose grace period has already completed.
+ */
+static inline unsigned long get_completed_synchronize_srcu(void)
+{
+ return SRCU_GET_STATE_COMPLETED;
+}
+
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed SRCU grace periods.
+#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_srcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
+ * get_completed_synchronize_srcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
+
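An illustrative sketch (my_srcu and old_p are assumptions) of the polled grace-period API that these helpers round out:

	unsigned long cookie;

	cookie = start_poll_synchronize_srcu(&my_srcu);	/* start a GP, get a cookie */
	/* ... time passes ... */
	if (poll_state_synchronize_srcu(&my_srcu, cookie))
		kfree(old_p);				/* GP has elapsed; safe to free */

	/* get_completed_synchronize_srcu() yields a cookie that always polls as
	 * completed, handy for initializing a not-yet-used tracking field. */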
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
@@ -141,17 +210,6 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-#define SRCU_NMI_UNKNOWN 0x0
-#define SRCU_NMI_UNSAFE 0x1
-#define SRCU_NMI_SAFE 0x2
-
-#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
-void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
-#else
-static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
- bool nmi_safe) { }
-#endif
-
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
@@ -201,33 +259,152 @@ static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
*
- * Note that srcu_read_lock() and the matching srcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
- * was invoked in process context.
+ * The return value from srcu_read_lock() is guaranteed to be
+ * non-negative. This value must be passed unaltered to the matching
+ * srcu_read_unlock(). Note that srcu_read_lock() and the matching
+ * srcu_read_unlock() must occur in the same context, for example, it is
+ * illegal to invoke srcu_read_unlock() in an irq handler if the matching
+ * srcu_read_lock() was invoked in process context. Or, for that matter to
+ * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
+ * from another.
*/
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
srcu_lock_acquire(&ssp->dep_map);
return retval;
}
/**
+ * srcu_read_lock_fast - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information. This
+ * function is NMI-safe, in a manner similar to srcu_read_lock_nmisafe().
+ *
+ * For srcu_read_lock_fast() to be used on an srcu_struct structure,
+ * that structure must have been defined using either DEFINE_SRCU_FAST()
+ * or DEFINE_STATIC_SRCU_FAST() on the one hand or initialized with
+ * init_srcu_struct_fast() on the other. Such an srcu_struct structure
+ * cannot be passed to any non-fast variant of srcu_read_{,un}lock() or
+ * srcu_{down,up}_read(). In kernels built with CONFIG_PROVE_RCU=y,
+ * __srcu_check_read_flavor() will complain bitterly if you ignore this
+ * restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast srcu_struct
+ * structures because SRCU-fast expedited grace periods invoke
+ * synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast() function can be invoked only from those
+ * contexts where RCU is watching, that is, from contexts where it would
+ * be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ retval = __srcu_read_lock_fast(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
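A minimal reader sketch, assuming a domain defined with DEFINE_STATIC_SRCU_FAST() and an SRCU-protected pointer gp of type struct foo:

	static DEFINE_STATIC_SRCU_FAST(my_fast_srcu);

	static void reader(void)
	{
		struct srcu_ctr __percpu *scp;
		struct foo *p;

		scp = srcu_read_lock_fast(&my_fast_srcu);
		p = srcu_dereference(gp, &my_fast_srcu);
		/* ... use p ... */
		srcu_read_unlock_fast(&my_fast_srcu, scp);
	}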
+/**
+ * srcu_read_lock_fast_updown - register a new reader for an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information.
+ * This function is compatible with srcu_down_read_fast(), but is not
+ * NMI-safe.
+ *
+ * For srcu_read_lock_fast_updown() to be used on an srcu_struct
+ * structure, that structure must have been defined using either
+ * DEFINE_SRCU_FAST_UPDOWN() or DEFINE_STATIC_SRCU_FAST_UPDOWN() on the one
+ * hand or initialized with init_srcu_struct_fast_updown() on the other.
+ * Such an srcu_struct structure cannot be passed to any non-fast-updown
+ * variant of srcu_read_{,un}lock() or srcu_{down,up}_read(). In kernels
+ * built with CONFIG_PROVE_RCU=y, __srcu_check_read_flavor() will complain
+ * bitterly if you ignore this restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast-updown
+ * srcu_struct structures because SRCU-fast-updown expedited grace periods
+ * invoke synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast-updown grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast_updown() function can be invoked only from
+ * those contexts where RCU is watching, that is, from contexts where
+ * it would be legal to invoke rcu_read_lock(). Otherwise, lockdep will
+ * complain.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+__acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast_updown().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ retval = __srcu_read_lock_fast_updown(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
+/*
+ * Used by tracing, cannot be traced and cannot call lockdep.
+ * See srcu_read_lock_fast() for more information.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
+ __acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ retval = __srcu_read_lock_fast(ssp);
+ return retval;
+}
+
+/**
+ * srcu_down_read_fast - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter a semaphore-like SRCU read-side critical section, but for
+ * a light-weight smp_mb()-free reader. See srcu_read_lock_fast() and
+ * srcu_down_read() for more information.
+ *
+ * The same srcu_struct may be used concurrently by srcu_down_read_fast()
+ * and srcu_read_lock_fast_updown(). However, the same definition/initialization
+ * requirements called out for srcu_read_lock_fast_updown() apply.
+ */
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ return __srcu_read_lock_fast_updown(ssp);
+}
+
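A hedged sketch of the semaphore-like pattern this enables: acquire in the submitting task, release from a worker. my_updown_srcu is assumed to be an SRCU-fast-updown domain, and struct my_work (with scp and work members) is hypothetical:

	static void submit(struct my_work *w)
	{
		w->scp = srcu_down_read_fast(&my_updown_srcu);
		queue_work(system_wq, &w->work);	/* the worker will release */
	}

	static void my_work_fn(struct work_struct *work)
	{
		struct my_work *w = container_of(work, struct my_work, work);

		srcu_up_read_fast(&my_updown_srcu, w->scp);	/* handoff completes here */
	}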
+/**
* srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
* @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section, but in an NMI-safe manner.
* See srcu_read_lock() for more information.
+ *
+ * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
+ * then none of the other flavors may be used, whether before, during,
+ * or after.
*/
static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, true);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
retval = __srcu_read_lock_nmisafe(ssp);
rcu_try_lock_acquire(&ssp->dep_map);
return retval;
@@ -239,7 +416,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
return retval;
}
@@ -263,12 +440,13 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
* srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
*
* Calls to srcu_down_read() may be nested, similar to the manner in
- * which calls to down_read() may be nested.
+ * which calls to down_read() may be nested. The same srcu_struct may be
+ * used concurrently by srcu_down_read() and srcu_read_lock().
*/
static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
{
WARN_ON_ONCE(in_nmi());
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
return __srcu_read_lock(ssp);
}
@@ -283,15 +461,76 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
/**
+ * srcu_read_unlock_fast - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast().
+ *
+ * Exit a light-weight SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_fast(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
+}
+
+/**
+ * srcu_read_unlock_fast_updown - unregister an old reader from an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast_updown().
+ *
+ * Exit an SRCU-fast-updown read-side critical section.
+ */
+static inline void
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "RCU must be watching srcu_read_unlock_fast_updown().");
+}
+
+/*
+ * Used by tracing, cannot be traced and cannot call lockdep.
+ * See srcu_read_unlock_fast() for more information.
+ */
+static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
+ struct srcu_ctr __percpu *scp) __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ __srcu_read_unlock_fast(ssp, scp);
+}
+
+/**
+ * srcu_up_read_fast - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast().
+ *
+ * Exit an SRCU read-side critical section, but not necessarily from
+ * the same context as the matching srcu_down_read_fast().
+ */
+static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast().");
+}
+
+/**
 * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
- * @idx: return value from corresponding srcu_read_lock().
+ * @idx: return value from corresponding srcu_read_lock_nmisafe().
*
* Exit an SRCU read-side critical section, but in an NMI-safe manner.
*/
@@ -299,7 +538,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- srcu_check_nmi_safety(ssp, true);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
rcu_lock_release(&ssp->dep_map);
__srcu_read_unlock_nmisafe(ssp, idx);
}
@@ -308,7 +547,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
@@ -325,7 +564,7 @@ static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
{
WARN_ON_ONCE(idx & ~0x1);
WARN_ON_ONCE(in_nmi());
- srcu_check_nmi_safety(ssp, false);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
@@ -343,9 +582,33 @@ static inline void smp_mb__after_srcu_read_unlock(void)
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
+/**
+ * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
+ *
+ * Converts the preceding srcu_read_lock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_lock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_lock will appear to happen after
+ * the preceding srcu_read_lock.
+ */
+static inline void smp_mb__after_srcu_read_lock(void)
+{
+ /* __srcu_read_lock has smp_mb() internally so nothing to do here. */
+}
+
DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
_T->idx = srcu_read_lock(_T->lock),
srcu_read_unlock(_T->lock, _T->idx),
int idx)
+DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
+ _T->scp = srcu_read_lock_fast(_T->lock),
+ srcu_read_unlock_fast(_T->lock, _T->scp),
+ struct srcu_ctr __percpu *scp)
+
+DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
+ _T->scp = srcu_read_lock_fast_notrace(_T->lock),
+ srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
+ struct srcu_ctr __percpu *scp)
+
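A short sketch of the scoped form, reusing the hypothetical my_fast_srcu domain from the reader sketch earlier:

	static void scoped_reader(void)
	{
		guard(srcu_fast)(&my_fast_srcu);
		/* SRCU-fast read-side critical section; unlocked when the scope ends */
	}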
#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 4d96bbdb45f0..e0698024667a 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -31,7 +31,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
-#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored) \
+#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
@@ -44,13 +44,25 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_STATIC_SRCU(name) \
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST_UPDOWN(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
// Dummy structure for srcu_notifier_head.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
+#define __init_srcu_struct_fast __init_srcu_struct
+#define __init_srcu_struct_fast_updown __init_srcu_struct
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#define init_srcu_struct_fast init_srcu_struct
+#define init_srcu_struct_fast_updown init_srcu_struct
+#endif // #ifndef CONFIG_DEBUG_LOCK_ALLOC
void synchronize_srcu(struct srcu_struct *ssp);
@@ -64,13 +76,46 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
return idx;
}
+struct srcu_ctr;
+
+static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
+{
+ return (int)(intptr_t)(struct srcu_ctr __force __kernel *)scpp;
+}
+
+static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
+{
+ return (struct srcu_ctr __percpu *)(intptr_t)idx;
+}
+
+static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+{
+ return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
+}
+
+static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
+}
+
+static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+{
+ return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
+}
+
+static inline
+void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
+}
+
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
synchronize_srcu(ssp);
@@ -81,6 +126,9 @@ static inline void srcu_barrier(struct srcu_struct *ssp)
synchronize_srcu(ssp);
}
+static inline void srcu_expedite_current(struct srcu_struct *ssp) { }
+#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
+
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 8f3f72480e78..d6f978b50472 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -17,15 +17,21 @@
struct srcu_node;
struct srcu_struct;
+/* One element of the srcu_data srcu_ctrs array. */
+struct srcu_ctr {
+ atomic_long_t srcu_locks; /* Locks per CPU. */
+ atomic_long_t srcu_unlocks; /* Unlocks per CPU. */
+};
+
/*
* Per-CPU structure feeding into leaf srcu_node, similar in function
* to rcu_node.
*/
struct srcu_data {
/* Read-side state. */
- atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */
- atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */
- int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */
+ struct srcu_ctr srcu_ctrs[2]; /* Locks and unlocks per CPU. */
+ int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */
+ /* Values: SRCU_READ_FLAVOR_.* */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -36,6 +42,8 @@ struct srcu_data {
struct timer_list delay_work; /* Delay for CB invoking */
struct work_struct work; /* Context for CB invoking. */
struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct rcu_head srcu_ec_head; /* For srcu_expedite_current() use. */
+ int srcu_ec_state; /* State for srcu_expedite_current(). */
struct srcu_node *mynode; /* Leaf srcu_node. */
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
@@ -94,8 +102,9 @@ struct srcu_usage {
* Per-SRCU-domain structure, similar in function to rcu_state.
*/
struct srcu_struct {
- unsigned int srcu_idx; /* Current rdr array element. */
+ struct srcu_ctr __percpu *srcu_ctrp;
struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ u8 srcu_reader_flavor;
struct lockdep_map dep_map;
struct srcu_usage *srcu_sup; /* Update-side data. */
};
@@ -129,26 +138,46 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
+/* Values for srcu_expedite_current() state (->srcu_ec_state). */
+#define SRCU_EC_IDLE 0
+#define SRCU_EC_PENDING 1
+#define SRCU_EC_REPOST 2
+
+/*
+ * Values for initializing gp sequence fields. Higher values allow wraparounds to
+ * occur earlier.
+ * The second value with state is useful in the case of static initialization of
+ * srcu_usage where srcu_gp_seq_needed is expected to have some state value in its
+ * lower bits (or else it will appear to be already initialized within
+ * the call check_init_srcu_struct()).
+ */
+#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
+#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
+
#define __SRCU_USAGE_INIT(name) \
{ \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
+ .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
+ .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
+ .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
}
-#define __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+#define __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
.srcu_sup = &usage_name, \
+ .srcu_reader_flavor = fast, \
__SRCU_DEP_MAP_INIT(name)
-#define __SRCU_STRUCT_INIT_MODULE(name, usage_name) \
+#define __SRCU_STRUCT_INIT_MODULE(name, usage_name, fast) \
{ \
- __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
}
-#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name) \
+#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name, fast) \
{ \
.sda = &pcpu_name, \
- __SRCU_STRUCT_INIT_COMMON(name, usage_name) \
+ .srcu_ctrp = &pcpu_name.srcu_ctrs[0], \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
}
/*
@@ -169,26 +198,171 @@ struct srcu_struct {
* init_srcu_struct(&my_srcu);
*
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ *
+ * DEFINE_SRCU_FAST() and DEFINE_STATIC_SRCU_FAST() create an srcu_struct
+ * and associated structures whose readers must be of the SRCU-fast variety.
+ * DEFINE_SRCU_FAST_UPDOWN() and DEFINE_STATIC_SRCU_FAST_UPDOWN() create
+ * an srcu_struct and associated structures whose readers must be of the
+ * SRCU-fast-updown variety. The key point (aside from error checking) with
+ * both varieties is that the grace periods must use synchronize_rcu()
+ * instead of smp_mb(), and given that the first (for example)
+ * srcu_read_lock_fast() might race with the first synchronize_srcu(),
+ * this difference must be specified at initialization time.
*/
#ifdef MODULE
-# define __DEFINE_SRCU(name, is_static) \
+# define __DEFINE_SRCU(name, fast, is_static) \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage); \
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage, \
+ fast); \
extern struct srcu_struct * const __srcu_struct_##name; \
struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
-# define __DEFINE_SRCU(name, is_static) \
+# define __DEFINE_SRCU(name, fast, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
is_static struct srcu_struct name = \
- __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data)
+ __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data, fast)
#endif
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, 0, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, 0, static)
+#define DEFINE_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, /* not static */)
+#define DEFINE_STATIC_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, static)
+#define DEFINE_SRCU_FAST_UPDOWN(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, \
+ /* not static */)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
+void srcu_expedite_current(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
+// Converts a per-CPU pointer to an ->srcu_ctrs[] array element to that
+// element's index.
+static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
+{
+ return scpp - &ssp->sda->srcu_ctrs[0];
+}
+
+// Converts an integer to a per-CPU pointer to the corresponding
+// ->srcu_ctrs[] array element.
+static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
+{
+ return &ssp->sda->srcu_ctrs[idx];
+}
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns a pointer that must be passed to the matching
+ * srcu_read_unlock_fast().
+ *
+ * Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
+ * critical sections either because they disables interrupts, because
+ * they are a single instruction, or because they are read-modify-write
+ * atomic operations, depending on the whims of the architecture.
+ * This matters because the SRCU-fast grace-period mechanism uses either
+ * synchronize_rcu() or synchronize_rcu_expedited(), that is, RCU,
+ * *not* SRCU, in order to eliminate the need for the read-side smp_mb()
+ * invocations that are used by srcu_read_lock() and srcu_read_unlock().
+ * The __srcu_read_unlock_fast() function also relies on this same RCU
+ * (again, *not* SRCU) trick to eliminate the need for smp_mb().
+ *
+ * The key point behind this RCU trick is that if any part of a given
+ * RCU reader precedes the beginning of a given RCU grace period, then
+ * the entirety of that RCU reader and everything preceding it happens
+ * before the end of that same RCU grace period. Similarly, if any part
+ * of a given RCU reader follows the end of a given RCU grace period,
+ * then the entirety of that RCU reader and everything following it
+ * happens after the beginning of that same RCU grace period. Therefore,
+ * the operations labeled Y in __srcu_read_lock_fast() and those labeled Z
+ * in __srcu_read_unlock_fast() are ordered against the corresponding SRCU
+ * read-side critical section from the viewpoint of the SRCU grace period.
+ * This is all the ordering that is required, hence no calls to smp_mb().
+ *
+ * This means that __srcu_read_lock_fast() is not all that fast
+ * on architectures that support NMIs but do not supply NMI-safe
+ * implementations of this_cpu_inc().
+ */
+static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+{
+ struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
+
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+ barrier(); /* Avoid leaking the critical section. */
+ return scp;
+}
+
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_fast(), but it must be within the same task.
+ *
+ * Please see the __srcu_read_lock_fast() function's header comment for
+ * information on implicit RCU readers and NMI safety.
+ */
+static inline void notrace
+__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
+}
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns a pointer that must be passed to the matching
+ * srcu_read_unlock_fast_updown(). This type of reader is compatible
+ * with srcu_down_read_fast() and srcu_up_read_fast().
+ *
+ * See the __srcu_read_lock_fast() comment for more details.
+ */
+static inline
+struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+{
+ struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
+
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+ barrier(); /* Avoid leaking the critical section. */
+ return scp;
+}
+
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_fast_updown(), but it must be within the same task.
+ *
+ * Please see the __srcu_read_lock_fast() function's header comment for
+ * information on implicit RCU readers and NMI safety.
+ */
+static inline void notrace
+__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
+}
+
+void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
+
+// Record SRCU-reader usage type only for CONFIG_PROVE_RCU=y kernels.
+static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
+{
+ if (IS_ENABLED(CONFIG_PROVE_RCU))
+ __srcu_check_read_flavor(ssp, read_flavor);
+}
+
#endif
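For illustration, a minimal reader built on this flavor might look like the
sketch below, assuming the srcu_read_lock_fast()/srcu_read_unlock_fast()
wrappers that pair with the __-prefixed helpers above. The pointer returned
by the lock side must be handed back to the unlock side, possibly on a
different CPU but within the same task:

/* Sketch only: one fast-flavor SRCU reader. */
DEFINE_STATIC_SRCU_FAST(example_srcu);

static void example_fast_reader(void)
{
        struct srcu_ctr __percpu *scp;

        scp = srcu_read_lock_fast(&example_srcu);
        /* Read-side critical section: no smp_mb() on either side. */
        srcu_read_unlock_fast(&example_srcu, scp);
}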
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index a2257380c3f1..e1fb11e0f12c 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -325,7 +325,7 @@ struct ssb_driver {
struct device_driver drv;
};
-#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv)
+#define drv_to_ssb_drv(_drv) container_of_const(_drv, struct ssb_driver, drv)
extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner);
#define ssb_driver_register(drv) \
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e9ec32fb97d4..2cc21ffcdaf9 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; }
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; }
*/
depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
unsigned int nr_entries,
- gfp_t gfp_flags,
+ gfp_t alloc_flags,
depot_flags_t depot_flags);
/**
@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
- unsigned int nr_entries, gfp_t gfp_flags);
+ unsigned int nr_entries, gfp_t alloc_flags);
/**
* __stack_depot_get_stack_record - Get a pointer to a stack_record struct
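As a sketch of the flag semantics documented above (the helper below is
hypothetical; stack_trace_save() is the usual way to collect entries):

/* Hypothetical helper: capture and save the current stack. Leaving
 * STACK_DEPOT_FLAG_CAN_ALLOC unset makes the call safe where
 * alloc_pages() cannot be used; the save then fails rather than
 * allocating a new pool. */
static depot_stack_handle_t save_current_stack(bool can_alloc)
{
        unsigned long entries[16];
        unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

        return stack_depot_save_flags(entries, nr, GFP_ATOMIC,
                                      can_alloc ? STACK_DEPOT_FLAG_CAN_ALLOC : 0);
}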
diff --git a/include/linux/stat.h b/include/linux/stat.h
index bf92441dbad2..e3d00e7bb26d 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -50,10 +50,15 @@ struct kstat {
struct timespec64 btime; /* File creation time */
u64 blocks;
u64 mnt_id;
- u32 dio_mem_align;
- u32 dio_offset_align;
u64 change_cookie;
u64 subvol;
+ u32 dio_mem_align;
+ u32 dio_offset_align;
+ u32 dio_read_offset_align;
+ u32 atomic_write_unit_min;
+ u32 atomic_write_unit_max;
+ u32 atomic_write_unit_max_opt;
+ u32 atomic_write_segments_max;
};
/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 141e6b176a1b..78a77a4ae0ea 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+extern int static_call_initialized;
+
extern int __init static_call_init(void);
extern void static_call_force_reinit(void);
@@ -225,6 +227,8 @@ extern long __static_call_return0(void);
#elif defined(CONFIG_HAVE_STATIC_CALL)
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
#define DEFINE_STATIC_CALL(name, _func) \
@@ -281,6 +285,8 @@ extern long __static_call_return0(void);
#else /* Generic implementation */
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
static inline long __static_call_return0(void)
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
index 5a00b8b2cf9f..cfb6ddeb292b 100644
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -25,6 +25,8 @@
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
+#ifndef __ASSEMBLY__
+
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
@@ -100,4 +102,6 @@ struct static_call_key {
#endif /* CONFIG_HAVE_STATIC_CALL */
+#endif /* __ASSEMBLY__ */
+
#endif /* _STATIC_CALL_TYPES_H */
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 929d67710cc5..80b6bfb944f0 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -93,4 +93,40 @@ enum {
#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
__DECLARE_FLEX_ARRAY(TYPE, NAME)
+/**
+ * __TRAILING_OVERLAP() - Overlap a flexible-array member with trailing
+ * members.
+ *
+ * Creates a union between a flexible-array member (FAM) in a struct and a set
+ * of additional members that would otherwise follow it.
+ *
+ * @TYPE: Flexible structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @FAM: The flexible-array member within @TYPE
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: Trailing overlapping members.
+ */
+#define __TRAILING_OVERLAP(TYPE, NAME, FAM, ATTRS, MEMBERS) \
+ union { \
+ TYPE NAME; \
+ struct { \
+ unsigned char __offset_to_FAM[offsetof(TYPE, FAM)]; \
+ MEMBERS \
+ } ATTRS; \
+ }
+
+/**
+ * TRAILING_OVERLAP() - Overlap a flexible-array member with trailing members.
+ *
+ * Creates a union between a flexible-array member (FAM) in a struct and a set
+ * of additional members that would otherwise follow it.
+ *
+ * @TYPE: Flexible structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @FAM: The flexible-array member within @TYPE
+ * @MEMBERS: Trailing overlapping members.
+ */
+#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS) \
+ __TRAILING_OVERLAP(TYPE, NAME, FAM, /* no attrs */, MEMBERS)
+
#endif
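A hypothetical use of TRAILING_OVERLAP(), overlaying named members onto a
flexible array (the struct names are illustrative only):

struct flex_hdr {
        u32 count;
        u32 slots[];                    /* flexible-array member */
};

struct fixed_view {
        TRAILING_OVERLAP(struct flex_hdr, hdr, slots,
                u32 first;
                u32 second;
        );
};

/* fixed_view.first aliases fixed_view.hdr.slots[0], and so on: the
 * __offset_to_FAM padding places the overlapping members exactly at
 * offsetof(struct flex_hdr, slots). */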
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index f92c195c76ed..f1054b9c2d8a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,7 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -33,7 +33,9 @@
#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
-#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */
+#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */
+#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */
/* MTL algorithms identifiers */
#define MTL_TX_ALGORITHM_WRR 0x0
@@ -76,14 +78,15 @@
| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+struct clk;
struct stmmac_priv;
 /* Platform data for platform device structure's platform_data field */
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
- unsigned int has_xpcs;
- unsigned int xpcs_an_inband;
+ unsigned int pcs_mask;
+ unsigned int default_an_inband;
int *irqs;
int probed_phy_irq;
bool needs_reset;
@@ -100,6 +103,7 @@ struct stmmac_dma_cfg {
bool eame;
bool multi_msi_en;
bool dche;
+ bool atds;
};
#define AXI_BLEN 7
@@ -109,7 +113,7 @@ struct stmmac_axi {
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
bool axi_kbbe;
- u32 axi_blen[AXI_BLEN];
+ u32 axi_blen_regval;
bool axi_fb;
bool axi_mb;
bool axi_rb;
@@ -137,33 +141,6 @@ struct stmmac_txq_cfg {
int tbs_en;
};
-/* FPE link state */
-enum stmmac_fpe_state {
- FPE_STATE_OFF = 0,
- FPE_STATE_CAPABLE = 1,
- FPE_STATE_ENTERING_ON = 2,
- FPE_STATE_ON = 3,
-};
-
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
-};
-
-enum stmmac_fpe_task_state_t {
- __FPE_REMOVING,
- __FPE_TASK_SCHED,
-};
-
-struct stmmac_fpe_cfg {
- bool enable; /* FPE enable */
- bool hs_enable; /* FPE handshake enable */
- enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
- enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
- u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
-};
-
struct stmmac_safety_feature_cfg {
u32 tsoee;
u32 mrxpee;
@@ -194,7 +171,13 @@ struct dwmac4_addrs {
u32 mtl_low_cred_offset;
};
-#define STMMAC_FLAG_HAS_INTEGRATED_PCS BIT(0)
+enum dwmac_core_type {
+ DWMAC_CORE_MAC100,
+ DWMAC_CORE_GMAC,
+ DWMAC_CORE_GMAC4,
+ DWMAC_CORE_XGMAC,
+};
+
#define STMMAC_FLAG_SPH_DISABLE BIT(1)
#define STMMAC_FLAG_USE_PHY_WOL BIT(2)
#define STMMAC_FLAG_HAS_SUN8I BIT(3)
@@ -206,24 +189,37 @@ struct dwmac4_addrs {
#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9)
#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10)
#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
-#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12)
+#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(12)
+#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13)
+
+struct mac_device_info;
struct plat_stmmacenet_data {
+ enum dwmac_core_type core_type;
int bus_id;
int phy_addr;
/* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
- * ^ ^
- * mac_interface phy_interface
+ * ^
+ * phy_interface
*
- * mac_interface is the MAC-side interface, which may be the same
- * as phy_interface if there is no intervening PCS. If there is a
- * PCS, then mac_interface describes the interface mode between the
- * MAC and PCS, and phy_interface describes the interface mode
- * between the PCS and PHY.
- */
- phy_interface_t mac_interface;
- /* phy_interface is the PHY-side interface - the interface used by
- * an attached PHY.
+ * The Synopsys dwmac core only covers the MAC and an optional
+ * integrated PCS. Where the integrated PCS is used with a SerDes,
+ * e.g. for 1000base-X or Cisco SGMII, the connection between the
+ * PCS and SerDes will be TBI.
+ *
+ * Where the Synopsys dwmac core has been instantiated with multiple
+ * interface modes, these are selected via core-external configuration
+ * which is sampled when the dwmac core is reset. How this is done is
+ * platform glue specific, but this defines the interface used from
+ * the Synopsys dwmac core to the rest of the SoC.
+ *
+ * Where PCS other than the optional integrated Synopsys dwmac PCS
+ * is used, this counts as "the rest of the SoC" in the above
+ * paragraph.
+ *
+ * phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY or SFP etc. This is equivalent to the interface
+ * that phylink uses.
*/
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -231,10 +227,8 @@ struct plat_stmmacenet_data {
struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_fpe_cfg *fpe_cfg;
struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
- int has_gmac;
int enh_desc;
int tx_coe;
int rx_coe;
@@ -256,39 +250,51 @@ struct plat_stmmacenet_data {
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
+ void (*get_interfaces)(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces);
+ int (*set_phy_intf_sel)(void *priv, u8 phy_intf_sel);
+ int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
- void (*speed_mode_2500)(struct net_device *ndev, void *priv);
+ int (*mac_finish)(struct net_device *ndev,
+ void *priv,
+ unsigned int mode,
+ phy_interface_t interface);
void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
- struct mac_device_info *(*setup)(void *priv);
+ int (*init)(struct device *dev, void *priv);
+ void (*exit)(struct device *dev, void *priv);
+ int (*suspend)(struct device *dev, void *priv);
+ int (*resume)(struct device *dev, void *priv);
+ int (*mac_setup)(void *priv, struct mac_device_info *mac);
int (*clks_config)(void *priv, bool enabled);
int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
void *ctx);
void (*dump_debug_regs)(void *priv);
int (*pcs_init)(struct stmmac_priv *priv);
void (*pcs_exit)(struct stmmac_priv *priv);
+ struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv,
+ phy_interface_t interface);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
- unsigned int clk_ptp_rate;
- unsigned int clk_ref_rate;
+ struct clk *clk_tx_i; /* clk_tx_i to MAC core */
+ unsigned long clk_ptp_rate;
+ unsigned long clk_ref_rate;
+ struct clk_bulk_data *clks;
+ int num_clks;
unsigned int mult_fact_100ns;
s32 ptp_max_adj;
u32 cdc_error_adj;
struct reset_control *stmmac_rst;
struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
- int has_gmac4;
int rss_en;
int mac_port_sel_speed;
- int has_xgmac;
u8 vlan_fail_q;
- unsigned int eee_usecs_rate;
struct pci_dev *pdev;
int int_snapshot_num;
int msi_mac_vec;
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index ea7a74ea7389..72820503514c 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -3,7 +3,7 @@
#define _LINUX_STOP_MACHINE
#include <linux/cpu.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/smp.h>
#include <linux/list.h>
@@ -88,55 +88,73 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task
#endif /* CONFIG_SMP */
/*
- * stop_machine "Bogolock": stop the entire machine, disable
- * interrupts. This is a very heavy lock, which is equivalent to
- * grabbing every spinlock (and more). So the "read" side to such a
- * lock is anything which disables preemption.
+ * stop_machine "Bogolock": stop the entire machine, disable interrupts.
+ * This is a very heavy lock, which is equivalent to grabbing every raw
+ * spinlock (and more). So the "read" side to such a lock is anything
+ * which disables preemption.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
*
- * Description: This causes a thread to be scheduled on every cpu,
- * each of which disables interrupts. The result is that no one is
- * holding a spinlock or inside any other preempt-disabled region when
- * @fn() runs.
+ * Description: This causes a thread to be scheduled on every CPU, which
+ * will run with interrupts disabled. Each CPU specified by @cpus will
+ * run @fn.  While @fn is executing, there will be no other CPUs holding
+ * a raw spinlock or running within any other type of preempt-disabled
+ * region of code.
*
- * This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel.
+ * When @cpus specifies only a single CPU, this can be thought of as
+ * a reader-writer lock where readers disable preemption (for example,
+ * by holding a raw spinlock) and where the insanely heavy writers run
+ * @fn while also preventing any other CPU from doing any useful work.
+ * These writers can also be thought of as having implicitly grabbed every
+ * raw spinlock in the kernel.
*
- * Protects against CPU hotplug.
+ * When @fn is a no-op, this can be thought of as an RCU implementation
+ * where readers again disable preemption and writers use stop_machine()
+ * in place of synchronize_rcu(), albeit with orders of magnitude more
+ * disruption than even that of synchronize_rcu_expedited().
+ *
+ * Although only one stop_machine() operation can proceed at a time,
+ * the possibility of blocking in cpus_read_lock() means that the caller
+ * cannot usefully rely on this serialization.
+ *
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
*/
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
/**
* stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
+ *
+ * Same as above. Avoids nested calls to cpus_read_lock().
*
- * Same as above. Must be called from with in a cpus_read_lock() protected
- * region. Avoids nested calls to cpus_read_lock().
+ * Context: Must be called from within a cpus_read_lock() protected region.
*/
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
/**
* stop_core_cpuslocked: - stop all threads on just one core
* @cpu: any cpu in the targeted core
- * @fn: the function to run
- * @data: the data ptr for @fn()
+ * @fn: the function to run on each CPU in the core containing @cpu
+ * @data: the data ptr to pass to @fn()
*
- * Same as above, but instead of every CPU, only the logical CPUs of a
- * single core are affected.
+ * Same as above, but instead of every CPU, only the logical CPUs of the
+ * single core containing @cpu are affected.
*
* Context: Must be called from within a cpus_read_lock() protected region.
*
- * Return: 0 if all executions of @fn returned 0, any non zero return
- * value if any returned non zero.
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
*/
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
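A caller applying the reader-writer analogy above might be structured as
follows (a sketch; the callback and its payload are hypothetical):

static int quiesce_fn(void *data)
{
        /* Runs with interrupts disabled; no other CPU is inside a
         * preempt-disabled region while this executes. */
        WRITE_ONCE(*(bool *)data, true);
        return 0;
}

static int quiesce_all(void)
{
        bool done = false;

        /* NULL @cpus: see the kernel-doc above for which CPUs run @fn. */
        return stop_machine(quiesce_fn, &done, NULL);
}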
diff --git a/include/linux/string.h b/include/linux/string.h
index 60168aa2af07..1b564c36d721 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -4,6 +4,7 @@
#include <linux/args.h>
#include <linux/array_size.h>
+#include <linux/cleanup.h> /* for DEFINE_FREE() */
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
@@ -76,12 +77,16 @@ ssize_t sized_strscpy(char *, const char *, size_t);
* known size.
*/
#define __strscpy0(dst, src, ...) \
- sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy1(dst, src, size) sized_strscpy(dst, src, size)
+ sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy1(dst, src, size) \
+ sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
#define __strscpy_pad0(dst, src, ...) \
- sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst))
-#define __strscpy_pad1(dst, src, size) sized_strscpy_pad(dst, src, size)
+ sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy_pad1(dst, src, size) \
+ sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
/**
* strscpy - Copy a C-string into a sized buffer
@@ -279,6 +284,18 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *str, char old, char new);
+/**
+ * mem_is_zero - Check if an area of memory is all 0's.
+ * @s: The memory area
+ * @n: The size of the area
+ *
+ * Return: True if the area of memory is all 0's.
+ */
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
+
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
@@ -289,13 +306,15 @@ extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_si
extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
+extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
__realloc_size(2, 3);
/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
+DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))
+
/* lib/cmdline.c */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
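The DEFINE_FREE() hook above enables scope-based cleanup of argv_split()
results, and note that kmemdup_array() now takes (src, count, element_size),
matching its __realloc_size(2, 3) annotation. A hypothetical caller of the
cleanup form:

static int parse_cmdline(const char *cmdline)
{
        int argc;
        char **argv __free(argv_free) = argv_split(GFP_KERNEL, cmdline, &argc);

        if (!argv)
                return -ENOMEM;

        /* ... use argv[0..argc-1]; freed automatically on every return ... */
        return 0;
}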
@@ -317,9 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
-int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
@@ -327,16 +345,6 @@ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
-/**
- * strstarts - does @str start with @prefix?
- * @str: string to examine
- * @prefix: prefix to look for.
- */
-static inline bool strstarts(const char *str, const char *prefix)
-{
- return strncmp(str, prefix, strlen(prefix)) == 0;
-}
-
size_t memweight(const void *ptr, size_t bytes);
/**
@@ -363,6 +371,10 @@ static inline void memzero_explicit(void *s, size_t count)
* kbasename - return the last part of a pathname.
*
* @path: path to extract the filename from.
+ *
+ * Returns:
+ * Pointer to the filename portion inside @path. If no '/' exists,
+ * returns @path unchanged.
*/
static inline const char *kbasename(const char *path)
{
@@ -396,8 +408,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* must be discoverable by the compiler.
*/
#define strtomem_pad(dest, src, pad) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
@@ -419,8 +434,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* must be discoverable by the compiler.
*/
#define strtomem(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
@@ -438,8 +456,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
@@ -463,8 +484,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
* Note that sizes of @dest and @src must be known at compile-time.
*/
#define memtostr_pad(dest, src) do { \
- const size_t _dest_len = __builtin_object_size(dest, 1); \
- const size_t _src_len = __builtin_object_size(src, 1); \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
const size_t _src_chars = strnlen(src, _src_len); \
const size_t _copy_len = min(_dest_len - 1, _src_chars); \
\
@@ -532,4 +556,36 @@ static __always_inline size_t str_has_prefix(const char *str, const char *prefix
return strncmp(str, prefix, len) == 0 ? len : 0;
}
+/**
+ * strstarts - does @str start with @prefix?
+ * @str: string to examine
+ * @prefix: prefix to look for.
+ *
+ * Returns:
+ * True if @str begins with @prefix. False in all other cases.
+ */
+static inline bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+/**
+ * strends - Check if a string ends with another string.
+ * @str: NULL-terminated string to check against @suffix
+ * @suffix: NULL-terminated string defining the suffix to look for in @str
+ *
+ * Returns:
+ * True if @str ends with @suffix. False in all other cases.
+ */
+static inline bool __attribute__((nonnull(1, 2)))
+strends(const char *str, const char *suffix)
+{
+	size_t str_len = strlen(str), suffix_len = strlen(suffix);
+
+ if (str_len < suffix_len)
+ return false;
+
+ return !(strcmp(str + str_len - suffix_len, suffix));
+}
+
#endif /* _LINUX_STRING_H_ */
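A short usage sketch for the prefix/suffix helpers (the name patterns are
invented for illustration):

static bool looks_like_firmware(const char *name)
{
        return strstarts(name, "fw-") && strends(name, ".bin");
}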
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index d9ebe20229f8..ee84087d4b26 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -2,17 +2,38 @@
#ifndef _LINUX_STRING_CHOICES_H_
#define _LINUX_STRING_CHOICES_H_
+/*
+ * Here we provide a series of helpers in the str_$TRUE_$FALSE format (more
+ * can be added as needed), where $TRUE and $FALSE are their corresponding
+ * literal strings.  These helpers can be used in printing and in other
+ * places where constant strings are required.  Using these helpers offers
+ * the following benefits:
+ *  1) Reducing the hardcoding of strings, which makes the code more elegant
+ *     through these simple literal-meaning helpers.
+ *  2) Unifying the output, which prevents the same string from being printed
+ *     in various forms, such as enable/disable, enabled/disabled, en/dis.
+ *  3) Deduping by the linker, which results in a smaller binary file.
+ */
+
#include <linux/types.h>
+static inline const char *str_assert_deassert(bool v)
+{
+ return v ? "assert" : "deassert";
+}
+#define str_deassert_assert(v) str_assert_deassert(!(v))
+
static inline const char *str_enable_disable(bool v)
{
return v ? "enable" : "disable";
}
+#define str_disable_enable(v) str_enable_disable(!(v))
static inline const char *str_enabled_disabled(bool v)
{
return v ? "enabled" : "disabled";
}
+#define str_disabled_enabled(v) str_enabled_disabled(!(v))
static inline const char *str_hi_lo(bool v)
{
@@ -26,21 +47,41 @@ static inline const char *str_high_low(bool v)
}
#define str_low_high(v) str_high_low(!(v))
+static inline const char *str_input_output(bool v)
+{
+ return v ? "input" : "output";
+}
+#define str_output_input(v) str_input_output(!(v))
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+#define str_off_on(v) str_on_off(!(v))
+
static inline const char *str_read_write(bool v)
{
return v ? "read" : "write";
}
#define str_write_read(v) str_read_write(!(v))
-static inline const char *str_on_off(bool v)
+static inline const char *str_true_false(bool v)
{
- return v ? "on" : "off";
+ return v ? "true" : "false";
+}
+#define str_false_true(v) str_true_false(!(v))
+
+static inline const char *str_up_down(bool v)
+{
+ return v ? "up" : "down";
}
+#define str_down_up(v) str_up_down(!(v))
static inline const char *str_yes_no(bool v)
{
return v ? "yes" : "no";
}
+#define str_no_yes(v) str_yes_no(!(v))
/**
* str_plural - Return the simple pluralization based on English counts
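Typical use pairs a helper with a boolean state, and the negated macros
avoid an open-coded `!` at the call site (a sketch; dev, enabled and
link_up are hypothetical):

dev_info(dev, "TX offload %s, link is %s\n",
         str_enabled_disabled(enabled), str_up_down(link_up));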
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index e93fbb5b0c01..3fb88a1e9898 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -31,6 +31,7 @@ enum string_size_units {
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len);
+int parse_int_array(const char *buf, size_t count, int **array);
int parse_int_array_user(const char __user *from, size_t count, int **array);
#define UNESCAPE_SPACE BIT(0)
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
index c505f30e8b68..eecc7eb63bfb 100644
--- a/include/linux/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -40,7 +40,7 @@ enum {
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy
{
- struct mii_phy_def* def;
+ const struct mii_phy_def *def;
u32 advertising;
int mii_id;
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 35766963dd14..e783132e481f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -222,6 +222,8 @@ static inline bool cache_is_expired(struct cache_detail *detail, struct cache_he
return detail->flush_time >= h->last_refresh;
}
+extern int cache_check_rcu(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp);
extern int cache_check(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 5321585c778f..f8b406b0a1af 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -64,7 +64,9 @@ struct rpc_clnt {
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
cl_chatty : 1,/* be verbose */
- cl_shutdown : 1;/* rpc immediate -EIO */
+ cl_shutdown : 1,/* rpc immediate -EIO */
+ cl_netunreach_fatal : 1;
+ /* Treat ENETUNREACH errors as fatal */
struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
@@ -93,6 +95,7 @@ struct rpc_clnt {
const struct cred *cl_cred;
unsigned int cl_max_connect; /* max number of transports not to the same IP */
struct super_block *pipefs_sb;
+ atomic_t cl_task_count;
};
/*
@@ -174,6 +177,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
+#define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index f6aeed07fe04..891f6173c951 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -23,43 +23,30 @@ extern unsigned int nlm_debug;
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_cont(fmt, ...) \
- dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__)
#define dprintk_rcu(fmt, ...) \
dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_rcu_cont(fmt, ...) \
- dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__)
#undef ifdebug
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
-# define dfprintk(fac, fmt, ...) \
-do { \
- ifdebug(fac) \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
-} while (0)
+# if IS_ENABLED(CONFIG_SUNRPC_DEBUG_TRACE)
+# define __sunrpc_printk(fmt, ...) trace_printk(fmt, ##__VA_ARGS__)
+# else
+# define __sunrpc_printk(fmt, ...) printk(KERN_DEFAULT fmt, ##__VA_ARGS__)
+# endif
-# define dfprintk_cont(fac, fmt, ...) \
+# define dfprintk(fac, fmt, ...) \
do { \
ifdebug(fac) \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
} while (0)
# define dfprintk_rcu(fac, fmt, ...) \
do { \
ifdebug(fac) { \
rcu_read_lock(); \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
- rcu_read_unlock(); \
- } \
-} while (0)
-
-# define dfprintk_rcu_cont(fac, fmt, ...) \
-do { \
- ifdebug(fac) { \
- rcu_read_lock(); \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
rcu_read_unlock(); \
} \
} while (0)
@@ -68,7 +55,6 @@ do { \
#else
# define ifdebug(fac) if (0)
# define dfprintk(fac, fmt, ...) do {} while (0)
-# define dfprintk_cont(fac, fmt, ...) do {} while (0)
# define dfprintk_rcu(fac, fmt, ...) do {} while (0)
# define RPC_IFDEBUG(x)
#endif
diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h
deleted file mode 100644
index 3ccecd0ad229..000000000000
--- a/include/linux/sunrpc/gss_asn1.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * linux/include/linux/sunrpc/gss_asn1.h
- *
- * minimal asn1 for generic encoding/decoding of gss tokens
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
- * lib/gssapi/krb5/gssapiP_krb5.h, and others
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1995 by the Massachusetts Institute of Technology.
- * All Rights Reserved.
- *
- * Export of this software from the United States of America may
- * require a specific license from the United States Government.
- * It is the responsibility of any person or organization contemplating
- * export to obtain such a license before exporting.
- *
- * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
- * distribute this software and its documentation for any purpose and
- * without fee is hereby granted, provided that the above copyright
- * notice appear in all copies and that both that copyright notice and
- * this permission notice appear in supporting documentation, and that
- * the name of M.I.T. not be used in advertising or publicity pertaining
- * to distribution of the software without specific, written prior
- * permission. Furthermore if you modify this software you must label
- * your software as modified software and not distribute it in such a
- * fashion that it might be confused with the original M.I.T. software.
- * M.I.T. makes no representations about the suitability of
- * this software for any purpose. It is provided "as is" without express
- * or implied warranty.
- *
- */
-
-
-#include <linux/sunrpc/gss_api.h>
-
-#define SIZEOF_INT 4
-
-/* from gssapi_err_generic.h */
-#define G_BAD_SERVICE_NAME (-2045022976L)
-#define G_BAD_STRING_UID (-2045022975L)
-#define G_NOUSER (-2045022974L)
-#define G_VALIDATE_FAILED (-2045022973L)
-#define G_BUFFER_ALLOC (-2045022972L)
-#define G_BAD_MSG_CTX (-2045022971L)
-#define G_WRONG_SIZE (-2045022970L)
-#define G_BAD_USAGE (-2045022969L)
-#define G_UNKNOWN_QOP (-2045022968L)
-#define G_NO_HOSTNAME (-2045022967L)
-#define G_BAD_HOSTNAME (-2045022966L)
-#define G_WRONG_MECH (-2045022965L)
-#define G_BAD_TOK_HEADER (-2045022964L)
-#define G_BAD_DIRECTION (-2045022963L)
-#define G_TOK_TRUNC (-2045022962L)
-#define G_REFLECT (-2045022961L)
-#define G_WRONG_TOKID (-2045022960L)
-
-#define g_OID_equal(o1,o2) \
- (((o1)->len == (o2)->len) && \
- (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
-
-u32 g_verify_token_header(
- struct xdr_netobj *mech,
- int *body_size,
- unsigned char **buf_in,
- int toksize);
-
-int g_token_size(
- struct xdr_netobj *mech,
- unsigned int body_size);
-
-void g_make_token_header(
- struct xdr_netobj *mech,
- int body_size,
- unsigned char **buf);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 78a80bf3fdcb..43950b5237c8 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -40,7 +40,6 @@
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/gss_asn1.h>
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index c4b0eb2b2f04..ada17b57ca44 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -69,15 +69,17 @@ enum rpc_reject_stat {
};
enum rpc_auth_stat {
- RPC_AUTH_OK = 0,
- RPC_AUTH_BADCRED = 1,
- RPC_AUTH_REJECTEDCRED = 2,
- RPC_AUTH_BADVERF = 3,
- RPC_AUTH_REJECTEDVERF = 4,
- RPC_AUTH_TOOWEAK = 5,
+ RPC_AUTH_OK = 0, /* success */
+ RPC_AUTH_BADCRED = 1, /* bad credential (seal broken) */
+ RPC_AUTH_REJECTEDCRED = 2, /* client must begin new session */
+ RPC_AUTH_BADVERF = 3, /* bad verifier (seal broken) */
+ RPC_AUTH_REJECTEDVERF = 4, /* verifier expired or replayed */
+ RPC_AUTH_TOOWEAK = 5, /* rejected for security reasons */
+ RPC_AUTH_INVALIDRESP = 6, /* bogus response verifier */
+ RPC_AUTH_FAILED = 7, /* reason unknown */
/* RPCSEC_GSS errors */
- RPCSEC_GSS_CREDPROBLEM = 13,
- RPCSEC_GSS_CTXPROBLEM = 14
+ RPCSEC_GSS_CREDPROBLEM = 13, /* no credentials for user */
+ RPCSEC_GSS_CTXPROBLEM = 14 /* problem with context */
};
#define RPC_MAXNETNAMELEN 256
diff --git a/include/linux/sunrpc/rdma_rn.h b/include/linux/sunrpc/rdma_rn.h
new file mode 100644
index 000000000000..7d032ca057af
--- /dev/null
+++ b/include/linux/sunrpc/rdma_rn.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates.
+ */
+
+#ifndef _LINUX_SUNRPC_RDMA_RN_H
+#define _LINUX_SUNRPC_RDMA_RN_H
+
+#include <rdma/ib_verbs.h>
+
+/**
+ * struct rpcrdma_notification - request removal notification
+ * @rn_done: callback invoked when the underlying ib_device is being removed
+ * @rn_index: index identifying this registration
+ */
+struct rpcrdma_notification {
+ void (*rn_done)(struct rpcrdma_notification *rn);
+ u32 rn_index;
+};
+
+int rpcrdma_rn_register(struct ib_device *device,
+ struct rpcrdma_notification *rn,
+ void (*done)(struct rpcrdma_notification *rn));
+void rpcrdma_rn_unregister(struct ib_device *device,
+ struct rpcrdma_notification *rn);
+int rpcrdma_ib_client_register(void);
+void rpcrdma_ib_client_unregister(void);
+
+#endif /* _LINUX_SUNRPC_RDMA_RN_H */
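A hypothetical consumer of this notification API (the callback body is a
placeholder; what "quiescing" means is consumer-specific):

static void xprt_rn_done(struct rpcrdma_notification *rn)
{
        /* The ib_device is being removed: stop using it. */
}

static int xprt_watch_device(struct ib_device *device,
                             struct rpcrdma_notification *rn)
{
        return rpcrdma_rn_register(device, rn, xprt_rn_done);
}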
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 3b35b6f6533a..2cb406f8ff4e 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -98,7 +98,7 @@ static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) {
}
struct rpc_clnt;
-extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
+extern int rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
extern int rpc_remove_client_dir(struct rpc_clnt *);
extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh);
@@ -127,9 +127,9 @@ extern void rpc_remove_cache_dir(struct dentry *);
struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags);
void rpc_destroy_pipe_data(struct rpc_pipe *pipe);
-extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *,
+extern int rpc_mkpipe_dentry(struct dentry *, const char *, void *,
struct rpc_pipe *);
-extern int rpc_unlink(struct dentry *);
+extern void rpc_unlink(struct rpc_pipe *);
extern int register_rpc_pipefs(void);
extern void unregister_rpc_pipefs(void);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 0c77ba488bba..ccba79ebf893 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -134,6 +134,7 @@ struct rpc_task_setup {
#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
+#define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
@@ -151,13 +152,14 @@ struct rpc_task_setup {
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+};
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -169,7 +171,7 @@ struct rpc_task_setup {
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
-#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t)	(READ_ONCE((t)->tk_rpc_status) == -ERESTARTSYS)
/*
* Task priorities.
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 23617da0e565..5506d20857c3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -21,6 +21,7 @@
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/kthread.h>
/*
*
@@ -33,9 +34,9 @@
* node traffic on multi-node NUMA NFS servers.
*/
struct svc_pool {
- unsigned int sp_id; /* pool id; also node id on NUMA */
+ unsigned int sp_id; /* pool id; also node id on NUMA */
struct lwq sp_xprts; /* pending transports */
- atomic_t sp_nrthreads; /* # of threads in pool */
+ unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
@@ -66,25 +67,23 @@ enum {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
- unsigned int sv_maxconn; /* max connections allowed or
- * '0' causing max to be based
- * on number of threads. */
-
unsigned int sv_max_payload; /* datagram payload size */
unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
unsigned int sv_xdrsize; /* XDR buffer size */
struct list_head sv_permsocks; /* all permanent sockets */
struct list_head sv_tempsocks; /* all temporary sockets */
- int sv_tmpcnt; /* count of temporary sockets */
+ int sv_tmpcnt; /* count of temporary "valid" sockets */
struct timer_list sv_temptimer; /* timer for aging temporary sockets */
char * sv_name; /* service name */
unsigned int sv_nrpools; /* number of thread pools */
+ bool sv_is_pooled; /* is this a pooled service? */
struct svc_pool * sv_pools; /* array of thread pools */
int (*sv_threadfn)(void *data);
@@ -120,14 +119,14 @@ void svc_destroy(struct svc_serv **svcp);
* Linux limit; someone who cares more about NFS/UDP performance
* can test a larger number.
*
- * For TCP transports we have more freedom. A size of 1MB is
- * chosen to match the client limit. Other OSes are known to
- * have larger limits, but those numbers are probably beyond
- * the point of diminishing returns.
+ * For non-UDP transports we have more freedom. A size of 4MB is
+ * chosen to accommodate clients that support larger I/O sizes.
*/
-#define RPCSVC_MAXPAYLOAD (1*1024*1024u)
-#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD
-#define RPCSVC_MAXPAYLOAD_UDP (32*1024u)
+enum {
+ RPCSVC_MAXPAYLOAD = 4 * 1024 * 1024,
+ RPCSVC_MAXPAYLOAD_TCP = RPCSVC_MAXPAYLOAD,
+ RPCSVC_MAXPAYLOAD_UDP = 32 * 1024,
+};
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
@@ -151,14 +150,24 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
* in a page - NFSd ensures this. lockd also has no trouble.
+ */
+
+/**
+ * svc_serv_maxpages - maximum count of pages needed for one RPC message
+ * @serv: RPC service context
*
- * Each request/reply pair can have at most one "payload", plus two pages,
- * one for the request, and one for the reply.
- * We using ->sendfile to return read data, we might need one extra page
- * if the request is not page-aligned. So add another '1'.
+ * Returns a count of pages or vectors that can hold the maximum
+ * size RPC message for @serv.
+ *
+ * Each request/reply pair can have at most one "payload", plus two
+ * pages, one for the request, and one for the reply.
+ * nfsd_splice_actor() might need an extra page when a READ payload
+ * is not page-aligned.
*/
-#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
- + 2 + 1)
+static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv)
+{
+ return DIV_ROUND_UP(serv->sv_max_mesg, PAGE_SIZE) + 2 + 1;
+}
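With RPCSVC_MAXPAGES gone as a compile-time array bound, per-thread page
arrays are presumably sized at runtime; a sketch of such an allocation:

/* Sketch: size rq_pages from the service's maximum message size. */
rqstp->rq_maxpages = svc_serv_maxpages(serv);
rqstp->rq_pages = kcalloc(rqstp->rq_maxpages, sizeof(struct page *),
                          GFP_KERNEL);
if (!rqstp->rq_pages)
        return -ENOMEM;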
/*
* The context of a single thread, including the request currently being
@@ -187,16 +196,16 @@ struct svc_rqst {
struct xdr_buf rq_arg;
struct xdr_stream rq_arg_stream;
struct xdr_stream rq_res_stream;
- struct page *rq_scratch_page;
+ struct folio *rq_scratch_folio;
struct xdr_buf rq_res;
- struct page *rq_pages[RPCSVC_MAXPAGES + 1];
+ unsigned long rq_maxpages; /* num of entries in rq_pages */
+ struct page * *rq_pages;
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
struct folio_batch rq_fbatch;
- struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
- struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
+ struct bio_vec *rq_bvec;
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -231,10 +240,15 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
- unsigned long bc_to_initval;
- unsigned int bc_to_retries;
- void ** rq_lease_breaker; /* The v4 client breaking a lease */
+
+	int			rq_err;		/* Thread sets this to indicate
+ * initialisation success.
+ */
+
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
unsigned int rq_status_counter; /* RPC processing counter */
+ void **rq_lease_breaker; /* The v4 client breaking a lease */
};
/* bits for rq_flags */
@@ -304,6 +318,26 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
+/**
+ * svc_thread_init_status - report whether thread has initialised successfully
+ * @rqstp: the thread in question
+ * @err: errno code
+ *
+ * After performing any initialisation that could fail, and before starting
+ * normal work, each sunrpc svc_thread must call svc_thread_init_status()
+ * with an appropriate error, or zero.
+ *
+ * If zero is passed, the thread is ready and must continue until
+ * svc_thread_should_stop() returns true. If a non-zero error is passed
+ * the call will not return - the thread will exit.
+ */
+static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
+{
+ store_release_wake_up(&rqstp->rq_err, err);
+ if (err)
+ kthread_exit(1);
+}
+
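A service thread using this handshake might be structured like so (a
sketch; example_thread_setup() is hypothetical):

static int example_svc_thread(void *data)
{
        struct svc_rqst *rqstp = data;
        int err;

        err = example_thread_setup(rqstp);      /* may fail */
        svc_thread_init_status(rqstp, err);     /* exits here if err != 0 */

        while (!svc_thread_should_stop(rqstp))
                svc_recv(rqstp);

        svc_exit_thread(rqstp);
        return 0;
}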
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -328,10 +362,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -398,19 +431,18 @@ struct svc_procedure {
/*
* Function prototypes.
*/
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
+int sunrpc_set_pool_mode(const char *val);
+int sunrpc_get_pool_mode(char *val, size_t size);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
-struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
- struct svc_pool *pool, int node);
bool svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_release_pages(struct svc_rqst *rqstp);
-void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
struct svc_stat *stats,
unsigned int bufsize,
int (*threadfn)(void *data));
@@ -430,8 +462,6 @@ const char * svc_proc_name(const struct svc_rqst *rqstp);
int svc_encode_result_payload(struct svc_rqst *rqstp,
unsigned int offset,
unsigned int length);
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct xdr_buf *payload);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
@@ -443,11 +473,6 @@ int svc_generic_rpcbind_set(struct net *net,
u32 version, int family,
unsigned short proto,
unsigned short port);
-int svc_rpcbind_set_version(struct net *net,
- const struct svc_program *progp,
- u32 version, int family,
- unsigned short proto,
- unsigned short port);
#define RPC_MAX_ADDRBUFLEN (63U)
@@ -478,7 +503,7 @@ static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
xdr_init_decode(xdr, buf, argv->iov_base, NULL);
- xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
+ xdr_set_scratch_folio(xdr, rqstp->rq_scratch_folio);
}
/**
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index d33bab33099a..57f4fd94166a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,7 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
@@ -76,6 +77,7 @@ struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
+ struct rpcrdma_notification sc_rn; /* removal notification */
int sc_ord; /* RDMA read limit */
int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
@@ -129,7 +131,7 @@ static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
*/
enum {
RPCRDMA_LISTEN_BACKLOG = 10,
- RPCRDMA_MAX_REQUESTS = 64,
+ RPCRDMA_MAX_REQUESTS = 128,
RPCRDMA_MAX_BC_REQUESTS = 2,
};
@@ -200,7 +202,8 @@ struct svc_rdma_recv_ctxt {
struct svc_rdma_pcl rc_reply_pcl;
unsigned int rc_page_count;
- struct page *rc_pages[RPCSVC_MAXPAGES];
+ unsigned long rc_maxpages;
+ struct page *rc_pages[] __counted_by(rc_maxpages);
};
/*
@@ -242,7 +245,8 @@ struct svc_rdma_send_ctxt {
void *sc_xprt_buf;
int sc_page_count;
int sc_cur_sge_no;
- struct page *sc_pages[RPCSVC_MAXPAGES];
+ unsigned long sc_maxpages;
+ struct page **sc_pages;
struct ib_sge sc_sges[];
};
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0981e35a9fed..da2a2531e110 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -53,6 +53,7 @@ struct svc_xprt {
struct svc_xprt_class *xpt_class;
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
+ ktime_t xpt_qtime;
struct list_head xpt_list;
struct lwq_node xpt_ready;
unsigned long xpt_flags;
@@ -99,8 +100,33 @@ enum {
XPT_HANDSHAKE, /* xprt requests a handshake */
XPT_TLS_SESSION, /* transport-layer security established */
XPT_PEER_AUTH, /* peer has been authenticated */
+ XPT_PEER_VALID, /* peer has presented a filehandle that
+ * it has access to. It is NOT counted
+ * in ->sv_tmpcnt.
+ */
+ XPT_RPCB_UNREG, /* transport that needs unregistering
+ * with rpcbind (TCP, UDP) on destroy
+ */
};
+/*
+ * Maximum number of "tmp" connections - those without XPT_PEER_VALID -
+ * permitted on any service.
+ */
+#define XPT_MAX_TMP_CONN 64
+
+static inline void svc_xprt_set_valid(struct svc_xprt *xpt)
+{
+ if (test_bit(XPT_TEMP, &xpt->xpt_flags) &&
+ !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) {
+ struct svc_serv *serv = xpt->xpt_server;
+
+ spin_lock(&serv->sv_lock);
+ serv->sv_tmpcnt -= 1;
+ spin_unlock(&serv->sv_lock);
+ }
+}
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
@@ -142,7 +168,8 @@ int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
const struct cred *cred);
-void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net);
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net,
+ bool unregister);
void svc_xprt_received(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 61c455f1e1f5..4b92fec23a49 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -85,7 +86,6 @@ struct auth_domain {
enum svc_auth_status {
SVC_GARBAGE = 1,
- SVC_SYSERR,
SVC_VALID,
SVC_NEGATIVE,
SVC_OK,
@@ -151,13 +151,16 @@ struct auth_ops {
struct svc_xprt;
-extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7c78ec6356b9..de37069aba90 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -26,6 +26,9 @@ struct svc_sock {
void (*sk_odata)(struct sock *);
void (*sk_owspace)(struct sock *);
+ /* For sends (protected by xpt_mutex) */
+ struct bio_vec *sk_bvec;
+
/* private TCP part */
/* On-the-wire fragment header: */
__be32 sk_marker;
@@ -40,7 +43,9 @@ struct svc_sock {
struct completion sk_handshake_done;
- struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
+ /* received data */
+ unsigned long sk_maxpages;
+ struct page * sk_pages[] __counted_by(sk_maxpages);
};
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
@@ -58,8 +63,6 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
*/
void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
-void svc_drop(struct svc_rqst *);
-void svc_sock_update_bufs(struct svc_serv *serv);
int svc_addsock(struct svc_serv *serv, struct net *net,
const int fd, char *name_return, const size_t len,
const struct cred *cred);
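With sk_pages converted to a __counted_by() flexible array, the element count must be stored before the array is indexed so fortified accesses can be bounds-checked. A hedged sketch of the allocation pattern this implies (the function is hypothetical; the real setup lives in the svcsock code, not this header):

static struct svc_sock *demo_svc_sock_alloc(unsigned long maxpages)
{
	struct svc_sock *svsk;

	svsk = kzalloc(struct_size(svsk, sk_pages, maxpages), GFP_KERNEL);
	if (!svsk)
		return NULL;
	svsk->sk_maxpages = maxpages;	/* bound checked on sk_pages[] access */
	return svsk;
}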
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2f8dc47f1eb0..152597750f55 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -13,7 +13,7 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/scatterlist.h>
struct bio_vec;
@@ -119,6 +119,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
+#define rpc_autherr_invalidresp cpu_to_be32(RPC_AUTH_INVALIDRESP)
+#define rpc_autherr_failed cpu_to_be32(RPC_AUTH_FAILED)
#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
@@ -128,10 +130,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len);
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len);
__be32 *xdr_encode_string(__be32 *p, const char *s);
-__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp,
- unsigned int maxlen);
__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *);
-__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
@@ -242,8 +241,7 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
-extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
- struct page **pages, struct rpc_rqst *rqst);
+void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes);
extern void __xdr_commit_encode(struct xdr_stream *xdr);
@@ -290,16 +288,16 @@ xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
}
/**
- * xdr_set_scratch_page - Attach a scratch buffer for decoding data
+ * xdr_set_scratch_folio - Attach a scratch buffer for decoding data
* @xdr: pointer to xdr_stream struct
- * @page: an anonymous page
+ * @folio: an anonymous folio
*
* See xdr_set_scratch_buffer().
*/
static inline void
-xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page)
+xdr_set_scratch_folio(struct xdr_stream *xdr, struct folio *folio)
{
- xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE);
+ xdr_set_scratch_buffer(xdr, folio_address(folio), folio_size(folio));
}
/**
@@ -341,12 +339,6 @@ xdr_stream_remaining(const struct xdr_stream *xdr)
return xdr->nwords << 2;
}
-ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr,
- size_t size);
-ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
- size_t maxlen, gfp_t gfp_flags);
-ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
- size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
@@ -681,6 +673,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr)
}
/**
+ * xdr_stream_decode_be32 - Decode a big-endian 32-bit integer
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store integer
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_be32(struct xdr_stream *xdr, __be32 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ *ptr = *p;
+ return 0;
+}
+
+/**
* xdr_stream_decode_u64 - Decode a 64-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store 64-bit integer
@@ -708,7 +721,7 @@ xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr)
* @len: size of buffer pointed to by @ptr
*
* Return values:
- * On success, returns size of object stored in @ptr
+ * %0 on success
* %-EBADMSG on XDR buffer overflow
*/
static inline ssize_t
@@ -719,7 +732,7 @@ xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len)
if (unlikely(!p))
return -EBADMSG;
xdr_decode_opaque_fixed(p, ptr, len);
- return len;
+ return 0;
}
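xdr_stream_decode_be32() complements xdr_stream_decode_u32(): it hands back the raw big-endian word, which suits values that are never interpreted numerically and are only echoed back on the wire. A short illustrative decoder for a hypothetical two-word header (names are not from the patch):

struct demo_hdr {
	__be32	cookie;	/* opaque on-the-wire value, kept big-endian */
	u32	len;	/* used arithmetically, so converted to host order */
};

static int demo_decode_hdr(struct xdr_stream *xdr, struct demo_hdr *hdr)
{
	if (xdr_stream_decode_be32(xdr, &hdr->cookie) < 0)
		return -EBADMSG;
	if (xdr_stream_decode_u32(xdr, &hdr->len) < 0)
		return -EBADMSG;
	return 0;
}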
/**
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
new file mode 100644
index 000000000000..66ca3ece951a
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__BUILTINS_H_
+#define _SUNRPC_XDRGEN__BUILTINS_H_
+
+#include <linux/sunrpc/xdr.h>
+
+static inline bool
+xdrgen_decode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_encode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (*p != xdr_zero);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = val ? xdr_one : xdr_zero;
+ return true;
+}
+
+static inline bool
+xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_int(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_long(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */
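Each primitive returns bool and leaves the stream positioned just past the item it consumed, so generated decoders can chain them with early exits. Illustrative only — the type and decoder below are hypothetical stand-ins for what xdrgen might emit for "struct foo { int bar; unsigned hyper baz; };":

struct foo {
	s32 bar;
	u64 baz;
};

static bool demo_xdrgen_decode_foo(struct xdr_stream *xdr, struct foo *ptr)
{
	if (!xdrgen_decode_int(xdr, &ptr->bar))
		return false;
	return xdrgen_decode_unsigned_hyper(xdr, &ptr->baz);
}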
diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h
new file mode 100644
index 000000000000..20c7270aa64d
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_defs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__DEFS_H_
+#define _SUNRPC_XDRGEN__DEFS_H_
+
+#define TRUE (true)
+#define FALSE (false)
+
+typedef struct {
+ u32 len;
+ unsigned char *data;
+} string;
+
+typedef struct {
+ u32 len;
+ u8 *data;
+} opaque;
+
+#define XDR_void (0)
+#define XDR_bool (1)
+#define XDR_int (1)
+#define XDR_unsigned_int (1)
+#define XDR_long (1)
+#define XDR_unsigned_long (1)
+#define XDR_hyper (2)
+#define XDR_unsigned_hyper (2)
+
+#endif /* _SUNRPC_XDRGEN__DEFS_H_ */
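The XDR_* width constants count four-byte XDR units rather than bytes, so generated size macros stay in units until a buffer is actually reserved. A hypothetical example of how such a macro composes and converts, assuming XDR_UNIT (the four-byte XDR word size from <linux/sunrpc/xdr.h>):

/* for a hypothetical "struct example { bool flag; hyper stamp; }" */
#define EXAMPLE_example_sz	(XDR_bool + XDR_hyper)		/* 3 XDR units */
#define EXAMPLE_example_bytes	(EXAMPLE_example_sz * XDR_UNIT)	/* 12 bytes   */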
diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h
new file mode 100644
index 000000000000..cf21a14aa885
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/nfs4_1.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H
+#define _LINUX_XDRGEN_NFS4_1_DEF_H
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+
+typedef s64 int64_t;
+
+typedef u32 uint32_t;
+
+typedef struct {
+ u32 count;
+ uint32_t *element;
+} bitmap4;
+
+struct nfstime4 {
+ int64_t seconds;
+ uint32_t nseconds;
+};
+
+typedef bool fattr4_offline;
+
+enum { FATTR4_OFFLINE = 83 };
+
+struct open_arguments4 {
+ bitmap4 oa_share_access;
+ bitmap4 oa_share_deny;
+ bitmap4 oa_share_access_want;
+ bitmap4 oa_open_claim;
+ bitmap4 oa_create_mode;
+};
+
+enum open_args_share_access4 {
+ OPEN_ARGS_SHARE_ACCESS_READ = 1,
+ OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
+ OPEN_ARGS_SHARE_ACCESS_BOTH = 3,
+};
+typedef enum open_args_share_access4 open_args_share_access4;
+
+enum open_args_share_deny4 {
+ OPEN_ARGS_SHARE_DENY_NONE = 0,
+ OPEN_ARGS_SHARE_DENY_READ = 1,
+ OPEN_ARGS_SHARE_DENY_WRITE = 2,
+ OPEN_ARGS_SHARE_DENY_BOTH = 3,
+};
+typedef enum open_args_share_deny4 open_args_share_deny4;
+
+enum open_args_share_access_want4 {
+ OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG = 3,
+ OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG = 4,
+ OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL = 5,
+ OPEN_ARGS_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 17,
+ OPEN_ARGS_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 18,
+ OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
+ OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21,
+};
+typedef enum open_args_share_access_want4 open_args_share_access_want4;
+
+enum open_args_open_claim4 {
+ OPEN_ARGS_OPEN_CLAIM_NULL = 0,
+ OPEN_ARGS_OPEN_CLAIM_PREVIOUS = 1,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR = 2,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV = 3,
+ OPEN_ARGS_OPEN_CLAIM_FH = 4,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6,
+};
+typedef enum open_args_open_claim4 open_args_open_claim4;
+
+enum open_args_createmode4 {
+ OPEN_ARGS_CREATEMODE_UNCHECKED4 = 0,
+ OPEN_ARGS_CREATE_MODE_GUARDED = 1,
+ OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
+ OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3,
+};
+typedef enum open_args_createmode4 open_args_createmode4;
+
+typedef struct open_arguments4 fattr4_open_arguments;
+
+enum { FATTR4_OPEN_ARGUMENTS = 86 };
+
+enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 };
+
+typedef struct nfstime4 fattr4_time_deleg_access;
+
+typedef struct nfstime4 fattr4_time_deleg_modify;
+
+enum { FATTR4_TIME_DELEG_ACCESS = 84 };
+
+enum { FATTR4_TIME_DELEG_MODIFY = 85 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_MASK = 0xFF00 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE = 0x0000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_READ_DELEG = 0x0100 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG = 0x0200 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_ANY_DELEG = 0x0300 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_DELEG = 0x0400 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_CANCEL = 0x0500 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 0x10000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 0x20000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 0x100000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 0x200000 };
+
+enum open_delegation_type4 {
+ OPEN_DELEGATE_NONE = 0,
+ OPEN_DELEGATE_READ = 1,
+ OPEN_DELEGATE_WRITE = 2,
+ OPEN_DELEGATE_NONE_EXT = 3,
+ OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
+};
+typedef enum open_delegation_type4 open_delegation_type4;
+
+#define NFS4_int64_t_sz \
+ (XDR_hyper)
+#define NFS4_uint32_t_sz \
+ (XDR_unsigned_int)
+#define NFS4_bitmap4_sz (XDR_unsigned_int)
+#define NFS4_nfstime4_sz \
+ (NFS4_int64_t_sz + NFS4_uint32_t_sz)
+#define NFS4_fattr4_offline_sz \
+ (XDR_bool)
+#define NFS4_open_arguments4_sz \
+ (NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz)
+#define NFS4_open_args_share_access4_sz (XDR_int)
+#define NFS4_open_args_share_deny4_sz (XDR_int)
+#define NFS4_open_args_share_access_want4_sz (XDR_int)
+#define NFS4_open_args_open_claim4_sz (XDR_int)
+#define NFS4_open_args_createmode4_sz (XDR_int)
+#define NFS4_fattr4_open_arguments_sz \
+ (NFS4_open_arguments4_sz)
+#define NFS4_fattr4_time_deleg_access_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_fattr4_time_deleg_modify_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_open_delegation_type4_sz (XDR_int)
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */
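A decoder for these generated types would itself be emitted by xdrgen, composed from the _builtins.h primitives. Purely as a hand-written illustration of that pairing (the helper name is invented):

static bool demo_decode_nfstime4(struct xdr_stream *xdr, struct nfstime4 *ptr)
{
	if (!xdrgen_decode_hyper(xdr, &ptr->seconds))
		return false;
	return xdrgen_decode_unsigned_int(xdr, &ptr->nseconds);
}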
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 81b952649d35..f46d1fb8f71a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,6 +30,8 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
+#define RPC_GSS_SEQNO_ARRAY_SIZE 3U
+
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
@@ -66,7 +68,8 @@ struct rpc_rqst {
struct rpc_cred * rq_cred; /* Bound cred */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- u32 rq_seqno; /* gss seq no. used on req. */
+ u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */
+ unsigned int rq_seqno_count; /* number of entries in rq_seqnos */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
gss privacy code */
@@ -119,6 +122,18 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
+static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno)
+{
+ if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE))
+ req->rq_seqno_count++;
+
+ /* Shift array to make room for the newest element at the beginning */
+ memmove(&req->rq_seqnos[1], &req->rq_seqnos[0],
+ (RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0]));
+ req->rq_seqnos[0] = seqno;
+ return 0;
+}
+
/* RPC transport layer security policies */
enum xprtsec_policies {
RPC_XPRTSEC_NONE = 0,
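xprt_rqst_add_seqno() keeps the newest GSS sequence number at index 0 and lets the oldest fall off the end, so a retransmitted request can still be matched against any of its last RPC_GSS_SEQNO_ARRAY_SIZE sequence numbers. A hedged sketch of the lookup side (the helper name is invented for illustration):

static inline bool demo_rqst_seqno_seen(const struct rpc_rqst *req, u32 seqno)
{
	unsigned int i;

	for (i = 0; i < req->rq_seqno_count; i++)
		if (req->rq_seqnos[i] == seqno)
			return true;
	return false;
}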
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c0514c684b2c..e4db5022fe92 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -56,6 +56,7 @@ extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt, bool offline);
+extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
@@ -75,7 +76,6 @@ extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_switch *newswitch);
extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
-extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index da6ebca3ff77..b02876f1ae38 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -298,6 +298,11 @@ static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
+static inline bool pm_suspend_in_progress(void)
+{
+ return pm_suspend_target_state != PM_SUSPEND_ON;
+}
+
/* struct pbe is used for creating lists of pages that should be restored
* atomically during the resume from disk, because the page frames they have
* occupied before the suspend are in use.
@@ -413,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
int arch_resume_nosmt(void);
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
@@ -441,6 +452,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);
extern void pm_report_hw_sleep_time(u64 t);
extern void pm_report_max_hw_sleep(u64 t);
+void pm_restrict_gfp_mask(void);
+void pm_restore_gfp_mask(void);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -470,6 +483,9 @@ extern void pm_print_active_wakeup_sources(void);
extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
+extern bool pm_sleep_transition_in_progress(void);
+bool pm_hibernate_is_recovering(void);
+
#else /* !CONFIG_PM_SLEEP */
static inline int register_pm_notifier(struct notifier_block *nb)
@@ -485,6 +501,9 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline void pm_report_hw_sleep_time(u64 t) {};
static inline void pm_report_max_hw_sleep(u64 t) {};
+static inline void pm_restrict_gfp_mask(void) {}
+static inline void pm_restore_gfp_mask(void) {}
+
static inline void ksys_sync_helper(void) {}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
@@ -498,6 +517,9 @@ static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}
+static inline bool pm_sleep_transition_in_progress(void) { return false; }
+static inline bool pm_hibernate_is_recovering(void) { return false; }
+
#endif /* !CONFIG_PM_SLEEP */
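The declarations above are consumed through the PM notifier chain. A minimal sketch, assuming the standard PM_SUSPEND_PREPARE/PM_POST_SUSPEND events from this header (the demo_* names are invented):

static int demo_pm_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		/* quiesce work that must not run while suspending */
		break;
	case PM_POST_SUSPEND:
		/* resume normal operation */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_pm_nb = { .notifier_call = demo_pm_notify };
/* paired with register_pm_notifier(&demo_pm_nb) at init time */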
#ifdef CONFIG_PM_SLEEP_DEBUG
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bd450023b9a4..38ca3df68716 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -24,7 +24,6 @@ struct pagevec;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
-#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
@@ -74,14 +73,13 @@ static inline int current_is_kswapd(void)
* to a special SWP_DEVICE_{READ|WRITE} entry.
*
* When a page is mapped by the device for exclusive access we set the CPU page
- * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
+ * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 4
+#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
-#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
-#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
+#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif
@@ -219,10 +217,10 @@ enum {
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
/* add others here before... */
- SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
+#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
@@ -238,35 +236,12 @@ enum {
#define SWAP_CONT_MAX 0x7f /* Max count */
/*
- * We use this to track usage of a cluster. A cluster is a block of swap disk
- * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All
- * free clusters are organized into a list. We fetch an entry from the list to
- * get a free cluster.
- *
- * The data field stores next cluster if the cluster is free or cluster usage
- * counter otherwise. The flags field determines if a cluster is free. This is
- * protected by swap_info_struct.lock.
- */
-struct swap_cluster_info {
- spinlock_t lock; /*
- * Protect swap_cluster_info fields
- * and swap_info_struct->swap_map
- * elements correspond to the swap
- * cluster
- */
- unsigned int data:24;
- unsigned int flags:8;
-};
-#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
-
-/*
* The first page in the swap file is the swap header, which is always marked
* bad to prevent it from being allocated as an entry. This also prevents the
* cluster to which it belongs being marked free. Therefore 0 is safe to use as
- * a sentinel to indicate next is not valid in percpu_cluster.
+ * a sentinel to indicate an entry is not valid.
*/
-#define SWAP_NEXT_INVALID 0
+#define SWAP_ENTRY_INVALID 0
#ifdef CONFIG_THP_SWAP
#define SWAP_NR_ORDERS (PMD_ORDER + 1)
@@ -275,19 +250,13 @@ struct swap_cluster_info {
#endif
/*
- * We assign a cluster to each CPU, so each CPU can allocate swap entry from
- * its own cluster and swapout sequentially. The purpose is to optimize swapout
- * throughput.
+ * We keep using the same cluster for rotational devices so IO will be
+ * sequential. The purpose is to optimize swap throughput on these devices.
*/
-struct percpu_cluster {
+struct swap_sequential_cluster {
unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
-struct swap_cluster_list {
- struct swap_cluster_info head;
- struct swap_cluster_info tail;
-};
-
/*
* The in-memory structure used to track swap areas.
*/
@@ -299,27 +268,26 @@ struct swap_info_struct {
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_list free_clusters; /* free clusters list */
- unsigned int lowest_bit; /* index of first free in swap_map */
- unsigned int highest_bit; /* index of last free in swap_map */
+ struct list_head free_clusters; /* free clusters list */
+ struct list_head full_clusters; /* full clusters list */
+ struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that contain at least one free slot */
+ struct list_head frag_clusters[SWAP_NR_ORDERS];
+ /* list of clusters that are fragmented or contended */
unsigned int pages; /* total of usable pages of swap */
- unsigned int inuse_pages; /* number of those currently in use */
- unsigned int cluster_next; /* likely index for next allocation */
- unsigned int cluster_nr; /* countdown to next cluster search */
- unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
- struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
+ atomic_long_t inuse_pages; /* number of those currently in use */
+ struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
+ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
- * swap_map, lowest_bit, highest_bit,
- * inuse_pages, cluster_next,
- * cluster_nr, lowest_alloc,
- * highest_alloc, free/discard cluster
- * list. other fields are only changed
+ * swap_map, inuse_pages and all cluster
+ * lists. other fields are only changed
* at swapon/swapoff, so are protected
* by swap_lock. changing flags need
* hold this lock and swap_lock. If
@@ -331,17 +299,9 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_list discard_clusters; /* discard clusters list */
- struct plist_node avail_lists[]; /*
- * entries in swap_avail_heads, one
- * entry per node.
- * Must be last as the number of the
- * array is nr_node_ids, which is not
- * a fixed value so have to allocate
- * dynamically.
- * And it has to be an array so that
- * plist_for_each_* can work.
- */
+ struct work_struct reclaim_work; /* reclaim worker */
+ struct list_head discard_clusters; /* discard clusters list */
+ struct plist_node avail_list; /* entry in swap_avail_head */
};
static inline swp_entry_t page_swap_entry(struct page *page)
@@ -354,7 +314,8 @@ static inline swp_entry_t page_swap_entry(struct page *page)
}
/* linux/mm/workingset.c */
-bool workingset_test_recent(void *shadow, bool file, bool *workingset);
+bool workingset_test_recent(void *shadow, bool file, bool *workingset,
+ bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
@@ -368,14 +329,25 @@ extern unsigned long totalreserve_pages;
/* linux/mm/swap.c */
-void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_io, unsigned int nr_rotated);
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+ unsigned int nr_io, unsigned int nr_rotated)
+ __releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+ /*
+ * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+ * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+ * will be sensible, but nobody has implemented and tested that yet.
+ */
+ return !folio_test_large(folio);
+}
+
extern atomic_t lru_disable_count;
static inline bool lru_cache_disabled(void)
@@ -404,10 +376,17 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
+#define MIN_SWAPPINESS 0
+#define MAX_SWAPPINESS 200
+
+/* Just reclaim from anon folios in proactive memory reclaim */
+#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)
+
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- unsigned int reclaim_options);
+ unsigned int reclaim_options,
+ int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
@@ -416,19 +395,26 @@ extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
-#ifdef CONFIG_NUMA
-extern int node_reclaim_mode;
-extern int sysctl_min_unmapped_ratio;
-extern int sysctl_min_slab_ratio;
+#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int reclaim_register_node(struct node *node);
+extern void reclaim_unregister_node(struct node *node);
+
#else
-#define node_reclaim_mode 0
-#endif
-static inline bool node_reclaim_enabled(void)
+static inline int reclaim_register_node(struct node *node)
+{
+ return 0;
+}
+
+static inline void reclaim_unregister_node(struct node *node)
{
- /* Is any node_reclaim_mode bit set? */
- return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}
+#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#ifdef CONFIG_NUMA
+extern int sysctl_min_unmapped_ratio;
+extern int sysctl_min_slab_ratio;
+#endif
void check_move_unevictable_folios(struct folio_batch *fbatch);
@@ -448,13 +434,12 @@ static inline unsigned long total_swapcache_pages(void)
}
void free_swap_cache(struct folio *folio);
-void free_page_and_swap_cache(struct page *);
+void free_folio_and_swap_cache(struct folio *folio);
void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
-extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
@@ -468,29 +453,24 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-swp_entry_t folio_alloc_swap(struct folio *folio);
+int folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
-extern void swap_free(swp_entry_t);
-extern void swapcache_free_entries(swp_entry_t *entries, int n);
+extern int swapcache_prepare(swp_entry_t entry, int nr);
+extern void swap_free_nr(swp_entry_t entry, int nr_pages);
extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
-extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
+extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
-extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
-extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
@@ -500,11 +480,6 @@ static inline void put_swap_device(struct swap_info_struct *si)
}
#else /* CONFIG_SWAP */
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
-{
- return NULL;
-}
-
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
@@ -521,10 +496,8 @@ static inline void put_swap_device(struct swap_info_struct *si)
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
-/* only sparc can not include linux/pagemap.h in this file
- * so leave put_page and release_pages undeclared... */
-#define free_page_and_swap_cache(page) \
- put_page(page)
+#define free_folio_and_swap_cache(folio) \
+ folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
@@ -541,7 +514,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
@@ -550,12 +523,12 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline int swapcache_prepare(swp_entry_t swp)
+static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
-static inline void swap_free(swp_entry_t swp)
+static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
}
@@ -568,9 +541,9 @@ static inline int __swap_count(swp_entry_t entry)
return 0;
}
-static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
+static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
- return 0;
+ return false;
}
static inline int swp_swapcount(swp_entry_t entry)
@@ -578,11 +551,9 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline swp_entry_t folio_alloc_swap(struct folio *folio)
+static inline int folio_alloc_swap(struct folio *folio)
{
- swp_entry_t entry;
- entry.val = 0;
- return entry;
+ return -EINVAL;
}
static inline bool folio_free_swap(struct folio *folio)
@@ -603,6 +574,11 @@ static inline void free_swap_and_cache(swp_entry_t entry)
free_swap_and_cache_nr(entry, 1);
}
+static inline void swap_free(swp_entry_t entry)
+{
+ swap_free_nr(entry, 1);
+}
+
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
@@ -638,7 +614,6 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
#endif
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
@@ -659,10 +634,6 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
-static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
-{
-}
-
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
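folio_alloc_swap() now reports failure as an errno instead of a zero swp_entry_t, which lets the !CONFIG_SWAP stub return -EINVAL uniformly. A hedged sketch of the resulting caller pattern (illustrative; a real writeback path does considerably more):

static int demo_swap_out_prepare(struct folio *folio)
{
	int err = folio_alloc_swap(folio);

	if (err)	/* e.g. -ENOMEM, or -EINVAL when swap is compiled out */
		return err;
	/* the allocated entry is recorded in the folio; start writeback */
	return 0;
}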
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index ae73a87775b3..91cdf12190a0 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -6,10 +6,8 @@
#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
- unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents);
+extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent);
+extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
@@ -17,8 +15,12 @@ extern void swap_cgroup_swapoff(int type);
#else
static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents)
+void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent)
+{
+}
+
+static inline
+unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
{
return 0;
}
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
deleted file mode 100644
index 15adfb8c813a..000000000000
--- a/include/linux/swap_slots.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SWAP_SLOTS_H
-#define _LINUX_SWAP_SLOTS_H
-
-#include <linux/swap.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-
-#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH
-#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE)
-#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE)
-
-struct swap_slots_cache {
- bool lock_initialized;
- struct mutex alloc_lock; /* protects slots, nr, cur */
- swp_entry_t *slots;
- int nr;
- int cur;
- spinlock_t free_lock; /* protects slots_ret, n_ret */
- swp_entry_t *slots_ret;
- int n_ret;
-};
-
-void disable_swap_slots_cache_lock(void);
-void reenable_swap_slots_cache_unlock(void);
-void enable_swap_slots_cache(void);
-void free_swap_slot(swp_entry_t entry);
-
-extern bool swap_slot_cache_enabled;
-
-#endif /* _LINUX_SWAP_SLOTS_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index a5c560a2f8c2..8cfc966eae48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -28,7 +28,7 @@
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
- * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * Definitions only for PFN swap entries (see leafent_has_pfn()). To
* store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
* can use the extra bits to store other information besides PFN.
*/
@@ -66,8 +66,6 @@
#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
-static inline bool is_pfn_swap_entry(swp_entry_t entry);
-
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
@@ -110,36 +108,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
}
/*
- * This should only be called upon a pfn swap entry to get the PFN stored
- * in the swap entry. Please refers to is_pfn_swap_entry() for definition
- * of pfn swap entry.
- */
-static inline unsigned long swp_offset_pfn(swp_entry_t entry)
-{
- VM_BUG_ON(!is_pfn_swap_entry(entry));
- return swp_offset(entry) & SWP_PFN_MASK;
-}
-
-/* check whether a pte points to a swap entry */
-static inline int is_swap_pte(pte_t pte)
-{
- return !pte_none(pte) && !pte_present(pte);
-}
-
-/*
- * Convert the arch-dependent pte representation of a swp_entry_t into an
- * arch-independent swp_entry_t.
- */
-static inline swp_entry_t pte_to_swp_entry(pte_t pte)
-{
- swp_entry_t arch_entry;
-
- pte = pte_swp_clear_flags(pte);
- arch_entry = __pte_to_swp_entry(pte);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
-/*
* Convert the arch-independent representation of a swp_entry_t into the
* arch-dependent pte representation.
*/
@@ -175,37 +143,11 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
return swp_entry(SWP_DEVICE_WRITE, offset);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
- int type = swp_type(entry);
- return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
+ return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
-}
-
-static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
-{
- return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
-}
-
-static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
-{
- return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
-}
-
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
- swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
-}
-
-static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
-}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
@@ -217,59 +159,14 @@ static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
return swp_entry(0, 0);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
-{
- return false;
-}
-
-static inline bool is_writable_device_private_entry(swp_entry_t entry)
-{
- return false;
-}
-
-static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
return swp_entry(0, 0);
}
-static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
-{
- return swp_entry(0, 0);
-}
-
-static inline bool is_device_exclusive_entry(swp_entry_t entry)
-{
- return false;
-}
-
-static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
-{
- return false;
-}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
-static inline int is_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
- swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
- swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
-}
-
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
-}
-
-static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
-{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
-}
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -308,14 +205,6 @@ static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
return entry;
}
-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
- if (migration_entry_supports_ad())
- return swp_offset(entry) & SWP_MIG_YOUNG;
- /* Keep the old behavior of aging page after migration */
- return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
if (migration_entry_supports_ad())
@@ -324,17 +213,9 @@ static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
return entry;
}
-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
- if (migration_entry_supports_ad())
- return swp_offset(entry) & SWP_MIG_DIRTY;
- /* Keep the old behavior of clean page after migration */
- return false;
-}
-
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -351,43 +232,21 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
return swp_entry(0, 0);
}
-static inline int is_migration_entry(swp_entry_t swp)
-{
- return 0;
-}
-
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- pte_t *pte) { }
-static inline int is_writable_migration_entry(swp_entry_t entry)
-{
- return 0;
-}
-static inline int is_readable_migration_entry(swp_entry_t entry)
-{
- return 0;
-}
+ unsigned long addr, pte_t *pte) { }
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
return entry;
}
-static inline bool is_migration_entry_young(swp_entry_t entry)
-{
- return false;
-}
-
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
return entry;
}
-static inline bool is_migration_entry_dirty(swp_entry_t entry)
-{
- return false;
-}
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
@@ -426,30 +285,25 @@ typedef unsigned long pte_marker;
* "Poisoned" here is meant in the very general sense of "future accesses are
* invalid", instead of referring very specifically to hardware memory errors.
* This marker is meant to represent any of various different causes of this.
+ *
+ * Note that, when encountered by the faulting logic, PTEs with this marker
+ * will result in VM_FAULT_HWPOISON and thus trigger the hardware memory
+ * error handling logic regardless of the underlying cause.
*/
#define PTE_MARKER_POISONED BIT(1)
-#define PTE_MARKER_MASK (BIT(2) - 1)
+/*
+ * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
+ * sent. This means guard markers behave in effect as if the region were
+ * mapped PROT_NONE, rather than as a memory hole or equivalent.
+ */
+#define PTE_MARKER_GUARD BIT(2)
+#define PTE_MARKER_MASK (BIT(3) - 1)
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
return swp_entry(SWP_PTE_MARKER, marker);
}
-static inline bool is_pte_marker_entry(swp_entry_t entry)
-{
- return swp_type(entry) == SWP_PTE_MARKER;
-}
-
-static inline pte_marker pte_marker_get(swp_entry_t entry)
-{
- return swp_offset(entry) & PTE_MARKER_MASK;
-}
-
-static inline bool is_pte_marker(pte_t pte)
-{
- return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
-}
-
static inline pte_t make_pte_marker(pte_marker marker)
{
return swp_entry_to_pte(make_pte_marker_entry(marker));
@@ -460,69 +314,9 @@ static inline swp_entry_t make_poisoned_swp_entry(void)
return make_pte_marker_entry(PTE_MARKER_POISONED);
}
-static inline int is_poisoned_swp_entry(swp_entry_t entry)
+static inline swp_entry_t make_guard_swp_entry(void)
{
- return is_pte_marker_entry(entry) &&
- (pte_marker_get(entry) & PTE_MARKER_POISONED);
-}
-
-/*
- * This is a special version to check pte_none() just to cover the case when
- * the pte is a pte marker. It existed because in many cases the pte marker
- * should be seen as a none pte; it's just that we have stored some information
- * onto the none pte so it becomes not-none any more.
- *
- * It should be used when the pte is file-backed, ram-based and backing
- * userspace pages, like shmem. It is not needed upon pgtables that do not
- * support pte markers at all. For example, it's not needed on anonymous
- * memory, kernel-only memory (including when the system is during-boot),
- * non-ram based generic file-system. It's fine to be used even there, but the
- * extra pte marker check will be pure overhead.
- */
-static inline int pte_none_mostly(pte_t pte)
-{
- return pte_none(pte) || is_pte_marker(pte);
-}
-
-static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
-{
- struct page *p = pfn_to_page(swp_offset_pfn(entry));
-
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(is_migration_entry(entry) && !PageLocked(p));
-
- return p;
-}
-
-static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
-{
- struct folio *folio = pfn_folio(swp_offset_pfn(entry));
-
- /*
- * Any use of migration entries may only occur while the
- * corresponding folio is locked
- */
- BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
-
- return folio;
-}
-
-/*
- * A pfn swap entry is a special type of swap entry that always has a pfn stored
- * in the swap offset. They can either be used to represent unaddressable device
- * memory, to restrict access to a page undergoing migration or to represent a
- * pfn which has been hwpoisoned and unmapped.
- */
-static inline bool is_pfn_swap_entry(swp_entry_t entry)
-{
- /* Make sure the swp offset can always store the needed fields */
- BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
-
- return is_migration_entry(entry) || is_device_private_entry(entry) ||
- is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
+ return make_pte_marker_entry(PTE_MARKER_GUARD);
}
struct page_vma_mapped_walk;
@@ -536,18 +330,6 @@ extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- swp_entry_t arch_entry;
-
- if (pmd_swp_soft_dirty(pmd))
- pmd = pmd_swp_clear_soft_dirty(pmd);
- if (pmd_swp_uffd_wp(pmd))
- pmd = pmd_swp_clear_uffd_wp(pmd);
- arch_entry = __pmd_to_swp_entry(pmd);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
swp_entry_t arch_entry;
@@ -556,10 +338,6 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
return __swp_entry_to_pmd(arch_entry);
}
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
-}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
@@ -575,26 +353,12 @@ static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- return swp_entry(0, 0);
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
return __pmd(0);
}
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- return 0;
-}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int non_swap_entry(swp_entry_t entry)
-{
- return swp_type(entry) >= MAX_SWAPFILES;
-}
-
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
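PTE_MARKER_GUARD reuses the pte-marker machinery: a marker entry is wrapped into a pte and installed like any other swap pte. A minimal sketch, omitting the page-table walk and locking a real caller needs (the function name is invented):

static void demo_install_guard(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
	pte_t marker = make_pte_marker(PTE_MARKER_GUARD);

	/* faults on this address now raise SIGSEGV instead of mapping memory */
	set_pte_at(mm, addr, ptep, marker);
}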
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 14bc10c1bb23..3dae0f592063 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -42,24 +42,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
- size_t mapping_size,
- unsigned int alloc_aligned_mask, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void swiotlb_tbl_unmap_single(struct device *hwdev,
- phys_addr_t tlb_addr,
- size_t mapping_size,
- enum dma_data_direction dir,
- unsigned long attrs);
-
-void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir);
-void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
- size_t size, enum dma_data_direction dir);
-dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-
#ifdef CONFIG_SWIOTLB
/**
@@ -143,37 +125,27 @@ struct io_tlb_mem {
#endif
};
-#ifdef CONFIG_SWIOTLB_DYNAMIC
-
-struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
-
-#else
-
-static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
- phys_addr_t paddr)
-{
- return &dev->dma_io_tlb_mem->defpool;
-}
-
-#endif
+struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
/**
- * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs
* @dev: Device which has mapped the buffer.
* @paddr: Physical address within the DMA buffer.
*
- * Check if @paddr points into a bounce buffer.
+ * Find the swiotlb pool that @paddr points into.
*
* Return:
- * * %true if @paddr points into a bounce buffer
- * * %false otherwise
+ * * pool address if @paddr points into a bounce buffer
+ * * NULL if @paddr does not point into a bounce buffer. As such, this function
+ * can be used to determine if @paddr denotes a swiotlb bounce buffer.
*/
-static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
if (!mem)
- return false;
+ return NULL;
#ifdef CONFIG_SWIOTLB_DYNAMIC
/*
@@ -182,16 +154,19 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
* If a SWIOTLB address is checked on another CPU, then it was
* presumably loaded by the device driver from an unspecified private
* data structure. Make sure that this load is ordered before reading
- * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool().
*
* This barrier pairs with smp_mb() in swiotlb_find_slots().
*/
smp_rmb();
- return READ_ONCE(dev->dma_uses_io_tlb) &&
- swiotlb_find_pool(dev, paddr);
+ if (READ_ONCE(dev->dma_uses_io_tlb))
+ return __swiotlb_find_pool(dev, paddr);
#else
- return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+ if (paddr >= mem->defpool.start && paddr < mem->defpool.end)
+ return &mem->defpool;
#endif
+
+ return NULL;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
@@ -219,9 +194,10 @@ static inline void swiotlb_dev_init(struct device *dev)
{
}
-static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
{
- return false;
+ return NULL;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
@@ -260,6 +236,49 @@ static inline phys_addr_t default_swiotlb_limit(void)
}
#endif /* CONFIG_SWIOTLB */
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+ size_t mapping_size, unsigned int alloc_aligned_mask,
+ enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
+void __swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+ size_t mapping_size, enum dma_data_direction dir,
+ unsigned long attrs, struct io_tlb_pool *pool);
+static inline void swiotlb_tbl_unmap_single(struct device *dev,
+ phys_addr_t addr, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+ if (unlikely(pool))
+ __swiotlb_tbl_unmap_single(dev, addr, size, dir, attrs, pool);
+}
+
+void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ struct io_tlb_pool *pool);
+static inline void swiotlb_sync_single_for_device(struct device *dev,
+ phys_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+ if (unlikely(pool))
+ __swiotlb_sync_single_for_device(dev, addr, size, dir, pool);
+}
+
+void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ struct io_tlb_pool *pool);
+static inline void swiotlb_sync_single_for_cpu(struct device *dev,
+ phys_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr);
+
+ if (unlikely(pool))
+ __swiotlb_sync_single_for_cpu(dev, addr, size, dir, pool);
+}
+
extern void swiotlb_print_info(void);
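The refactor above turns the former is_swiotlb_buffer() check into a pool lookup that is done once and reused, with the __-prefixed workers taking the pool explicitly. A hedged sketch of the intended calling pattern (illustrative only):

static void demo_bounce_teardown(struct device *dev, phys_addr_t tlb_addr,
				 size_t size, enum dma_data_direction dir)
{
	struct io_tlb_pool *pool = swiotlb_find_pool(dev, tlb_addr);

	if (!pool)	/* not a bounce buffer, nothing to tear down */
		return;
	__swiotlb_sync_single_for_cpu(dev, tlb_addr, size, dir, pool);
	__swiotlb_tbl_unmap_single(dev, tlb_addr, size, dir, 0, pool);
}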
#ifdef CONFIG_DMA_RESTRICTED_POOL
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 8d8fac1626bd..cdb58d61c152 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -521,6 +521,6 @@ static inline struct switchtec_dev *to_stdev(struct device *dev)
return container_of(dev, struct switchtec_dev, dev);
}
-extern struct class *switchtec_class;
+extern const struct class switchtec_class;
#endif
diff --git a/include/linux/sys_info.h b/include/linux/sys_info.h
new file mode 100644
index 000000000000..a5bc3ea3d44b
--- /dev/null
+++ b/include/linux/sys_info.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SYS_INFO_H
+#define _LINUX_SYS_INFO_H
+
+#include <linux/sysctl.h>
+
+/*
+ * SYS_INFO_PANIC_CONSOLE_REPLAY is for the panic case only, as it needs
+ * special handling that only fits the panic path.
+ */
+#define SYS_INFO_TASKS 0x00000001
+#define SYS_INFO_MEM 0x00000002
+#define SYS_INFO_TIMERS 0x00000004
+#define SYS_INFO_LOCKS 0x00000008
+#define SYS_INFO_FTRACE 0x00000010
+#define SYS_INFO_PANIC_CONSOLE_REPLAY 0x00000020
+#define SYS_INFO_ALL_BT 0x00000040
+#define SYS_INFO_BLOCKED_TASKS 0x00000080
+
+void sys_info(unsigned long si_mask);
+unsigned long sys_info_parse_param(char *str);
+
+#ifdef CONFIG_SYSCTL
+int sysctl_sys_info_handler(const struct ctl_table *ro_table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+#endif /* _LINUX_SYS_INFO_H */
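
A hedged usage sketch for the new interface; the flag combination and the
"tasks,mem" parameter syntax are illustrative assumptions, only sys_info()
and sys_info_parse_param() come from this header:

	static void example_dump(char *param)
	{
		/* Dump task and memory state, e.g. from a watchdog path. */
		sys_info(SYS_INFO_TASKS | SYS_INFO_MEM);

		/* Or build the mask from a parameter string such as
		 * "tasks,mem" (token syntax assumed, not documented here). */
		sys_info(sys_info_parse_param(param));
	}
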
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 9104952d323d..cf84d98964b2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -77,6 +77,9 @@ struct cachestat_range;
struct cachestat;
struct statmount;
struct mnt_id_req;
+struct ns_id_req;
+struct xattr_args;
+struct file_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -322,13 +325,13 @@ asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long nr,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
struct old_timespec32 __user *timeout,
- const struct __aio_sigset *sig);
+ const struct __aio_sigset __user *sig);
asmlinkage long sys_io_uring_setup(u32 entries,
struct io_uring_params __user *p);
asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
@@ -338,26 +341,44 @@ asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
+asmlinkage long sys_setxattrat(int dfd, const char __user *path, unsigned int at_flags,
+ const char __user *name,
+ const struct xattr_args __user *args, size_t size);
asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_fsetxattr(int fd, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
void __user *value, size_t size);
+asmlinkage long sys_getxattrat(int dfd, const char __user *path, unsigned int at_flags,
+ const char __user *name,
+ struct xattr_args __user *args, size_t size);
asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
void __user *value, size_t size);
asmlinkage long sys_fgetxattr(int fd, const char __user *name,
void __user *value, size_t size);
asmlinkage long sys_listxattr(const char __user *path, char __user *list,
size_t size);
+asmlinkage long sys_listxattrat(int dfd, const char __user *path,
+ unsigned int at_flags,
+ char __user *list, size_t size);
asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
size_t size);
asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
asmlinkage long sys_removexattr(const char __user *path,
const char __user *name);
+asmlinkage long sys_removexattrat(int dfd, const char __user *path,
+ unsigned int at_flags,
+ const char __user *name);
asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
+asmlinkage long sys_file_getattr(int dfd, const char __user *filename,
+ struct file_attr __user *attr, size_t usize,
+ unsigned int at_flags);
+asmlinkage long sys_file_setattr(int dfd, const char __user *filename,
+ struct file_attr __user *attr, size_t usize,
+ unsigned int at_flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_eventfd2(unsigned int count, int flags);
asmlinkage long sys_epoll_create1(int flags);
@@ -417,8 +438,11 @@ asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
u64 __user *mnt_ids, size_t nr_mnt_ids,
unsigned int flags);
+asmlinkage long sys_listns(const struct ns_id_req __user *req,
+ u64 __user *ns_ids, size_t nr_ns_ids,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
-asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+asmlinkage long sys_ftruncate(unsigned int fd, off_t length);
#if BITS_PER_LONG == 32
asmlinkage long sys_truncate64(const char __user *path, loff_t length);
asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
@@ -441,7 +465,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
asmlinkage long sys_openat2(int dfd, const char __user *filename,
- struct open_how *how, size_t size);
+ struct open_how __user *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
@@ -555,7 +579,7 @@ asmlinkage long sys_get_robust_list(int pid,
asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
-asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
+asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
@@ -859,12 +883,18 @@ asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
const struct rlimit64 __user *new_rlim,
struct rlimit64 __user *old_rlim);
asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
+#if defined(CONFIG_ARCH_SPLIT_ARG64)
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ unsigned int mask_1, unsigned int mask_2,
+ int dfd, const char __user * pathname);
+#else
asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
u64 mask, int fd,
const char __user *pathname);
+#endif
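
On CONFIG_ARCH_SPLIT_ARG64 architectures the 64-bit mask arrives as two
32-bit register halves, so the syscall stub has to stitch them back
together. A sketch, with the high/low ordering an assumption that in
practice depends on the architecture's calling convention:

	static inline u64 example_merge_mask(unsigned int mask_1,
					     unsigned int mask_2)
	{
		/* assumed: mask_1 = high half, mask_2 = low half */
		return ((u64)mask_1 << 32) | mask_2;
	}
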
asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
struct file_handle __user *handle,
- int __user *mnt_id, int flag);
+ void __user *mnt_id, int flag);
asmlinkage long sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
@@ -907,7 +937,7 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
asmlinkage long sys_getrandom(char __user *buf, size_t count,
unsigned int flags);
asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
-asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+asmlinkage long sys_bpf(int cmd, union bpf_attr __user *attr, unsigned int size);
asmlinkage long sys_execveat(int dfd, const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp, int flags);
@@ -932,6 +962,10 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
+asmlinkage long sys_open_tree_attr(int dfd, const char __user *path,
+ unsigned flags,
+ struct mount_attr __user *uattr,
+ size_t usize);
asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
int to_dfd, const char __user *to_path,
unsigned int ms_flags);
@@ -960,11 +994,11 @@ asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
-asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
- u32 *size, u32 flags);
-asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx __user *ctx,
u32 size, u32 flags);
-asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 __user *ids, u32 __user *size, u32 flags);
/*
* Architecture-specific system calls
@@ -973,6 +1007,10 @@ asmlinkage long sys_lsm_list_modules(u64 *ids, u32 *size, u32 flags);
/* x86 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
+asmlinkage long sys_uretprobe(void);
+
+asmlinkage long sys_uprobe(void);
+
/* pciconfig: alpha, arm, arm64, ia64, sparc */
asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
unsigned long off, unsigned long len,
@@ -1245,14 +1283,14 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
AT_SYMLINK_NOFOLLOW);
}
-extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
+int do_sys_ftruncate(unsigned int fd, loff_t length, int small);
static inline long ksys_ftruncate(unsigned int fd, loff_t length)
{
return do_sys_ftruncate(fd, length, 1);
}
-extern long do_sys_truncate(const char __user *pathname, loff_t length);
+int do_sys_truncate(const char __user *pathname, loff_t length);
static inline long ksys_truncate(const char __user *pathname, loff_t length)
{
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
index ae4d48e4c970..ac6d71be5c38 100644
--- a/include/linux/syscore_ops.h
+++ b/include/linux/syscore_ops.h
@@ -11,14 +11,19 @@
#include <linux/list.h>
struct syscore_ops {
+ int (*suspend)(void *data);
+ void (*resume)(void *data);
+ void (*shutdown)(void *data);
+};
+
+struct syscore {
struct list_head node;
- int (*suspend)(void);
- void (*resume)(void);
- void (*shutdown)(void);
+ const struct syscore_ops *ops;
+ void *data;
};
-extern void register_syscore_ops(struct syscore_ops *ops);
-extern void unregister_syscore_ops(struct syscore_ops *ops);
+extern void register_syscore(struct syscore *syscore);
+extern void unregister_syscore(struct syscore *syscore);
#ifdef CONFIG_PM_SLEEP
extern int syscore_suspend(void);
extern void syscore_resume(void);
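
With the vtable split out, one const struct syscore_ops can back several
registered instances, each carrying its own data cookie. A registration
sketch (all my_* names are illustrative):

	static int my_suspend(void *data)
	{
		struct my_dev *d = data;	/* per-instance state */
		/* quiesce *d; return 0 on success, -errno to abort suspend */
		return 0;
	}

	static const struct syscore_ops my_syscore_ops = {
		.suspend = my_suspend,
	};

	static struct syscore my_syscore = {
		.ops = &my_syscore_ops,
	};

	static int __init my_init(void)
	{
		my_syscore.data = &my_dev_instance;	/* assumed object */
		register_syscore(&my_syscore);
		return 0;
	}
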
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 09db2f2e6488..288fe0055cd5 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -59,40 +59,158 @@ extern const int sysctl_vals[];
#define SYSCTL_LONG_ONE ((void *)&sysctl_long_vals[1])
#define SYSCTL_LONG_MAX ((void *)&sysctl_long_vals[2])
+#define SYSCTL_CONV_IDENTITY(val) (val)
+/**
+ * "dir" originates from read_iter (dir = 0) or write_iter (dir = 1)
+ * in the file_operations struct in fs/proc/proc_sysctl.c. Its value means
+ * one of two things for sysctl:
+ * 1. SYSCTL_USER_TO_KERN(dir) Writing to an internal kernel variable from user
+ * space (dir > 0)
+ * 2. SYSCTL_KERN_TO_USER(dir) Writing to a user space buffer from a kernel
+ * variable (dir == 0).
+ */
+#define SYSCTL_USER_TO_KERN(dir) (!!(dir))
+#define SYSCTL_KERN_TO_USER(dir) (!dir)
+
+#define SYSCTL_USER_TO_KERN_INT_CONV(name, u_ptr_op) \
+int sysctl_user_to_kern_int_conv##name(const bool *negp, \
+ const unsigned long *u_ptr,\
+ int *k_ptr) \
+{ \
+ unsigned long u = u_ptr_op(*u_ptr); \
+ if (*negp) { \
+ if (u > (unsigned long) INT_MAX + 1) \
+ return -EINVAL; \
+ WRITE_ONCE(*k_ptr, -u); \
+ } else { \
+ if (u > (unsigned long) INT_MAX) \
+ return -EINVAL; \
+ WRITE_ONCE(*k_ptr, u); \
+ } \
+ return 0; \
+}
+
+#define SYSCTL_KERN_TO_USER_INT_CONV(name, k_ptr_op) \
+int sysctl_kern_to_user_int_conv##name(bool *negp, \
+ unsigned long *u_ptr, \
+ const int *k_ptr) \
+{ \
+ int val = READ_ONCE(*k_ptr); \
+ if (val < 0) { \
+ *negp = true; \
+ *u_ptr = -k_ptr_op((unsigned long)val); \
+ } else { \
+ *negp = false; \
+ *u_ptr = k_ptr_op((unsigned long)val); \
+ } \
+ return 0; \
+}
+
+/*
+ * To range-check a converted value, use a temporary k_ptr.
+ * When checking the range, the value must lie within [tbl->extra1, tbl->extra2]
+ * (inclusive).
+ */
+#define SYSCTL_INT_CONV_CUSTOM(name, user_to_kern, kern_to_user, \
+ k_ptr_range_check) \
+int do_proc_int_conv##name(bool *negp, unsigned long *u_ptr, int *k_ptr,\
+ int dir, const struct ctl_table *tbl) \
+{ \
+ if (SYSCTL_KERN_TO_USER(dir)) \
+ return kern_to_user(negp, u_ptr, k_ptr); \
+ \
+ if (k_ptr_range_check) { \
+ int tmp_k, ret; \
+ if (!tbl) \
+ return -EINVAL; \
+ ret = user_to_kern(negp, u_ptr, &tmp_k); \
+ if (ret) \
+ return ret; \
+ if ((tbl->extra1 && *(int *)tbl->extra1 > tmp_k) || \
+ (tbl->extra2 && *(int *)tbl->extra2 < tmp_k)) \
+ return -EINVAL; \
+ WRITE_ONCE(*k_ptr, tmp_k); \
+ } else \
+ return user_to_kern(negp, u_ptr, k_ptr); \
+ return 0; \
+}
+
+#define SYSCTL_USER_TO_KERN_UINT_CONV(name, u_ptr_op) \
+int sysctl_user_to_kern_uint_conv##name(const unsigned long *u_ptr,\
+ unsigned int *k_ptr) \
+{ \
+ unsigned long u = u_ptr_op(*u_ptr); \
+ if (u > UINT_MAX) \
+ return -EINVAL; \
+ WRITE_ONCE(*k_ptr, u); \
+ return 0; \
+}
+
+#define SYSCTL_UINT_CONV_CUSTOM(name, user_to_kern, kern_to_user, \
+ k_ptr_range_check) \
+int do_proc_uint_conv##name(unsigned long *u_ptr, unsigned int *k_ptr, \
+ int dir, const struct ctl_table *tbl) \
+{ \
+ if (SYSCTL_KERN_TO_USER(dir)) \
+ return kern_to_user(u_ptr, k_ptr); \
+ \
+ if (k_ptr_range_check) { \
+ unsigned int tmp_k; \
+ int ret; \
+ if (!tbl) \
+ return -EINVAL; \
+ ret = user_to_kern(u_ptr, &tmp_k); \
+ if (ret) \
+ return ret; \
+ if ((tbl->extra1 && \
+ *(unsigned int *)tbl->extra1 > tmp_k) || \
+ (tbl->extra2 && \
+ *(unsigned int *)tbl->extra2 < tmp_k)) \
+ return -ERANGE; \
+ WRITE_ONCE(*k_ptr, tmp_k); \
+ } else \
+ return user_to_kern(u_ptr, k_ptr); \
+ return 0; \
+}
+
+
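
A sketch of how the generators compose into a conv callback accepted by
proc_dointvec_conv() below; the _ms suffix and the msecs/jiffies pairing
are assumptions chosen for illustration:

	/* Emits sysctl_user_to_kern_int_conv_ms(): user ms -> kernel jiffies */
	SYSCTL_USER_TO_KERN_INT_CONV(_ms, msecs_to_jiffies)
	/* Emits sysctl_kern_to_user_int_conv_ms(): kernel jiffies -> user ms */
	SYSCTL_KERN_TO_USER_INT_CONV(_ms, jiffies_to_msecs)
	/* Emits do_proc_int_conv_ms(), range-checked against extra1/extra2 */
	SYSCTL_INT_CONV_CUSTOM(_ms, sysctl_user_to_kern_int_conv_ms,
			       sysctl_kern_to_user_int_conv_ms, true)

	static int my_ms_handler(const struct ctl_table *table, int dir,
				 void *buffer, size_t *lenp, loff_t *ppos)
	{
		return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
					  do_proc_int_conv_ms);
	}
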
extern const unsigned long sysctl_long_vals[];
-typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
+typedef int proc_handler(const struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dobool(struct ctl_table *table, int write, void *buffer,
+int proc_dostring(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dobool(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
+int proc_dointvec(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_minmax(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec_conv(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos,
+ int (*conv)(bool *negp, unsigned long *u_ptr, int *k_ptr,
+ int dir, const struct ctl_table *table));
+int proc_douintvec(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec_minmax(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer,
+int proc_douintvec_conv(const struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp, unsigned int *valp,
+ int write, const struct ctl_table *table));
+
+int proc_dou8vec_minmax(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
-int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *,
- size_t *, loff_t *);
-int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
+int proc_doulongvec_minmax(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_doulongvec_minmax_conv(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ unsigned long convmul, unsigned long convdiv);
+int proc_do_large_bitmap(const struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_do_static_key(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
+int sysctl_kern_to_user_uint_conv(unsigned long *u_ptr, const unsigned int *k_ptr);
/*
* Register a set of sysctl names by calling register_sysctl
- * with an initialised array of struct ctl_table's. An entry with
- * NULL procname terminates the table. table->de will be
- * set up by the registration and need not be initialised in advance.
+ * with an initialised array of struct ctl_table's.
*
* sysctl names can be mirrored automatically under /proc/sys. The
* procname supplied controls /proc naming.
@@ -133,7 +251,7 @@ static inline void *proc_sys_poll_event(struct ctl_table_poll *poll)
/* A sysctl table is an array of struct ctl_table: */
struct ctl_table {
- const char *procname; /* Text ID for /proc/sys, or zero */
+ const char *procname; /* Text ID for /proc/sys */
void *data;
int maxlen;
umode_t mode;
@@ -158,11 +276,15 @@ struct ctl_node {
* @nreg: When nreg drops to 0 the ctl_table_header will be unregistered.
* @rcu: Delays the freeing of the inode. Introduced with "unfuck proc_sysctl ->d_compare()"
*
+ * @type: Enumeration to differentiate between ctl target types
+ * @type.SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
+ * @type.SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Identifies a permanently empty dir
+ * target to serve as a mount point
*/
struct ctl_table_header {
union {
struct {
- struct ctl_table *ctl_table;
+ const struct ctl_table *ctl_table;
int ctl_table_size;
int used;
int count;
@@ -177,13 +299,6 @@ struct ctl_table_header {
struct ctl_dir *parent;
struct ctl_node *node;
struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
- /**
- * enum type - Enumeration to differentiate between ctl target types
- * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
- * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently
- * empty directory target to serve
- * as mount point.
- */
enum {
SYSCTL_TABLE_TYPE_DEFAULT,
SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY,
@@ -223,13 +338,13 @@ extern void retire_sysctl_set(struct ctl_table_set *set);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table, size_t table_size);
-struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+ const char *path, const struct ctl_table *table, size_t table_size);
+struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table,
size_t table_size);
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init_bases(void);
-extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+extern void __register_sysctl_init(const char *path, const struct ctl_table *table,
const char *table_name, size_t table_size);
#define register_sysctl_init(path, table) \
__register_sysctl_init(path, table, #table, ARRAY_SIZE(table))
@@ -237,21 +352,13 @@ extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
void do_sysctl_args(void);
bool sysctl_is_alias(char *param);
-int do_proc_douintvec(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos,
- int (*conv)(unsigned long *lvalp,
- unsigned int *valp,
- int write, void *data),
- void *data);
-
-extern int pwrsw_enabled;
+
extern int unaligned_enabled;
-extern int unaligned_dump_stack;
extern int no_unaligned_warning;
#else /* CONFIG_SYSCTL */
-static inline void register_sysctl_init(const char *path, struct ctl_table *table)
+static inline void register_sysctl_init(const char *path, const struct ctl_table *table)
{
}
@@ -261,7 +368,7 @@ static inline struct ctl_table_header *register_sysctl_mount_point(const char *p
}
static inline struct ctl_table_header *register_sysctl_sz(const char *path,
- struct ctl_table *table,
+ const struct ctl_table *table,
size_t table_size)
{
return NULL;
@@ -287,7 +394,4 @@ static inline bool sysctl_is_alias(char *param)
}
#endif /* CONFIG_SYSCTL */
-int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index c9cb657dad08..b449665c686a 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -7,9 +7,13 @@
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*/
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
#include <linux/platform_data/simplefb.h>
+struct device;
+struct platform_device;
struct screen_info;
enum {
@@ -58,12 +62,19 @@ struct efifb_dmi_info {
#ifdef CONFIG_SYSFB
-void sysfb_disable(void);
+void sysfb_disable(struct device *dev);
+
+bool sysfb_handles_screen_info(void);
#else /* CONFIG_SYSFB */
-static inline void sysfb_disable(void)
+static inline void sysfb_disable(struct device *dev)
+{
+}
+
+static inline bool sysfb_handles_screen_info(void)
{
+ return false;
}
#endif /* CONFIG_SYSFB */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index a7d725fbf739..c33a96b7391a 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -58,6 +58,12 @@ do { \
#define sysfs_attr_init(attr) do {} while (0)
#endif
+#ifdef CONFIG_CFI
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) struct { MEMBERS }
+#else
+#define __SYSFS_FUNCTION_ALTERNATIVE(MEMBERS...) union { MEMBERS }
+#endif
+
/**
* struct attribute_group - data structure used to declare an attribute group.
* @name: Optional: Attribute group name
@@ -87,18 +93,33 @@ do { \
* SYSFS_GROUP_VISIBLE() when assigning this callback to
* specify separate _group_visible() and _attr_visible()
* handlers.
+ * @bin_size:
+ * Optional: Function to return the size of a binary attribute
+ * of the group. Will be called repeatedly for each binary
+ * attribute in the group. Overwrites the size field embedded
+ * inside the attribute itself.
* @attrs: Pointer to NULL terminated list of attributes.
* @bin_attrs: Pointer to NULL terminated list of binary attributes.
* Either attrs or bin_attrs or both must be provided.
*/
struct attribute_group {
const char *name;
- umode_t (*is_visible)(struct kobject *,
- struct attribute *, int);
+ __SYSFS_FUNCTION_ALTERNATIVE(
+ umode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ umode_t (*is_visible_const)(struct kobject *,
+ const struct attribute *, int);
+ );
umode_t (*is_bin_visible)(struct kobject *,
- struct bin_attribute *, int);
- struct attribute **attrs;
- struct bin_attribute **bin_attrs;
+ const struct bin_attribute *, int);
+ size_t (*bin_size)(struct kobject *,
+ const struct bin_attribute *,
+ int);
+ union {
+ struct attribute **attrs;
+ const struct attribute *const *attrs_const;
+ };
+ const struct bin_attribute *const *bin_attrs;
};
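
A sketch of a group built on the const-correct callbacks and the new
bin_size() hook; the fw_* names and the runtime length helper are
assumptions:

	static size_t fw_bin_size(struct kobject *kobj,
				  const struct bin_attribute *attr, int n)
	{
		/* Returned value overrides attr->size for this attribute. */
		return example_fw_log_len(kobj);	/* assumed helper */
	}

	static const struct bin_attribute *const fw_bin_attrs[] = {
		&bin_attr_fw_log,	/* assumed BIN_ATTR_RO(fw_log, 0) */
		NULL,
	};

	static const struct attribute_group fw_group = {
		.bin_attrs = fw_bin_attrs,
		.bin_size = fw_bin_size,
	};
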
#define SYSFS_PREALLOC 010000
@@ -191,22 +212,22 @@ struct attribute_group {
* attributes, the group visibility is determined by the function
* specified to is_visible() not is_bin_visible()
*/
-#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \
- static inline umode_t sysfs_group_visible_##name( \
- struct kobject *kobj, struct bin_attribute *attr, int n) \
- { \
- if (n == 0 && !name##_group_visible(kobj)) \
- return SYSFS_GROUP_INVISIBLE; \
- return name##_attr_visible(kobj, attr, n); \
+#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, const struct bin_attribute *attr, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return name##_attr_visible(kobj, attr, n); \
}
-#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \
- static inline umode_t sysfs_group_visible_##name( \
- struct kobject *kobj, struct bin_attribute *a, int n) \
- { \
- if (n == 0 && !name##_group_visible(kobj)) \
- return SYSFS_GROUP_INVISIBLE; \
- return a->mode; \
+#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \
+ static inline umode_t sysfs_group_visible_##name( \
+ struct kobject *kobj, const struct bin_attribute *a, int n) \
+ { \
+ if (n == 0 && !name##_group_visible(kobj)) \
+ return SYSFS_GROUP_INVISIBLE; \
+ return a->mode; \
}
#define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn
@@ -230,28 +251,20 @@ struct attribute_group {
.store = _store, \
}
-#define __ATTR_RO(_name) { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = _name##_show, \
-}
-
#define __ATTR_RO_MODE(_name, _mode) { \
.attr = { .name = __stringify(_name), \
.mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
.show = _name##_show, \
}
-#define __ATTR_RW_MODE(_name, _mode) { \
- .attr = { .name = __stringify(_name), \
- .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
- .show = _name##_show, \
- .store = _name##_store, \
-}
+#define __ATTR_RO(_name) \
+ __ATTR_RO_MODE(_name, 0444)
-#define __ATTR_WO(_name) { \
- .attr = { .name = __stringify(_name), .mode = 0200 }, \
- .store = _name##_store, \
-}
+#define __ATTR_RW_MODE(_name, _mode) \
+ __ATTR(_name, _mode, _name##_show, _name##_store)
+
+#define __ATTR_WO(_name) \
+ __ATTR(_name, 0200, NULL, _name##_store)
#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)
@@ -276,7 +289,12 @@ static const struct attribute_group *_name##_groups[] = { \
#define ATTRIBUTE_GROUPS(_name) \
static const struct attribute_group _name##_group = { \
- .attrs = _name##_attrs, \
+ .attrs = _Generic(_name##_attrs, \
+ struct attribute **: \
+ _name##_attrs, \
+ const struct attribute *const *: \
+ (void *)_name##_attrs \
+ ), \
}; \
__ATTRIBUTE_GROUPS(_name)
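
The _Generic() selection lets ATTRIBUTE_GROUPS() take either the legacy
mutable array or a fully-const one without a cast at the call site; a
sketch (dev_attr_foo/dev_attr_bar assumed declared via DEVICE_ATTR_RO):

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr, NULL };		/* legacy form */
	static const struct attribute *const bar_attrs[] = {
		&dev_attr_bar.attr, NULL };		/* const form */

	ATTRIBUTE_GROUPS(foo);
	ATTRIBUTE_GROUPS(bar);
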
@@ -295,13 +313,13 @@ struct bin_attribute {
size_t size;
void *private;
struct address_space *(*f_mapping)(void);
- ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
+ ssize_t (*read)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
- ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
+ ssize_t (*write)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
- loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+ loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *,
loff_t, int);
- int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+ int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr,
struct vm_area_struct *vma);
};
@@ -320,22 +338,16 @@ struct bin_attribute {
/* macros to create static binary attributes easier */
#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \
.attr = { .name = __stringify(_name), .mode = _mode }, \
- .read = _read, \
- .write = _write, \
+ .read = _read, \
+ .write = _write, \
.size = _size, \
}
-#define __BIN_ATTR_RO(_name, _size) { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .read = _name##_read, \
- .size = _size, \
-}
+#define __BIN_ATTR_RO(_name, _size) \
+ __BIN_ATTR(_name, 0444, _name##_read, NULL, _size)
-#define __BIN_ATTR_WO(_name, _size) { \
- .attr = { .name = __stringify(_name), .mode = 0200 }, \
- .write = _name##_write, \
- .size = _size, \
-}
+#define __BIN_ATTR_WO(_name, _size) \
+ __BIN_ATTR(_name, 0200, NULL, _name##_write, _size)
#define __BIN_ATTR_RW(_name, _size) \
__BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size)
@@ -356,11 +368,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size)
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
-#define __BIN_ATTR_ADMIN_RO(_name, _size) { \
- .attr = { .name = __stringify(_name), .mode = 0400 }, \
- .read = _name##_read, \
- .size = _size, \
-}
+#define __BIN_ATTR_ADMIN_RO(_name, _size) \
+ __BIN_ATTR(_name, 0400, _name##_read, NULL, _size)
#define __BIN_ATTR_ADMIN_RW(_name, _size) \
__BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size)
@@ -371,10 +380,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size)
#define BIN_ATTR_ADMIN_RW(_name, _size) \
struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size)
-#define __BIN_ATTR_SIMPLE_RO(_name, _mode) { \
- .attr = { .name = __stringify(_name), .mode = _mode }, \
- .read = sysfs_bin_attr_simple_read, \
-}
+#define __BIN_ATTR_SIMPLE_RO(_name, _mode) \
+ __BIN_ATTR(_name, _mode, sysfs_bin_attr_simple_read, NULL, 0)
#define BIN_ATTR_SIMPLE_RO(_name) \
struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444)
@@ -490,7 +497,7 @@ __printf(3, 4)
int sysfs_emit_at(char *buf, int at, const char *fmt, ...);
ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count);
#else /* CONFIG_SYSFS */
@@ -750,6 +757,15 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...)
{
return 0;
}
+
+static inline ssize_t sysfs_bin_attr_simple_read(struct file *file,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return 0;
+}
#endif /* CONFIG_SYSFS */
static inline int __must_check sysfs_create_file(struct kobject *kobj,
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
deleted file mode 100644
index 5cf77dbb8d86..000000000000
--- a/include/linux/sysv_fs.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SYSV_FS_H
-#define _LINUX_SYSV_FS_H
-
-#define __packed2__ __attribute__((packed, aligned(2)))
-
-
-#ifndef __KERNEL__
-typedef u16 __fs16;
-typedef u32 __fs32;
-#endif
-
-/* inode numbers are 16 bit */
-typedef __fs16 sysv_ino_t;
-
-/* Block numbers are 24 bit, sometimes stored in 32 bit.
- On Coherent FS, they are always stored in PDP-11 manner: the least
- significant 16 bits come last. */
-typedef __fs32 sysv_zone_t;
-
-/* 0 is non-existent */
-#define SYSV_BADBL_INO 1 /* inode of bad blocks file */
-#define SYSV_ROOT_INO 2 /* inode of root directory */
-
-
-/* Xenix super-block data on disk */
-#define XENIX_NICINOD 100 /* number of inode cache entries */
-#define XENIX_NICFREE 100 /* number of free block list chunk entries */
-struct xenix_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= XENIX_NICFREE */
- sysv_zone_t s_free[XENIX_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= XENIX_NICINOD */
- sysv_ino_t s_inode[XENIX_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_dinfo[4]; /* device information ?? */
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- char s_clean; /* set to 0x46 when filesystem is properly unmounted */
- char s_fill[371];
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks
- 3 for 2048 byte blocks */
-
-};
-
-/*
- * SystemV FS comes in two variants:
- * sysv2: System V Release 2 (e.g. Microport), structure elements aligned(2).
- * sysv4: System V Release 4 (e.g. Consensys), structure elements aligned(4).
- */
-#define SYSV_NICINOD 100 /* number of inode cache entries */
-#define SYSV_NICFREE 50 /* number of free block list chunk entries */
-
-/* SystemV4 super-block data on disk */
-struct sysv4_super_block {
- __fs16 s_isize; /* index of first data zone */
- u16 s_pad0;
- __fs32 s_fsize; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
- u16 s_pad1;
- sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
- u16 s_pad2;
- sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time; /* time of last super block update */
- __fs16 s_dinfo[4]; /* device information ?? */
- __fs32 s_tfree; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- u16 s_pad3;
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- s32 s_fill[12];
- __fs32 s_state; /* file system state: 0x7c269d38-s_time means clean */
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks */
-};
-
-/* SystemV2 super-block data on disk */
-struct sysv2_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
- sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
- sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs16 s_dinfo[4]; /* device information ?? */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- s32 s_fill[14];
- __fs32 s_state; /* file system state: 0xcb096f43 means clean */
- s32 s_magic; /* version of file system */
- __fs32 s_type; /* type of file system: 1 for 512 byte blocks
- 2 for 1024 byte blocks */
-};
-
-/* V7 super-block data on disk */
-#define V7_NICINOD 100 /* number of inode cache entries */
-#define V7_NICFREE 50 /* number of free block list chunk entries */
-struct v7_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= V7_NICFREE */
- sysv_zone_t s_free[V7_NICFREE]; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= V7_NICINOD */
- sysv_ino_t s_inode[V7_NICINOD]; /* some free inodes */
- /* locks, not used by Linux or V7: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- /* the following fields are not maintained by V7: */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_m; /* interleave factor */
- __fs16 s_n; /* interleave factor */
- char s_fname[6]; /* file system name */
- char s_fpack[6]; /* file system pack name */
-};
-/* Constants to aid sanity checking */
-/* This is not a hard limit, nor enforced by v7 kernel. It's actually just
- * the limit used by Seventh Edition's ls, though it is high enough to assume
- * that no reasonable file system would have that many entries in its root
- * directory. Thus, if we see anything higher, we probably just got the
- * endianness wrong. */
-#define V7_NFILES 1024
-/* The disk addresses are three-byte (despite direct block addresses being
- * aligned word-wise in inode). If the most significant byte is non-zero,
- * something is most likely wrong (not a filesystem, bad bytesex). */
-#define V7_MAXSIZE 0x00ffffff
-
-/* Coherent super-block data on disk */
-#define COH_NICINOD 100 /* number of inode cache entries */
-#define COH_NICFREE 64 /* number of free block list chunk entries */
-struct coh_super_block {
- __fs16 s_isize; /* index of first data zone */
- __fs32 s_fsize __packed2__; /* total number of zones of this fs */
- /* the start of the free block list: */
- __fs16 s_nfree; /* number of free blocks in s_free, <= COH_NICFREE */
- sysv_zone_t s_free[COH_NICFREE] __packed2__; /* first free block list chunk */
- /* the cache of free inodes: */
- __fs16 s_ninode; /* number of free inodes in s_inode, <= COH_NICINOD */
- sysv_ino_t s_inode[COH_NICINOD]; /* some free inodes */
- /* locks, not used by Linux: */
- char s_flock; /* lock during free block list manipulation */
- char s_ilock; /* lock during inode cache manipulation */
- char s_fmod; /* super-block modified flag */
- char s_ronly; /* flag whether fs is mounted read-only */
- __fs32 s_time __packed2__; /* time of last super block update */
- __fs32 s_tfree __packed2__; /* total number of free zones */
- __fs16 s_tinode; /* total number of free inodes */
- __fs16 s_interleave_m; /* interleave factor */
- __fs16 s_interleave_n;
- char s_fname[6]; /* file system volume name */
- char s_fpack[6]; /* file system pack name */
- __fs32 s_unique; /* zero, not used */
-};
-
-/* SystemV/Coherent inode data on disk */
-struct sysv_inode {
- __fs16 i_mode;
- __fs16 i_nlink;
- __fs16 i_uid;
- __fs16 i_gid;
- __fs32 i_size;
- u8 i_data[3*(10+1+1+1)];
- u8 i_gen;
- __fs32 i_atime; /* time of last access */
- __fs32 i_mtime; /* time of last modification */
- __fs32 i_ctime; /* time of creation */
-};
-
-/* SystemV/Coherent directory entry on disk */
-#define SYSV_NAMELEN 14 /* max size of name in struct sysv_dir_entry */
-struct sysv_dir_entry {
- sysv_ino_t inode;
- char name[SYSV_NAMELEN]; /* up to 14 characters, the rest are zeroes */
-};
-
-#define SYSV_DIRSIZE sizeof(struct sysv_dir_entry) /* size of every directory entry */
-
-#endif /* _LINUX_SYSV_FS_H */
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 248f4ac95642..2c59fe3efcd4 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -41,18 +41,12 @@ static inline u32 t10_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- if (rq->q->integrity.interval_exp)
- shift = rq->q->integrity.interval_exp;
-#endif
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ rq->q->limits.integrity.interval_exp)
+ shift = rq->q->limits.integrity.interval_exp;
return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
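
Worked example, assuming no integrity interval override: with 4096-byte
logical blocks, shift = ilog2(4096) = 12, and since blk_rq_pos() counts
512-byte sectors:

	u32 ref = (80 >> (12 - SECTOR_SHIFT)) & 0xffffffff;	/* == 10 */
	/* i.e. the ref tag is the logical block number of the request */
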
-extern const struct blk_integrity_profile t10_pi_type1_crc;
-extern const struct blk_integrity_profile t10_pi_type1_ip;
-extern const struct blk_integrity_profile t10_pi_type3_crc;
-extern const struct blk_integrity_profile t10_pi_type3_ip;
-
struct crc64_pi_tuple {
__be64 guard_tag;
__be16 app_tag;
@@ -72,14 +66,10 @@ static inline u64 ext_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- if (rq->q->integrity.interval_exp)
- shift = rq->q->integrity.interval_exp;
-#endif
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ rq->q->limits.integrity.interval_exp)
+ shift = rq->q->limits.integrity.interval_exp;
return lower_48_bits(blk_rq_pos(rq) >> (shift - SECTOR_SHIFT));
}
-extern const struct blk_integrity_profile ext_pi_type1_crc64;
-extern const struct blk_integrity_profile ext_pi_type3_crc64;
-
#endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 795ef5a68429..0646804860ff 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -14,10 +14,11 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
}
enum task_work_notify_mode {
- TWA_NONE,
+ TWA_NONE = 0,
TWA_RESUME,
TWA_SIGNAL,
TWA_SIGNAL_NO_IPI,
+ TWA_NMI_CURRENT,
};
static inline bool task_work_pending(struct task_struct *task)
@@ -30,7 +31,8 @@ int task_work_add(struct task_struct *task, struct callback_head *twork,
struct callback_head *task_work_cancel_match(struct task_struct *task,
bool (*match)(struct callback_head *, void *data), void *data);
-struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
+struct callback_head *task_work_cancel_func(struct task_struct *, task_work_func_t);
+bool task_work_cancel(struct task_struct *task, struct callback_head *cb);
void task_work_run(void);
static inline void exit_task_work(struct task_struct *task)
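
With the rename, cancellation by function pointer becomes
task_work_cancel_func(), while task_work_cancel() now takes the
callback_head itself, which is unambiguous when several pending works
share one function. A usage sketch (my_* names are illustrative):

	static void my_func(struct callback_head *head)
	{
		/* runs in task context, e.g. on return to user space */
	}

	static struct callback_head my_work;

	static void example(struct task_struct *task)
	{
		init_task_work(&my_work, my_func);
		task_work_add(task, &my_work, TWA_SIGNAL);

		/* Cancel this specific instance, not "any work using
		 * my_func"; true means it was dequeued before running. */
		if (task_work_cancel(task, &my_work))
			return;
	}
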
diff --git a/include/linux/tc.h b/include/linux/tc.h
index 1638660abf5e..8416bae9b126 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -108,7 +108,7 @@ struct tc_driver {
struct device_driver driver;
};
-#define to_tc_driver(drv) container_of(drv, struct tc_driver, driver)
+#define to_tc_driver(drv) container_of_const(drv, struct tc_driver, driver)
/*
* Return TURBOchannel clock frequency in Hz.
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
deleted file mode 100644
index 5cf6f6f82aa7..000000000000
--- a/include/linux/tca6416_keypad.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tca6416 keypad platform support
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * Author: Sriramakrishnan <srk@ti.com>
- */
-
-#ifndef _TCA6416_KEYS_H
-#define _TCA6416_KEYS_H
-
-#include <linux/types.h>
-
-struct tca6416_button {
- /* Configuration parameters */
- int code; /* input event code (KEY_*, SW_*) */
- int active_low;
- int type; /* input event type (EV_KEY, EV_SW) */
-};
-
-struct tca6416_keys_platform_data {
- struct tca6416_button *buttons;
- int nbuttons;
- unsigned int rep:1; /* enable input subsystem auto repeat */
- uint16_t pinmask;
- uint16_t invert;
- int use_polling; /* use polling if Interrupt is not connected*/
-};
-#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 6a5e08b937b3..20b8c6e21fef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -122,8 +122,9 @@ struct tcp_options_received {
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
- u8 saw_unknown:1, /* Received unknown option */
- unused:7;
+ u8 accecn:6, /* AccECN index in header, 0=no options */
+ saw_unknown:1, /* Received unknown option */
+ unused:1;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -160,12 +161,19 @@ struct tcp_request_sock {
u32 rcv_isn;
u32 snt_isn;
u32 ts_off;
+ u32 snt_tsval_first;
+ u32 snt_tsval_last;
u32 last_oow_ack_time; /* last SYNACK */
u32 rcv_nxt; /* the ack # by SYNACK. For
* FastOpen it's the seq#
* after data-in-SYN.
*/
u8 syn_tos;
+ bool accecn_ok;
+ u8 syn_ect_snt: 2,
+ syn_ect_rcv: 2,
+ accecn_fail_mode:4;
+ u8 saw_accecn_opt :2;
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
@@ -200,15 +208,16 @@ struct tcp_sock {
/* TX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_tx);
- /* timestamp of last sent data packet (for restart window) */
u32 max_window; /* Maximal window ever seen from peer */
u32 rcv_ssthresh; /* Current window clamp */
u32 reordering; /* Packet reordering metric. */
u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
/* from STCP, retrans queue hinting */
- struct sk_buff *lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
+#if defined(CONFIG_TLS_DEVICE)
+ void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
+#endif
__cacheline_group_end(tcp_sock_read_tx);
/* TXRX read-mostly hotpath cache lines */
@@ -226,13 +235,13 @@ struct tcp_sock {
repair : 1,
tcp_usec_ts : 1, /* TSval values in usec */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+ recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_txrx);
/* RX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_rx);
u32 copied_seq; /* Head of yet unread data */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 snd_wl1; /* Sequence for window update */
u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 rttvar_us; /* smoothed mdev_max */
@@ -240,11 +249,10 @@ struct tcp_sock {
u16 advmss; /* Advertised MSS */
u16 urg_data; /* Saved octet of OOB data and control flags */
u32 lost; /* Total data packets lost incl. rexmits */
+ u32 snd_ssthresh; /* Slow start size threshold */
struct minmax rtt_min;
/* OOO segments go in this rbtree. Socket lock must be held. */
struct rb_root out_of_order_queue;
- u32 snd_ssthresh; /* Slow start size threshold */
- u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_rx);
/* TX read-write hotpath cache lines */
@@ -263,10 +271,11 @@ struct tcp_sock {
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
- u32 lsndtime;
+ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 mdev_us; /* medium deviation */
u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 accecn_opt_tstamp; /* Last AccECN option sent timestamp */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
@@ -282,6 +291,14 @@ struct tcp_sock {
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
+ u8 received_ce_pending:4, /* Not yet transmit cnt of received_ce */
+ unused2:4;
+ u8 accecn_minlen:2,/* Minimum length of AccECN option sent */
+ est_ecnfield:2,/* ECN field for AccECN delivered estimates */
+ accecn_opt_demand:2,/* Demand AccECN option for n next ACKs */
+ prev_ecnfield:2; /* ECN bits from the previous segment */
__be32 pred_flags;
u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
u64 tcp_mstamp; /* most recent packet received/sent */
@@ -294,14 +311,18 @@ struct tcp_sock {
u32 snd_up; /* Urgent pointer */
u32 delivered; /* Total data packets delivered incl. rexmits */
u32 delivered_ce; /* Like the above but only ECE marked packets */
+ u32 received_ce; /* Like the above but for rcvd CE marked pkts */
+ u32 received_ecn_bytes[3]; /* received byte counters for three ECN
+ * types: INET_ECN_ECT_1, INET_ECN_ECT_0,
+ * and INET_ECN_CE
+ */
u32 app_limited; /* limited until "delivered" reaches this val */
u32 rcv_wnd; /* Current receiver window */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
struct tcp_options_received rx_opt;
- u8 nonagle : 4,/* Disable Nagle algorithm? */
- rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
__cacheline_group_end(tcp_sock_write_txrx);
/* RX read-write hotpath cache lines */
@@ -323,6 +344,7 @@ struct tcp_sock {
u32 rate_delivered; /* saved rate sample: packets delivered */
u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_rtt_last_tsecr;
+ u32 delivered_ecn_bytes[3];
u64 first_tx_mstamp; /* start of window send phase */
u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
@@ -336,7 +358,7 @@ struct tcp_sock {
} rcv_rtt_est;
/* Receiver queue space */
struct {
- u32 space;
+ int space;
u32 seq;
u64 time;
} rcvq_space;
@@ -369,7 +391,8 @@ struct tcp_sock {
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- unused:5;
+ syn_ect_snt:2, /* AccECN ECT memory, only */
+ syn_ect_rcv:2; /* ... needed during 3WHS + first seqno */
u8 thin_lto : 1,/* Use linear timeouts for thin streams */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
@@ -381,9 +404,12 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
+ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ syn_fastopen_child:1; /* created TFO passive child socket */
u8 keepalive_probes; /* num of allowed keep alive probes */
+ u8 accecn_fail_mode:4, /* AccECN failure handling */
+ saw_accecn_opt:2; /* An AccECN option was seen */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
/* RTT measurement */
@@ -414,8 +440,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- int lost_cnt_hint;
-
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
@@ -424,6 +448,9 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
+ u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
+ * while socket was owned by user.
+ */
u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
* Total data bytes retransmitted
*/
@@ -470,9 +497,6 @@ struct tcp_sock {
u32 probe_seq_end;
} mtu_probe;
u32 plb_rehash; /* PLB-triggered rehash attempts */
- u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
- * while socket was owned by user.
- */
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
@@ -619,6 +643,7 @@ void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
+int tcp_sock_set_maxseg(struct sock *sk, int val);
static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
{
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index efd16ed52315..1f3e5dad6d0d 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -8,9 +8,11 @@
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/scatterlist.h>
#include <linux/tee.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -26,10 +28,19 @@
#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
+#define TEE_SHM_DMA_BUF BIT(4) /* Memory with dma-buf handle */
+#define TEE_SHM_DMA_MEM BIT(5) /* Memory allocated with */
+ /* dma_alloc_pages() */
#define TEE_DEVICE_FLAG_REGISTERED 0x1
#define TEE_MAX_DEV_NAME_LEN 32
+enum tee_dma_heap_id {
+ TEE_DMA_HEAP_SECURE_VIDEO_PLAY = 1,
+ TEE_DMA_HEAP_TRUSTED_UI,
+ TEE_DMA_HEAP_SECURE_VIDEO_RECORD,
+};
+
/**
* struct tee_device - TEE Device representation
* @name: name of device
@@ -65,22 +76,30 @@ struct tee_device {
/**
* struct tee_driver_ops - driver operations vtable
* @get_version: returns version of driver
- * @open: called when the device file is opened
- * @release: release this open file
+ * @open: called for a context when the device file is opened
+ * @close_context: called when the device file is closed
+ * @release: called to release the context
* @open_session: open a new session
* @close_session: close a session
* @system_session: declare session as a system session
* @invoke_func: invoke a trusted function
+ * @object_invoke_func: invoke a TEE object
* @cancel_req: request cancel of an ongoing invoke or open
* @supp_recv: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
* @shm_register: register shared memory buffer in TEE
* @shm_unregister: unregister shared memory buffer in TEE
+ *
+ * The context given to @open might last longer than the device file if it is
+ * tied to other resources in the TEE driver. @close_context is called when the
+ * client closes the device file, even if there are existing references to the
+ * context. The TEE driver can use @close_context to start cleaning up.
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
struct tee_ioctl_version_data *vers);
int (*open)(struct tee_context *ctx);
+ void (*close_context)(struct tee_context *ctx);
void (*release)(struct tee_context *ctx);
int (*open_session)(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
@@ -90,6 +109,9 @@ struct tee_driver_ops {
int (*invoke_func)(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
+ int (*object_invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_object_invoke_arg *arg,
+ struct tee_param *param);
int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
@@ -117,6 +139,36 @@ struct tee_desc {
};
/**
+ * struct tee_protmem_pool - protected memory pool
+ * @ops: operations
+ *
+ * This is an abstract interface where this struct is expected to be
+ * embedded in another struct specific to the implementation.
+ */
+struct tee_protmem_pool {
+ const struct tee_protmem_pool_ops *ops;
+};
+
+/**
+ * struct tee_protmem_pool_ops - protected memory pool operations
+ * @alloc: called when allocating protected memory
+ * @free: called when freeing protected memory
+ * @update_shm: called when registering a dma-buf to update the @shm
+ * with physical address of the buffer or to return the
+ * @parent_shm of the memory pool
+ * @destroy_pool: called when destroying the pool
+ */
+struct tee_protmem_pool_ops {
+ int (*alloc)(struct tee_protmem_pool *pool, struct sg_table *sgt,
+ size_t size, size_t *offs);
+ void (*free)(struct tee_protmem_pool *pool, struct sg_table *sgt);
+ int (*update_shm)(struct tee_protmem_pool *pool, struct sg_table *sgt,
+ size_t offs, struct tee_shm *shm,
+ struct tee_shm **parent_shm);
+ void (*destroy_pool)(struct tee_protmem_pool *pool);
+};
+
+/**
* tee_device_alloc() - Allocate a new struct tee_device instance
* @teedesc: Descriptor for this driver
* @dev: Parent device for this device
@@ -154,6 +206,41 @@ int tee_device_register(struct tee_device *teedev);
*/
void tee_device_unregister(struct tee_device *teedev);
+int tee_device_register_dma_heap(struct tee_device *teedev,
+ enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool);
+void tee_device_put_all_dma_heaps(struct tee_device *teedev);
+
+/**
+ * tee_device_get() - Increment the user count for a tee_device
+ * @teedev: Pointer to the tee_device
+ *
+ * If tee_device_unregister() has been called and the final user of @teedev
+ * has already released the device, this function will fail, preventing new
+ * users from accessing the device during the unregistration process.
+ *
+ * Returns: true if @teedev remains valid, otherwise false
+ */
+bool tee_device_get(struct tee_device *teedev);
+
+/**
+ * tee_device_put() - Decrease the user count for a tee_device
+ * @teedev: pointer to the tee_device
+ */
+void tee_device_put(struct tee_device *teedev);
+
+/**
+ * tee_device_set_dev_groups() - Set device attribute groups
+ * @teedev: Device to register
+ * @dev_groups: Attribute groups
+ *
+ * Assigns the provided @dev_groups to the @teedev to be registered later
+ * with tee_device_register(). Calling this function is optional, but if
+ * it's called it must be called before tee_device_register().
+ */
+void tee_device_set_dev_groups(struct tee_device *teedev,
+ const struct attribute_group **dev_groups);
+
/**
* tee_session_calc_client_uuid() - Calculates client UUID for session
* @uuid: Resulting UUID
@@ -218,6 +305,16 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
}
/**
+ * tee_protmem_static_pool_alloc() - Create a protected memory manager
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ *
+ * @returns pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure.
+ */
+struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
+ size_t size);
+
+/**
* tee_get_drvdata() - Return driver_data pointer
* @returns the driver_data pointer supplied to tee_register().
*/
@@ -232,6 +329,9 @@ void *tee_get_drvdata(struct tee_device *teedev);
*/
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count);
+
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
@@ -303,4 +403,25 @@ struct tee_context *teedev_open(struct tee_device *teedev);
*/
void teedev_close_context(struct tee_context *ctx);
+/**
+ * teedev_ctx_get() - Increment the reference count of a context
+ * @ctx: Pointer to the context
+ *
+ * This function increases the refcount of the context, which is tied to
+ * resources shared by the same tee_device. During the unregistration process,
+ * the context may remain valid even after tee_device_unregister() has returned.
+ *
+ * Users should ensure that the context's refcount is properly decreased before
+ * calling tee_device_put(), typically within the context's release() function.
+ * Alternatively, users can call tee_device_get() and teedev_ctx_get() together
+ * and release them simultaneously (see shm_alloc_helper()).
+ */
+void teedev_ctx_get(struct tee_context *ctx);
+
+/**
+ * teedev_ctx_put() - Decrease reference count on a context
+ * @ctx: pointer to the context
+ */
+void teedev_ctx_put(struct tee_context *ctx);
+
#endif /*__TEE_CORE_H*/
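
For illustration, a minimal sketch of how the new reference-counting helpers are intended to pair up, following the teedev_ctx_get() kernel-doc above; the surrounding driver structure (struct my_shm, my_shm_free()) is hypothetical:

        /* Hypothetical driver object holding a context reference. */
        struct my_shm {
                struct tee_context *ctx;
                void *kaddr;
        };

        static void my_shm_free(struct my_shm *s)
        {
                struct tee_context *ctx = s->ctx;

                kfree(s);
                /* drop the refs taken with teedev_ctx_get()/tee_device_get() */
                teedev_ctx_put(ctx);
                tee_device_put(ctx->teedev);
        }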
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 786b9ae6cf4d..88a6f9697c89 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -82,6 +82,16 @@ struct tee_param_memref {
struct tee_shm *shm;
};
+struct tee_param_ubuf {
+ void __user *uaddr;
+ size_t size;
+};
+
+struct tee_param_objref {
+ u64 id;
+ u64 flags;
+};
+
struct tee_param_value {
u64 a;
u64 b;
@@ -92,6 +102,8 @@ struct tee_param {
u64 attr;
union {
struct tee_param_memref memref;
+ struct tee_param_objref objref;
+ struct tee_param_ubuf ubuf;
struct tee_param_value value;
} u;
};
@@ -117,6 +129,16 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length);
/**
+ * tee_shm_register_fd() - Register shared memory from file descriptor
+ *
+ * @ctx: Context that allocates the shared memory
+ * @fd: Shared memory file descriptor reference
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd);
+
+/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
@@ -298,6 +320,6 @@ struct tee_client_driver {
};
#define to_tee_client_driver(d) \
- container_of(d, struct tee_client_driver, driver)
+ container_of_const(d, struct tee_client_driver, driver)
#endif /*__TEE_DRV_H*/
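
A brief sketch of filling the new parameter variants added above; the TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT constant is assumed to follow the existing UAPI attribute naming from the matching uapi change:

        static void fill_ubuf_param(struct tee_param *p,
                                    void __user *uaddr, size_t size)
        {
                /* attribute constant assumed from the matching UAPI update */
                p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT;
                p->u.ubuf.uaddr = uaddr;
                p->u.ubuf.size = size;
        }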
diff --git a/include/linux/text-patching.h b/include/linux/text-patching.h
new file mode 100644
index 000000000000..ad5877ab0855
--- /dev/null
+++ b/include/linux/text-patching.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TEXT_PATCHING_H
+#define _LINUX_TEXT_PATCHING_H
+
+#include <asm/text-patching.h>
+
+#ifndef text_poke_copy
+static inline void *text_poke_copy(void *dst, const void *src, size_t len)
+{
+ return memcpy(dst, src, len);
+}
+#define text_poke_copy text_poke_copy
+#endif
+
+#endif /* _LINUX_TEXT_PATCHING_H */
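
The point of the new wrapper header is that generic code can call text_poke_copy() without #ifdefs; a sketch (patch_insn() is a hypothetical caller, and the memcpy() fallback is only safe while the destination is still writable):

        #include <linux/text-patching.h>

        static void *patch_insn(void *dst, const void *insn, size_t len)
        {
                /* arch-provided text_poke_copy() or the memcpy() fallback */
                return text_poke_copy(dst, insn, len);
        }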
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
deleted file mode 100644
index a5acc768085d..000000000000
--- a/include/linux/tfrc.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _LINUX_TFRC_H_
-#define _LINUX_TFRC_H_
-/*
- * TFRC - Data Structures for the TCP-Friendly Rate Control congestion
- * control mechanism as specified in RFC 3448.
- *
- * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
- * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
- * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
- * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
- */
-#include <linux/types.h>
-
-/** tfrc_rx_info - TFRC Receiver Data Structure
- *
- * @tfrcrx_x_recv: receiver estimate of sending rate (3.2.2)
- * @tfrcrx_rtt: round-trip-time (communicated by sender)
- * @tfrcrx_p: current estimate of loss event rate (3.2.2)
- */
-struct tfrc_rx_info {
- __u32 tfrcrx_x_recv;
- __u32 tfrcrx_rtt;
- __u32 tfrcrx_p;
-};
-
-/** tfrc_tx_info - TFRC Sender Data Structure
- *
- * @tfrctx_x: computed transmit rate (4.3 (4))
- * @tfrctx_x_recv: receiver estimate of send rate (4.3)
- * @tfrctx_x_calc: return value of throughput equation (3.1)
- * @tfrctx_rtt: (moving average) estimate of RTT (4.3)
- * @tfrctx_p: current loss event rate (5.4)
- * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
- * @tfrctx_ipi: inter-packet interval (4.6)
- *
- * Note: X and X_recv are both maintained in units of 64 * bytes/second. This
- * enables a finer resolution of sending rates and avoids problems with
- * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
- */
-struct tfrc_tx_info {
- __u64 tfrctx_x;
- __u64 tfrctx_x_recv;
- __u32 tfrctx_x_calc;
- __u32 tfrctx_rtt;
- __u32 tfrctx_p;
- __u32 tfrctx_rto;
- __u32 tfrctx_ipi;
-};
-
-#endif /* _LINUX_TFRC_H_ */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index f1155c0439c4..0b5ed6821080 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -55,6 +55,10 @@ enum thermal_notify_event {
THERMAL_TZ_BIND_CDEV, /* Cooling dev is bind to the thermal zone */
THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */
THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
+ THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */
+ THERMAL_TZ_ADD_THRESHOLD, /* Threshold added */
+ THERMAL_TZ_DEL_THRESHOLD, /* Threshold deleted */
+ THERMAL_TZ_FLUSH_THRESHOLDS, /* All thresholds deleted */
};
/**
@@ -79,18 +83,26 @@ struct thermal_trip {
#define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \
THERMAL_TRIP_FLAG_RW_HYST)
-struct thermal_zone_device;
+#define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_)
+#define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_)
+
+struct cooling_spec {
+ unsigned long upper; /* Highest cooling state */
+ unsigned long lower; /* Lowest cooling state */
+ unsigned int weight; /* Cooling device weight */
+};
struct thermal_zone_device_ops {
- int (*bind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
- int (*unbind) (struct thermal_zone_device *,
- struct thermal_cooling_device *);
+ bool (*should_bind) (struct thermal_zone_device *,
+ const struct thermal_trip *,
+ struct thermal_cooling_device *,
+ struct cooling_spec *);
int (*get_temp) (struct thermal_zone_device *, int *);
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*set_trip_temp) (struct thermal_zone_device *, int, int);
+ int (*set_trip_temp) (struct thermal_zone_device *,
+ const struct thermal_trip *, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *,
@@ -126,6 +138,9 @@ struct thermal_cooling_device {
#endif
};
+DEFINE_GUARD(cooling_dev, struct thermal_cooling_device *, mutex_lock(&_T->lock),
+ mutex_unlock(&_T->lock))
+
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
const char *governor_name;
@@ -198,17 +213,12 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev,
}
#endif
-int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
-int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
- struct thermal_trip *trip);
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
-int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
struct thermal_trip *trip, int temp);
@@ -221,7 +231,8 @@ struct thermal_zone_device *thermal_zone_device_register_with_trips(
int num_trips, void *devdata,
const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay);
+ unsigned int passive_delay,
+ unsigned int polling_delay);
struct thermal_zone_device *thermal_tripless_zone_device_register(
const char *type,
@@ -236,20 +247,6 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
int thermal_zone_device_id(struct thermal_zone_device *tzd);
struct device *thermal_zone_device(struct thermal_zone_device *tzd);
-int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev,
- unsigned long upper, unsigned long lower,
- unsigned int weight);
-int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *,
- unsigned long, unsigned long,
- unsigned int);
-int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
- const struct thermal_trip *trip,
- struct thermal_cooling_device *cdev);
-int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
- struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
@@ -261,7 +258,7 @@ thermal_of_cooling_device_register(struct device_node *np, const char *, void *,
struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops);
void thermal_cooling_device_update(struct thermal_cooling_device *);
void thermal_cooling_device_unregister(struct thermal_cooling_device *);
@@ -269,6 +266,9 @@ struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
+bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev);
int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
@@ -293,6 +293,10 @@ static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz,
+ enum thermal_notify_event event)
+{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
@@ -305,7 +309,7 @@ thermal_of_cooling_device_register(struct device_node *np,
static inline struct thermal_cooling_device *
devm_thermal_of_cooling_device_register(struct device *dev,
struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return ERR_PTR(-ENODEV);
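
To illustrate the new binding model: rather than implementing ->bind()/->unbind(), a driver now approves each (trip, cooling device) pair from its ->should_bind() callback and fills in the cooling_spec. A hedged sketch; the passive-trip policy here is made up for the example:

        static bool my_zone_should_bind(struct thermal_zone_device *tz,
                                        const struct thermal_trip *trip,
                                        struct thermal_cooling_device *cdev,
                                        struct cooling_spec *c)
        {
                /* example policy: only bind cooling devices to passive trips */
                if (trip->type != THERMAL_TRIP_PASSIVE)
                        return false;

                c->upper = THERMAL_NO_LIMIT;
                c->lower = THERMAL_NO_LIMIT;
                c->weight = THERMAL_WEIGHT_DEFAULT;
                return true;
        }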
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 9ea0b28068f4..b40de9bab4b7 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -59,6 +59,19 @@ enum syscall_work_bit {
#include <asm/thread_info.h>
+#ifndef TIF_NEED_RESCHED_LAZY
+#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
+#error Inconsistent PREEMPT_LAZY
+#endif
+#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
+#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
+#endif
+
+#ifndef TIF_RSEQ
+# define TIF_RSEQ TIF_NOTIFY_RESUME
+# define _TIF_RSEQ _TIF_NOTIFY_RESUME
+#endif
+
#ifdef __KERNEL__
#ifndef arch_set_restart_data
@@ -179,22 +192,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
{
- return arch_test_bit(TIF_NEED_RESCHED,
+ return arch_test_bit(bit,
(unsigned long *)(&current_thread_info()->flags));
}
#else
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
{
- return test_bit(TIF_NEED_RESCHED,
+ return test_bit(bit,
(unsigned long *)(&current_thread_info()->flags));
}
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
+static __always_inline bool tif_need_resched(void)
+{
+ return tif_test_bit(TIF_NEED_RESCHED);
+}
+
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
const void * const stackend,
@@ -204,54 +222,6 @@ static inline int arch_within_stack_frames(const void * const stack,
}
#endif
-#ifdef CONFIG_HARDENED_USERCOPY
-extern void __check_object_size(const void *ptr, unsigned long n,
- bool to_user);
-
-static __always_inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
-{
- if (!__builtin_constant_p(n))
- __check_object_size(ptr, n, to_user);
-}
-#else
-static inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
-{ }
-#endif /* CONFIG_HARDENED_USERCOPY */
-
-extern void __compiletime_error("copy source size is too small")
-__bad_copy_from(void);
-extern void __compiletime_error("copy destination size is too small")
-__bad_copy_to(void);
-
-void __copy_overflow(int size, unsigned long count);
-
-static inline void copy_overflow(int size, unsigned long count)
-{
- if (IS_ENABLED(CONFIG_BUG))
- __copy_overflow(size, count);
-}
-
-static __always_inline __must_check bool
-check_copy_size(const void *addr, size_t bytes, bool is_source)
-{
- int sz = __builtin_object_size(addr, 0);
- if (unlikely(sz >= 0 && sz < bytes)) {
- if (!__builtin_constant_p(bytes))
- copy_overflow(sz, bytes);
- else if (is_source)
- __bad_copy_from();
- else
- __bad_copy_to();
- return false;
- }
- if (WARN_ON_ONCE(bytes > INT_MAX))
- return false;
- check_object_size(addr, bytes, is_source);
- return true;
-}
-
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
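
With the check factored into tif_test_bit(), other flag tests follow the same shape as tif_need_resched(); for instance, a lazy-reschedule check might look like this (sketch, not part of the hunk above):

        static __always_inline bool tif_need_resched_lazy(void)
        {
                /*
                 * TIF_NEED_RESCHED_LAZY aliases TIF_NEED_RESCHED when
                 * CONFIG_ARCH_HAS_PREEMPT_LAZY is not set, per the
                 * fallback defines above.
                 */
                return tif_test_bit(TIF_NEED_RESCHED_LAZY);
        }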
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 7d902d8c054b..0ba112175bb3 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -11,6 +11,13 @@
#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_
+#include <linux/types.h>
+
+struct fwnode_handle;
+struct device;
+
+#if IS_REACHABLE(CONFIG_USB4)
+
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
@@ -206,7 +213,7 @@ enum tb_link_width {
* queried first
* @service_ids: Used to generate IDs for the services
* @in_hopids: Input HopIDs for DMA tunneling
- * @out_hopids; Output HopIDs for DMA tunneling
+ * @out_hopids: Output HopIDs for DMA tunneling
* @local_property_block: Local block of properties
* @local_property_block_gen: Generation of @local_property_block
* @local_property_block_len: Length of the @local_property_block in dwords
@@ -349,7 +356,7 @@ int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
unsigned int timeout_msec);
/**
- * tb_protocol_handler - Protocol specific handler
+ * struct tb_protocol_handler - Protocol specific handler
* @uuid: XDomain messages with this UUID are dispatched to this handler
* @callback: Callback called with the XDomain message. Returning %1
* here tells the XDomain core that the message was handled
@@ -430,7 +437,7 @@ static inline struct tb_service *tb_to_service(struct device *dev)
}
/**
- * tb_service_driver - Thunderbolt service driver
+ * struct tb_service_driver - Thunderbolt service driver
* @driver: Driver structure
* @probe: Called when the driver is probed
* @remove: Called when the driver is removed (optional)
@@ -512,6 +519,7 @@ struct tb_nhi {
* @head: Head of the ring (write next descriptor here)
* @tail: Tail of the ring (complete next descriptor here)
* @descriptors: Allocated descriptors for this ring
+ * @descriptors_dma: DMA address of descriptors for this ring
* @queue: Queue holding frames to be transferred over this ring
* @in_flight: Queue holding frames that are currently in flight
* @work: Interrupt work structure
@@ -564,12 +572,12 @@ typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
/**
* enum ring_desc_flags - Flags for DMA ring descriptor
- * %RING_DESC_ISOCH: Enable isonchronous DMA (Tx only)
- * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
- * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
- * %RING_DESC_POSTED: Always set this
- * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
- * %RING_DESC_INTERRUPT: Request an interrupt on completion
+ * @RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
+ * @RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
+ * @RING_DESC_COMPLETED: Descriptor completed (set by NHI)
+ * @RING_DESC_POSTED: Always set this
+ * @RING_DESC_BUFFER_OVERRUN: RX buffer overrun
+ * @RING_DESC_INTERRUPT: Request an interrupt on completion
*/
enum ring_desc_flags {
RING_DESC_ISOCH = 0x1,
@@ -629,7 +637,7 @@ int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
* If ring_stop() is called after the packet has been enqueued
* @frame->callback will be called with canceled set to true.
*
- * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
+ * Return: %-ESHUTDOWN if ring_stop() has been called, %0 otherwise.
*/
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
@@ -650,7 +658,7 @@ static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
* If ring_stop() is called after the packet has been enqueued @frame->callback
* will be called with canceled set to true.
*
- * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
+ * Return: %-ESHUTDOWN if ring_stop() has been called, %0 otherwise.
*/
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
@@ -668,10 +676,23 @@ void tb_ring_poll_complete(struct tb_ring *ring);
*
* Use this function when you are mapping DMA for buffers that are
* passed to the ring for sending/receiving.
+ *
+ * Return: Pointer to device used for DMA mapping.
*/
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
return &ring->nhi->pdev->dev;
}
+bool usb4_usb3_port_match(struct device *usb4_port_dev,
+ const struct fwnode_handle *usb3_port_fwnode);
+
+#else /* CONFIG_USB4 */
+static inline bool usb4_usb3_port_match(struct device *usb4_port_dev,
+ const struct fwnode_handle *usb3_port_fwnode)
+{
+ return false;
+}
+#endif /* CONFIG_USB4 */
+
#endif /* THUNDERBOLT_H_ */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 4924a33700b7..ac76ae9fa36d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -20,12 +20,10 @@ extern void __init tick_init(void);
extern void tick_suspend_local(void);
/* Should be core only, but XEN resume magic and ARM BL switcher require it */
extern void tick_resume_local(void);
-extern void tick_cleanup_dead_cpu(int cpu);
#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_suspend_local(void) { }
static inline void tick_resume_local(void) { }
-static inline void tick_cleanup_dead_cpu(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU)
@@ -139,7 +137,6 @@ extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
-extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
@@ -198,12 +195,6 @@ static inline bool tick_nohz_full_enabled(void)
__ret; \
})
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
-{
- if (tick_nohz_full_enabled())
- cpumask_or(mask, mask, tick_nohz_full_mask);
-}
-
extern void tick_nohz_dep_set(enum tick_dep_bits bit);
extern void tick_nohz_dep_clear(enum tick_dep_bits bit);
extern void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit);
@@ -252,12 +243,19 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
if (tick_nohz_full_enabled())
tick_nohz_dep_set_task(tsk, bit);
}
+
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit)
{
if (tick_nohz_full_enabled())
tick_nohz_dep_clear_task(tsk, bit);
}
+
+static inline void tick_dep_init_task(struct task_struct *tsk)
+{
+ atomic_set(&tsk->tick_dep_mask, 0);
+}
+
static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit)
{
@@ -277,7 +275,6 @@ extern void __init tick_nohz_full_setup(cpumask_var_t cpumask);
#else
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
-static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
@@ -291,6 +288,7 @@ static inline void tick_dep_set_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_task(struct task_struct *tsk,
enum tick_dep_bits bit) { }
+static inline void tick_dep_init_task(struct task_struct *tsk) { }
static inline void tick_dep_set_signal(struct task_struct *tsk,
enum tick_dep_bits bit) { }
static inline void tick_dep_clear_signal(struct signal_struct *signal,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index f1bcea8c124a..9934331c7b86 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -49,6 +49,11 @@ static inline int timespec64_equal(const struct timespec64 *a,
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
+static inline bool timespec64_is_epoch(const struct timespec64 *ts)
+{
+ return ts->tv_sec == 0 && ts->tv_nsec == 0;
+}
+
/*
* lhs < rhs: return <0
* lhs == rhs: return 0
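
Usage sketch for the new helper: a zeroed timespec64 is commonly used as an "unset" marker, which timespec64_is_epoch() now tests directly (restore_needed() is hypothetical):

        static bool restore_needed(const struct timespec64 *saved)
        {
                /* an epoch (0 sec, 0 nsec) timestamp means "never set" */
                return !timespec64_is_epoch(saved);
        }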
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index 876e31b4461d..c514d0e5a45c 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -12,6 +12,7 @@
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct seq_file;
struct vm_area_struct;
struct timens_offsets {
@@ -32,17 +33,22 @@ struct time_namespace {
extern struct time_namespace init_time_ns;
#ifdef CONFIG_TIME_NS
+static inline struct time_namespace *to_time_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct time_namespace, ns);
+}
+void __init time_ns_init(void);
extern int vdso_join_timens(struct task_struct *task,
struct time_namespace *ns);
extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns);
static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
{
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
-struct time_namespace *copy_time_ns(unsigned long flags,
+struct time_namespace *copy_time_ns(u64 flags,
struct user_namespace *user_ns,
struct time_namespace *old_ns);
void free_time_ns(struct time_namespace *ns);
@@ -51,7 +57,7 @@ struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
{
- if (refcount_dec_and_test(&ns->ns.count))
+ if (ns_ref_put(ns))
free_time_ns(ns);
}
@@ -107,6 +113,10 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#else
+static inline void __init time_ns_init(void)
+{
+}
+
static inline int vdso_join_timens(struct task_struct *task,
struct time_namespace *ns)
{
@@ -128,7 +138,7 @@ static inline void put_time_ns(struct time_namespace *ns)
}
static inline
-struct time_namespace *copy_time_ns(unsigned long flags,
+struct time_namespace *copy_time_ns(u64 flags,
struct user_namespace *user_ns,
struct time_namespace *old_ns)
{
@@ -165,6 +175,4 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#endif
-struct vdso_data *arch_get_vdso_data(void *vvar_page);
-
#endif /* _LINUX_TIMENS_H */
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index 0982d1d52b24..dce03a5cafb7 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -28,7 +28,7 @@
* @shift: cycle to nanosecond divisor (power of two)
*/
struct cyclecounter {
- u64 (*read)(const struct cyclecounter *cc);
+ u64 (*read)(struct cyclecounter *cc);
u64 mask;
u32 mult;
u32 shift;
@@ -53,7 +53,7 @@ struct cyclecounter {
* @frac: accumulated fractional nanoseconds
*/
struct timecounter {
- const struct cyclecounter *cc;
+ struct cyclecounter *cc;
u64 cycle_last;
u64 nsec;
u64 mask;
@@ -100,7 +100,7 @@ static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
* the time stamp counter by the number of elapsed nanoseconds.
*/
extern void timecounter_init(struct timecounter *tc,
- const struct cyclecounter *cc,
+ struct cyclecounter *cc,
u64 start_tstamp);
/**
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 84ff2844df2a..b8ae89ea28ab 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -12,6 +12,22 @@
#include <linux/time.h>
/**
+ * enum timekeeper_ids - IDs for various time keepers in the kernel
+ * @TIMEKEEPER_CORE: The central core timekeeper managing system time
+ * @TIMEKEEPER_AUX_FIRST: The first AUX timekeeper
+ * @TIMEKEEPER_AUX_LAST: The last AUX timekeeper
+ * @TIMEKEEPERS_MAX: The maximum number of timekeepers managed
+ */
+enum timekeeper_ids {
+ TIMEKEEPER_CORE,
+#ifdef CONFIG_POSIX_AUX_CLOCKS
+ TIMEKEEPER_AUX_FIRST,
+ TIMEKEEPER_AUX_LAST = TIMEKEEPER_AUX_FIRST + MAX_AUX_CLOCKS - 1,
+#endif
+ TIMEKEEPERS_MAX,
+};
+
+/**
* struct tk_read_base - base structure for timekeeping readout
* @clock: Current clocksource used for timekeeping.
* @mask: Bitmask for two's complement subtraction of non 64bit clocks
@@ -26,7 +42,7 @@
* occupies a single 64byte cache line.
*
* The struct is separate from struct timekeeper as it is also used
- * for a fast NMI safe accessors.
+ * for the fast NMI safe accessors.
*
* @base_real is for the fast NMI safe accessor to allow reading clock
* realtime from any context.
@@ -44,36 +60,46 @@ struct tk_read_base {
/**
* struct timekeeper - Structure holding internal timekeeping values.
- * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
- * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
- * @xtime_sec: Current CLOCK_REALTIME time in seconds
- * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
- * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
- * @offs_real: Offset clock monotonic -> clock realtime
- * @offs_boot: Offset clock monotonic -> clock boottime
- * @offs_tai: Offset clock monotonic -> clock tai
- * @tai_offset: The current UTC to TAI offset in seconds
- * @clock_was_set_seq: The sequence number of clock was set events
- * @cs_was_changed_seq: The sequence number of clocksource change events
- * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
- * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
- * @cycle_interval: Number of clock cycles in one NTP interval
- * @xtime_interval: Number of clock shifted nano seconds in one NTP
- * interval.
- * @xtime_remainder: Shifted nano seconds left over when rounding
- * @cycle_interval
- * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
- * @ntp_error: Difference between accumulated time and NTP time in ntp
- * shifted nano seconds.
- * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
- * ntp shifted nano seconds.
- * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING)
- * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING)
- * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING)
+ * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
+ * @xtime_sec: Current CLOCK_REALTIME time in seconds
+ * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real: Offset clock monotonic -> clock realtime
+ * @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_tai: Offset clock monotonic -> clock tai
+ * @offs_aux: Offset clock monotonic -> clock AUX
+ * @coarse_nsec: The nanoseconds part for coarse time getters
+ * @id: The timekeeper ID
+ * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
+ * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @cs_was_changed_seq: The sequence number of clocksource change events
+ * @clock_valid: Indicator for valid clock
+ * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
+ * @monotonic_to_aux: CLOCK_MONOTONIC to CLOCK_AUX offset
+ * @cycle_interval: Number of clock cycles in one NTP interval
+ * @xtime_interval: Number of clock shifted nano seconds in one NTP
+ * interval.
+ * @xtime_remainder: Shifted nano seconds left over when rounding
+ * @cycle_interval
+ * @raw_interval: Shifted raw nano seconds accumulated per NTP interval.
+ * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
+ * @ntp_tick: The ntp_tick_length() value currently being
+ * used. This cached copy ensures we consistently
+ * apply the tick length for an entire tick, as
+ * ntp_tick_length may change mid-tick, and we don't
+ * want to apply that new value to the tick in
+ * progress.
+ * @ntp_error: Difference between accumulated time and NTP time in ntp
+ * shifted nano seconds.
+ * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
+ * ntp shifted nano seconds.
+ * @ntp_err_mult: Multiplication factor for scaled math conversion
+ * @skip_second_overflow: Flag used to avoid updating NTP twice with same second
+ * @tai_offset: The current UTC to TAI offset in seconds
*
* Note: For timespec(64) based interfaces wall_to_monotonic is what
- * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * we need to add to xtime (or xtime corrected for sub jiffy times)
* to get to monotonic time. Monotonic is pegged at zero at system
* boot time, so wall_to_monotonic will be negative, however, we will
* ALWAYS keep the tv_nsec part positive so we can use the usual
@@ -88,54 +114,72 @@ struct tk_read_base {
*
* @monotonic_to_boottime is a timespec64 representation of @offs_boot to
* accelerate the VDSO update for CLOCK_BOOTTIME.
+ *
+ * @offs_aux is used by the auxiliary timekeepers which do not utilize any
+ * of the regular timekeeper offset fields.
+ *
+ * @monotonic_to_aux is a timespec64 representation of @offs_aux to
+ * accelerate the VDSO update for CLOCK_AUX.
+ *
+ * The cacheline ordering of the structure is optimized for in kernel usage of
+ * the ktime_get() and ktime_get_ts64() family of time accessors. Struct
+ * timekeeper is prepended in the core timekeeping code with a sequence count,
+ * which results in the following cacheline layout:
+ *
+ * 0: seqcount, tkr_mono
+ * 1: xtime_sec ... id
+ * 2: tkr_raw, raw_sec
+ * 3,4: Internal variables
+ *
+ * Cacheline 0,1 contain the data which is used for accessing
+ * CLOCK_MONOTONIC/REALTIME/BOOTTIME/TAI, while cacheline 2 contains the
+ * data for accessing CLOCK_MONOTONIC_RAW. Cacheline 3,4 are internal
+ * variables which are only accessed during timekeeper updates once per
+ * tick.
*/
struct timekeeper {
+ /* Cacheline 0 (together with prepended seqcount of timekeeper core): */
struct tk_read_base tkr_mono;
- struct tk_read_base tkr_raw;
+
+ /* Cacheline 1: */
u64 xtime_sec;
unsigned long ktime_sec;
struct timespec64 wall_to_monotonic;
ktime_t offs_real;
ktime_t offs_boot;
- ktime_t offs_tai;
- s32 tai_offset;
+ union {
+ ktime_t offs_tai;
+ ktime_t offs_aux;
+ };
+ u32 coarse_nsec;
+ enum timekeeper_ids id;
+
+ /* Cacheline 2: */
+ struct tk_read_base tkr_raw;
+ u64 raw_sec;
+
+	/* Cacheline 3 and 4 (timekeeping internal variables): */
unsigned int clock_was_set_seq;
u8 cs_was_changed_seq;
- ktime_t next_leap_ktime;
- u64 raw_sec;
- struct timespec64 monotonic_to_boot;
+ u8 clock_valid;
+
+ union {
+ struct timespec64 monotonic_to_boot;
+ struct timespec64 monotonic_to_aux;
+ };
- /* The following members are for timekeeping internal use */
u64 cycle_interval;
u64 xtime_interval;
s64 xtime_remainder;
u64 raw_interval;
- /* The ntp_tick_length() value currently being used.
- * This cached copy ensures we consistently apply the tick
- * length for an entire tick, as ntp_tick_length may change
- * mid-tick, and we don't want to apply that new value to
- * the tick in progress.
- */
+
+ ktime_t next_leap_ktime;
u64 ntp_tick;
- /* Difference between accumulated time and NTP time in ntp
- * shifted nano seconds. */
s64 ntp_error;
u32 ntp_error_shift;
u32 ntp_err_mult;
- /* Flag used to avoid updating NTP twice with same second */
u32 skip_second_overflow;
-#ifdef CONFIG_DEBUG_TIMEKEEPING
- long last_warning;
- /*
- * These simple flag variables are managed
- * without locks, which is racy, but they are
- * ok since we don't really care about being
- * super precise about how many events were
- * seen, just that a problem was observed.
- */
- int underflow_seen;
- int overflow_seen;
-#endif
+ s32 tai_offset;
};
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
@@ -153,4 +197,10 @@ static inline void update_vsyscall_tz(void)
}
#endif
+#if defined(CONFIG_GENERIC_GETTIMEOFDAY) && defined(CONFIG_POSIX_AUX_CLOCKS)
+extern void vdso_time_update_aux(struct timekeeper *tk);
+#else
+static inline void vdso_time_update_aux(struct timekeeper *tk) { }
+#endif
+
#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 0ea7823b7f31..aee2c1a46e47 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -44,6 +44,12 @@ extern void ktime_get_ts64(struct timespec64 *ts);
extern void ktime_get_real_ts64(struct timespec64 *tv);
extern void ktime_get_coarse_ts64(struct timespec64 *ts);
extern void ktime_get_coarse_real_ts64(struct timespec64 *ts);
+extern void ktime_get_clock_ts64(clockid_t id, struct timespec64 *ts);
+
+/* Multigrain timestamp interfaces */
+extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts);
+extern void ktime_get_real_ts64_mg(struct timespec64 *ts);
+extern unsigned long timekeeping_get_mg_floor_swaps(void);
void getboottime64(struct timespec64 *ts);
@@ -258,23 +264,23 @@ extern bool timekeeping_rtc_skipresume(void);
extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
-/**
- * struct ktime_timestamps - Simultaneous mono/boot/real timestamps
- * @mono: Monotonic timestamp
- * @boot: Boottime timestamp
- * @real: Realtime timestamp
+/*
+ * Auxiliary clock interfaces
*/
-struct ktime_timestamps {
- u64 mono;
- u64 boot;
- u64 real;
-};
+#ifdef CONFIG_POSIX_AUX_CLOCKS
+extern bool ktime_get_aux(clockid_t id, ktime_t *kt);
+extern bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt);
+#else
+static inline bool ktime_get_aux(clockid_t id, ktime_t *kt) { return false; }
+static inline bool ktime_get_aux_ts64(clockid_t id, struct timespec64 *kt) { return false; }
+#endif
/**
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
* @cycles: Clocksource counter value to produce the system times
* @real: Realtime system time
+ * @boot: Boot time
* @raw: Monotonic raw system time
* @cs_id: Clocksource ID
* @clock_was_set_seq: The sequence number of clock-was-set events
@@ -283,6 +289,7 @@ struct ktime_timestamps {
struct system_time_snapshot {
u64 cycles;
ktime_t real;
+ ktime_t boot;
ktime_t raw;
enum clocksource_ids cs_id;
unsigned int clock_was_set_seq;
@@ -310,12 +317,18 @@ struct system_device_crosststamp {
* timekeeping code to verify comparability of two cycle values.
* The default ID, CSID_GENERIC, does not identify a specific
* clocksource.
+ * @use_nsecs: @cycles is in nanoseconds.
*/
struct system_counterval_t {
u64 cycles;
enum clocksource_ids cs_id;
+ bool use_nsecs;
};
+extern bool ktime_real_to_base_clock(ktime_t treal,
+ enum clocksource_ids base_id, u64 *cycles);
+extern bool timekeeping_clocksource_has_base(enum clocksource_ids id);
+
/*
* Get cross timestamp between system clock and device clock
*/
@@ -332,9 +345,6 @@ extern int get_device_system_crosststamp(
*/
extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
-/* NMI safe mono/boot/realtime timestamps */
-extern void ktime_get_fast_timestamps(struct ktime_timestamps *snap);
-
/*
* Persistent clock related interfaces
*/
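
A sketch of reading one of the new auxiliary clocks; the boolean return reports whether the AUX clock is currently enabled and valid (CLOCK_AUX is assumed to be the base auxiliary clockid from the matching UAPI change):

        static void show_aux_clock(void)
        {
                struct timespec64 ts;

                /* returns false when the AUX clock is disabled or invalid */
                if (ktime_get_aux_ts64(CLOCK_AUX, &ts))
                        pr_info("aux clock: %lld.%09ld\n",
                                (long long)ts.tv_sec, ts.tv_nsec);
        }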
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e67ecd1cbc97..62e1cea71125 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -30,7 +30,7 @@
*
* @TIMER_IRQSAFE: An irqsafe timer is executed with IRQ disabled and
* it's safe to wait for the completion of the running instance from
- * IRQ handlers, for example, by calling del_timer_sync().
+ * IRQ handlers, for example, by calling timer_delete_sync().
*
* Note: The irq disabled callback execution is a special case for
* workqueue locking issues. It's not meant for executing random crap
@@ -67,44 +67,44 @@
/*
* LOCKDEP and DEBUG timer interfaces.
*/
-void init_timer_key(struct timer_list *timer,
+void timer_init_key(struct timer_list *timer,
void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack_key(struct timer_list *timer,
+extern void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
#else
-static inline void init_timer_on_stack_key(struct timer_list *timer,
+static inline void timer_init_key_on_stack(struct timer_list *timer,
void (*func)(struct timer_list *),
unsigned int flags,
const char *name,
struct lock_class_key *key)
{
- init_timer_key(timer, func, flags, name, key);
+ timer_init_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _fn, _flags) \
+#define __timer_init(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
+ timer_init_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
+#define __timer_init_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_on_stack_key((_timer), (_fn), (_flags), \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), \
#_timer, &__key); \
} while (0)
#else
-#define __init_timer(_timer, _fn, _flags) \
- init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _fn, _flags) \
- init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init(_timer, _fn, _flags) \
+ timer_init_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __timer_init_on_stack(_timer, _fn, _flags) \
+ timer_init_key_on_stack((_timer), (_fn), (_flags), NULL, NULL)
#endif
/**
@@ -115,21 +115,21 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
*
* Regular timer initialization should use either DEFINE_TIMER() above,
* or timer_setup(). For timers on the stack, timer_setup_on_stack() must
- * be used and must be balanced with a call to destroy_timer_on_stack().
+ * be used and must be balanced with a call to timer_destroy_on_stack().
*/
#define timer_setup(timer, callback, flags) \
- __init_timer((timer), (callback), (flags))
+ __timer_init((timer), (callback), (flags))
#define timer_setup_on_stack(timer, callback, flags) \
- __init_timer_on_stack((timer), (callback), (flags))
+ __timer_init_on_stack((timer), (callback), (flags))
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void destroy_timer_on_stack(struct timer_list *timer);
+extern void timer_destroy_on_stack(struct timer_list *timer);
#else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void timer_destroy_on_stack(struct timer_list *timer) { }
#endif
-#define from_timer(var, callback_timer, timer_fieldname) \
+#define timer_container_of(var, callback_timer, timer_fieldname) \
container_of(callback_timer, typeof(*var), timer_fieldname)
/**
@@ -156,62 +156,26 @@ extern int timer_reduce(struct timer_list *timer, unsigned long expires);
* The jiffies value which is added to now, when there is no timer
* in the timer wheel:
*/
-#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+#define TIMER_NEXT_MAX_DELTA ((1UL << 30) - 1)
extern void add_timer(struct timer_list *timer);
extern void add_timer_local(struct timer_list *timer);
extern void add_timer_global(struct timer_list *timer);
-extern int try_to_del_timer_sync(struct timer_list *timer);
+extern int timer_delete_sync_try(struct timer_list *timer);
extern int timer_delete_sync(struct timer_list *timer);
extern int timer_delete(struct timer_list *timer);
extern int timer_shutdown_sync(struct timer_list *timer);
extern int timer_shutdown(struct timer_list *timer);
-/**
- * del_timer_sync - Delete a pending timer and wait for a running callback
- * @timer: The timer to be deleted
- *
- * See timer_delete_sync() for detailed explanation.
- *
- * Do not use in new code. Use timer_delete_sync() instead.
- *
- * Returns:
- * * %0 - The timer was not pending
- * * %1 - The timer was pending and deactivated
- */
-static inline int del_timer_sync(struct timer_list *timer)
-{
- return timer_delete_sync(timer);
-}
-
-/**
- * del_timer - Delete a pending timer
- * @timer: The timer to be deleted
- *
- * See timer_delete() for detailed explanation.
- *
- * Do not use in new code. Use timer_delete() instead.
- *
- * Returns:
- * * %0 - The timer was not pending
- * * %1 - The timer was pending and deactivated
- */
-static inline int del_timer(struct timer_list *timer)
-{
- return timer_delete(timer);
-}
-
-extern void init_timers(void);
+extern void timers_init(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
-unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
-unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
@@ -224,4 +188,13 @@ int timers_dead_cpu(unsigned int cpu);
#define timers_dead_cpu NULL
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask);
+#else
+static inline int tmigr_isolated_exclude_cpumask(struct cpumask *exclude_cpumask)
+{
+ return 0;
+}
+#endif
+
#endif
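
Migration sketch for the renamed API: the callback recovers its containing object with timer_container_of() (formerly from_timer()), and teardown uses timer_delete_sync() now that the del_timer*() wrappers are gone. struct my_dev and its functions are hypothetical:

        struct my_dev {
                struct timer_list timer;
        };

        static void my_timer_fn(struct timer_list *t)
        {
                struct my_dev *dev = timer_container_of(dev, t, timer);

                /* handle expiry ... */
        }

        static void my_dev_teardown(struct my_dev *dev)
        {
                timer_delete_sync(&dev->timer);	/* was del_timer_sync() */
        }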
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 3871b06bd302..4ee32eff3f22 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -139,14 +139,6 @@ unsigned long random_get_entropy_fallback(void);
#define MAXSEC 2048 /* max interval between updates (s) */
#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-/*
- * kernel variables
- * Note: maximum error = NTP sync distance = dispersion + delay / 2;
- * estimated error = NTP dispersion.
- */
-extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
-
/* Required to safely shift negative values */
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 3c13240077b8..c52b862dad45 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -40,6 +40,8 @@ struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Neg of a tnum, return 0 - @a */
+struct tnum tnum_neg(struct tnum a);
/* Bitwise-AND, return @a & @b */
struct tnum tnum_and(struct tnum a, struct tnum b);
/* Bitwise-OR, return @a | @b */
@@ -49,9 +51,15 @@ struct tnum tnum_xor(struct tnum a, struct tnum b);
/* Multiply two tnums, return @a * @b */
struct tnum tnum_mul(struct tnum a, struct tnum b);
+/* Return true if the known bits of both tnums have the same value */
+bool tnum_overlap(struct tnum a, struct tnum b);
+
/* Return a tnum representing numbers satisfying both @a and @b */
struct tnum tnum_intersect(struct tnum a, struct tnum b);
+/* Returns a tnum representing numbers satisfying either @a or @b */
+struct tnum tnum_union(struct tnum t1, struct tnum t2);
+
/* Return @a with all but the lowest @size bytes cleared */
struct tnum tnum_cast(struct tnum a, u8 size);
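
A small worked example of the new set operations (illustrative only), writing a tnum as (value, mask) with the mask bits unknown:

        /*
         * a = "010x" -> (value=0b0100, mask=0b0001)
         * b = "011x" -> (value=0b0110, mask=0b0001)
         *
         * The known bit 1 differs (0 vs 1), so tnum_overlap(a, b) is
         * false and the two sets are disjoint. tnum_union(a, b) must
         * cover both sets and yields "01xx" -> (value=0b0100,
         * mask=0b0011), trading precision for soundness.
         */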
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 52f5850730b3..6575af39fd10 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -29,6 +29,7 @@
#include <linux/arch_topology.h>
#include <linux/cpumask.h>
+#include <linux/nodemask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
@@ -39,10 +40,6 @@
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif
-#define for_each_node_with_cpus(node) \
- for_each_online_node(node) \
- if (nr_cpus_node(node))
-
int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
@@ -240,7 +237,30 @@ static inline const struct cpumask *cpu_smt_mask(int cpu)
}
#endif
-static inline const struct cpumask *cpu_cpu_mask(int cpu)
+#ifndef topology_is_primary_thread
+
+static inline bool topology_is_primary_thread(unsigned int cpu)
+{
+ /*
+ * When disabling SMT, the primary thread of the SMT will remain
+ * enabled/active. Architectures that have a special primary thread
+ * (e.g. x86) need to override this function. Otherwise the first
+ * thread in the SMT can be made the primary thread.
+ *
+ * The sibling cpumask of an offline CPU always contains the CPU
+ * itself on architectures using the implementation of
+ * CONFIG_GENERIC_ARCH_TOPOLOGY for building their topology.
+ * Other architectures not using CONFIG_GENERIC_ARCH_TOPOLOGY for
+ * building their topology have to check whether to use this default
+ * implementation or to override it.
+ */
+ return cpu == cpumask_first(topology_sibling_cpumask(cpu));
+}
+#define topology_is_primary_thread topology_is_primary_thread
+
+#endif
+
+static inline const struct cpumask *cpu_node_mask(int cpu)
{
return cpumask_of_node(cpu_to_node(cpu));
}
@@ -262,6 +282,36 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
#endif /* CONFIG_NUMA */
/**
+ * for_each_node_numadist() - iterate over nodes in increasing distance
+ * order, starting from a given node
+ * @node: the iteration variable and the starting node.
+ * @unvisited: a nodemask to keep track of the unvisited nodes.
+ *
+ * This macro iterates over NUMA node IDs in increasing distance from the
+ * starting @node and yields MAX_NUMNODES when all the nodes have been
+ * visited.
+ *
+ * Note that by the time the loop completes, the @unvisited nodemask will
+ * be fully cleared, unless the loop exits early.
+ *
+ * The difference between for_each_node() and for_each_node_numadist() is
+ * that the former iterates over nodes in numerical order, whereas
+ * the latter iterates over nodes in increasing order of distance.
+ *
+ * The complexity of this iterator is O(N^2), where N represents the
+ * number of nodes, as each iteration involves scanning all nodes to
+ * find the one with the shortest distance.
+ *
+ * Requires rcu_read_lock() to be held.
+ */
+#define for_each_node_numadist(node, unvisited) \
+ for (int __start = (node), \
+ (node) = nearest_node_nodemask((__start), &(unvisited)); \
+ (node) < MAX_NUMNODES; \
+ node_clear((node), (unvisited)), \
+ (node) = nearest_node_nodemask((__start), &(unvisited)))
+
+/**
* for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
* from a given node.
* @mask: the iteration variable.
@@ -279,4 +329,13 @@ sched_numa_hop_mask(unsigned int node, unsigned int hops)
!IS_ERR_OR_NULL(mask); \
__hops++)
+DECLARE_PER_CPU(unsigned long, cpu_scale);
+
+static inline unsigned long topology_get_cpu_scale(int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+
#endif /* _LINUX_TOPOLOGY_H */
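
Usage sketch for the nearest-first iterator: the unvisited mask starts with all candidate nodes set, and rcu_read_lock() must be held as noted in the macro's kernel-doc (pick_nearest_memory_node() is hypothetical):

        static int pick_nearest_memory_node(int from_node)
        {
                nodemask_t unvisited = node_states[N_MEMORY];
                int node = from_node;

                rcu_read_lock();
                for_each_node_numadist(node, unvisited) {
                        /* first iteration yields the nearest node with memory */
                        break;
                }
                rcu_read_unlock();

                /* node == MAX_NUMNODES if the mask was empty */
                return node;
        }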
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 1541454da03e..1b59056c3b18 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -14,7 +14,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
@@ -104,6 +104,7 @@ int torture_stutter_init(int s, int sgap);
/* Initialization and cleanup. */
bool torture_init_begin(char *ttype, int v);
void torture_init_end(void);
+unsigned long get_torture_init_jiffies(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
bool torture_must_stop(void);
@@ -130,7 +131,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#endif
#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
-long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask, bool dowarn);
#endif
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index c17e4efbb2e5..202da079d500 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -26,7 +26,9 @@
#include <crypto/aes.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-#define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+#define TPM2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define TPM2_MAX_PCR_BANKS 8
struct tpm_chip;
struct trusted_key_payload;
@@ -68,7 +70,7 @@ enum tpm2_curves {
struct tpm_digest {
u16 alg_id;
- u8 digest[TPM_MAX_DIGEST_SIZE];
+ u8 digest[TPM2_MAX_DIGEST_SIZE];
} __packed;
struct tpm_bank_info {
@@ -87,7 +89,8 @@ struct tpm_class_ops {
const u8 req_complete_val;
bool (*req_canceled)(struct tpm_chip *chip, u8 status);
int (*recv) (struct tpm_chip *chip, u8 *buf, size_t len);
- int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
+ int (*send)(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len);
void (*cancel) (struct tpm_chip *chip);
u8 (*status) (struct tpm_chip *chip);
void (*update_timeouts)(struct tpm_chip *chip,
@@ -182,13 +185,13 @@ struct tpm_chip {
unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */
bool duration_adjusted;
- struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
+ struct dentry *bios_dir;
const struct attribute_group *groups[3 + TPM_MAX_HASHES];
unsigned int groups_cnt;
u32 nr_allocated_banks;
- struct tpm_bank_info *allocated_banks;
+ struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
#ifdef CONFIG_ACPI
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
@@ -224,13 +227,14 @@ enum tpm2_const {
enum tpm2_timeouts {
TPM2_TIMEOUT_A = 750,
- TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_B = 4000,
TPM2_TIMEOUT_C = 200,
TPM2_TIMEOUT_D = 30,
+};
+
+enum tpm2_durations {
TPM2_DURATION_SHORT = 20,
- TPM2_DURATION_MEDIUM = 750,
TPM2_DURATION_LONG = 2000,
- TPM2_DURATION_LONG_LONG = 300000,
TPM2_DURATION_DEFAULT = 120000,
};
@@ -257,6 +261,7 @@ enum tpm2_return_codes {
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
TPM2_RC_RETRY = 0x0922,
+ TPM2_RC_SESSION_MEMORY = 0x0903,
};
enum tpm2_command_codes {
@@ -335,6 +340,7 @@ enum tpm2_cc_attrs {
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
#define TPM_VID_ATML 0x1114
+#define TPM_VID_IFX 0x15D1
enum tpm_chip_flags {
TPM_CHIP_FLAG_BOOTSTRAPPED = BIT(0),
@@ -348,6 +354,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_SUSPENDED = BIT(8),
TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
TPM_CHIP_FLAG_DISABLE = BIT(10),
+ TPM_CHIP_FLAG_SYNC = BIT(11),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
@@ -394,21 +401,6 @@ enum tpm2_object_attributes {
TPM2_OA_SIGN = BIT(18),
};
-/*
- * definitions for the canonical template. These are mandated
- * by the TCG key template documents
- */
-
-#define AES_KEY_BYTES AES_KEYSIZE_128
-#define AES_KEY_BITS (AES_KEY_BYTES*8)
-#define TPM2_OA_TMPL (TPM2_OA_NO_DA | \
- TPM2_OA_FIXED_TPM | \
- TPM2_OA_FIXED_PARENT | \
- TPM2_OA_SENSITIVE_DATA_ORIGIN | \
- TPM2_OA_USER_WITH_AUTH | \
- TPM2_OA_DECRYPT | \
- TPM2_OA_RESTRICTED)
-
enum tpm2_session_attributes {
TPM2_SA_CONTINUE_SESSION = BIT(0),
TPM2_SA_AUDIT_EXCLUSIVE = BIT(1),
@@ -436,8 +428,7 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value);
u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset);
u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset);
u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset);
-
-u8 *tpm_buf_parameters(struct tpm_buf *buf);
+void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle);
/*
* Check if TPM device is in the firmware upgrade mode.
@@ -452,6 +443,26 @@ static inline u32 tpm2_rc_value(u32 rc)
return (rc & BIT(7)) ? rc & 0xbf : rc;
}
+/*
+ * Convert a return value from tpm_transmit_cmd() to POSIX error code.
+ */
+static inline ssize_t tpm_ret_to_err(ssize_t ret)
+{
+ if (ret < 0)
+ return ret;
+
+ switch (tpm2_rc_value(ret)) {
+ case TPM2_RC_SUCCESS:
+ return 0;
+ case TPM2_RC_SESSION_MEMORY:
+ return -ENOMEM;
+ case TPM2_RC_HASH:
+ return -EINVAL;
+ default:
+ return -EPERM;
+ }
+}
+
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
@@ -466,6 +477,7 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
extern struct tpm_chip *tpm_default_chip(void);
void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
+int tpm2_find_hash_alg(unsigned int crypto_id);
static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
{
@@ -507,29 +519,33 @@ static inline void tpm_buf_append_empty_auth(struct tpm_buf *buf, u32 handle)
{
}
#endif
+
+static inline struct tpm2_auth *tpm2_chip_auth(struct tpm_chip *chip)
+{
#ifdef CONFIG_TCG_TPM2_HMAC
+ return chip->auth;
+#else
+ return NULL;
+#endif
+}
-int tpm2_start_auth_session(struct tpm_chip *chip);
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name);
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name);
void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
u8 attributes, u8 *passphrase,
int passphraselen);
-static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes,
- u8 *passphrase,
- int passphraselen)
-{
- tpm_buf_append_hmac_session(chip, buf, attributes, passphrase,
- passphraselen);
-}
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
+void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
+ u8 *passphrase, int passphraselen);
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+
+int tpm2_start_auth_session(struct tpm_chip *chip);
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf);
int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
int rc);
void tpm2_end_auth_session(struct tpm_chip *chip);
#else
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static inline int tpm2_start_auth_session(struct tpm_chip *chip)
{
@@ -538,60 +554,13 @@ static inline int tpm2_start_auth_session(struct tpm_chip *chip)
static inline void tpm2_end_auth_session(struct tpm_chip *chip)
{
}
-static inline void tpm_buf_append_name(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u32 handle, u8 *name)
-{
- tpm_buf_append_u32(buf, handle);
- /* count the number of handles in the upper bits of flags */
- buf->handles++;
-}
-static inline void tpm_buf_append_hmac_session(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes, u8 *passphrase,
- int passphraselen)
-{
- /* offset tells us where the sessions area begins */
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- u32 len = 9 + passphraselen;
-
- if (tpm_buf_length(buf) != offset) {
- /* not the first session so update the existing length */
- len += get_unaligned_be32(&buf->data[offset]);
- put_unaligned_be32(len, &buf->data[offset]);
- } else {
- tpm_buf_append_u32(buf, len);
- }
- /* auth handle */
- tpm_buf_append_u32(buf, TPM2_RS_PW);
- /* nonce */
- tpm_buf_append_u16(buf, 0);
- /* attributes */
- tpm_buf_append_u8(buf, 0);
- /* passphrase */
- tpm_buf_append_u16(buf, passphraselen);
- tpm_buf_append(buf, passphrase, passphraselen);
-}
-static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip,
- struct tpm_buf *buf,
- u8 attributes,
- u8 *passphrase,
- int passphraselen)
-{
- int offset = buf->handles * 4 + TPM_HEADER_SIZE;
- struct tpm_header *head = (struct tpm_header *) buf->data;
- /*
- * if the only sessions are optional, the command tag
- * must change to TPM2_ST_NO_SESSIONS
- */
- if (tpm_buf_length(buf) == offset)
- head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
-}
-static inline void tpm_buf_fill_hmac_session(struct tpm_chip *chip,
- struct tpm_buf *buf)
+static inline int tpm_buf_fill_hmac_session(struct tpm_chip *chip,
+ struct tpm_buf *buf)
{
+ return 0;
}
+
static inline int tpm_buf_check_hmac_response(struct tpm_chip *chip,
struct tpm_buf *buf,
int rc)
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 7d68a5cc5881..891368e82558 100644
--- a/include/linux/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -157,7 +157,7 @@ struct tcg_algorithm_info {
* Return: size of the event on success, 0 on failure
*/
-static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
+static __always_inline u32 __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
struct tcg_pcr_event *event_header,
bool do_mapping)
{
diff --git a/include/linux/tpm_svsm.h b/include/linux/tpm_svsm.h
new file mode 100644
index 000000000000..38e341f9761a
--- /dev/null
+++ b/include/linux/tpm_svsm.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 James.Bottomley@HansenPartnership.com
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ *
+ * Helpers for the SVSM_VTPM_CMD calls used by the vTPM protocol defined by the
+ * AMD SVSM spec [1].
+ *
+ * The vTPM protocol follows the Official TPM 2.0 Reference Implementation
+ * (originally by Microsoft, now part of the TCG) simulator protocol.
+ *
+ * [1] "Secure VM Service Module for SEV-SNP Guests"
+ * Publication # 58019 Revision: 1.00
+ */
+#ifndef _TPM_SVSM_H_
+#define _TPM_SVSM_H_
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define SVSM_VTPM_MAX_BUFFER 4096 /* max req/resp buffer size */
+
+/**
+ * struct svsm_vtpm_request - Generic request for single word command
+ * @cmd: The command to send
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ */
+struct svsm_vtpm_request {
+ u32 cmd;
+};
+
+/**
+ * struct svsm_vtpm_response - Generic response
+ * @size: The response size (zero if nothing follows)
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 15: vTPM Common Request/Response Structure
+ * Byte      Size       In/Out    Description
+ * Offset    (Bytes)
+ * 0x000     4          In        Platform command
+ *                      Out       Platform command response size
+ *
+ * Note: most TCG Simulator commands simply return zero here with no indication
+ * of success or failure.
+ */
+struct svsm_vtpm_response {
+ u32 size;
+};
+
+/**
+ * struct svsm_vtpm_cmd_request - Structure for a TPM_SEND_COMMAND request
+ * @cmd: The command to send (must be TPM_SEND_COMMAND)
+ * @locality: The locality
+ * @buf_size: The size of the input buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 16: TPM_SEND_COMMAND Request Structure
+ * Byte      Size       Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Platform command (8)
+ * 0x004     1          Locality (must-be-0)
+ * 0x005     4          TPM Command size (in bytes)
+ * 0x009     Variable   TPM Command
+ *
+ * Note: the TCG Simulator expects @buf_size to be equal to the size of the
+ * specific TPM command, otherwise a TPM_RC_COMMAND_SIZE error is returned.
+ */
+struct svsm_vtpm_cmd_request {
+ u32 cmd;
+ u8 locality;
+ u32 buf_size;
+ u8 buf[];
+} __packed;
+
+/**
+ * struct svsm_vtpm_cmd_response - Structure for a TPM_SEND_COMMAND response
+ * @buf_size: The size of the output buffer following
+ * @buf: A buffer of size buf_size
+ *
+ * Defined by AMD SVSM spec [1] in section "8.2 SVSM_VTPM_CMD Call" -
+ * Table 17: TPM_SEND_COMMAND Response Structure
+ * Byte      Size       Meaning
+ * Offset    (Bytes)
+ * 0x000     4          Response size (in bytes)
+ * 0x004     Variable   Response
+ */
+struct svsm_vtpm_cmd_response {
+ u32 buf_size;
+ u8 buf[];
+};
+
+/**
+ * svsm_vtpm_cmd_request_fill() - Fill a TPM_SEND_COMMAND request to be sent to SVSM
+ * @req: The struct svsm_vtpm_cmd_request to fill
+ * @locality: The locality
+ * @buf: The buffer from where to copy the payload of the command
+ * @len: The size of the buffer
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_request_fill(struct svsm_vtpm_cmd_request *req, u8 locality,
+ const u8 *buf, size_t len)
+{
+ if (len > SVSM_VTPM_MAX_BUFFER - sizeof(*req))
+ return -EINVAL;
+
+ req->cmd = 8; /* TPM_SEND_COMMAND */
+ req->locality = locality;
+ req->buf_size = len;
+
+ memcpy(req->buf, buf, len);
+
+ return 0;
+}
+
+/**
+ * svsm_vtpm_cmd_response_parse() - Parse a TPM_SEND_COMMAND response received from SVSM
+ * @resp: The struct svsm_vtpm_cmd_response to parse
+ * @buf: The buffer where to copy the response
+ * @len: The size of the buffer
+ *
+ * Return: buffer size filled with the response on success, negative error
+ * code on failure.
+ */
+static inline int
+svsm_vtpm_cmd_response_parse(const struct svsm_vtpm_cmd_response *resp, u8 *buf,
+ size_t len)
+{
+ if (len < resp->buf_size)
+ return -E2BIG;
+
+ if (resp->buf_size > SVSM_VTPM_MAX_BUFFER - sizeof(*resp))
+ return -EINVAL; // Invalid response from the platform TPM
+
+ memcpy(buf, resp->buf, resp->buf_size);
+
+ return resp->buf_size;
+}
+
+#endif /* _TPM_SVSM_H_ */
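
As a usage sketch under stated assumptions: a guest driver marshals one TPM command into a shared buffer of at least SVSM_VTPM_MAX_BUFFER bytes, invokes the SVSM (svsm_vtpm_call() below is a hypothetical stand-in for the actual protocol call), then unmarshals the response. Request and response deliberately share the buffer, as in the TCG simulator protocol.

static int example_svsm_send(void *shared, const u8 *cmd, size_t cmd_len,
			     u8 *rsp, size_t rsp_len)
{
	struct svsm_vtpm_cmd_request *req = shared;
	struct svsm_vtpm_cmd_response *resp = shared;
	int ret;

	ret = svsm_vtpm_cmd_request_fill(req, 0 /* locality */, cmd, cmd_len);
	if (ret)
		return ret;

	ret = svsm_vtpm_call(shared);	/* hypothetical SVSM transport */
	if (ret)
		return ret;

	return svsm_vtpm_cmd_response_parse(resp, rsp, rsp_len);
}
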
diff --git a/include/linux/trace.h b/include/linux/trace.h
index fdcd76b7be83..7eaad857dee0 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export)
static inline void trace_printk_init_buffers(void)
{
}
-static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...)
+static inline __printf(3, 4)
+int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...)
{
return 0;
}
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 9df3e2973626..3690221ba3d8 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -138,6 +138,7 @@ enum trace_iter_flags {
TRACE_FILE_LAT_FMT = 1,
TRACE_FILE_ANNOTATE = 2,
TRACE_FILE_TIME_IN_NS = 4,
+ TRACE_FILE_PAUSE = 8,
};
@@ -184,7 +185,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x02,
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
@@ -193,7 +194,6 @@ enum trace_flag_type {
TRACE_FLAG_BH_OFF = 0x80,
};
-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
unsigned int irq_status = irqs_disabled_flags(irqflags) ?
@@ -207,17 +207,6 @@ static inline unsigned int tracing_gen_ctx(void)
local_save_flags(irqflags);
return tracing_gen_ctx_flags(irqflags);
}
-#else
-
-static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
-{
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
-}
-static inline unsigned int tracing_gen_ctx(void)
-{
- return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
-}
-#endif
static inline unsigned int tracing_gen_ctx_dec(void)
{
@@ -285,7 +274,8 @@ struct trace_event_fields {
const char *name;
const int size;
const int align;
- const int is_signed;
+ const unsigned int is_signed:1;
+ unsigned int needs_test:1;
const int filter_type;
const int len;
};
@@ -326,7 +316,6 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
- TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
@@ -337,11 +326,11 @@ enum {
TRACE_EVENT_FL_EPROBE_BIT,
TRACE_EVENT_FL_FPROBE_BIT,
TRACE_EVENT_FL_CUSTOM_BIT,
+ TRACE_EVENT_FL_TEST_STR_BIT,
};
/*
* Event flags:
- * FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
* IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
@@ -354,9 +343,9 @@ enum {
* CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
* This is set when the custom event has not been attached
* to a tracepoint yet, then it is cleared when it is.
+ * TEST_STR - The event has a "%s" that points to a string outside the event
*/
enum {
- TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
@@ -367,6 +356,7 @@ enum {
TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
+ TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT),
};
#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
@@ -375,13 +365,12 @@ struct trace_event_call {
struct list_head list;
struct trace_event_class *class;
union {
- char *name;
+ const char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
struct tracepoint *tp;
};
struct trace_event event;
char *print_fmt;
- struct event_filter *filter;
/*
* Static events can disappear with modules,
* where as dynamic ones need their own ref count.
@@ -492,7 +481,6 @@ enum {
EVENT_FILE_FL_RECORDED_TGID_BIT,
EVENT_FILE_FL_FILTERED_BIT,
EVENT_FILE_FL_NO_SET_FILTER_BIT,
- EVENT_FILE_FL_SOFT_MODE_BIT,
EVENT_FILE_FL_SOFT_DISABLED_BIT,
EVENT_FILE_FL_TRIGGER_MODE_BIT,
EVENT_FILE_FL_TRIGGER_COND_BIT,
@@ -630,7 +618,6 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
* RECORDED_TGID - The tgids should be recorded at sched_switch
* FILTERED - The event has a filter attached
* NO_SET_FILTER - Set when filter has error and is to be ignored
- * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
* SOFT_DISABLED - When set, do not trace the event (even though its
* tracepoint may be enabled)
* TRIGGER_MODE - When set, invoke the triggers associated with the event
@@ -645,7 +632,6 @@ enum {
EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
- EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
@@ -680,11 +666,25 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
- atomic_t ref; /* ref count for opened files */
+ refcount_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
+#ifdef CONFIG_HIST_TRIGGERS
+extern struct irq_work hist_poll_work;
+extern wait_queue_head_t hist_poll_wq;
+
+static inline void hist_poll_wakeup(void)
+{
+ if (wq_has_sleeper(&hist_poll_wq))
+ irq_work_queue(&hist_poll_work);
+}
+
+#define hist_poll_wait(file, wait) \
+ poll_wait(file, &hist_poll_wq, wait)
+#endif
+
#define __TRACE_EVENT_FLAGS(name, value) \
static int __init trace_init_flags_##name(void) \
{ \
@@ -857,30 +857,11 @@ int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
const char *event, bool enable);
-/*
- * The double __builtin_constant_p is because gcc will give us an error
- * if we try to allocate the static variable to fmt if it is not a
- * constant. Even with the outer if statement optimizing out.
- */
-#define event_trace_printk(ip, fmt, args...) \
-do { \
- __trace_printk_check_format(fmt, ##args); \
- tracing_record_cmdline(current); \
- if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
- __section("__trace_printk_fmt") = \
- __builtin_constant_p(fmt) ? fmt : NULL; \
- \
- __trace_bprintk(ip, trace_printk_fmt, ##args); \
- } else \
- __trace_printk(ip, fmt, ##args); \
-} while (0)
#ifdef CONFIG_PERF_EVENTS
struct perf_event;
DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
-DECLARE_PER_CPU(int, bpf_kprobe_override);
extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index 24ea8ac049b4..ae04054a1be3 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -44,35 +44,6 @@ enum {
*/
TRACE_IRQ_BIT,
- /* Set if the function is in the set_graph_function file */
- TRACE_GRAPH_BIT,
-
- /*
- * In the very unlikely case that an interrupt came in
- * at a start of graph tracing, and we want to trace
- * the function in that interrupt, the depth can be greater
- * than zero, because of the preempted start of a previous
- * trace. In an even more unlikely case, depth could be 2
- * if a softirq interrupted the start of graph tracing,
- * followed by an interrupt preempting a start of graph
- * tracing in the softirq, and depth can even be 3
- * if an NMI came in at the start of an interrupt function
- * that preempted a softirq start of a function that
- * preempted normal context!!!! Luckily, it can't be
- * greater than 3, so the next two bits are a mask
- * of what the depth is when we set TRACE_GRAPH_BIT
- */
-
- TRACE_GRAPH_DEPTH_START_BIT,
- TRACE_GRAPH_DEPTH_END_BIT,
-
- /*
- * To implement set_graph_notrace, if this bit is set, we ignore
- * function graph tracing of called functions, until the return
- * function is called to clear it.
- */
- TRACE_GRAPH_NOTRACE_BIT,
-
/* Used to prevent recursion recording from recursing. */
TRACE_RECORD_RECURSION_BIT,
};
@@ -81,16 +52,6 @@ enum {
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
-#define trace_recursion_depth() \
- (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
-#define trace_recursion_set_depth(depth) \
- do { \
- current->trace_recursion &= \
- ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
- current->trace_recursion |= \
- ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
- } while (0)
-
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 1ef95c0287f0..4a0b8c172d27 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -21,10 +21,10 @@
(sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
struct trace_seq {
- char buffer[TRACE_SEQ_BUFFER_SIZE];
struct seq_buf seq;
size_t readpos;
int full;
+ char buffer[TRACE_SEQ_BUFFER_SIZE];
};
static inline void
@@ -80,6 +80,19 @@ static inline bool trace_seq_has_overflowed(struct trace_seq *s)
return s->full || seq_buf_has_overflowed(&s->seq);
}
+/**
+ * trace_seq_pop - pop off the last written character
+ * @s: trace sequence descriptor
+ *
+ * Removes the last written character to the trace_seq @s.
+ *
+ * Returns the last character or -1 if it is empty.
+ */
+static inline int trace_seq_pop(struct trace_seq *s)
+{
+ return seq_buf_pop(&s->seq);
+}
+
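
A typical use (a sketch) is trimming a trailing separator emitted by a formatting loop:

static void example_emit_list(struct trace_seq *s, const int *vals, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		trace_seq_printf(s, "%d,", vals[i]);
	if (cnt)
		trace_seq_pop(s);	/* drop the trailing ',' */
}
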
/*
* Currently only defined when tracing is enabled.
*/
@@ -88,8 +101,8 @@ extern __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0)
void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
-extern void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt);
@@ -113,8 +126,8 @@ static inline __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
}
-static inline void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+static inline __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
}
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 4dc4955f0fbf..aebf0571c736 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -29,16 +29,22 @@ struct tracepoint_func {
int prio;
};
+struct tracepoint_ext {
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ /* Flags. */
+ unsigned int faultable:1;
+};
+
struct tracepoint {
const char *name; /* Tracepoint name */
- struct static_key key;
+ struct static_key_false key;
struct static_call_key *static_call_key;
void *static_call_tramp;
void *iterator;
void *probestub;
- int (*regfunc)(void);
- void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
+ struct tracepoint_ext *ext;
};
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
@@ -83,7 +89,7 @@ struct bpf_raw_event_map {
#ifdef CONFIG_TRACEPOINTS
# define tracepoint_enabled(tp) \
- static_key_false(&(__tracepoint_##tp).key)
+ static_branch_unlikely(&(__tracepoint_##tp).key)
#else
# define tracepoint_enabled(tracepoint) false
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 689b6d71590e..8a56f3278b1b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -16,8 +16,8 @@
#include <linux/srcu.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/cpumask.h>
#include <linux/rcupdate.h>
+#include <linux/rcupdate_trace.h>
#include <linux/tracepoint-defs.h>
#include <linux/static_call.h>
@@ -33,8 +33,6 @@ struct trace_eval_map {
#define TRACEPOINT_DEFAULT_PRIO 10
-extern struct srcu_struct tracepoint_srcu;
-
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -65,6 +63,13 @@ struct tp_module {
bool trace_module_has_bad_taint(struct module *mod);
extern int register_tracepoint_module_notifier(struct notifier_block *nb);
extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
+void for_each_tracepoint_in_module(struct module *,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv);
#else
static inline bool trace_module_has_bad_taint(struct module *mod)
{
@@ -80,22 +85,49 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
return 0;
}
+static inline
+void for_each_module_tracepoint(void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
+static inline
+void for_each_tracepoint_in_module(struct module *mod,
+ void (*fct)(struct tracepoint *,
+ struct module *, void *),
+ void *priv)
+{
+}
#endif /* CONFIG_MODULES */
/*
* tracepoint_synchronize_unregister must be called between the last tracepoint
* probe unregistration and the end of module exit to make sure there is no
* caller executing a probe when it is freed.
+ *
+ * An alternative is to use the following for batch reclaim associated
+ * with a given tracepoint:
+ *
+ * - tracepoint_is_faultable() == false: call_rcu()
+ * - tracepoint_is_faultable() == true: call_rcu_tasks_trace()
*/
#ifdef CONFIG_TRACEPOINTS
static inline void tracepoint_synchronize_unregister(void)
{
- synchronize_srcu(&tracepoint_srcu);
+ synchronize_rcu_tasks_trace();
synchronize_rcu();
}
+static inline bool tracepoint_is_faultable(struct tracepoint *tp)
+{
+ return tp->ext && tp->ext->faultable;
+}
#else
static inline void tracepoint_synchronize_unregister(void)
{ }
+static inline bool tracepoint_is_faultable(struct tracepoint *tp)
+{
+ return false;
+}
#endif
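
The batch-reclaim note above translates into roughly the following pattern when freeing per-probe data after unregistration. struct my_probe_data and its free callback are hypothetical; the sketch assumes the structure embeds a struct rcu_head.

struct my_probe_data {
	struct rcu_head rcu;
	/* ... probe private state ... */
};

static void my_probe_data_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_probe_data, rcu));
}

static void example_free_probe_data(struct tracepoint *tp,
				    struct my_probe_data *data)
{
	/* pick the RCU flavor matching the tracepoint's probe context */
	if (tracepoint_is_faultable(tp))
		call_rcu_tasks_trace(&data->rcu, my_probe_data_free_rcu);
	else
		call_rcu(&data->rcu, my_probe_data_free_rcu);
}
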
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
@@ -178,65 +210,25 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#endif /* CONFIG_HAVE_STATIC_CALL */
/*
- * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle
- * code that disallow any/all tracing/instrumentation when RCU isn't watching.
+ * Declare an exported function that Rust code can call to trigger this
+ * tracepoint. This function does not include the static branch; that is done
+ * in Rust to avoid a function call when the tracepoint is disabled.
*/
-#ifdef CONFIG_ARCH_WANTS_NO_INSTR
-#define RCUIDLE_COND(rcuidle) (rcuidle)
-#else
-/* srcu can't be used from NMI */
-#define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi())
-#endif
+#define DEFINE_RUST_DO_TRACE(name, proto, args)
+#define __DEFINE_RUST_DO_TRACE(name, proto, args) \
+ notrace void rust_do_trace_##name(proto) \
+ { \
+ __do_trace_##name(args); \
+ }
/*
- * it_func[0] is never NULL because there is at least one element in the array
- * when the array itself is non NULL.
+ * When a tracepoint is used, its name is added to the __tracepoint_check
+ * section. This section is only used at build time to make sure all
+ * defined tracepoints are used. It is discarded after the build.
*/
-#define __DO_TRACE(name, args, cond, rcuidle) \
- do { \
- int __maybe_unused __idx = 0; \
- \
- if (!(cond)) \
- return; \
- \
- if (WARN_ONCE(RCUIDLE_COND(rcuidle), \
- "Bad RCU usage for tracepoint")) \
- return; \
- \
- /* keep srcu and sched-rcu usage consistent */ \
- preempt_disable_notrace(); \
- \
- /* \
- * For rcuidle callers, use srcu since sched-rcu \
- * doesn't work from the idle path. \
- */ \
- if (rcuidle) { \
- __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
- ct_irq_enter_irqson(); \
- } \
- \
- __DO_TRACE_CALL(name, TP_ARGS(args)); \
- \
- if (rcuidle) { \
- ct_irq_exit_irqson(); \
- srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
- } \
- \
- preempt_enable_notrace(); \
- } while (0)
-
-#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond) \
- static inline void trace_##name##_rcuidle(proto) \
- { \
- if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(name, \
- TP_ARGS(args), \
- TP_CONDITION(cond), 1); \
- }
-#else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond)
-#endif
+# define TRACEPOINT_CHECK(name) \
+ static const char __used __section("__tracepoint_check") \
+ __trace_check_##name[] = #name;
/*
* Make sure the alignment of the structure in the __tracepoints section will
@@ -249,23 +241,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* site if it is not watching, as it will need to be active when the
* tracepoint is enabled.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \
extern int __traceiter_##name(data_proto); \
DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
- static inline void trace_##name(proto) \
- { \
- if (static_key_false(&__tracepoint_##name.key)) \
- __DO_TRACE(name, \
- TP_ARGS(args), \
- TP_CONDITION(cond), 0); \
- if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
- WARN_ONCE(!rcu_is_watching(), \
- "RCU not watching for tracepoint"); \
- } \
- } \
- __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
- PARAMS(cond)) \
+ extern void rust_do_trace_##name(proto); \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
@@ -292,15 +272,57 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
static inline bool \
trace_##name##_enabled(void) \
{ \
- return static_key_false(&__tracepoint_##name.key); \
+ return static_branch_unlikely(&__tracepoint_##name.key);\
+ }
+
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
+ static inline void __do_trace_##name(proto) \
+ { \
+ TRACEPOINT_CHECK(name) \
+ if (cond) { \
+ guard(preempt_notrace)(); \
+ __DO_TRACE_CALL(name, TP_ARGS(args)); \
+ } \
+ } \
+ static inline void trace_##name(proto) \
+ { \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
+ __do_trace_##name(args); \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
+ WARN_ONCE(!rcu_is_watching(), \
+ "RCU not watching for tracepoint"); \
+ } \
+ }
+
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \
+ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
+ static inline void __do_trace_##name(proto) \
+ { \
+ TRACEPOINT_CHECK(name) \
+ guard(rcu_tasks_trace)(); \
+ __DO_TRACE_CALL(name, TP_ARGS(args)); \
+ } \
+ static inline void trace_##name(proto) \
+ { \
+ might_fault(); \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
+ __do_trace_##name(args); \
+ if (IS_ENABLED(CONFIG_LOCKDEP)) { \
+ WARN_ONCE(!rcu_is_watching(), \
+ "RCU not watching for tracepoint"); \
+ } \
}
/*
* We have no guarantee that gcc and the linker won't up-align the tracepoint
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
+ *
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
*/
-#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \
+#define __DEFINE_TRACE_EXT(_name, _ext, proto, args) \
static const char __tpstrtab_##_name[] \
__section("__tracepoints_strings") = #_name; \
extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
@@ -309,14 +331,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint __tracepoint_##_name __used \
__section("__tracepoints") = { \
.name = __tpstrtab_##_name, \
- .key = STATIC_KEY_INIT_FALSE, \
+ .key = STATIC_KEY_FALSE_INIT, \
.static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
.iterator = &__traceiter_##_name, \
.probestub = &__probestub_##_name, \
- .regfunc = _reg, \
- .unregfunc = _unreg, \
- .funcs = NULL }; \
+ .funcs = NULL, \
+ .ext = _ext, \
+ }; \
__TRACEPOINT_ENTRY(_name); \
int __traceiter_##_name(void *__data, proto) \
{ \
@@ -337,27 +359,44 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
void __probestub_##_name(void *__data, proto) \
{ \
} \
- DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
+ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); \
+ DEFINE_RUST_DO_TRACE(_name, TP_PROTO(proto), TP_ARGS(args))
+
+#define DEFINE_TRACE_FN(_name, _reg, _unreg, _proto, _args) \
+ static struct tracepoint_ext __tracepoint_ext_##_name = { \
+ .regfunc = _reg, \
+ .unregfunc = _unreg, \
+ .faultable = false, \
+ }; \
+ __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args));
+
+#define DEFINE_TRACE_SYSCALL(_name, _reg, _unreg, _proto, _args) \
+ static struct tracepoint_ext __tracepoint_ext_##_name = { \
+ .regfunc = _reg, \
+ .unregfunc = _unreg, \
+ .faultable = true, \
+ }; \
+ __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args));
-#define DEFINE_TRACE(name, proto, args) \
- DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args));
+#define DEFINE_TRACE(_name, _proto, _args) \
+ __DEFINE_TRACE_EXT(_name, NULL, PARAMS(_proto), PARAMS(_args));
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
+ TRACEPOINT_CHECK(name) \
EXPORT_SYMBOL_GPL(__tracepoint_##name); \
EXPORT_SYMBOL_GPL(__traceiter_##name); \
EXPORT_STATIC_CALL_GPL(tp_func_##name)
#define EXPORT_TRACEPOINT_SYMBOL(name) \
+ TRACEPOINT_CHECK(name) \
EXPORT_SYMBOL(__tracepoint_##name); \
EXPORT_SYMBOL(__traceiter_##name); \
EXPORT_STATIC_CALL(tp_func_##name)
#else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \
static inline void trace_##name(proto) \
{ } \
- static inline void trace_##name##_rcuidle(proto) \
- { } \
static inline int \
register_trace_##name(void (*probe)(data_proto), \
void *data) \
@@ -379,7 +418,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
return false; \
}
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
+ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))
+
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \
+ __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))
+
#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
+#define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args)
#define DEFINE_TRACE(name, proto, args)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -431,15 +477,33 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#endif
#define DECLARE_TRACE(name, proto, args) \
- __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()), \
PARAMS(void *__data, proto))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
+ __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_SYSCALL(name, proto, args) \
+ __DECLARE_TRACE_SYSCALL(name##_tp, PARAMS(proto), PARAMS(args), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_EVENT(name, proto, args) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
+ cpu_online(raw_smp_processor_id()), \
+ PARAMS(void *__data, proto))
+
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
PARAMS(void *__data, proto))
+#define DECLARE_TRACE_EVENT_SYSCALL(name, proto, args) \
+ __DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \
+ PARAMS(void *__data, proto))
+
#define TRACE_EVENT_FLAGS(event, flag)
#define TRACE_EVENT_PERF_PERM(event, expr...)
@@ -554,29 +618,32 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_EVENT(template, name, proto, args) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define DEFINE_EVENT_CONDITION(template, name, proto, \
args, cond) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN(name, proto, args, struct, \
assign, print, reg, unreg) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
+ DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args))
+#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
assign, print, reg, unreg) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond, \
struct, assign, print) \
- DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
+#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, \
+ print, reg, unreg) \
+ DECLARE_TRACE_EVENT_SYSCALL(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FLAGS(event, flag)
diff --git a/include/linux/tsm-mr.h b/include/linux/tsm-mr.h
new file mode 100644
index 000000000000..50a521f4ac97
--- /dev/null
+++ b/include/linux/tsm-mr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TSM_MR_H
+#define __TSM_MR_H
+
+#include <crypto/hash_info.h>
+
+/**
+ * struct tsm_measurement_register - describes an architectural measurement
+ * register (MR)
+ * @mr_name: name of the MR
+ * @mr_value: buffer containing the current value of the MR
+ * @mr_size: size of the MR - typically the digest size of @mr_hash
+ * @mr_flags: bitwise OR of one or more flags, detailed below
+ * @mr_hash: optional hash identifier defined in include/uapi/linux/hash_info.h.
+ *
+ * A CC guest driver encloses an array of this structure in struct
+ * tsm_measurements to detail the measurement facility supported by the
+ * underlying CC hardware.
+ *
+ * @mr_name and @mr_value must stay valid until this structure is no longer in
+ * use.
+ *
+ * @mr_flags is the bitwise-OR of zero or more of the flags below.
+ *
+ * * %TSM_MR_F_READABLE - the sysfs attribute corresponding to this MR is readable.
+ * * %TSM_MR_F_WRITABLE - the sysfs attribute corresponding to this MR is writable.
+ * The semantics are typically to extend the MR but could vary depending on the
+ * architecture and the MR.
+ * * %TSM_MR_F_LIVE - this MR's value may differ from the last value written, so
+ * must be read back from the underlying CC hardware/firmware.
+ * * %TSM_MR_F_RTMR - bitwise-OR of %TSM_MR_F_LIVE and %TSM_MR_F_WRITABLE.
+ * * %TSM_MR_F_NOHASH - this MR does NOT have an associated hash algorithm.
+ * @mr_hash will be ignored when this flag is set.
+ */
+struct tsm_measurement_register {
+ const char *mr_name;
+ void *mr_value;
+ u32 mr_size;
+ u32 mr_flags;
+ enum hash_algo mr_hash;
+};
+
+#define TSM_MR_F_NOHASH 1
+#define TSM_MR_F_WRITABLE 2
+#define TSM_MR_F_READABLE 4
+#define TSM_MR_F_LIVE 8
+#define TSM_MR_F_RTMR (TSM_MR_F_LIVE | TSM_MR_F_WRITABLE)
+
+#define TSM_MR_(mr, hash) \
+ .mr_name = #mr, .mr_size = hash##_DIGEST_SIZE, \
+ .mr_hash = HASH_ALGO_##hash, .mr_flags = TSM_MR_F_READABLE
+
+/**
+ * struct tsm_measurements - defines the CC architecture specific measurement
+ * facility and methods for updating measurement registers (MRs)
+ * @mrs: Array of MR definitions.
+ * @nr_mrs: Number of elements in @mrs.
+ * @refresh: Callback function to load/sync all MRs from TVM hardware/firmware
+ * into the kernel cache.
+ * @write: Callback function to write to the MR specified by the parameter @mr.
+ * Typically, writing to an MR extends the input buffer to that MR.
+ *
+ * The @refresh callback is invoked when an MR with %TSM_MR_F_LIVE set is being
+ * read and the cache is stale. It must reload all MRs with %TSM_MR_F_LIVE set.
+ * The function parameter @tm points back to this structure.
+ *
+ * The @write callback is invoked whenever an MR is being written. It takes two
+ * additional parameters besides @tm:
+ *
+ * * @mr - points to the MR (an element of @tm->mrs) being written.
+ * * @data - contains the bytes to write and whose size is @mr->mr_size.
+ *
+ * Both @refresh and @write should return 0 on success and an appropriate error
+ * code on failure.
+ */
+struct tsm_measurements {
+ const struct tsm_measurement_register *mrs;
+ size_t nr_mrs;
+ int (*refresh)(const struct tsm_measurements *tm);
+ int (*write)(const struct tsm_measurements *tm,
+ const struct tsm_measurement_register *mr, const u8 *data);
+};
+
+const struct attribute_group *
+tsm_mr_create_attribute_group(const struct tsm_measurements *tm);
+void tsm_mr_free_attribute_group(const struct attribute_group *attr_grp);
+
+#endif
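
To show the intended shape of a consumer (a sketch; the register names, buffers, and my_refresh()/my_write() callbacks are invented, and SHA384_DIGEST_SIZE is assumed available via the crypto headers), a CC guest driver could describe one build-time MR and one runtime MR:

static u8 mrtd[SHA384_DIGEST_SIZE];
static u8 rtmr0[SHA384_DIGEST_SIZE];

static int my_refresh(const struct tsm_measurements *tm)
{
	/* reload all TSM_MR_F_LIVE registers from hardware/firmware */
	return 0;
}

static int my_write(const struct tsm_measurements *tm,
		    const struct tsm_measurement_register *mr, const u8 *data)
{
	/* extend @mr with @data (mr->mr_size bytes) in hardware/firmware */
	return 0;
}

static const struct tsm_measurement_register my_mrs[] = {
	{ TSM_MR_(mrtd, SHA384), .mr_value = mrtd },
	{ .mr_name = "rtmr0", .mr_value = rtmr0,
	  .mr_size = SHA384_DIGEST_SIZE, .mr_hash = HASH_ALGO_SHA384,
	  .mr_flags = TSM_MR_F_RTMR | TSM_MR_F_READABLE },
};

static const struct tsm_measurements my_tm = {
	.mrs = my_mrs,
	.nr_mrs = ARRAY_SIZE(my_mrs),
	.refresh = my_refresh,
	.write = my_write,
};

The driver would then publish tsm_mr_create_attribute_group(&my_tm) with its device and release it via tsm_mr_free_attribute_group() on teardown.
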
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
index de8324a2223c..a3b7ab668eff 100644
--- a/include/linux/tsm.h
+++ b/include/linux/tsm.h
@@ -4,26 +4,34 @@
#include <linux/sizes.h>
#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/device.h>
-#define TSM_INBLOB_MAX 64
-#define TSM_OUTBLOB_MAX SZ_32K
+#define TSM_REPORT_INBLOB_MAX 64
+#define TSM_REPORT_OUTBLOB_MAX SZ_32K
/*
* Privilege level is a nested permission concept to allow confidential
* guests to partition address space, 4-levels are supported.
*/
-#define TSM_PRIVLEVEL_MAX 3
+#define TSM_REPORT_PRIVLEVEL_MAX 3
/**
- * struct tsm_desc - option descriptor for generating tsm report blobs
+ * struct tsm_report_desc - option descriptor for generating tsm report blobs
* @privlevel: optional privilege level to associate with @outblob
* @inblob_len: sizeof @inblob
* @inblob: arbitrary input data
+ * @service_provider: optional name of where to obtain the tsm report blob
+ * @service_guid: optional service-provider service guid to attest
+ * @service_manifest_version: optional service-provider service manifest version requested
*/
-struct tsm_desc {
+struct tsm_report_desc {
unsigned int privlevel;
size_t inblob_len;
- u8 inblob[TSM_INBLOB_MAX];
+ u8 inblob[TSM_REPORT_INBLOB_MAX];
+ char *service_provider;
+ guid_t service_guid;
+ unsigned int service_manifest_version;
};
/**
@@ -33,37 +41,89 @@ struct tsm_desc {
* @outblob: generated evidence to provide to the attestation agent
* @auxblob_len: sizeof(@auxblob)
* @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ * @manifestblob_len: sizeof(@manifestblob)
+ * @manifestblob: (optional) manifest data associated with the report
*/
struct tsm_report {
- struct tsm_desc desc;
+ struct tsm_report_desc desc;
size_t outblob_len;
u8 *outblob;
size_t auxblob_len;
u8 *auxblob;
+ size_t manifestblob_len;
+ u8 *manifestblob;
};
/**
- * struct tsm_ops - attributes and operations for tsm instances
+ * enum tsm_attr_index - index used to reference report attributes
+ * @TSM_REPORT_GENERATION: index of the report generation number attribute
+ * @TSM_REPORT_PROVIDER: index of the provider name attribute
+ * @TSM_REPORT_PRIVLEVEL: index of the desired privilege level attribute
+ * @TSM_REPORT_PRIVLEVEL_FLOOR: index of the minimum allowed privilege level attribute
+ * @TSM_REPORT_SERVICE_PROVIDER: index of the service provider identifier attribute
+ * @TSM_REPORT_SERVICE_GUID: index of the service GUID attribute
+ * @TSM_REPORT_SERVICE_MANIFEST_VER: index of the service manifest version attribute
+ */
+enum tsm_attr_index {
+ TSM_REPORT_GENERATION,
+ TSM_REPORT_PROVIDER,
+ TSM_REPORT_PRIVLEVEL,
+ TSM_REPORT_PRIVLEVEL_FLOOR,
+ TSM_REPORT_SERVICE_PROVIDER,
+ TSM_REPORT_SERVICE_GUID,
+ TSM_REPORT_SERVICE_MANIFEST_VER,
+};
+
+/**
+ * enum tsm_bin_attr_index - index used to reference binary report attributes
+ * @TSM_REPORT_INBLOB: index of the binary report input attribute
+ * @TSM_REPORT_OUTBLOB: index of the binary report output attribute
+ * @TSM_REPORT_AUXBLOB: index of the binary auxiliary data attribute
+ * @TSM_REPORT_MANIFESTBLOB: index of the binary manifest data attribute
+ */
+enum tsm_bin_attr_index {
+ TSM_REPORT_INBLOB,
+ TSM_REPORT_OUTBLOB,
+ TSM_REPORT_AUXBLOB,
+ TSM_REPORT_MANIFESTBLOB,
+};
+
+/**
+ * struct tsm_report_ops - attributes and operations for tsm_report instances
* @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
* @privlevel_floor: convey base privlevel for nested scenarios
* @report_new: Populate @report with the report blob and auxblob
* (optional), return 0 on successful population, or -errno otherwise
+ * @report_attr_visible: show or hide a report attribute entry
+ * @report_bin_attr_visible: show or hide a report binary attribute entry
*
* Implementation specific ops, only one is expected to be registered at
* a time i.e. only one of "sev-guest", "tdx-guest", etc.
*/
-struct tsm_ops {
+struct tsm_report_ops {
const char *name;
- const unsigned int privlevel_floor;
+ unsigned int privlevel_floor;
int (*report_new)(struct tsm_report *report, void *data);
+ bool (*report_attr_visible)(int n);
+ bool (*report_bin_attr_visible)(int n);
};
-extern const struct config_item_type tsm_report_default_type;
+struct pci_tsm_ops;
+struct tsm_dev {
+ struct device dev;
+ int id;
+ const struct pci_tsm_ops *pci_ops;
+};
-/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
-extern const struct config_item_type tsm_report_extra_type;
+DEFINE_FREE(put_tsm_dev, struct tsm_dev *,
+ if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
-int tsm_register(const struct tsm_ops *ops, void *priv,
- const struct config_item_type *type);
-int tsm_unregister(const struct tsm_ops *ops);
+int tsm_report_register(const struct tsm_report_ops *ops, void *priv);
+int tsm_report_unregister(const struct tsm_report_ops *ops);
+struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops);
+void tsm_unregister(struct tsm_dev *tsm_dev);
+struct tsm_dev *find_tsm_dev(int id);
+struct pci_ide;
+int tsm_ide_stream_register(struct pci_ide *ide);
+void tsm_ide_stream_unregister(struct pci_ide *ide);
#endif /* __TSM_H */
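
For context, a minimal sketch of how a provider adopts the renamed report API (my_report_new() is hypothetical; the visibility callbacks receive the enum indices defined above):

static bool my_attr_visible(int n)
{
	/* expose everything up to the privlevel floor; hide service-* */
	return n <= TSM_REPORT_PRIVLEVEL_FLOOR;
}

static bool my_bin_attr_visible(int n)
{
	return n == TSM_REPORT_INBLOB || n == TSM_REPORT_OUTBLOB;
}

static const struct tsm_report_ops my_report_ops = {
	.name = "example-guest",
	.report_new = my_report_new,	/* hypothetical */
	.report_attr_visible = my_attr_visible,
	.report_bin_attr_visible = my_bin_attr_visible,
};

/* probe: tsm_report_register(&my_report_ops, NULL); */
/* remove: tsm_report_unregister(&my_report_ops); */
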
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2372f9357240..0a46e4054dec 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -239,7 +239,6 @@ struct tty_struct {
struct list_head tty_files;
-#define N_TTY_BUF_SIZE 4096
struct work_struct SAK_work;
} __randomize_layout;
@@ -251,7 +250,7 @@ struct tty_file_private {
};
/**
- * DOC: TTY Struct Flags
+ * enum tty_struct_flags - TTY Struct Flags
*
* These bits are used in the :c:member:`tty_struct.flags` field.
*
@@ -260,62 +259,64 @@ struct tty_file_private {
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
*
- * TTY_THROTTLED
+ * @TTY_THROTTLED:
* Driver input is throttled. The ldisc should call
* :c:member:`tty_driver.unthrottle()` in order to resume reception when
* it is ready to process more data (at threshold min).
*
- * TTY_IO_ERROR
+ * @TTY_IO_ERROR:
* If set, causes all subsequent userspace read/write calls on the tty to
* fail, returning -%EIO. (May be no ldisc too.)
*
- * TTY_OTHER_CLOSED
+ * @TTY_OTHER_CLOSED:
* Device is a pty and the other side has closed.
*
- * TTY_EXCLUSIVE
+ * @TTY_EXCLUSIVE:
* Exclusive open mode (a single opener).
*
- * TTY_DO_WRITE_WAKEUP
+ * @TTY_DO_WRITE_WAKEUP:
* If set, causes the driver to call the
* :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
* transmission when it can accept more data to transmit.
*
- * TTY_LDISC_OPEN
+ * @TTY_LDISC_OPEN:
* Indicates that a line discipline is open. For debugging purposes only.
*
- * TTY_PTY_LOCK
+ * @TTY_PTY_LOCK:
* A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
*
- * TTY_NO_WRITE_SPLIT
+ * @TTY_NO_WRITE_SPLIT:
* Prevent driver from splitting up writes into smaller chunks (preserve
* write boundaries to driver).
*
- * TTY_HUPPED
+ * @TTY_HUPPED:
* The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`.
*
- * TTY_HUPPING
+ * @TTY_HUPPING:
* The TTY is in the process of hanging up to abort potential readers.
*
- * TTY_LDISC_CHANGING
+ * @TTY_LDISC_CHANGING:
* Line discipline for this TTY is being changed. I/O should not block
* when this is set. Use tty_io_nonblock() to check.
*
- * TTY_LDISC_HALTED
+ * @TTY_LDISC_HALTED:
* Line discipline for this TTY was stopped. No work should be queued to
* this ldisc.
*/
-#define TTY_THROTTLED 0
-#define TTY_IO_ERROR 1
-#define TTY_OTHER_CLOSED 2
-#define TTY_EXCLUSIVE 3
-#define TTY_DO_WRITE_WAKEUP 5
-#define TTY_LDISC_OPEN 11
-#define TTY_PTY_LOCK 16
-#define TTY_NO_WRITE_SPLIT 17
-#define TTY_HUPPED 18
-#define TTY_HUPPING 19
-#define TTY_LDISC_CHANGING 20
-#define TTY_LDISC_HALTED 22
+enum tty_struct_flags {
+ TTY_THROTTLED,
+ TTY_IO_ERROR,
+ TTY_OTHER_CLOSED,
+ TTY_EXCLUSIVE,
+ TTY_DO_WRITE_WAKEUP,
+ TTY_LDISC_OPEN,
+ TTY_PTY_LOCK,
+ TTY_NO_WRITE_SPLIT,
+ TTY_HUPPED,
+ TTY_HUPPING,
+ TTY_LDISC_CHANGING,
+ TTY_LDISC_HALTED,
+};
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index dd4b31ce6d5d..188ee9b768eb 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -17,6 +17,92 @@ struct serial_icounter_struct;
struct serial_struct;
/**
+ * enum tty_driver_flag -- TTY Driver Flags
+ *
+ * These are flags passed to tty_alloc_driver().
+ *
+ * @TTY_DRIVER_INSTALLED:
+ * Whether this driver was successfully installed. This is a tty internal
+ * flag. Do not touch.
+ *
+ * @TTY_DRIVER_RESET_TERMIOS:
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * @TTY_DRIVER_REAL_RAW:
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * @TTY_DRIVER_DYNAMIC_DEV:
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices will
+ * show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * @TTY_DRIVER_DEVPTS_MEM:
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_HARDWARE_BREAK:
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * @TTY_DRIVER_DYNAMIC_ALLOC:
+ * Do not allocate structures which are needed per line for this driver
+ * (&tty_driver.ports) as it would waste memory. The driver will take
+ * care. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_UNNUMBERED_NODE:
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
+ */
+enum tty_driver_flag {
+ TTY_DRIVER_INSTALLED = BIT(0),
+ TTY_DRIVER_RESET_TERMIOS = BIT(1),
+ TTY_DRIVER_REAL_RAW = BIT(2),
+ TTY_DRIVER_DYNAMIC_DEV = BIT(3),
+ TTY_DRIVER_DEVPTS_MEM = BIT(4),
+ TTY_DRIVER_HARDWARE_BREAK = BIT(5),
+ TTY_DRIVER_DYNAMIC_ALLOC = BIT(6),
+ TTY_DRIVER_UNNUMBERED_NODE = BIT(7),
+};
+
+enum tty_driver_type {
+ TTY_DRIVER_TYPE_SYSTEM,
+ TTY_DRIVER_TYPE_CONSOLE,
+ TTY_DRIVER_TYPE_SERIAL,
+ TTY_DRIVER_TYPE_PTY,
+ TTY_DRIVER_TYPE_SCC,
+ TTY_DRIVER_TYPE_SYSCONS,
+};
+
+enum tty_driver_subtype {
+ SYSTEM_TYPE_TTY = 1,
+ SYSTEM_TYPE_CONSOLE,
+ SYSTEM_TYPE_SYSCONS,
+ SYSTEM_TYPE_SYSPTMX,
+
+ PTY_TYPE_MASTER = 1,
+ PTY_TYPE_SLAVE,
+
+ SERIAL_TYPE_NORMAL = 1,
+};
+
+/**
* struct tty_operations -- interface between driver and tty
*
* @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
@@ -320,7 +406,7 @@ struct serial_struct;
*
* @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)``
*
- * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is
+ * kgdboc support (Documentation/process/debugging/kgdb.rst). This routine is
* called to initialize the HW for later use by calling @poll_get_char or
* @poll_put_char.
*
@@ -414,8 +500,8 @@ struct tty_operations {
* @major: major /dev device number (zero for autoassignment)
* @minor_start: the first minor /dev device number
* @num: number of devices allocated
- * @type: type of tty driver (%TTY_DRIVER_TYPE_)
- * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @type: type of tty driver (enum tty_driver_type)
+ * @subtype: subtype of tty driver (enum tty_driver_subtype)
* @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
* @flags: tty driver flags (%TTY_DRIVER_)
* @proc_entry: proc fs entry, used internally
@@ -447,8 +533,8 @@ struct tty_driver {
int major;
int minor_start;
unsigned int num;
- short type;
- short subtype;
+ enum tty_driver_type type;
+ enum tty_driver_subtype subtype;
struct ktermios init_termios;
unsigned long flags;
struct proc_dir_entry *proc_entry;
@@ -478,7 +564,13 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line);
void tty_driver_kref_put(struct tty_driver *driver);
-/* Use TTY_DRIVER_* flags below */
+/**
+ * tty_alloc_driver - allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @flags: some of enum tty_driver_flag, will be set in driver->flags
+ *
+ * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends).
+ */
#define tty_alloc_driver(lines, flags) \
__tty_alloc_driver(lines, THIS_MODULE, flags)
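
A conventional allocation sequence using the documented flags (a sketch; example_ops is a hypothetical tty_operations table and error handling is abbreviated):

struct tty_driver *drv;

drv = tty_alloc_driver(2, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(drv))
	return PTR_ERR(drv);

drv->driver_name = "example";
drv->name = "ttyEX";
drv->type = TTY_DRIVER_TYPE_SERIAL;
drv->subtype = SERIAL_TYPE_NORMAL;
drv->init_termios = tty_std_termios;
tty_set_operations(drv, &example_ops);

if (tty_register_driver(drv)) {
	tty_driver_kref_put(drv);
	return -ENODEV;
}
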
@@ -494,84 +586,6 @@ static inline void tty_set_operations(struct tty_driver *driver,
driver->ops = op;
}
-/**
- * DOC: TTY Driver Flags
- *
- * TTY_DRIVER_RESET_TERMIOS
- * Requests the tty layer to reset the termios setting when the last
- * process has closed the device. Used for PTYs, in particular.
- *
- * TTY_DRIVER_REAL_RAW
- * Indicates that the driver will guarantee not to set any special
- * character handling flags if this is set for the tty:
- *
- * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
- *
- * That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the line
- * driver, it won't do so. This allows the line driver to optimize for
- * this case if this flag is set. (Note that there is also a promise, if
- * the above case is true, not to signal overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV
- * The individual tty devices need to be registered with a call to
- * tty_register_device() when the device is found in the system and
- * unregistered with a call to tty_unregister_device() so the devices will
- * be show up properly in sysfs. If not set, all &tty_driver.num entries
- * will be created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices that can
- * appear and disappear while the main tty driver is registered with the
- * tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM
- * Don't use the standard arrays (&tty_driver.ttys and
- * &tty_driver.termios), instead use dynamic memory keyed through the
- * devpts filesystem. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK
- * Hardware handles break signals. Pass the requested timeout to the
- * &tty_operations.break_ctl instead of using a simple on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC
- * Do not allocate structures which are needed per line for this driver
- * (&tty_driver.ports) as it would waste memory. The driver will take
- * care. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_UNNUMBERED_NODE
- * Do not create numbered ``/dev`` nodes. For example, create
- * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
- * driver for a single tty device is being allocated.
- */
-#define TTY_DRIVER_INSTALLED 0x0001
-#define TTY_DRIVER_RESET_TERMIOS 0x0002
-#define TTY_DRIVER_REAL_RAW 0x0004
-#define TTY_DRIVER_DYNAMIC_DEV 0x0008
-#define TTY_DRIVER_DEVPTS_MEM 0x0010
-#define TTY_DRIVER_HARDWARE_BREAK 0x0020
-#define TTY_DRIVER_DYNAMIC_ALLOC 0x0040
-#define TTY_DRIVER_UNNUMBERED_NODE 0x0080
-
-/* tty driver types */
-#define TTY_DRIVER_TYPE_SYSTEM 0x0001
-#define TTY_DRIVER_TYPE_CONSOLE 0x0002
-#define TTY_DRIVER_TYPE_SERIAL 0x0003
-#define TTY_DRIVER_TYPE_PTY 0x0004
-#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
-#define TTY_DRIVER_TYPE_SYSCONS 0x0006
-
-/* system subtypes (magic, used by tty_io.c) */
-#define SYSTEM_TYPE_TTY 0x0001
-#define SYSTEM_TYPE_CONSOLE 0x0002
-#define SYSTEM_TYPE_SYSCONS 0x0003
-#define SYSTEM_TYPE_SYSPTMX 0x0004
-
-/* pty subtypes (magic, used by tty_io.c) */
-#define PTY_TYPE_MASTER 0x0001
-#define PTY_TYPE_SLAVE 0x0002
-
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
int tty_register_driver(struct tty_driver *driver);
void tty_unregister_driver(struct tty_driver *driver);
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index af01e89074b2..c5cccc3fc1e8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -39,7 +39,6 @@ do { \
int ldsem_down_read(struct ld_semaphore *sem, long timeout);
int ldsem_down_read_trylock(struct ld_semaphore *sem);
int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-int ldsem_down_write_trylock(struct ld_semaphore *sem);
void ldsem_up_read(struct ld_semaphore *sem);
void ldsem_up_write(struct ld_semaphore *sem);
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index 1b861f2100b6..660c254f1efe 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -147,9 +147,6 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *device, void *drvdata,
const struct attribute_group **attr_grp);
-struct device *tty_port_register_device_serdev(struct tty_port *port,
- struct tty_driver *driver, unsigned index,
- struct device *host, struct device *parent);
struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
struct device *host, struct device *parent, void *drvdata,
@@ -235,7 +232,7 @@ bool tty_port_carrier_raised(struct tty_port *port);
void tty_port_raise_dtr_rts(struct tty_port *port);
void tty_port_lower_dtr_rts(struct tty_port *port);
void tty_port_hangup(struct tty_port *port);
-void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
+void __tty_port_tty_hangup(struct tty_port *port, bool check_clocal, bool async);
void tty_port_tty_wakeup(struct tty_port *port);
int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty,
struct file *filp);
@@ -254,4 +251,37 @@ static inline int tty_port_users(struct tty_port *port)
return port->count + port->blocked_open;
}
+/**
+ * tty_port_tty_hangup - helper to hang up a tty asynchronously
+ * @port: tty port
+ * @check_clocal: hang only ttys with %CLOCAL unset?
+ */
+static inline void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
+{
+ __tty_port_tty_hangup(port, check_clocal, true);
+}
+
+/**
+ * tty_port_tty_vhangup - helper to hang up a tty synchronously
+ * @port: tty port
+ */
+static inline void tty_port_tty_vhangup(struct tty_port *port)
+{
+ __tty_port_tty_hangup(port, false, false);
+}
+
+#ifdef CONFIG_TTY
+void tty_kref_put(struct tty_struct *tty);
+__DEFINE_CLASS_IS_CONDITIONAL(tty_port_tty, true);
+__DEFINE_UNLOCK_GUARD(tty_port_tty, struct tty_struct, tty_kref_put(_T->lock));
+static inline class_tty_port_tty_t class_tty_port_tty_constructor(struct tty_port *tport)
+{
+ class_tty_port_tty_t _t = {
+ .lock = tty_port_tty_get(tport),
+ };
+ return _t;
+}
+#define scoped_tty() ((struct tty_struct *)(__guard_ptr(tty_port_tty)(&scope)))
+#endif
+
#endif
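
For illustration, a hypothetical driver fragment (not part of this diff) showing how the two helpers above divide the work:

static void example_carrier_lost(struct tty_port *port)
{
	/* Async hangup; ttys with CLOCAL set are left alone */
	tty_port_tty_hangup(port, true);
}

static void example_device_removed(struct tty_port *port)
{
	/* Synchronous, unconditional hangup before teardown */
	tty_port_tty_vhangup(port);
}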
diff --git a/include/linux/turris-omnia-mcu-interface.h b/include/linux/turris-omnia-mcu-interface.h
new file mode 100644
index 000000000000..38b45ab00053
--- /dev/null
+++ b/include/linux/turris-omnia-mcu-interface.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CZ.NIC's Turris Omnia MCU I2C interface commands definitions
+ *
+ * 2024 by Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef __TURRIS_OMNIA_MCU_INTERFACE_H
+#define __TURRIS_OMNIA_MCU_INTERFACE_H
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <asm/byteorder.h>
+
+enum omnia_commands_e {
+ OMNIA_CMD_GET_STATUS_WORD = 0x01, /* slave sends status word back */
+ OMNIA_CMD_GENERAL_CONTROL = 0x02,
+ OMNIA_CMD_LED_MODE = 0x03, /* default/user */
+ OMNIA_CMD_LED_STATE = 0x04, /* LED on/off */
+ OMNIA_CMD_LED_COLOR = 0x05, /* LED number + RED + GREEN + BLUE */
+ OMNIA_CMD_USER_VOLTAGE = 0x06,
+ OMNIA_CMD_SET_BRIGHTNESS = 0x07,
+ OMNIA_CMD_GET_BRIGHTNESS = 0x08,
+ OMNIA_CMD_GET_RESET = 0x09,
+ OMNIA_CMD_GET_FW_VERSION_APP = 0x0A, /* 20B git hash number */
+ OMNIA_CMD_SET_WATCHDOG_STATE = 0x0B, /* 0 - disable
+ * 1 - enable / ping
+ * after boot watchdog is started
+ * with 2 minutes timeout
+ */
+
+ /* OMNIA_CMD_WATCHDOG_STATUS = 0x0C, not implemented anymore */
+
+ OMNIA_CMD_GET_WATCHDOG_STATE = 0x0D,
+ OMNIA_CMD_GET_FW_VERSION_BOOT = 0x0E, /* 20B Git hash number */
+ OMNIA_CMD_GET_FW_CHECKSUM = 0x0F, /* 4B length, 4B checksum */
+
+ /* available if FEATURES_SUPPORTED bit set in status word */
+ OMNIA_CMD_GET_FEATURES = 0x10,
+
+ /* available if EXT_CMD bit set in features */
+ OMNIA_CMD_GET_EXT_STATUS_DWORD = 0x11,
+ OMNIA_CMD_EXT_CONTROL = 0x12,
+ OMNIA_CMD_GET_EXT_CONTROL_STATUS = 0x13,
+
+ /* available if NEW_INT_API bit set in features */
+ OMNIA_CMD_GET_INT_AND_CLEAR = 0x14,
+ OMNIA_CMD_GET_INT_MASK = 0x15,
+ OMNIA_CMD_SET_INT_MASK = 0x16,
+
+ /* available if FLASHING bit set in features */
+ OMNIA_CMD_FLASH = 0x19,
+
+ /* available if WDT_PING bit set in features */
+ OMNIA_CMD_SET_WDT_TIMEOUT = 0x20,
+ OMNIA_CMD_GET_WDT_TIMELEFT = 0x21,
+
+ /* available if POWEROFF_WAKEUP bit set in features */
+ OMNIA_CMD_SET_WAKEUP = 0x22,
+ OMNIA_CMD_GET_UPTIME_AND_WAKEUP = 0x23,
+ OMNIA_CMD_POWER_OFF = 0x24,
+
+ /* available if USB_OVC_PROT_SETTING bit set in features */
+ OMNIA_CMD_SET_USB_OVC_PROT = 0x25,
+ OMNIA_CMD_GET_USB_OVC_PROT = 0x26,
+
+ /* available if TRNG bit set in features */
+ OMNIA_CMD_TRNG_COLLECT_ENTROPY = 0x28,
+
+ /* available if CRYPTO bit set in features */
+ OMNIA_CMD_CRYPTO_GET_PUBLIC_KEY = 0x29,
+ OMNIA_CMD_CRYPTO_SIGN_MESSAGE = 0x2A,
+ OMNIA_CMD_CRYPTO_COLLECT_SIGNATURE = 0x2B,
+
+	/* available if BOARD_INFO is set in features */
+ OMNIA_CMD_BOARD_INFO_GET = 0x2C,
+ OMNIA_CMD_BOARD_INFO_BURN = 0x2D,
+
+ /* available only at address 0x2b (LED-controller) */
+ /* available only if LED_GAMMA_CORRECTION bit set in features */
+ OMNIA_CMD_SET_GAMMA_CORRECTION = 0x30,
+ OMNIA_CMD_GET_GAMMA_CORRECTION = 0x31,
+
+ /* available only at address 0x2b (LED-controller) */
+ /* available only if PER_LED_CORRECTION bit set in features */
+ /* available only if FROM_BIT_16_INVALID bit NOT set in features */
+ OMNIA_CMD_SET_LED_CORRECTIONS = 0x32,
+ OMNIA_CMD_GET_LED_CORRECTIONS = 0x33,
+};
+
+enum omnia_flashing_commands_e {
+ OMNIA_FLASH_CMD_UNLOCK = 0x01,
+ OMNIA_FLASH_CMD_SIZE_AND_CSUM = 0x02,
+ OMNIA_FLASH_CMD_PROGRAM = 0x03,
+ OMNIA_FLASH_CMD_RESET = 0x04,
+};
+
+enum omnia_sts_word_e {
+ OMNIA_STS_MCU_TYPE_MASK = GENMASK(1, 0),
+ OMNIA_STS_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 0),
+ OMNIA_STS_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 1),
+ OMNIA_STS_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_STS_MCU_TYPE_MASK, 2),
+ OMNIA_STS_FEATURES_SUPPORTED = BIT(2),
+ OMNIA_STS_USER_REGULATOR_NOT_SUPPORTED = BIT(3),
+ OMNIA_STS_CARD_DET = BIT(4),
+ OMNIA_STS_MSATA_IND = BIT(5),
+ OMNIA_STS_USB30_OVC = BIT(6),
+ OMNIA_STS_USB31_OVC = BIT(7),
+ OMNIA_STS_USB30_PWRON = BIT(8),
+ OMNIA_STS_USB31_PWRON = BIT(9),
+ OMNIA_STS_ENABLE_4V5 = BIT(10),
+ OMNIA_STS_BUTTON_MODE = BIT(11),
+ OMNIA_STS_BUTTON_PRESSED = BIT(12),
+ OMNIA_STS_BUTTON_COUNTER_MASK = GENMASK(15, 13),
+};
+
+enum omnia_ctl_byte_e {
+ OMNIA_CTL_LIGHT_RST = BIT(0),
+ OMNIA_CTL_HARD_RST = BIT(1),
+ /* BIT(2) is currently reserved */
+ OMNIA_CTL_USB30_PWRON = BIT(3),
+ OMNIA_CTL_USB31_PWRON = BIT(4),
+ OMNIA_CTL_ENABLE_4V5 = BIT(5),
+ OMNIA_CTL_BUTTON_MODE = BIT(6),
+ OMNIA_CTL_BOOTLOADER = BIT(7),
+};
+
+enum omnia_features_e {
+ OMNIA_FEAT_PERIPH_MCU = BIT(0),
+ OMNIA_FEAT_EXT_CMDS = BIT(1),
+ OMNIA_FEAT_WDT_PING = BIT(2),
+ OMNIA_FEAT_LED_STATE_EXT_MASK = GENMASK(4, 3),
+ OMNIA_FEAT_LED_STATE_EXT = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 1),
+ OMNIA_FEAT_LED_STATE_EXT_V32 = FIELD_PREP_CONST(OMNIA_FEAT_LED_STATE_EXT_MASK, 2),
+ OMNIA_FEAT_LED_GAMMA_CORRECTION = BIT(5),
+ OMNIA_FEAT_NEW_INT_API = BIT(6),
+ OMNIA_FEAT_BOOTLOADER = BIT(7),
+ OMNIA_FEAT_FLASHING = BIT(8),
+ OMNIA_FEAT_NEW_MESSAGE_API = BIT(9),
+ OMNIA_FEAT_BRIGHTNESS_INT = BIT(10),
+ OMNIA_FEAT_POWEROFF_WAKEUP = BIT(11),
+ OMNIA_FEAT_CAN_OLD_MESSAGE_API = BIT(12),
+ OMNIA_FEAT_TRNG = BIT(13),
+ OMNIA_FEAT_CRYPTO = BIT(14),
+ OMNIA_FEAT_BOARD_INFO = BIT(15),
+
+ /*
+	 * Originally the features command replied with only 16 bits. If more
+	 * were read, either the I2C transaction failed or 0xff bytes were
+	 * sent. Therefore, to consider bits 16 - 31 valid, one bit (20) was
+	 * reserved to be zero.
+ */
+
+ /* Bits 16 - 19 correspond to bits 0 - 3 of status word */
+ OMNIA_FEAT_MCU_TYPE_MASK = GENMASK(17, 16),
+ OMNIA_FEAT_MCU_TYPE_STM32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 0),
+ OMNIA_FEAT_MCU_TYPE_GD32 = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 1),
+ OMNIA_FEAT_MCU_TYPE_MKL = FIELD_PREP_CONST(OMNIA_FEAT_MCU_TYPE_MASK, 2),
+ OMNIA_FEAT_FEATURES_SUPPORTED = BIT(18),
+ OMNIA_FEAT_USER_REGULATOR_NOT_SUPPORTED = BIT(19),
+
+ /* must not be set */
+ OMNIA_FEAT_FROM_BIT_16_INVALID = BIT(20),
+
+ OMNIA_FEAT_PER_LED_CORRECTION = BIT(21),
+ OMNIA_FEAT_USB_OVC_PROT_SETTING = BIT(22),
+};
+
+enum omnia_ext_sts_dword_e {
+ OMNIA_EXT_STS_SFP_nDET = BIT(0),
+ OMNIA_EXT_STS_LED_STATES_MASK = GENMASK(31, 12),
+ OMNIA_EXT_STS_WLAN0_MSATA_LED = BIT(12),
+ OMNIA_EXT_STS_WLAN1_LED = BIT(13),
+ OMNIA_EXT_STS_WLAN2_LED = BIT(14),
+ OMNIA_EXT_STS_WPAN0_LED = BIT(15),
+ OMNIA_EXT_STS_WPAN1_LED = BIT(16),
+ OMNIA_EXT_STS_WPAN2_LED = BIT(17),
+ OMNIA_EXT_STS_WAN_LED0 = BIT(18),
+ OMNIA_EXT_STS_WAN_LED1 = BIT(19),
+ OMNIA_EXT_STS_LAN0_LED0 = BIT(20),
+ OMNIA_EXT_STS_LAN0_LED1 = BIT(21),
+ OMNIA_EXT_STS_LAN1_LED0 = BIT(22),
+ OMNIA_EXT_STS_LAN1_LED1 = BIT(23),
+ OMNIA_EXT_STS_LAN2_LED0 = BIT(24),
+ OMNIA_EXT_STS_LAN2_LED1 = BIT(25),
+ OMNIA_EXT_STS_LAN3_LED0 = BIT(26),
+ OMNIA_EXT_STS_LAN3_LED1 = BIT(27),
+ OMNIA_EXT_STS_LAN4_LED0 = BIT(28),
+ OMNIA_EXT_STS_LAN4_LED1 = BIT(29),
+ OMNIA_EXT_STS_LAN5_LED0 = BIT(30),
+ OMNIA_EXT_STS_LAN5_LED1 = BIT(31),
+};
+
+enum omnia_ext_ctl_e {
+ OMNIA_EXT_CTL_nRES_MMC = BIT(0),
+ OMNIA_EXT_CTL_nRES_LAN = BIT(1),
+ OMNIA_EXT_CTL_nRES_PHY = BIT(2),
+ OMNIA_EXT_CTL_nPERST0 = BIT(3),
+ OMNIA_EXT_CTL_nPERST1 = BIT(4),
+ OMNIA_EXT_CTL_nPERST2 = BIT(5),
+ OMNIA_EXT_CTL_PHY_SFP = BIT(6),
+ OMNIA_EXT_CTL_PHY_SFP_AUTO = BIT(7),
+ OMNIA_EXT_CTL_nVHV_CTRL = BIT(8),
+};
+
+enum omnia_int_e {
+ OMNIA_INT_CARD_DET = BIT(0),
+ OMNIA_INT_MSATA_IND = BIT(1),
+ OMNIA_INT_USB30_OVC = BIT(2),
+ OMNIA_INT_USB31_OVC = BIT(3),
+ OMNIA_INT_BUTTON_PRESSED = BIT(4),
+ OMNIA_INT_SFP_nDET = BIT(5),
+ OMNIA_INT_BRIGHTNESS_CHANGED = BIT(6),
+ OMNIA_INT_TRNG = BIT(7),
+ OMNIA_INT_MESSAGE_SIGNED = BIT(8),
+
+ OMNIA_INT_LED_STATES_MASK = GENMASK(31, 12),
+ OMNIA_INT_WLAN0_MSATA_LED = BIT(12),
+ OMNIA_INT_WLAN1_LED = BIT(13),
+ OMNIA_INT_WLAN2_LED = BIT(14),
+ OMNIA_INT_WPAN0_LED = BIT(15),
+ OMNIA_INT_WPAN1_LED = BIT(16),
+ OMNIA_INT_WPAN2_LED = BIT(17),
+ OMNIA_INT_WAN_LED0 = BIT(18),
+ OMNIA_INT_WAN_LED1 = BIT(19),
+ OMNIA_INT_LAN0_LED0 = BIT(20),
+ OMNIA_INT_LAN0_LED1 = BIT(21),
+ OMNIA_INT_LAN1_LED0 = BIT(22),
+ OMNIA_INT_LAN1_LED1 = BIT(23),
+ OMNIA_INT_LAN2_LED0 = BIT(24),
+ OMNIA_INT_LAN2_LED1 = BIT(25),
+ OMNIA_INT_LAN3_LED0 = BIT(26),
+ OMNIA_INT_LAN3_LED1 = BIT(27),
+ OMNIA_INT_LAN4_LED0 = BIT(28),
+ OMNIA_INT_LAN4_LED1 = BIT(29),
+ OMNIA_INT_LAN5_LED0 = BIT(30),
+ OMNIA_INT_LAN5_LED1 = BIT(31),
+};
+
+enum omnia_cmd_led_mode_e {
+ OMNIA_CMD_LED_MODE_LED_MASK = GENMASK(3, 0),
+ OMNIA_CMD_LED_MODE_USER = BIT(4),
+};
+
+#define OMNIA_CMD_LED_MODE_LED(_l) FIELD_PREP(OMNIA_CMD_LED_MODE_LED_MASK, _l)
+
+enum omnia_cmd_led_state_e {
+ OMNIA_CMD_LED_STATE_LED_MASK = GENMASK(3, 0),
+ OMNIA_CMD_LED_STATE_ON = BIT(4),
+};
+
+#define OMNIA_CMD_LED_STATE_LED(_l) FIELD_PREP(OMNIA_CMD_LED_STATE_LED_MASK, _l)
+
+enum omnia_cmd_poweroff_e {
+ OMNIA_CMD_POWER_OFF_POWERON_BUTTON = BIT(0),
+ OMNIA_CMD_POWER_OFF_MAGIC = 0xdead,
+};
+
+enum omnia_cmd_usb_ovc_prot_e {
+ OMNIA_CMD_xET_USB_OVC_PROT_PORT_MASK = GENMASK(3, 0),
+ OMNIA_CMD_xET_USB_OVC_PROT_ENABLE = BIT(4),
+};
+
+/* Command execution functions */
+
+struct i2c_client;
+
+int omnia_cmd_write_read(const struct i2c_client *client,
+ void *cmd, unsigned int cmd_len,
+ void *reply, unsigned int reply_len);
+
+static inline int omnia_cmd_write(const struct i2c_client *client, void *cmd,
+ unsigned int len)
+{
+ return omnia_cmd_write_read(client, cmd, len, NULL, 0);
+}
+
+static inline int omnia_cmd_write_u8(const struct i2c_client *client, u8 cmd,
+ u8 val)
+{
+ u8 buf[2] = { cmd, val };
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_write_u16(const struct i2c_client *client, u8 cmd,
+ u16 val)
+{
+ u8 buf[3];
+
+ buf[0] = cmd;
+ put_unaligned_le16(val, &buf[1]);
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_write_u32(const struct i2c_client *client, u8 cmd,
+ u32 val)
+{
+ u8 buf[5];
+
+ buf[0] = cmd;
+ put_unaligned_le32(val, &buf[1]);
+
+ return omnia_cmd_write(client, buf, sizeof(buf));
+}
+
+static inline int omnia_cmd_read(const struct i2c_client *client, u8 cmd,
+ void *reply, unsigned int len)
+{
+ return omnia_cmd_write_read(client, &cmd, 1, reply, len);
+}
+
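+/*
+ * The reply must cover the highest requested bit: byte index of the
+ * highest set bit in @mask (doubled when the reply interleaves two bit
+ * streams), plus one to turn the index into a byte count, plus @offset
+ * fixed leading bytes.
+ */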
+static inline unsigned int
+omnia_compute_reply_length(unsigned long mask, bool interleaved,
+ unsigned int offset)
+{
+ if (!mask)
+ return 0;
+
+ return ((__fls(mask) >> 3) << interleaved) + 1 + offset;
+}
+
+/* Returns 0 on success */
+static inline int omnia_cmd_read_bits(const struct i2c_client *client, u8 cmd,
+ unsigned long bits, unsigned long *dst)
+{
+ __le32 reply;
+ int err;
+
+ if (!bits) {
+ *dst = 0;
+ return 0;
+ }
+
+ err = omnia_cmd_read(client, cmd, &reply,
+ omnia_compute_reply_length(bits, false, 0));
+ if (err)
+ return err;
+
+ *dst = le32_to_cpu(reply) & bits;
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_bit(const struct i2c_client *client, u8 cmd,
+ unsigned long bit)
+{
+ unsigned long reply;
+ int err;
+
+ err = omnia_cmd_read_bits(client, cmd, bit, &reply);
+ if (err)
+ return err;
+
+ return !!reply;
+}
+
+static inline int omnia_cmd_read_u32(const struct i2c_client *client, u8 cmd,
+ u32 *dst)
+{
+ __le32 reply;
+ int err;
+
+ err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
+ if (err)
+ return err;
+
+ *dst = le32_to_cpu(reply);
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_u16(const struct i2c_client *client, u8 cmd,
+ u16 *dst)
+{
+ __le16 reply;
+ int err;
+
+ err = omnia_cmd_read(client, cmd, &reply, sizeof(reply));
+ if (err)
+ return err;
+
+ *dst = le16_to_cpu(reply);
+
+ return 0;
+}
+
+static inline int omnia_cmd_read_u8(const struct i2c_client *client, u8 cmd,
+ u8 *reply)
+{
+ return omnia_cmd_read(client, cmd, reply, sizeof(*reply));
+}
+
+#endif /* __TURRIS_OMNIA_MCU_INTERFACE_H */
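
A usage sketch for the helpers above (hypothetical driver code; the i2c client is assumed to be bound to the MCU):

static int example_led_and_button(const struct i2c_client *client)
{
	int err;

	/* Put LED 2 under user control */
	err = omnia_cmd_write_u8(client, OMNIA_CMD_LED_MODE,
				 OMNIA_CMD_LED_MODE_LED(2) |
				 OMNIA_CMD_LED_MODE_USER);
	if (err)
		return err;

	/* Poll the front button: returns 0/1, or a negative error code */
	return omnia_cmd_read_bit(client, OMNIA_CMD_GET_STATUS_WORD,
				  OMNIA_STS_BUTTON_PRESSED);
}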
diff --git a/include/linux/turris-signing-key.h b/include/linux/turris-signing-key.h
new file mode 100644
index 000000000000..8a435b73c3a9
--- /dev/null
+++ b/include/linux/turris-signing-key.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * 2025 by Marek Behún <kabel@kernel.org>
+ */
+
+#ifndef __TURRIS_SIGNING_KEY_H
+#define __TURRIS_SIGNING_KEY_H
+
+#include <linux/key.h>
+#include <linux/types.h>
+
+struct device;
+
+#ifdef CONFIG_KEYS
+struct turris_signing_key_subtype {
+ u16 key_size;
+ u8 data_size;
+ u8 sig_size;
+ u8 public_key_size;
+ const char *hash_algo;
+ const void *(*get_public_key)(const struct key *key);
+ int (*sign)(const struct key *key, const void *msg, void *signature);
+};
+
+static inline struct device *turris_signing_key_get_dev(const struct key *key)
+{
+ return key->payload.data[1];
+}
+
+int
+devm_turris_signing_key_create(struct device *dev, const struct turris_signing_key_subtype *subtype,
+ const char *desc);
+#endif
+
+#endif /* __TURRIS_SIGNING_KEY_H */
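
A minimal sketch of a subtype instance; all sizes, the payload layout and the names are assumptions chosen only to illustrate the fields:

static const void *example_get_public_key(const struct key *key)
{
	return key->payload.data[0];	/* assumed payload layout */
}

static const struct turris_signing_key_subtype example_subtype = {
	.key_size	 = 256,		/* bits, assumed */
	.data_size	 = 32,		/* e.g. a SHA-256 digest */
	.sig_size	 = 64,
	.public_key_size = 65,
	.hash_algo	 = "sha256",
	.get_public_key	 = example_get_public_key,
	/* .sign would call into the hardware-backed signing routine */
};

A probe routine would then register it with devm_turris_signing_key_create(dev, &example_subtype, "example signing key").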
diff --git a/include/linux/types.h b/include/linux/types.h
index 2bc8766ba20c..d4437e9c452c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -43,13 +43,14 @@ typedef unsigned long uintptr_t;
typedef long intptr_t;
#ifdef CONFIG_HAVE_UID16
-/* This is defined by include/asm-{arch}/posix_types.h */
+/* This is defined by arch/{arch}/include/asm/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */
#if defined(__GNUC__)
typedef __kernel_loff_t loff_t;
+typedef __kernel_uoff_t uoff_t;
#endif
/*
@@ -92,6 +93,7 @@ typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
+typedef unsigned long long ullong;
#ifndef __BIT_TYPES_DEFINED__
#define __BIT_TYPES_DEFINED__
@@ -115,8 +117,9 @@ typedef u64 u_int64_t;
typedef s64 int64_t;
#endif
-/* this is a special 64bit data type that is 8-byte aligned */
+/* These are the special 64-bit data types that are 8-byte aligned */
#define aligned_u64 __aligned_u64
+#define aligned_s64 __aligned_s64
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64
@@ -134,6 +137,10 @@ typedef s64 ktime_t;
typedef u64 sector_t;
typedef u64 blkcnt_t;
+/* generic data direction definitions */
+#define READ 0
+#define WRITE 1
+
/*
* The type of an index into the pagecache.
*/
@@ -247,5 +254,17 @@ typedef void (*swap_func_t)(void *a, void *b, int size);
typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
typedef int (*cmp_func_t)(const void *a, const void *b);
+/*
+ * rcuwait provides a way of blocking and waking up a single
+ * task in an rcu-safe manner.
+ *
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to be) on a condition; it is reset as soon as
+ * we know that the condition has succeeded and the task is awoken.
+ */
+struct rcuwait {
+ struct task_struct __rcu *task;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
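
A sketch of the blocking/waking pattern the comment describes, assuming the rcuwait helpers declared in <linux/rcuwait.h> (not part of this diff):

static struct rcuwait example_wait;	/* or rcuwait_init() at runtime */
static bool example_done;

static void example_waiter(void)
{
	rcuwait_wait_event(&example_wait, READ_ONCE(example_done),
			   TASK_UNINTERRUPTIBLE);
}

static void example_waker(void)
{
	WRITE_ONCE(example_done, true);
	rcuwait_wake_up(&example_wait);
}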
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 3064314f4832..1f3804245c06 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,11 +2,13 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/cleanup.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
+#include <linux/nospec.h>
#include <linux/sched.h>
-#include <linux/thread_info.h>
+#include <linux/ucopysize.h>
#include <asm/uaccess.h>
@@ -32,6 +34,22 @@
})
#endif
+#ifdef masked_user_access_begin
+ #define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+# define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+# define masked_user_read_access_begin masked_user_access_begin
+# endif
+#else
+ #define can_do_masked_user_access() 0
+ #define masked_user_access_begin(src) NULL
+ #define masked_user_read_access_begin(src) NULL
+ #define masked_user_write_access_begin(src) NULL
+ #define mask_user_address(src) (src)
+#endif
+
/*
* Architectures should provide two primitives (raw_copy_{to,from}_user())
* and get rid of their private instances of copy_{to,from}_user() and
@@ -138,29 +156,47 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
return raw_copy_to_user(to, from, n);
}
-#ifdef INLINE_COPY_FROM_USER
+/*
+ * Architectures that #define INLINE_COPY_TO_USER use this function
+ * directly in the normal copy_to/from_user(); the other ones go
+ * through an extern _copy_to/from_user(), which expands the same code
+ * here.
+ */
static inline __must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned long n)
+_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
- if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
+ if (should_fail_usercopy())
+ goto fail;
+ if (can_do_masked_user_access())
+ from = mask_user_address(from);
+ else {
+ if (!access_ok(from, n))
+ goto fail;
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
}
- if (unlikely(res))
- memset(to + (n - res), 0, res);
+ instrument_copy_from_user_before(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ if (likely(!res))
+ return 0;
+fail:
+ memset(to + (n - res), 0, res);
return res;
}
-#else
+#ifndef INLINE_COPY_FROM_USER
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
-#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned long n)
+_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (should_fail_usercopy())
@@ -171,7 +207,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
-#else
+#ifndef INLINE_COPY_TO_USER
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -179,17 +215,26 @@ _copy_to_user(void __user *, const void *, unsigned long);
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (check_copy_size(to, n, false))
- n = _copy_from_user(to, from, n);
- return n;
+ if (!check_copy_size(to, n, false))
+ return n;
+#ifdef INLINE_COPY_FROM_USER
+ return _inline_copy_from_user(to, from, n);
+#else
+ return _copy_from_user(to, from, n);
+#endif
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (check_copy_size(from, n, true))
- n = _copy_to_user(to, from, n);
- return n;
+ if (!check_copy_size(from, n, true))
+ return n;
+
+#ifdef INLINE_COPY_TO_USER
+ return _inline_copy_to_user(to, from, n);
+#else
+ return _copy_to_user(to, from, n);
+#endif
}
#ifndef copy_mc_to_kernel
@@ -262,6 +307,8 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+DEFINE_LOCK_GUARD_0(pagefault, pagefault_disable(), pagefault_enable())
+
#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
/**
@@ -369,6 +416,103 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
+/**
+ * copy_struct_to_user: copy a struct to userspace
+ * @dst: Destination address, in userspace. This buffer must be @usize
+ * bytes long.
+ * @usize: (Alleged) size of @dst struct.
+ * @src: Source address, in kernel space.
+ * @ksize: Size of @src struct.
+ * @ignored_trailing: Set to %true if there was a non-zero byte in @src that
+ *	userspace cannot see because they are using a smaller struct.
+ *
+ * Copies a struct from kernel space to userspace, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * Some syscalls may wish to make sure that userspace knows about everything in
+ * the struct, and if there is a non-zero value that userspace doesn't know
+ * about, they want to return an error (such as -EMSGSIZE) or have some other
+ * fallback (such as adding a "you're missing some information" flag). If
+ * @ignored_trailing is non-%NULL, it will be set to %true if there was a
+ * non-zero byte that could not be copied to userspace (i.e. was past @usize).
+ *
+ * While unconditionally returning an error in this case is the simplest
+ * solution, for maximum backward compatibility you should try to only return
+ * -EMSGSIZE if the user explicitly requested the data that couldn't be copied.
+ * Note that structure sizes can change due to header changes and simple
+ * recompilations without code changes(!), so if you care about
+ * @ignored_trailing you probably want to make sure that any new field data is
+ * associated with a flag. Otherwise you might assume that a program knows
+ * about data it does not.
+ *
+ * @ksize is just sizeof(*src), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ * SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize)
+ * {
+ * int err;
+ * bool ignored_trailing;
+ * struct foo karg = {};
+ *
+ * if (usize > PAGE_SIZE)
+ * return -E2BIG;
+ * if (usize < FOO_SIZE_VER0)
+ * return -EINVAL;
+ *
+ * // ... modify karg somehow ...
+ *
+ * err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg),
+ * &ignored_trailing);
+ * if (err)
+ * return err;
+ * if (ignored_trailing)
+ *		return -EMSGSIZE;
+ *
+ * // ...
+ * }
+ *
+ * There are three cases to consider:
+ * * If @usize == @ksize, then it's copied verbatim.
+ * * If @usize < @ksize, then the kernel is trying to pass userspace a newer
+ * struct than it supports. Thus we only copy the interoperable portions
+ * (@usize) and ignore the rest (but @ignored_trailing is set to %true if
+ * any of the trailing (@ksize - @usize) bytes are non-zero).
+ * * If @usize > @ksize, then the kernel is trying to pass userspace an older
+ * struct than userspace supports. In order to make sure the
+ * unknown-to-the-kernel fields don't contain garbage values, we zero the
+ * trailing (@usize - @ksize) bytes.
+ *
+ * Returns (in all cases, some data may have been copied):
+ * * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_to_user(void __user *dst, size_t usize, const void *src,
+ size_t ksize, bool *ignored_trailing)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+ /* Double check if ksize is larger than a known object size. */
+ if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1)))
+ return -E2BIG;
+
+ /* Deal with trailing bytes. */
+ if (usize > ksize) {
+ if (clear_user(dst + size, rest))
+ return -EFAULT;
+ }
+ if (ignored_trailing)
+ *ignored_trailing = ksize < usize &&
+ memchr_inv(src + size, 0, rest) != NULL;
+ /* Copy the interoperable parts of the struct. */
+ if (copy_to_user(dst, src, size))
+ return -EFAULT;
+ return 0;
+}
+
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
@@ -385,7 +529,34 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
-#ifndef __get_kernel_nofault
+#ifdef arch_get_kernel_nofault
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ */
+#define __get_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_get_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, label) \
+do { \
+ __label__ local_label; \
+ arch_put_kernel_nofault(dst, src, type, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#elif !defined(__get_kernel_nofault) /* arch_get_kernel_nofault */
+
#define __get_kernel_nofault(dst, src, type, label) \
do { \
type __user *p = (type __force __user *)(src); \
@@ -402,7 +573,8 @@ do { \
if (__put_user(data, p)) \
goto label; \
} while (0)
-#endif
+
+#endif /* !__get_kernel_nofault */
/**
* get_kernel_nofault(): safely attempt to read from a location
@@ -416,7 +588,42 @@ do { \
copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
-#ifndef user_access_begin
+#ifdef user_access_begin
+
+#ifdef arch_unsafe_get_user
+/*
+ * Wrap the architecture implementation so that @label can be outside of a
+ * cleanup() scope. A regular C goto works correctly, but ASM goto does
+ * not. Clang rejects such an attempt, but GCC silently emits buggy code.
+ *
+ * Some architectures use internal local labels already, but this extra
+ * indirection here is harmless because the compiler optimizes it out
+ * completely in any case. This construct just ensures that the ASM GOTO
+ * target is always in the local scope. The C goto 'label' works correctly
+ * when leaving a cleanup() scope.
+ */
+#define unsafe_get_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_get_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+
+#define unsafe_put_user(x, ptr, label) \
+do { \
+ __label__ local_label; \
+ arch_unsafe_put_user(x, ptr, local_label); \
+ if (0) { \
+ local_label: \
+ goto label; \
+ } \
+} while (0)
+#endif /* arch_unsafe_get_user */
+
+#else /* user_access_begin */
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
@@ -426,7 +633,8 @@ do { \
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
-#endif
+#endif /* !user_access_begin */
+
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
@@ -436,6 +644,239 @@ static inline void user_access_restore(unsigned long flags) { }
#define user_read_access_end user_access_end
#endif
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
+#define user_rw_access_begin(u, s) user_access_begin(u, s)
+#define user_rw_access_end() user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode) \
+static __always_inline void __user * \
+class_user_##_mode##_begin(void __user *ptr) \
+{ \
+ return ptr; \
+} \
+ \
+static __always_inline void \
+class_user_##_mode##_end(void __user *ptr) \
+{ \
+ user_##_mode##_access_end(); \
+} \
+ \
+DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
+ class_user_##_mode##_end(_T), \
+ class_user_##_mode##_begin(ptr), void __user *ptr) \
+ \
+static __always_inline class_user_##_mode##_access_t \
+class_user_##_mode##_access_ptr(void __user *scope) \
+{ \
+ return scope; \
+}
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_user_access_begin - Start a scoped user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * Internal helper for __scoped_user_access(). Don't use directly.
+ */
+#define __scoped_user_access_begin(mode, uptr, size, elbl) \
+({ \
+ typeof(uptr) __retptr; \
+ \
+ if (can_do_masked_user_access()) { \
+ __retptr = masked_user_##mode##_access_begin(uptr); \
+ } else { \
+ __retptr = uptr; \
+ if (!user_##mode##_access_begin(uptr, size)) \
+ goto elbl; \
+ } \
+ __retptr; \
+})
+
+/**
+ * __scoped_user_access - Open a scope for user access
+ * @mode: The mode of the access class (read, write, rw)
+ * @uptr: The pointer to access user space memory
+ * @size: Size of the access
+ * @elbl: Error label to goto when the access region is rejected. It
+ * must be placed outside the scope
+ *
+ * If the user access function inside the scope requires a fault label, it
+ * can use @elbl or a different label outside the scope, which requires
+ * that user access which is implemented with ASM GOTO has been properly
+ * wrapped. See unsafe_get_user() for reference.
+ *
+ * scoped_user_rw_access(ptr, efault) {
+ * unsafe_get_user(rval, &ptr->rval, efault);
+ * unsafe_put_user(wval, &ptr->wval, efault);
+ * }
+ * return 0;
+ * efault:
+ * return -EFAULT;
+ *
+ * The scope is internally implemented as an auto-terminating nested for()
+ * loop, which can be left with 'return', 'break' and 'goto' at any
+ * point.
+ *
+ * When the scope is left, user_##@_mode##_access_end() is automatically
+ * invoked.
+ *
+ * When the architecture supports masked user access and the access region
+ * which is determined by @uptr and @size is not a valid user space
+ * address (i.e. not below TASK_SIZE), the scope sets the pointer to a
+ * faulting user space address and does not terminate early. This
+ * optimizes for the good case and lets the performance-uncritical bad
+ * case go through the fault.
+ *
+ * The eventual modification of the pointer is limited to the scope.
+ * Outside of the scope the original pointer value is unmodified, so that
+ * the original pointer value is available for diagnostic purposes in an
+ * out of scope fault path.
+ *
+ * Nesting scoped user access into a user access scope is invalid and fails
+ * the build. Nesting into other guards, e.g. pagefault, is safe.
+ *
+ * The masked variant does not check the size of the access and relies on a
+ * mapping hole (e.g. a guard page) to catch an out-of-range pointer; the
+ * first access to user memory inside the scope has to be within
+ * @uptr ... @uptr + PAGE_SIZE - 1
+ *
+ * Don't use directly. Use scoped_user_$MODE_access() instead.
+ */
+#define __scoped_user_access(mode, uptr, size, elbl) \
+for (bool done = false; !done; done = true) \
+ for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+ !done; done = true) \
+ for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
+ /* Force modified pointer usage within the scope */ \
+ for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
+
+/**
+ * scoped_user_read_access_size - Start a scoped user read access with given size
+ * @usrc: Pointer to the user space address to read from
+ * @size: Size of the access starting from @usrc
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access_size(usrc, size, elbl) \
+ __scoped_user_access(read, usrc, size, elbl)
+
+/**
+ * scoped_user_read_access - Start a scoped user read access
+ * @usrc: Pointer to the user space address to read from
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @usrc is determined via sizeof(*@usrc).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_read_access(usrc, elbl) \
+ scoped_user_read_access_size(usrc, sizeof(*(usrc)), elbl)
+
+/**
+ * scoped_user_write_access_size - Start a scoped user write access with given size
+ * @udst: Pointer to the user space address to write to
+ * @size: Size of the access starting from @udst
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access_size(udst, size, elbl) \
+ __scoped_user_access(write, udst, size, elbl)
+
+/**
+ * scoped_user_write_access - Start a scoped user write access
+ * @udst: Pointer to the user space address to write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @udst is determined via sizeof(*@udst).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_write_access(udst, elbl) \
+ scoped_user_write_access_size(udst, sizeof(*(udst)), elbl)
+
+/**
+ * scoped_user_rw_access_size - Start a scoped user read/write access with given size
+ * @uptr:	Pointer to the user space address to read from and write to
+ * @size: Size of the access starting from @uptr
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access_size(uptr, size, elbl) \
+ __scoped_user_access(rw, uptr, size, elbl)
+
+/**
+ * scoped_user_rw_access - Start a scoped user read/write access
+ * @uptr:	Pointer to the user space address to read from and write to
+ * @elbl: Error label to goto when the access region is rejected
+ *
+ * The size of the access starting from @uptr is determined via sizeof(*@uptr).
+ *
+ * For further information see __scoped_user_access() above.
+ */
+#define scoped_user_rw_access(uptr, elbl) \
+ scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl)
+
+/**
+ * get_user_inline - Read user data inlined
+ * @val: The variable to store the value read from user memory
+ * @usrc: Pointer to the user space memory to read from
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of get_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define get_user_inline(val, usrc) \
+({ \
+ __label__ efault; \
+ typeof(usrc) _tmpsrc = usrc; \
+ int _ret = 0; \
+ \
+ scoped_user_read_access(_tmpsrc, efault) \
+ unsafe_get_user(val, _tmpsrc, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
+
+/**
+ * put_user_inline - Write to user memory inlined
+ * @val: The value to write
+ * @udst: Pointer to the user space memory to write to
+ *
+ * Return: 0 if successful, -EFAULT when faulted
+ *
+ * Inlined variant of put_user(). Only use when there is a demonstrable
+ * performance reason.
+ */
+#define put_user_inline(val, udst) \
+({ \
+ __label__ efault; \
+ typeof(udst) _tmpdst = udst; \
+ int _ret = 0; \
+ \
+ scoped_user_write_access(_tmpdst, efault) \
+ unsafe_put_user(val, _tmpdst, efault); \
+ if (0) { \
+ efault: \
+ _ret = -EFAULT; \
+ } \
+ _ret; \
+})
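
Tying the pieces together, a hypothetical helper that reads and writes one user struct inside a single scope, mirroring the kernel-doc example above:

struct example_uarg {
	u64 in;
	u64 out;
};

static int example_update(struct example_uarg __user *uptr)
{
	u64 in;

	scoped_user_rw_access(uptr, efault) {
		unsafe_get_user(in, &uptr->in, efault);
		unsafe_put_user(in + 1, &uptr->out, efault);
	}
	return 0;
efault:
	return -EFAULT;
}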
+
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h
index bff7445498de..3ab8d38aedb8 100644
--- a/include/linux/ubsan.h
+++ b/include/linux/ubsan.h
@@ -2,8 +2,13 @@
#ifndef _LINUX_UBSAN_H
#define _LINUX_UBSAN_H
-#ifdef CONFIG_UBSAN_TRAP
-const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type);
+#if defined(CONFIG_UBSAN_TRAP) || defined(CONFIG_UBSAN_KVM_EL2)
+const char *report_ubsan_failure(u32 check_type);
+#else
+static inline const char *report_ubsan_failure(u32 check_type)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/ucopysize.h b/include/linux/ucopysize.h
new file mode 100644
index 000000000000..41c2d9720466
--- /dev/null
+++ b/include/linux/ucopysize.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Perform sanity checking for object sizes for uaccess.h and uio.h. */
+#ifndef __LINUX_UCOPYSIZE_H__
+#define __LINUX_UCOPYSIZE_H__
+
+#include <linux/bug.h>
+
+#ifdef CONFIG_HARDENED_USERCOPY
+#include <linux/jump_label.h>
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
+ validate_usercopy_range);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ if (!__builtin_constant_p(n) &&
+ static_branch_maybe(CONFIG_HARDENED_USERCOPY_DEFAULT_ON,
+ &validate_usercopy_range)) {
+ __check_object_size(ptr, n, to_user);
+ }
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
+extern void __compiletime_error("copy source size is too small")
+__bad_copy_from(void);
+extern void __compiletime_error("copy destination size is too small")
+__bad_copy_to(void);
+
+void __copy_overflow(int size, unsigned long count);
+
+static inline void copy_overflow(int size, unsigned long count)
+{
+ if (IS_ENABLED(CONFIG_BUG))
+ __copy_overflow(size, count);
+}
+
+static __always_inline __must_check bool
+check_copy_size(const void *addr, size_t bytes, bool is_source)
+{
+ int sz = __builtin_object_size(addr, 0);
+ if (unlikely(sz >= 0 && sz < bytes)) {
+ if (!__builtin_constant_p(bytes))
+ copy_overflow(sz, bytes);
+ else if (is_source)
+ __bad_copy_from();
+ else
+ __bad_copy_to();
+ return false;
+ }
+ if (WARN_ON_ONCE(bytes > INT_MAX))
+ return false;
+ check_object_size(addr, bytes, is_source);
+ return true;
+}
+
+#endif /* __LINUX_UCOPYSIZE_H__ */
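
How check_copy_size() reacts to an oversized copy, sketched for a fixed-size kernel buffer (illustrative only):

static unsigned long example(void __user *ubuf, size_t runtime_len)
{
	char kbuf[8] = { };

	/*
	 * copy_to_user(ubuf, kbuf, 16) would fail the build via
	 * __bad_copy_from(): 16 is a compile-time constant larger than
	 * the object.
	 */

	/*
	 * An oversized runtime length instead hits copy_overflow() and
	 * the copy is refused; the full length is returned as uncopied.
	 */
	return copy_to_user(ubuf, kbuf, runtime_len);
}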
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 3eb3f2b9a2a0..58795688a186 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -44,6 +44,12 @@ enum {
UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
};
+/* Per-NUMA-node structure for lockless producer usage. */
+struct udp_prod_queue {
+ struct llist_head ll_root ____cacheline_aligned_in_smp;
+ atomic_t rmem_alloc;
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
@@ -56,6 +62,12 @@ struct udp_sock {
int pending; /* Any pending frames ? */
__u8 encap_type; /* Is this an Encapsulation socket? */
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ /* For UDP 4-tuple hash */
+ __u16 udp_lrpa_hash;
+ struct hlist_nulls_node udp_lrpa_node;
+#endif
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -84,6 +96,8 @@ struct udp_sock {
struct sk_buff *skb,
int nhoff);
+ struct udp_prod_queue *udp_prod_queue;
+
/* udp_recvmsg try to use this before splicing sk_receive_queue */
struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;
@@ -95,6 +109,14 @@ struct udp_sock {
/* Cache friendly copy of sk->sk_peek_off >= 0 */
bool peeking_with_offset;
+
+ /*
+ * Accounting for the tunnel GRO fastpath.
+	 * Not protected by a compile-time guard, as it uses space available in
+ * the last UDP socket cacheline.
+ */
+ struct hlist_node tunnel_list;
+ struct numa_drop_counters drop_counters;
};
#define udp_test_bit(nr, sk) \
@@ -203,9 +225,26 @@ static inline void udp_allow_gso(struct sock *sk)
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
+#define udp_portaddr_for_each_entry_from(__sk) \
+ hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)
+
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
+ hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
+#endif
+
#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
+static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
+{
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+ return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
+#else
+ return NULL;
+#endif
+}
+
#endif /* _LINUX_UDP_H */
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index f85ec5613721..2dc767e08f54 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -132,6 +132,7 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
u32 map_id_down(struct uid_gid_map *map, u32 id);
u32 map_id_up(struct uid_gid_map *map, u32 id);
+u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count);
#else
@@ -186,6 +187,11 @@ static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
return id;
}
+static inline u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
+{
+ return id;
+}
+
static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
{
return id;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 7020adedfa08..5b127043a151 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -6,11 +6,12 @@
#define __LINUX_UIO_H
#include <linux/kernel.h>
-#include <linux/thread_info.h>
#include <linux/mm_types.h>
+#include <linux/ucopysize.h>
#include <uapi/linux/uio.h>
struct page;
+struct folio_queue;
typedef unsigned int __bitwise iov_iter_extraction_t;
@@ -25,6 +26,7 @@ enum iter_type {
ITER_IOVEC,
ITER_BVEC,
ITER_KVEC,
+ ITER_FOLIOQ,
ITER_XARRAY,
ITER_DISCARD,
};
@@ -66,6 +68,7 @@ struct iov_iter {
const struct iovec *__iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
+ const struct folio_queue *folioq;
struct xarray *xarray;
void __user *ubuf;
};
@@ -74,10 +77,20 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
+ u8 folioq_slot;
loff_t xarray_start;
};
};
+typedef __u16 uio_meta_flags_t;
+
+struct uio_meta {
+ uio_meta_flags_t flags;
+ u16 app_tag;
+ u64 seed;
+ struct iov_iter iter;
+};
+
static inline const struct iovec *iter_iov(const struct iov_iter *iter)
{
if (iter->iter_type == ITER_UBUF)
@@ -86,7 +99,13 @@ static inline const struct iovec *iter_iov(const struct iov_iter *iter)
}
#define iter_iov_addr(iter) (iter_iov(iter)->iov_base + (iter)->iov_offset)
-#define iter_iov_len(iter) (iter_iov(iter)->iov_len - (iter)->iov_offset)
+
+static inline size_t iter_iov_len(const struct iov_iter *i)
+{
+ if (i->iter_type == ITER_UBUF)
+ return i->count;
+ return iter_iov(i)->iov_len - i->iov_offset;
+}
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
@@ -126,6 +145,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i)
return iov_iter_type(i) == ITER_DISCARD;
}
+static inline bool iov_iter_is_folioq(const struct iov_iter *i)
+{
+ return iov_iter_type(i) == ITER_FOLIOQ;
+}
+
static inline bool iov_iter_is_xarray(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_XARRAY;
@@ -158,8 +182,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
-size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
- size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes);
@@ -169,6 +191,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
+size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
@@ -180,10 +204,10 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
-static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
- size_t offset, size_t bytes, struct iov_iter *i)
+static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
{
- return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
+ return copy_page_from_iter(&folio->page, offset, bytes, i);
}
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
@@ -262,8 +286,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
-bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
- unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
@@ -273,6 +295,9 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
+void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+ const struct folio_queue *folioq,
+ unsigned int first_slot, unsigned int offset, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
diff --git a/include/asm-generic/unaligned.h b/include/linux/unaligned.h
index a84c64e5f11e..4a9651017e3c 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/linux/unaligned.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
+#ifndef __LINUX_UNALIGNED_H
+#define __LINUX_UNALIGNED_H
/*
* This is the most generic implementation of unaligned accesses
@@ -8,16 +8,7 @@
*/
#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
-})
-
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
-} while (0)
+#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
@@ -152,4 +143,4 @@ static inline u64 get_unaligned_be48(const void *p)
return __get_unaligned_be48(p);
}
-#endif /* __ASM_GENERIC_UNALIGNED_H */
+#endif /* __LINUX_UNALIGNED_H */
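
A usage sketch for the relocated header (get_unaligned_le32() lives in the same file, outside the shown hunk):

/* Read a little-endian u32 length field at arbitrary alignment. */
static u32 example_parse_len(const u8 *buf)
{
	return get_unaligned_le32(buf);
}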
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
index 4d39e6e11a95..5e6b212a2aed 100644
--- a/include/linux/unicode.h
+++ b/include/linux/unicode.h
@@ -16,6 +16,8 @@ struct utf8data_table;
((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
((unsigned int)(REV)))
+#define UTF8_LATEST UNICODE_AGE(12, 1, 0)
+
static inline u8 unicode_major(unsigned int age)
{
return (age >> UNICODE_MAJ_SHIFT) & 0xff;
@@ -76,4 +78,6 @@ int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
struct unicode_map *utf8_load(unsigned int version);
void utf8_unload(struct unicode_map *um);
+int utf8_parse_version(char *version);
+
#endif /* _LINUX_UNICODE_H */
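
A sketch of loading the newest table with the new constant:

static int example_load_latest(struct unicode_map **out)
{
	struct unicode_map *um = utf8_load(UTF8_LATEST);

	if (IS_ERR(um))
		return PTR_ERR(um);

	*out = um;	/* released later with utf8_unload(um) */
	return 0;
}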
diff --git a/include/linux/union_find.h b/include/linux/union_find.h
new file mode 100644
index 000000000000..cfd49263c138
--- /dev/null
+++ b/include/linux/union_find.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_UNION_FIND_H
+#define __LINUX_UNION_FIND_H
+/**
+ * union_find.h - union-find data structure implementation
+ *
+ * This header provides functions and structures to implement the union-find
+ * data structure. The union-find data structure is used to manage disjoint
+ * sets and supports efficient union and find operations.
+ *
+ * See Documentation/core-api/union_find.rst for documentation and samples.
+ */
+
+struct uf_node {
+ struct uf_node *parent;
+ unsigned int rank;
+};
+
+/* This macro is used for static initialization of a union-find node. */
+#define UF_INIT_NODE(node) {.parent = &node, .rank = 0}
+
+/**
+ * uf_node_init - Initialize a union-find node
+ * @node: pointer to the union-find node to be initialized
+ *
+ * This function sets the parent of the node to itself and
+ * initializes its rank to 0.
+ */
+static inline void uf_node_init(struct uf_node *node)
+{
+ node->parent = node;
+ node->rank = 0;
+}
+
+/* find the root of a node */
+struct uf_node *uf_find(struct uf_node *node);
+
+/* Merge the sets containing the two nodes */
+void uf_union(struct uf_node *node1, struct uf_node *node2);
+
+#endif /* __LINUX_UNION_FIND_H */
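
A self-contained sketch of the API declared above:

static bool example_connected(void)
{
	struct uf_node a, b, c;

	uf_node_init(&a);
	uf_node_init(&b);
	uf_node_init(&c);

	uf_union(&a, &b);	/* sets are now {a, b} and {c} */

	return uf_find(&a) == uf_find(&b) &&	/* same set */
	       uf_find(&a) != uf_find(&c);	/* disjoint */
}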
diff --git a/include/linux/unroll.h b/include/linux/unroll.h
new file mode 100644
index 000000000000..186b71de740f
--- /dev/null
+++ b/include/linux/unroll.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __UNROLL_H
+#define __UNROLL_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_CC_IS_CLANG
+#define __pick_unrolled(x, y) _Pragma(#x)
+#else
+#define __pick_unrolled(x, y) _Pragma(#y)
+#endif
+
+/**
+ * unrolled - loop attributes to ask the compiler to unroll it
+ *
+ * Usage:
+ *
+ * #define BATCH 8
+ *
+ * unrolled_count(BATCH)
+ * for (u32 i = 0; i < BATCH; i++)
+ * // loop body without cross-iteration dependencies
+ *
+ * This is only a hint and the compiler is free to disable unrolling if it
+ * thinks the count is suboptimal and may hurt performance and/or hugely
+ * increase object code size.
+ * Not having any cross-iteration dependencies (i.e. when iter x + 1 depends
+ * on what iter x will do with variables) is not a strict requirement, but
+ * provides best performance and object code size.
+ * Available only on Clang and GCC 8.x onwards.
+ */
+
+/* Ask the compiler to pick an optimal unroll count, Clang only */
+#define unrolled \
+ __pick_unrolled(clang loop unroll(enable), /* nothing */)
+
+/* Unroll each @n iterations of the loop */
+#define unrolled_count(n) \
+ __pick_unrolled(clang loop unroll_count(n), GCC unroll n)
+
+/* Unroll the whole loop */
+#define unrolled_full \
+ __pick_unrolled(clang loop unroll(full), GCC unroll 65534)
+
+/* Never unroll the loop */
+#define unrolled_none \
+ __pick_unrolled(clang loop unroll(disable), GCC unroll 1)
+
+#define UNROLL(N, MACRO, args...) CONCATENATE(__UNROLL_, N)(MACRO, args)
+
+#define __UNROLL_0(MACRO, args...)
+#define __UNROLL_1(MACRO, args...) __UNROLL_0(MACRO, args) MACRO(0, args)
+#define __UNROLL_2(MACRO, args...) __UNROLL_1(MACRO, args) MACRO(1, args)
+#define __UNROLL_3(MACRO, args...) __UNROLL_2(MACRO, args) MACRO(2, args)
+#define __UNROLL_4(MACRO, args...) __UNROLL_3(MACRO, args) MACRO(3, args)
+#define __UNROLL_5(MACRO, args...) __UNROLL_4(MACRO, args) MACRO(4, args)
+#define __UNROLL_6(MACRO, args...) __UNROLL_5(MACRO, args) MACRO(5, args)
+#define __UNROLL_7(MACRO, args...) __UNROLL_6(MACRO, args) MACRO(6, args)
+#define __UNROLL_8(MACRO, args...) __UNROLL_7(MACRO, args) MACRO(7, args)
+#define __UNROLL_9(MACRO, args...) __UNROLL_8(MACRO, args) MACRO(8, args)
+#define __UNROLL_10(MACRO, args...) __UNROLL_9(MACRO, args) MACRO(9, args)
+#define __UNROLL_11(MACRO, args...) __UNROLL_10(MACRO, args) MACRO(10, args)
+#define __UNROLL_12(MACRO, args...) __UNROLL_11(MACRO, args) MACRO(11, args)
+#define __UNROLL_13(MACRO, args...) __UNROLL_12(MACRO, args) MACRO(12, args)
+#define __UNROLL_14(MACRO, args...) __UNROLL_13(MACRO, args) MACRO(13, args)
+#define __UNROLL_15(MACRO, args...) __UNROLL_14(MACRO, args) MACRO(14, args)
+#define __UNROLL_16(MACRO, args...) __UNROLL_15(MACRO, args) MACRO(15, args)
+#define __UNROLL_17(MACRO, args...) __UNROLL_16(MACRO, args) MACRO(16, args)
+#define __UNROLL_18(MACRO, args...) __UNROLL_17(MACRO, args) MACRO(17, args)
+#define __UNROLL_19(MACRO, args...) __UNROLL_18(MACRO, args) MACRO(18, args)
+#define __UNROLL_20(MACRO, args...) __UNROLL_19(MACRO, args) MACRO(19, args)
+
+#endif /* __UNROLL_H */
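
Both unrolling styles in one sketch:

#define INIT_ELEM(i, arr)	(arr)[i] = (i);

static void example_fill(u32 *dst, const u32 *src)
{
	/* Hint the compiler to unroll by 4 */
	unrolled_count(4)
	for (u32 i = 0; i < 16; i++)
		dst[i] = src[i];

	/* Textual unrolling: expands to dst[0] = (0); ... dst[7] = (7); */
	UNROLL(8, INIT_ELEM, dst)
}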
diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h
new file mode 100644
index 000000000000..bc7ae7d21900
--- /dev/null
+++ b/include/linux/unwind_deferred.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNWIND_USER_DEFERRED_H
+#define _LINUX_UNWIND_USER_DEFERRED_H
+
+#include <linux/task_work.h>
+#include <linux/unwind_user.h>
+#include <linux/unwind_deferred_types.h>
+
+#ifdef CONFIG_UNWIND_USER
+
+enum {
+ UNWIND_PENDING_BIT = 0,
+ UNWIND_USED_BIT,
+};
+
+enum {
+ UNWIND_PENDING = BIT(UNWIND_PENDING_BIT),
+
+ /* Set if the unwinding was used (directly or deferred) */
+ UNWIND_USED = BIT(UNWIND_USED_BIT)
+};
+
+void unwind_task_init(struct task_struct *task);
+void unwind_task_free(struct task_struct *task);
+
+int unwind_user_faultable(struct unwind_stacktrace *trace);
+
+int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func);
+int unwind_deferred_request(struct unwind_work *work, u64 *cookie);
+void unwind_deferred_cancel(struct unwind_work *work);
+
+void unwind_deferred_task_exit(struct task_struct *task);
+
+static __always_inline void unwind_reset_info(void)
+{
+ struct unwind_task_info *info = &current->unwind_info;
+ unsigned long bits = atomic_long_read(&info->unwind_mask);
+
+ /* Was there any unwinding? */
+ if (likely(!bits))
+ return;
+
+ do {
+		/* Is a task_work going to run again before going back to user space? */
+ if (bits & UNWIND_PENDING)
+ return;
+ } while (!atomic_long_try_cmpxchg(&info->unwind_mask, &bits, 0UL));
+ current->unwind_info.id.id = 0;
+
+ if (unlikely(info->cache)) {
+ info->cache->nr_entries = 0;
+ info->cache->unwind_completed = 0;
+ }
+}
+
+#else /* !CONFIG_UNWIND_USER */
+
+static inline void unwind_task_init(struct task_struct *task) {}
+static inline void unwind_task_free(struct task_struct *task) {}
+
+static inline int unwind_user_faultable(struct unwind_stacktrace *trace)
+{ return -ENOSYS; }
+
+static inline int
+unwind_deferred_init(struct unwind_work *work, unwind_callback_t func)
+{ return -ENOSYS; }
+
+static inline int
+unwind_deferred_request(struct unwind_work *work, u64 *cookie)
+{ return -ENOSYS; }
+
+static inline void unwind_deferred_cancel(struct unwind_work *work) {}
+
+static inline void unwind_deferred_task_exit(struct task_struct *task) {}
+static inline void unwind_reset_info(void) {}
+
+#endif /* !CONFIG_UNWIND_USER */
+
+#endif /* _LINUX_UNWIND_USER_DEFERRED_H */
diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h
new file mode 100644
index 000000000000..18fa3932f61c
--- /dev/null
+++ b/include/linux/unwind_deferred_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNWIND_USER_DEFERRED_TYPES_H
+#define _LINUX_UNWIND_USER_DEFERRED_TYPES_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+
+struct unwind_cache {
+ unsigned long unwind_completed;
+ unsigned int nr_entries;
+ unsigned long entries[];
+};
+
+/*
+ * The unwind_task_id is a unique identifier that maps to a user space
+ * stacktrace. It is generated the first time a deferred user space
+ * stacktrace is requested after a task has entered the kernel and
+ * is cleared to zero when it exits. The mapped id will be a non-zero
+ * number.
+ *
+ * To simplify the generation of the 64 bit number, 32 bits will be
+ * the CPU it was generated on, and the other 32 bits will be a per
+ * cpu counter that gets incremented by two every time a new identifier
+ * is generated. The LSB will always be set to keep the value
+ * from being zero.
+ */
+union unwind_task_id {
+ struct {
+ u32 cpu;
+ u32 cnt;
+ };
+ u64 id;
+};
+
+struct unwind_task_info {
+ atomic_long_t unwind_mask;
+ struct unwind_cache *cache;
+ struct callback_head work;
+ union unwind_task_id id;
+};
+
+struct unwind_work;
+struct unwind_stacktrace;
+
+typedef void (*unwind_callback_t)(struct unwind_work *work,
+ struct unwind_stacktrace *trace,
+ u64 cookie);
+
+struct unwind_work {
+ struct list_head list;
+ unwind_callback_t func;
+ int bit;
+};
+
+#endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */
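
A sketch of the id scheme the comment above describes; the helper and its counter handling are illustrative, not the actual generator:

static u64 example_make_id(u32 cpu, u32 *pcpu_cnt)
{
	union unwind_task_id id = {
		.cpu = cpu,
		.cnt = (*pcpu_cnt += 2) | 1,	/* LSB set: never zero */
	};

	return id.id;
}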
diff --git a/include/linux/unwind_user.h b/include/linux/unwind_user.h
new file mode 100644
index 000000000000..7f7282516bf5
--- /dev/null
+++ b/include/linux/unwind_user.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNWIND_USER_H
+#define _LINUX_UNWIND_USER_H
+
+#include <linux/unwind_user_types.h>
+#include <asm/unwind_user.h>
+
+#ifndef ARCH_INIT_USER_FP_FRAME
+ #define ARCH_INIT_USER_FP_FRAME
+#endif
+
+int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries);
+
+#endif /* _LINUX_UNWIND_USER_H */
diff --git a/include/linux/unwind_user_types.h b/include/linux/unwind_user_types.h
new file mode 100644
index 000000000000..412729a269bc
--- /dev/null
+++ b/include/linux/unwind_user_types.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNWIND_USER_TYPES_H
+#define _LINUX_UNWIND_USER_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * Unwind types, listed in priority order: lower numbers are attempted first if
+ * available.
+ */
+enum unwind_user_type_bits {
+ UNWIND_USER_TYPE_FP_BIT = 0,
+
+ NR_UNWIND_USER_TYPE_BITS,
+};
+
+enum unwind_user_type {
+ /* Type "none" for the start of stack walk iteration. */
+ UNWIND_USER_TYPE_NONE = 0,
+ UNWIND_USER_TYPE_FP = BIT(UNWIND_USER_TYPE_FP_BIT),
+};
+
+struct unwind_stacktrace {
+ unsigned int nr;
+ unsigned long *entries;
+};
+
+struct unwind_user_frame {
+ s32 cfa_off;
+ s32 ra_off;
+ s32 fp_off;
+ bool use_fp;
+};
+
+struct unwind_user_state {
+ unsigned long ip;
+ unsigned long sp;
+ unsigned long fp;
+ enum unwind_user_type current_type;
+ unsigned int available_types;
+ bool topmost;
+ bool done;
+};
+
+#endif /* _LINUX_UNWIND_USER_TYPES_H */
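
The offsets in struct unwind_user_frame are enough to express one frame-pointer step. A hedged sketch of that step, assuming copy_from_user_nofault() as the accessor; the in-tree walker may differ in detail:

static int fp_unwind_step(struct unwind_user_state *state,
			  const struct unwind_user_frame *frame)
{
	unsigned long cfa, ra, fp;

	/* CFA is an offset from either FP or SP, per use_fp. */
	cfa = (frame->use_fp ? state->fp : state->sp) + frame->cfa_off;

	/* Return address and caller FP live at fixed offsets from the CFA. */
	if (copy_from_user_nofault(&ra, (void __user *)(cfa + frame->ra_off),
				   sizeof(ra)) ||
	    copy_from_user_nofault(&fp, (void __user *)(cfa + frame->fp_off),
				   sizeof(fp)))
		return -EFAULT;

	state->ip = ra;
	state->sp = cfa;
	state->fp = fp;
	return 0;
}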
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index f46e0ca0169c..ee3d36eda45d 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -15,34 +15,51 @@
#include <linux/rbtree.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/seqlock.h>
+#include <linux/mutex.h>
+struct uprobe;
struct vm_area_struct;
struct mm_struct;
struct inode;
struct notifier_block;
struct page;
+/*
+ * Allowed return values from an uprobe consumer's handler callback,
+ * with the following meaning:
+ *
+ * UPROBE_HANDLER_REMOVE
+ * - Remove the uprobe breakpoint from current->mm.
+ * UPROBE_HANDLER_IGNORE
+ * - Ignore ret_handler callback for this consumer.
+ */
#define UPROBE_HANDLER_REMOVE 1
-#define UPROBE_HANDLER_MASK 1
+#define UPROBE_HANDLER_IGNORE 2
#define MAX_URETPROBE_DEPTH 64
-enum uprobe_filter_ctx {
- UPROBE_FILTER_REGISTER,
- UPROBE_FILTER_UNREGISTER,
- UPROBE_FILTER_MMAP,
-};
+#define UPROBE_NO_TRAMPOLINE_VADDR (~0UL)
struct uprobe_consumer {
- int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+ /*
+ * handler() can return UPROBE_HANDLER_REMOVE to signal the need to
+ * unregister uprobe for current process. If UPROBE_HANDLER_REMOVE is
+ * returned, filter() callback has to be implemented as well and it
+ * should return false to "confirm" the decision to uninstall uprobe
+ * for the current process. If filter() is omitted or returns true,
+ * UPROBE_HANDLER_REMOVE is effectively ignored.
+ */
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data);
int (*ret_handler)(struct uprobe_consumer *self,
unsigned long func,
- struct pt_regs *regs);
- bool (*filter)(struct uprobe_consumer *self,
- enum uprobe_filter_ctx ctx,
- struct mm_struct *mm);
+ struct pt_regs *regs, __u64 *data);
+ bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm);
+
+ struct list_head cons_node;
- struct uprobe_consumer *next;
+ __u64 id; /* set when uprobe_consumer is registered */
};
#ifdef CONFIG_UPROBES
@@ -55,12 +72,66 @@ enum uprobe_task_state {
UTASK_SSTEP_TRAPPED,
};
+/* The state of hybrid-lifetime uprobe inside struct return_instance */
+enum hprobe_state {
+ HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */
+ HPROBE_STABLE, /* refcounted uprobe */
+ HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */
+ HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */
+};
+
+/*
+ * Hybrid lifetime uprobe. Represents a uprobe instance that could be either
+ * SRCU protected (with SRCU protection eventually potentially timing out),
+ * refcounted using uprobe->ref, or there could be no valid uprobe (NULL).
+ *
+ * hprobe's internal state is set up such that a background timer thread can
+ * atomically "downgrade" a temporarily RCU-protected uprobe into a refcounted one
+ * (or no uprobe, if refcounting failed).
+ *
+ * The *stable* pointer always points to the uprobe (or could be NULL if there
+ * was no valid underlying uprobe to begin with).
+ *
+ * *leased* pointer is the key to achieving race-free atomic lifetime state
+ * transition and can have three possible states:
+ * - either the same non-NULL value as *stable*, in which case uprobe is
+ * SRCU-protected;
+ * - NULL, in which case uprobe (if there is any) is refcounted;
+ * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU
+ * protected initially, but SRCU period timed out and we attempted to
+ * convert it to refcounted, but refcount_inc_not_zero() failed, because
+ * uprobe effectively went away (the last consumer unsubscribed). In this
+ * case it's important to know that *stable* pointer (which still has
+ * non-NULL uprobe pointer) shouldn't be used, because lifetime of
+ * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an
+ * internal marker and is handled transparently by hprobe_fetch() helper.
+ *
+ * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for
+ * SRCU unlocking.
+ *
+ * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe
+ * state transitioning. It all hinges on an atomic xchg() over the *leased*
+ * pointer; the *stable* pointer, once set, is not modified concurrently.
+ */
+struct hprobe {
+ enum hprobe_state state;
+ int srcu_idx;
+ struct uprobe *uprobe;
+};
+
/*
* uprobe_task: Metadata of a task while it singlesteps.
*/
struct uprobe_task {
enum uprobe_task_state state;
+ unsigned int depth;
+ struct return_instance *return_instances;
+
+ struct return_instance *ri_pool;
+ struct timer_list ri_timer;
+ seqcount_t ri_seqcount;
+
union {
struct {
struct arch_uprobe_task autask;
@@ -75,20 +146,35 @@ struct uprobe_task {
struct uprobe *active_uprobe;
unsigned long xol_vaddr;
+ bool signal_denied;
- struct return_instance *return_instances;
- unsigned int depth;
+ struct arch_uprobe *auprobe;
+};
+
+struct return_consumer {
+ __u64 cookie;
+ __u64 id;
};
struct return_instance {
- struct uprobe *uprobe;
+ struct hprobe hprobe;
unsigned long func;
unsigned long stack; /* stack pointer */
unsigned long orig_ret_vaddr; /* original return address */
bool chained; /* true, if instance is nested */
+ int cons_cnt; /* total number of session consumers */
struct return_instance *next; /* keep as stack */
-};
+ struct rcu_head rcu;
+
+ /* singular pre-allocated return_consumer instance for common case */
+ struct return_consumer consumer;
+ /*
+ * extra return_consumer instances for rare cases of multiple session consumers,
+ * contains (cons_cnt - 1) elements
+ */
+ struct return_consumer *extra_consumers;
+} ____cacheline_aligned;
enum rp_check {
RP_CHECK_CALL,
@@ -100,27 +186,37 @@ struct xol_area;
struct uprobes_state {
struct xol_area *xol_area;
+#ifdef CONFIG_X86_64
+ struct hlist_head head_tramps;
+#endif
};
+typedef int (*uprobe_write_verify_t)(struct page *page, unsigned long vaddr,
+ uprobe_opcode_t *insn, int nbytes, void *data);
+
extern void __init uprobes_init(void);
-extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
-extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int set_swbp(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
+extern int set_orig_insn(struct arch_uprobe *aup, struct vm_area_struct *vma, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
-extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
-extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
-extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
-extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
-extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma, unsigned long vaddr, uprobe_opcode_t,
+ bool is_register);
+extern int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma, const unsigned long opcode_vaddr,
+ uprobe_opcode_t *insn, int nbytes, uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr,
+ void *data);
+extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
+extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
+extern void uprobe_unregister_sync(void);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
-extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
+extern void uprobe_copy_process(struct task_struct *t, u64 flags);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
@@ -138,6 +234,14 @@ extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check c
extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len);
+extern void uprobe_handle_trampoline(struct pt_regs *regs);
+extern void *arch_uretprobe_trampoline(unsigned long *psize);
+extern unsigned long uprobe_get_trampoline_vaddr(void);
+extern void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len);
+extern void arch_uprobe_clear_state(struct mm_struct *mm);
+extern void arch_uprobe_init_state(struct mm_struct *mm);
+extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
+extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
@@ -148,22 +252,21 @@ static inline void uprobes_init(void)
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
-static inline int
-uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
-{
- return -ENOSYS;
-}
-static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
+static inline struct uprobe *
+uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
static inline int
-uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+uprobe_apply(struct uprobe* uprobe, struct uprobe_consumer *uc, bool add)
{
return -ENOSYS;
}
static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+}
+static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
@@ -194,7 +297,7 @@ static inline bool uprobe_deny_signal(void)
static inline void uprobe_free_utask(struct task_struct *t)
{
}
-static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
+static inline void uprobe_copy_process(struct task_struct *t, u64 flags)
{
}
static inline void uprobe_clear_state(struct mm_struct *mm)
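
A hedged sketch of the reworked registration flow above: uprobe_register() now returns the uprobe itself, and unregistration splits into a _nosync/_sync pair; inode and offset are placeholders, and the per-consumer *data cookie is not exercised:

static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs,
		      __u64 *data)
{
	return 0;			/* keep the breakpoint installed */
}

static struct uprobe_consumer my_consumer = {
	.handler = my_handler,
};

static struct uprobe *attach(struct inode *inode, loff_t offset)
{
	/* ref_ctr_offset is now folded into the single entry point */
	return uprobe_register(inode, offset, 0, &my_consumer);
}

static void detach(struct uprobe *uprobe)
{
	uprobe_unregister_nosync(uprobe, &my_consumer);
	uprobe_unregister_sync();	/* waits out in-flight handlers */
}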
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1913a13833f2..e85105939af8 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -51,6 +51,7 @@ struct ep_device;
* @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
* @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
* @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
+ * @eusb2_isoc_ep_comp: eUSB2 isoc companion descriptor for this endpoint
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
@@ -64,9 +65,10 @@ struct ep_device;
* descriptor within an active interface in a given USB configuration.
*/
struct usb_host_endpoint {
- struct usb_endpoint_descriptor desc;
- struct usb_ss_ep_comp_descriptor ss_ep_comp;
- struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_endpoint_descriptor desc;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_eusb2_isoc_ep_comp_descriptor eusb2_isoc_ep_comp;
struct list_head urb_list;
void *hcpriv;
struct ep_device *ep_dev; /* For sysfs info */
@@ -495,6 +497,12 @@ struct usb_dev_state;
struct usb_tt;
+enum usb_link_tunnel_mode {
+ USB_LINK_UNKNOWN = 0,
+ USB_LINK_NATIVE,
+ USB_LINK_TUNNELED,
+};
+
enum usb_port_connect_type {
USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
USB_PORT_CONNECT_TYPE_HOT_PLUG,
@@ -605,6 +613,8 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
+ * @tunnel_mode: Connection native or tunneled over USB4
+ * @usb4_link: device link to the USB4 host interface
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -626,6 +636,8 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
+ * @offload_at_suspend: offloading of activities during suspend is enabled.
+ * @offload_usage: number of offload activities happening on this usb device.
* @slot_id: Slot ID assigned by xHCI
 * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
* @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
@@ -714,6 +726,10 @@ struct usb_device {
unsigned do_remote_wakeup:1;
unsigned reset_resume:1;
unsigned port_is_suspended:1;
+ unsigned offload_at_suspend:1;
+ int offload_usage;
+ enum usb_link_tunnel_mode tunnel_mode;
+ struct device_link *usb4_link;
int slot_id;
struct usb2_lpm_parameters l1_params;
@@ -805,10 +821,10 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
#else
-static inline int usb_enable_autosuspend(struct usb_device *udev)
-{ return 0; }
-static inline int usb_disable_autosuspend(struct usb_device *udev)
-{ return 0; }
+static inline void usb_enable_autosuspend(struct usb_device *udev)
+{ }
+static inline void usb_disable_autosuspend(struct usb_device *udev)
+{ }
static inline int usb_autopm_get_interface(struct usb_interface *intf)
{ return 0; }
@@ -829,6 +845,20 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
{ }
#endif
+#if IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND)
+int usb_offload_get(struct usb_device *udev);
+int usb_offload_put(struct usb_device *udev);
+bool usb_offload_check(struct usb_device *udev);
+#else
+
+static inline int usb_offload_get(struct usb_device *udev)
+{ return 0; }
+static inline int usb_offload_put(struct usb_device *udev)
+{ return 0; }
+static inline bool usb_offload_check(struct usb_device *udev)
+{ return false; }
+#endif
+
extern int usb_disable_lpm(struct usb_device *udev);
extern void usb_enable_lpm(struct usb_device *udev);
/* Same as above, but these functions lock/unlock the bandwidth_mutex. */
@@ -1121,8 +1151,8 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
/* ----------------------------------------------------------------------- */
/* Stuff for dynamic usb ids */
+extern struct mutex usb_dynids_lock;
struct usb_dynids {
- spinlock_t lock;
struct list_head list;
};
@@ -1171,6 +1201,7 @@ extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
* post_reset method is called.
* @post_reset: Called by usb_reset_device() after the device
* has been reset
+ * @shutdown: Called at shut-down time to quiesce the device.
* @id_table: USB drivers use ID table to support hotplugging.
* Export this with MODULE_DEVICE_TABLE(usb,...). This must be set
* or your driver's probe function will never get called.
@@ -1222,6 +1253,8 @@ struct usb_driver {
int (*pre_reset)(struct usb_interface *intf);
int (*post_reset)(struct usb_interface *intf);
+ void (*shutdown)(struct usb_interface *intf);
+
const struct usb_device_id *id_table;
const struct attribute_group **dev_groups;
@@ -1232,7 +1265,7 @@ struct usb_driver {
unsigned int disable_hub_initiated_lpm:1;
unsigned int soft_unbind:1;
};
-#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
+#define to_usb_driver(d) container_of_const(d, struct usb_driver, driver)
/**
* struct usb_device_driver - identifies USB device driver to usbcore
@@ -1283,8 +1316,7 @@ struct usb_device_driver {
unsigned int supports_autosuspend:1;
unsigned int generic_subclass:1;
};
-#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
- driver)
+#define to_usb_device_driver(d) container_of_const(d, struct usb_device_driver, driver)
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
@@ -1441,6 +1473,10 @@ typedef void (*usb_complete_t)(struct urb *);
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
+ * @sgt: used to hold a scatter gather table returned by usb_alloc_noncoherent(),
+ * which describes the allocated non-coherent and possibly non-contiguous
+ * memory and is guaranteed to have a single DMA-mapped segment. The
+ * allocated memory needs to be freed by usb_free_noncoherent().
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
@@ -1607,6 +1643,7 @@ struct urb {
void *transfer_buffer; /* (in) associated data buffer */
dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
struct scatterlist *sg; /* (in) scatter gather buffer list */
+ struct sg_table *sgt; /* (in) scatter gather table for noncoherent buffer */
int num_mapped_sgs; /* (internal) mapped sg entries */
int num_sgs; /* (in) number of entries in the sg list */
u32 transfer_buffer_length; /* (in) data buffer length */
@@ -1768,7 +1805,6 @@ extern void usb_block_urb(struct urb *urb);
extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
-extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
@@ -1813,6 +1849,16 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
void usb_free_coherent(struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
+enum dma_data_direction;
+
+void *usb_alloc_noncoherent(struct usb_device *dev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma,
+ enum dma_data_direction dir,
+ struct sg_table **table);
+void usb_free_noncoherent(struct usb_device *dev, size_t size,
+ void *addr, enum dma_data_direction dir,
+ struct sg_table *table);
+
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
@@ -2011,6 +2057,12 @@ static inline u16 usb_maxpacket(struct usb_device *udev, int pipe)
return usb_endpoint_maxp(&ep->desc);
}
+u32 usb_endpoint_max_periodic_payload(struct usb_device *udev,
+ const struct usb_host_endpoint *ep);
+
+bool usb_endpoint_is_hs_isoc_double(struct usb_device *udev,
+ const struct usb_host_endpoint *ep);
+
/* translate USB error codes to codes user space understands */
static inline int usb_translate_errors(int error_code)
{
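
A hedged sketch pairing usb_alloc_noncoherent()/usb_free_noncoherent() with the new urb->sgt field described above; assumes <linux/dma-mapping.h> for DMA_FROM_DEVICE and trims error handling:

static int submit_noncoherent_in(struct usb_device *udev, struct urb *urb,
				 unsigned int pipe, size_t len,
				 usb_complete_t complete_fn, void *ctx)
{
	struct sg_table *sgt;
	dma_addr_t dma;
	void *buf;

	buf = usb_alloc_noncoherent(udev, len, GFP_KERNEL, &dma,
				    DMA_FROM_DEVICE, &sgt);
	if (!buf)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, ctx);
	urb->sgt = sgt;			/* lets usbcore sync the buffer */

	return usb_submit_urb(urb, GFP_KERNEL);
	/* completion path: usb_free_noncoherent(udev, len, buf,
	 *					 DMA_FROM_DEVICE, sgt); */
}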
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 2d207cb4837d..4ac082a63173 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -119,6 +119,7 @@ struct cdc_ncm_ctx {
u32 timer_interval;
u32 max_ndp_size;
u8 is_ndp16;
+ u8 filtering_supported;
union {
struct usb_cdc_ncm_ndp16 *delayed_ndp16;
struct usb_cdc_ncm_ndp32 *delayed_ndp32;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5a7f96684ea2..c6451191d2de 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -65,12 +65,15 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
+#define CI_HDRC_HAS_SHORT_PKT_LIMIT BIT(19)
+#define CI_HDRC_OUT_BAND_WAKEUP BIT(20)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
#define CI_HDRC_CONTROLLER_VBUS_EVENT 4
+#define CI_HDRC_CONTROLLER_PULLUP_EVENT 5
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index af3cd2aae4bc..c18041fafa52 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -237,7 +237,7 @@ struct usb_function {
/* internals */
struct list_head list;
DECLARE_BITMAP(endpoints, 32);
- const struct usb_function_instance *fi;
+ struct usb_function_instance *fi;
unsigned int bind_deactivated:1;
};
@@ -256,7 +256,7 @@ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
int usb_func_wakeup(struct usb_function *func);
-#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
+#define MAX_CONFIG_INTERFACES 32
/**
* struct usb_configuration - represents one gadget configuration
@@ -339,9 +339,6 @@ int usb_add_config(struct usb_composite_dev *,
struct usb_configuration *,
int (*)(struct usb_configuration *));
-void usb_remove_config(struct usb_composite_dev *,
- struct usb_configuration *);
-
/* predefined index for usb_composite_driver */
enum {
USB_GADGET_MANUFACTURER_IDX = 0,
diff --git a/include/linux/usb/func_utils.h b/include/linux/usb/func_utils.h
new file mode 100644
index 000000000000..c8795c965109
--- /dev/null
+++ b/include/linux/usb/func_utils.h
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * func_utils.h
+ *
+ * Utility definitions for USB functions
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
+ */
+
+#ifndef _FUNC_UTILS_H_
+#define _FUNC_UTILS_H_
+
+#include <linux/usb/gadget.h>
+#include <linux/overflow.h>
+
+/* Variable Length Array Macros **********************************************/
+#define vla_group(groupname) size_t groupname##__next = 0
+#define vla_group_size(groupname) groupname##__next
+
+#define vla_item(groupname, type, name, n) \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ size_t size = array_size(n, sizeof(type)); \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, size, \
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
+ })
+
+#define vla_item_with_sz(groupname, type, name, n) \
+ size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
+ size_t groupname##_##name##__offset = ({ \
+ size_t offset = 0; \
+ if (groupname##__next != SIZE_MAX) { \
+ size_t align_mask = __alignof__(type) - 1; \
+ offset = (groupname##__next + align_mask) & \
+ ~align_mask; \
+ if (check_add_overflow(offset, groupname##_##name##__sz,\
+ &groupname##__next)) { \
+ groupname##__next = SIZE_MAX; \
+ offset = 0; \
+ } \
+ } \
+ offset; \
+ })
+
+#define vla_ptr(ptr, groupname, name) \
+ ((void *) ((char *)ptr + groupname##_##name##__offset))
+
+struct usb_ep;
+struct usb_request;
+
+/**
+ * alloc_ep_req - returns a usb_request allocated by the gadget driver and
+ * allocates the request's buffer.
+ *
+ * @ep: the endpoint to allocate a usb_request
+ * @len: usb_request's suggested buffer size
+ *
+ * If the @ep direction is OUT, @len will be aligned up to the ep's
+ * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use the
+ * usb_request's length (req->length) to refer to the allocated buffer size.
+ * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
+ */
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
+
+/* Frees a usb_request previously allocated by alloc_ep_req() */
+static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+{
+ WARN_ON(req->buf == NULL);
+ kfree(req->buf);
+ req->buf = NULL;
+ usb_ep_free_request(ep, req);
+}
+
+#endif /* _FUNC_UTILS_H_ */
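
The VLA helpers above compute aligned member offsets within a single allocation and saturate the running size to SIZE_MAX on overflow. A hedged sketch of typical use, with local names only:

static void *pack_example(size_t n_ptrs, size_t n_bytes)
{
	vla_group(d);
	vla_item(d, void *, ptrs, n_ptrs);
	vla_item_with_sz(d, u8, payload, n_bytes);
	void *mem;

	if (vla_group_size(d) == SIZE_MAX)	/* layout overflowed */
		return NULL;

	mem = kzalloc(vla_group_size(d), GFP_KERNEL);
	if (!mem)
		return NULL;

	/* vla_ptr() turns a member name back into an address in 'mem'. */
	memset(vla_ptr(mem, d, payload), 0xff, d_payload__sz);
	return mem;
}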
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 56dda8e1562d..8285b19a25e0 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -15,6 +15,7 @@
#ifndef __LINUX_USB_GADGET_H
#define __LINUX_USB_GADGET_H
+#include <linux/cleanup.h>
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -32,6 +33,7 @@ struct usb_ep;
/**
* struct usb_request - describes one i/o request
+ * @ep: The associated endpoint set by usb_ep_alloc_request().
* @buf: Buffer used for data. Always provide this; some controllers
* only use PIO, or don't use DMA for some endpoints.
* @dma: DMA address corresponding to 'buf'. If you don't set this
@@ -98,6 +100,7 @@ struct usb_ep;
*/
struct usb_request {
+ struct usb_ep *ep;
void *buf;
unsigned length;
dma_addr_t dma;
@@ -229,18 +232,18 @@ struct usb_ep {
const char *name;
const struct usb_ep_ops *ops;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
struct list_head ep_list;
struct usb_ep_caps caps;
bool claimed;
bool enabled;
- unsigned maxpacket:16;
- unsigned maxpacket_limit:16;
- unsigned max_streams:16;
unsigned mult:2;
unsigned maxburst:5;
u8 address;
- const struct usb_endpoint_descriptor *desc;
- const struct usb_ss_ep_comp_descriptor *comp_desc;
+ u16 maxpacket;
+ u16 maxpacket_limit;
+ u16 max_streams;
};
/*-------------------------------------------------------------------------*/
@@ -291,6 +294,28 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep)
/*-------------------------------------------------------------------------*/
+/**
+ * free_usb_request - frees a usb_request object and its buffer
+ * @req: the request being freed
+ *
+ * This helper function frees both the request's buffer and the request object
+ * itself by calling usb_ep_free_request(). Its signature is designed to be used
+ * with DEFINE_FREE() to enable automatic, scope-based cleanup for usb_request
+ * pointers.
+ */
+static inline void free_usb_request(struct usb_request *req)
+{
+ if (!req)
+ return;
+
+ kfree(req->buf);
+ usb_ep_free_request(req->ep, req);
+}
+
+DEFINE_FREE(free_usb_request, struct usb_request *, free_usb_request(_T))
+
+/*-------------------------------------------------------------------------*/
+
struct usb_dcd_config_params {
__u8 bU1devExitLat; /* U1 Device exit Latency */
 #define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less than 1 microsec */
@@ -351,6 +376,9 @@ struct usb_gadget_ops {
* can handle. The UDC must support this and all slower speeds and lower
* number of lanes.
* @state: the state we are now (attached, suspended, configured, etc)
+ * @state_lock: Spinlock protecting the `state` and `teardown` members.
+ * @teardown: True if the device is undergoing teardown, used to prevent
+ * new work from being scheduled during cleanup.
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
* @dev: Driver model state for this abstract device.
@@ -426,6 +454,8 @@ struct usb_gadget {
enum usb_ssp_rate max_ssp_rate;
enum usb_device_state state;
+ spinlock_t state_lock;
+ bool teardown;
const char *name;
struct device dev;
unsigned isoch_delay;
@@ -860,10 +890,6 @@ container_of(str_item, struct gadget_string, item)
int usb_descriptor_fillbuf(void *, unsigned,
const struct usb_descriptor_header **);
-/* build config descriptor from single descriptor vector */
-int usb_gadget_config_buf(const struct usb_config_descriptor *config,
- void *buf, unsigned buflen, const struct usb_descriptor_header **desc);
-
/* copy a NULL-terminated vector of descriptors */
struct usb_descriptor_header **usb_copy_descriptors(
struct usb_descriptor_header **);
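
A hedged sketch of the scope-based cleanup this enables, using __free() from <linux/cleanup.h> (now included above); note the no_free_ptr() hand-off once the request is owned by the UDC:

static int send_once(struct usb_ep *ep, const void *data, unsigned int len)
{
	struct usb_request *req __free(free_usb_request) = NULL;
	struct usb_request *queued;
	int ret;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* sets req->ep */
	if (!req)
		return -ENOMEM;

	req->buf = kmemdup(data, len, GFP_KERNEL);
	if (!req->buf)
		return -ENOMEM;		/* req auto-freed on this path */
	req->length = len;

	/* Transfer ownership out of the cleanup scope before queuing. */
	queued = no_free_ptr(req);
	ret = usb_ep_queue(ep, queued, GFP_KERNEL);
	if (ret)
		free_usb_request(queued);
	return ret;
}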
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
index d61aebd68128..6b5d6838f865 100644
--- a/include/linux/usb/gadget_configfs.h
+++ b/include/linux/usb/gadget_configfs.h
@@ -4,9 +4,6 @@
#include <linux/configfs.h>
-int check_user_usb_string(const char *name,
- struct usb_gadget_strings *stringtab_dev);
-
#define GS_STRINGS_W(__struct, __name) \
static ssize_t __struct##_##__name##_store(struct config_item *item, \
const char *page, size_t len) \
@@ -37,7 +34,7 @@ static struct configfs_item_operations struct_in##_langid_item_ops = { \
.release = struct_in##_attr_release, \
}; \
\
-static struct config_item_type struct_in##_langid_type = { \
+static const struct config_item_type struct_in##_langid_type = { \
.ct_item_ops = &struct_in##_langid_item_ops, \
.ct_attrs = struct_in##_langid_attrs, \
.ct_owner = THIS_MODULE, \
@@ -94,7 +91,7 @@ static struct configfs_group_operations struct_in##_strings_ops = { \
.drop_item = &struct_in##_strings_drop, \
}; \
\
-static struct config_item_type struct_in##_strings_type = { \
+static const struct config_item_type struct_in##_strings_type = { \
.ct_group_ops = &struct_in##_strings_ops, \
.ct_owner = THIS_MODULE, \
}
diff --git a/include/linux/usb/mctp-usb.h b/include/linux/usb/mctp-usb.h
new file mode 100644
index 000000000000..a2f6f1e04efb
--- /dev/null
+++ b/include/linux/usb/mctp-usb.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mctp-usb.h - MCTP USB transport binding: common definitions,
+ * based on DMTF0283 specification:
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
+ *
+ * These are protocol-level definitions, that may be shared between host
+ * and gadget drivers.
+ *
+ * Copyright (C) 2024-2025 Code Construct Pty Ltd
+ */
+
+#ifndef __LINUX_USB_MCTP_USB_H
+#define __LINUX_USB_MCTP_USB_H
+
+#include <linux/types.h>
+
+struct mctp_usb_hdr {
+ __be16 id;
+ u8 rsvd;
+ u8 len;
+} __packed;
+
+#define MCTP_USB_XFER_SIZE 512
+#define MCTP_USB_BTU 68
+#define MCTP_USB_MTU_MIN MCTP_USB_BTU
+#define MCTP_USB_MTU_MAX (U8_MAX - sizeof(struct mctp_usb_hdr))
+#define MCTP_USB_DMTF_ID 0x1ab4
+
+#endif /* __LINUX_USB_MCTP_USB_H */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 3963e55e88a3..fbdef950f06c 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -61,7 +61,7 @@ struct musb_hdrc_eps_bits {
};
struct musb_hdrc_config {
- struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
+ const struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
unsigned fifo_cfg_size; /* size of the fifo configuration */
/* MUSB configuration-specific details */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index d50098fb16b5..6ccd1b2af993 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -6,6 +6,7 @@
#ifndef __LINUX_USB_PD_H
#define __LINUX_USB_PD_H
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
@@ -33,7 +34,9 @@ enum pd_ctrl_msg_type {
PD_CTRL_FR_SWAP = 19,
PD_CTRL_GET_PPS_STATUS = 20,
PD_CTRL_GET_COUNTRY_CODES = 21,
- /* 22-31 Reserved */
+ /* 22-23 Reserved */
+ PD_CTRL_GET_REVISION = 24,
+ /* 25-31 Reserved */
};
enum pd_data_msg_type {
@@ -46,7 +49,9 @@ enum pd_data_msg_type {
PD_DATA_ALERT = 6,
PD_DATA_GET_COUNTRY_INFO = 7,
PD_DATA_ENTER_USB = 8,
- /* 9-14 Reserved */
+ /* 9-11 Reserved */
+ PD_DATA_REVISION = 12,
+ /* 13-14 Reserved */
PD_DATA_VENDOR_DEF = 15,
/* 16-31 Reserved */
};
@@ -267,9 +272,11 @@ enum pd_pdo_type {
enum pd_apdo_type {
APDO_TYPE_PPS = 0,
+ APDO_TYPE_EPR_AVS = 1,
+ APDO_TYPE_SPR_AVS = 2,
};
-#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_SHIFT 28
#define PDO_APDO_TYPE_MASK 0x3
#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
@@ -293,6 +300,35 @@ enum pd_apdo_type {
PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
PDO_PPS_APDO_MAX_CURR(max_ma))
+/*
+ * Applicable only to EPR AVS APDO source cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_EPR_AVS_APDO_PEAK_CURRENT GENMASK(27, 26)
+
+/*
+ * Applicable to both EPR AVS APDO source and sink cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ * Table 6.22 EPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_EPR_AVS_APDO_MAX_VOLT GENMASK(25, 17) /* 100mV unit */
+#define PDO_EPR_AVS_APDO_MIN_VOLT GENMASK(15, 8) /* 100mV unit */
+#define PDO_EPR_AVS_APDO_PDP GENMASK(7, 0) /* 1W unit */
+
+/*
+ * Applicable only SPR AVS APDO source cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_SPR_AVS_APDO_PEAK_CURRENT GENMASK(27, 26)
+
+/*
+ * Applicable to both SPR AVS APDO source and sink cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ * Table 6.21 SPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR GENMASK(19, 10) /* 10mA unit */
+#define PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR GENMASK(9, 0) /* 10mA unit */
+
static inline enum pd_pdo_type pdo_type(u32 pdo)
{
return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
@@ -346,6 +382,41 @@ static inline unsigned int pdo_pps_apdo_max_current(u32 pdo)
PDO_PPS_APDO_CURR_MASK) * 50;
}
+static inline unsigned int pdo_epr_avs_apdo_src_peak_current(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_epr_avs_apdo_min_voltage_mv(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_MIN_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_max_voltage_mv(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_MAX_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_pdp_w(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_PDP, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_src_peak_current(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_9v_to_15v_max_current_ma(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR, pdo) * 10;
+}
+
+static inline unsigned int pdo_spr_avs_apdo_15v_to_20v_max_current_ma(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR, pdo) * 10;
+}
+
/* RDO: Request Data Object */
#define RDO_OBJ_POS_SHIFT 28
#define RDO_OBJ_POS_MASK 0x7
@@ -453,6 +524,20 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define EUDO_TBT_SUPPORT BIT(14)
#define EUDO_HOST_PRESENT BIT(13)
+/*
+ * Request Message Data Object (PD Revision 3.1+ only)
+ * --------
+ * <31:28> :: Revision Major
+ * <27:24> :: Revision Minor
+ * <23:20> :: Version Major
+ * <19:16> :: Version Minor
+ * <15:0> :: Reserved, Shall be set to zero
+ */
+
+#define RMDO(rev_maj, rev_min, ver_maj, ver_min) \
+ (((rev_maj) & 0xf) << 28 | ((rev_min) & 0xf) << 24 | \
+ ((ver_maj) & 0xf) << 20 | ((ver_min) & 0xf) << 16)
+
/* USB PD timers and counters */
#define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */
#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
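
A small sketch exercising the new AVS accessors and the RMDO() macro above; the pr_info() formatting is illustrative:

static void dump_epr_avs_src(u32 pdo)
{
	pr_info("EPR AVS source: %u..%u mV, PDP %u W, peak current code %u\n",
		pdo_epr_avs_apdo_min_voltage_mv(pdo),
		pdo_epr_avs_apdo_max_voltage_mv(pdo),
		pdo_epr_avs_apdo_pdp_w(pdo),
		pdo_epr_avs_apdo_src_peak_current(pdo));
}

/* Revision Message Data Object advertising PD revision 3.1, version 1.8. */
static const u32 my_rmdo = RMDO(3, 1, 1, 8);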
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index e4de6bc1f69b..0fa9885a1038 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -223,7 +223,6 @@ extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
extern struct usb_phy *devm_usb_get_phy_by_node(struct device *dev,
struct device_node *node, struct notifier_block *nb);
extern void usb_put_phy(struct usb_phy *);
-extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
extern void usb_phy_set_charger_current(struct usb_phy *usb_phy,
unsigned int mA);
@@ -259,10 +258,6 @@ static inline void usb_put_phy(struct usb_phy *x)
{
}
-static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x)
-{
-}
-
static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
}
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 33a4c146dc19..2ca60828f28b 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -30,6 +30,7 @@
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define VENDOR_ID_DLINK 0x2001
+#define VENDOR_ID_DELL 0x413c
#define VENDOR_ID_ASUS 0x0b05
#if IS_REACHABLE(CONFIG_USB_RTL8152)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1a0a4dc87980..75b2b763f1ba 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -311,8 +311,11 @@ struct usb_serial_driver {
#define to_usb_serial_driver(d) \
container_of(d, struct usb_serial_driver, driver)
-int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
- const char *name, const struct usb_device_id *id_table);
+#define usb_serial_register_drivers(serial_drivers, name, id_table) \
+ __usb_serial_register_drivers(serial_drivers, THIS_MODULE, name, id_table)
+int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ struct module *owner, const char *name,
+ const struct usb_device_id *id_table);
void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
void usb_serial_port_softint(struct usb_serial_port *port);
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
index 2827ce72e502..51be3bb8fccb 100644
--- a/include/linux/usb/storage.h
+++ b/include/linux/usb/storage.h
@@ -53,7 +53,7 @@ struct bulk_cb_wrap {
__le32 Signature; /* contains 'USBC' */
__u32 Tag; /* unique per command id */
__le32 DataTransferLength; /* size of data */
- __u8 Flags; /* direction in bit 0 */
+ __u8 Flags; /* direction in bit 7 */
__u8 Lun; /* LUN normally 0 */
__u8 Length; /* length of the CDB */
__u8 CDB[16]; /* max command */
@@ -82,4 +82,12 @@ struct bulk_cs_wrap {
#define US_BULK_RESET_REQUEST 0xff
#define US_BULK_GET_MAX_LUN 0xfe
+/*
+ * If 4 LUNs are supported then the LUNs would be
+ * numbered from 0 to 3, and the return value for the
+ * US_BULK_GET_MAX_LUN request would be 3. The valid
+ * LUN field is 4 bits wide, so the upper limit is 0x0f.
+ */
+#define US_BULK_MAX_LUN_LIMIT 0x0f
+
#endif
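
A hedged sketch of applying US_BULK_MAX_LUN_LIMIT to a GET_MAX_LUN reply, per the comment above; 'ret' stands for the control-transfer result:

static u8 sanitize_max_lun(int ret, u8 reply)
{
	if (ret < 0)
		return 0;		/* no reply: assume a single LUN */
	/* The LUN field is 4 bits wide; never trust anything above 0x0f. */
	return min_t(u8, reply, US_BULK_MAX_LUN_LIMIT);
}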
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 47a86b8a4a50..f7f5cfbdef12 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -47,6 +47,9 @@
#define TCPC_SINK_FAST_ROLE_SWAP BIT(0)
#define TCPC_CONFIG_STD_OUTPUT 0x18
+#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_MASK BIT(0)
+#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_NORMAL 0
+#define TCPC_CONFIG_STD_OUTPUT_ORIENTATION_FLIPPED 1
#define TCPC_TCPC_CTRL 0x19
#define TCPC_TCPC_CTRL_ORIENTATION BIT(0)
@@ -60,15 +63,12 @@
#define TCPC_ROLE_CTRL 0x1a
#define TCPC_ROLE_CTRL_DRP BIT(6)
-#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4
-#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3
+#define TCPC_ROLE_CTRL_RP_VAL GENMASK(5, 4)
#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0
#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1
#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2
-#define TCPC_ROLE_CTRL_CC2_SHIFT 2
-#define TCPC_ROLE_CTRL_CC2_MASK 0x3
-#define TCPC_ROLE_CTRL_CC1_SHIFT 0
-#define TCPC_ROLE_CTRL_CC1_MASK 0x3
+#define TCPC_ROLE_CTRL_CC2 GENMASK(3, 2)
+#define TCPC_ROLE_CTRL_CC1 GENMASK(1, 0)
#define TCPC_ROLE_CTRL_CC_RA 0x0
#define TCPC_ROLE_CTRL_CC_RP 0x1
#define TCPC_ROLE_CTRL_CC_RD 0x2
@@ -89,11 +89,9 @@
#define TCPC_CC_STATUS_TERM BIT(4)
#define TCPC_CC_STATUS_TERM_RP 0
#define TCPC_CC_STATUS_TERM_RD 1
+#define TCPC_CC_STATUS_CC2 GENMASK(3, 2)
+#define TCPC_CC_STATUS_CC1 GENMASK(1, 0)
#define TCPC_CC_STATE_SRC_OPEN 0
-#define TCPC_CC_STATUS_CC2_SHIFT 2
-#define TCPC_CC_STATUS_CC2_MASK 0x3
-#define TCPC_CC_STATUS_CC1_SHIFT 0
-#define TCPC_CC_STATUS_CC1_MASK 0x3
#define TCPC_POWER_STATUS 0x1e
#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7)
@@ -127,12 +125,12 @@
#define TCPC_DEV_CAP_2 0x26
#define TCPC_STD_INPUT_CAP 0x28
#define TCPC_STD_OUTPUT_CAP 0x29
+#define TCPC_STD_OUTPUT_CAP_ORIENTATION BIT(0)
#define TCPC_MSG_HDR_INFO 0x2e
#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3)
+#define TCPC_MSG_HDR_INFO_REV GENMASK(2, 1)
#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0)
-#define TCPC_MSG_HDR_INFO_REV_SHIFT 1
-#define TCPC_MSG_HDR_INFO_REV_MASK 0x3
#define TCPC_RX_DETECT 0x2f
#define TCPC_RX_DETECT_HARD_RESET BIT(5)
@@ -150,10 +148,8 @@
#define TCPC_RX_DATA 0x34 /* through 0x4f */
#define TCPC_TRANSMIT 0x50
-#define TCPC_TRANSMIT_RETRY_SHIFT 4
-#define TCPC_TRANSMIT_RETRY_MASK 0x3
-#define TCPC_TRANSMIT_TYPE_SHIFT 0
-#define TCPC_TRANSMIT_TYPE_MASK 0x7
+#define TCPC_TRANSMIT_RETRY GENMASK(5, 4)
+#define TCPC_TRANSMIT_TYPE GENMASK(2, 0)
#define TCPC_TX_BYTE_CNT 0x51
#define TCPC_TX_HDR 0x52
@@ -174,8 +170,7 @@
#define tcpc_presenting_rd(reg, cc) \
(!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
+ FIELD_GET(TCPC_ROLE_CTRL_## cc, reg) == TCPC_ROLE_CTRL_CC_RD)
struct tcpci;
@@ -186,7 +181,7 @@ struct tcpci;
* Optional; Callback to perform chip specific operations when FRS
* is sourcing vbus.
* @auto_discharge_disconnect:
- * Optional; Enables TCPC to autonously discharge vbus on disconnect.
+ * Optional; Enables TCPC to autonomously discharge vbus on disconnect.
* @vbus_vsafe0v:
* optional; Set when TCPC can detect whether vbus is at VSAFE0V.
* @set_partner_usb_comm_capable:
@@ -209,6 +204,9 @@ struct tcpci;
* swap following Discover Identity on SOP' occurs.
* Return true when the TCPM is allowed to request a Vconn swap
* after Discovery Identity on SOP.
+ * @set_orientation:
+ * Optional; Enable setting the connector orientation via
+ * CONFIG_STANDARD_OUTPUT (0x18) bit 0.
*/
struct tcpci_data {
struct regmap *regmap;
@@ -216,6 +214,7 @@ struct tcpci_data {
unsigned char auto_discharge_disconnect:1;
unsigned char vbus_vsafe0v:1;
unsigned char cable_comm_capable:1;
+ unsigned char set_orientation:1;
int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
@@ -248,7 +247,7 @@ static inline enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
if (sink)
return TYPEC_CC_RP_3_0;
fallthrough;
- case 0x0:
+ case TCPC_CC_STATE_SRC_OPEN:
default:
return TYPEC_CC_OPEN;
}
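
The SHIFT/MASK pairs above collapse into GENMASK() definitions consumed with FIELD_GET(). A quick sketch of the equivalence for the CC status fields; the register value is illustrative:

static void cc_status_example(u8 reg)	/* e.g. reg = 0x09: CC2=Rd, CC1=Rp */
{
	unsigned int old_cc1 = (reg >> 0) & 0x3;	/* former SHIFT/MASK */
	unsigned int new_cc1 = FIELD_GET(TCPC_CC_STATUS_CC1, reg);
	unsigned int new_cc2 = FIELD_GET(TCPC_CC_STATUS_CC2, reg);

	pr_debug("CC1=%u CC2=%u\n", new_cc1, new_cc2);
	WARN_ON(old_cc1 != new_cc1);	/* both decode the same two bits */
}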
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 061da9546a81..b22e659f81ba 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -163,7 +163,8 @@ struct tcpc_dev {
void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
- bool pps_active, u32 requested_vbus_voltage);
+ bool pps_active, u32 requested_vbus_voltage,
+ u32 pps_apdo_min_voltage);
bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
void (*check_contaminant)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index e6c14f2b1f9b..40afcee8b4f5 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -80,13 +80,4 @@ struct tegra_usb_phy {
bool powered_on;
};
-void tegra_usb_phy_preresume(struct usb_phy *phy);
-
-void tegra_usb_phy_postresume(struct usb_phy *phy);
-
-void tegra_ehci_phy_restore_start(struct usb_phy *phy,
- enum tegra_usb_phy_port_speed port_speed);
-
-void tegra_ehci_phy_restore_end(struct usb_phy *phy);
-
#endif /* __TEGRA_USB_PHY_H */
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index b35b427561ab..309251572e2e 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -87,6 +87,17 @@ enum typec_orientation {
TYPEC_ORIENTATION_REVERSE,
};
+enum usb_mode {
+ USB_MODE_NONE,
+ USB_MODE_USB2,
+ USB_MODE_USB3,
+ USB_MODE_USB4
+};
+
+#define USB_CAPABILITY_USB2 BIT(0)
+#define USB_CAPABILITY_USB3 BIT(1)
+#define USB_CAPABILITY_USB4 BIT(2)
+
/*
* struct enter_usb_data - Enter_USB Message details
* @eudo: Enter_USB Data Object
@@ -129,6 +140,7 @@ int typec_cable_set_identity(struct typec_cable *cable);
* @mode: Index of the Mode
* @vdo: VDO returned by Discover Modes USB PD command
* @roles: Only for ports. DRP if the mode is available in both roles
+ * @inactive: Only for ports. Make this port inactive (default is active).
*
* Description of an Alternate Mode which a connector, cable plug or partner
* supports.
@@ -139,6 +151,7 @@ struct typec_altmode_desc {
u32 vdo;
/* Only used with ports */
enum typec_port_data roles;
+ bool inactive;
};
void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
@@ -167,6 +180,9 @@ struct typec_port *typec_altmode2port(struct typec_altmode *alt);
void typec_altmode_update_active(struct typec_altmode *alt, bool active);
+void typec_altmode_set_ops(struct typec_altmode *alt,
+ const struct typec_altmode_ops *ops);
+
enum typec_plug_index {
TYPEC_PLUG_SOP_P,
TYPEC_PLUG_SOP_PP,
@@ -206,6 +222,7 @@ struct typec_cable_desc {
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
* @pd_revision: USB Power Delivery Specification Revision if supported
+ * @usb_capability: Supported USB Modes
* @attach: Notification about attached USB device
* @deattach: Notification about removed USB device
*
@@ -223,6 +240,7 @@ struct typec_partner_desc {
enum typec_accessory accessory;
struct usb_pd_identity *identity;
u16 pd_revision; /* 0300H = "3.0" */
+ u8 usb_capability;
void (*attach)(struct typec_partner *partner, struct device *dev);
void (*deattach)(struct typec_partner *partner, struct device *dev);
@@ -237,6 +255,8 @@ struct typec_partner_desc {
* @port_type_set: Set port type
* @pd_get: Get available USB Power Delivery Capabilities.
* @pd_set: Set USB Power Delivery Capabilities.
+ * @default_usb_mode_set: USB Mode to be used by default with Enter_USB Message
+ * @enter_usb_mode: Change the active USB Mode
*/
struct typec_operations {
int (*try_role)(struct typec_port *port, int role);
@@ -247,6 +267,8 @@ struct typec_operations {
enum typec_port_type type);
struct usb_power_delivery **(*pd_get)(struct typec_port *port);
int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd);
+ int (*default_usb_mode_set)(struct typec_port *port, enum usb_mode mode);
+ int (*enter_usb_mode)(struct typec_port *port, enum usb_mode mode);
};
enum usb_pd_svdm_ver {
@@ -264,6 +286,7 @@ enum usb_pd_svdm_ver {
* @svdm_version: USB PD Structured VDM version if supported
* @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
+ * @usb_capability: Supported USB Modes
* @fwnode: Optional fwnode of the port
* @driver_data: Private pointer for driver specific info
* @pd: Optional USB Power Delivery Support
@@ -280,6 +303,7 @@ struct typec_capability {
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
unsigned int orientation_aware:1;
+ u8 usb_capability;
struct fwnode_handle *fwnode;
void *driver_data;
@@ -313,6 +337,7 @@ struct typec_plug *typec_register_plug(struct typec_cable *cable,
void typec_unregister_plug(struct typec_plug *plug);
void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+enum typec_data_role typec_get_data_role(struct typec_port *port);
void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
@@ -347,6 +372,9 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_
int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
struct usb_power_delivery *pd);
+void typec_partner_set_usb_mode(struct typec_partner *partner, enum usb_mode usb_mode);
+void typec_port_set_usb_mode(struct typec_port *port, enum usb_mode mode);
+
/**
* struct typec_connector - Representation of Type-C port for external drivers
* @attach: notification about device removal
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index b3c0866ea70f..f7db3bd4c90e 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -173,6 +173,19 @@ typec_altmode_get_svdm_version(struct typec_altmode *altmode)
}
/**
+ * typec_altmode_get_data_role - Get port data role
+ * @altmode: Handle to the alternate mode
+ *
+ * Alt Mode drivers should only issue Enter Mode through the port if they are
+ * the DFP.
+ */
+static inline enum typec_data_role
+typec_altmode_get_data_role(struct typec_altmode *altmode)
+{
+ return typec_get_data_role(typec_altmode2port(altmode));
+}
+
+/**
* struct typec_altmode_driver - USB Type-C alternate mode device driver
* @id_table: Null terminated array of SVIDs
* @probe: Callback for device binding
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index f2da264d9c14..acb0ad03bdac 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -57,6 +57,7 @@ enum {
DP_PIN_ASSIGN_D,
DP_PIN_ASSIGN_E,
DP_PIN_ASSIGN_F, /* Not supported after v1.0b */
+ DP_PIN_ASSIGN_MAX,
};
/* DisplayPort alt mode specific commands */
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 2489a7857d8e..aa9ebb7e2fe0 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -3,6 +3,7 @@
#ifndef __USB_TYPEC_MUX
#define __USB_TYPEC_MUX
+#include <linux/err.h>
#include <linux/property.h>
#include <linux/usb/typec.h>
@@ -24,16 +25,13 @@ struct typec_switch_desc {
void *drvdata;
};
+#if IS_ENABLED(CONFIG_TYPEC)
+
struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode);
void typec_switch_put(struct typec_switch *sw);
int typec_switch_set(struct typec_switch *sw,
enum typec_orientation orientation);
-static inline struct typec_switch *typec_switch_get(struct device *dev)
-{
- return fwnode_typec_switch_get(dev_fwnode(dev));
-}
-
struct typec_switch_dev *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc);
@@ -42,6 +40,44 @@ void typec_switch_unregister(struct typec_switch_dev *sw);
void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data);
void *typec_switch_get_drvdata(struct typec_switch_dev *sw);
+#else
+
+static inline struct typec_switch *
+fwnode_typec_switch_get(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline void typec_switch_put(struct typec_switch *sw) {}
+
+static inline int typec_switch_set(struct typec_switch *sw,
+ enum typec_orientation orientation)
+{
+ return 0;
+}
+
+static inline struct typec_switch_dev *
+typec_switch_register(struct device *parent,
+ const struct typec_switch_desc *desc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void typec_switch_unregister(struct typec_switch_dev *sw) {}
+
+static inline void typec_switch_set_drvdata(struct typec_switch_dev *sw, void *data) {}
+static inline void *typec_switch_get_drvdata(struct typec_switch_dev *sw)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+#endif /* CONFIG_TYPEC */
+
+static inline struct typec_switch *typec_switch_get(struct device *dev)
+{
+ return fwnode_typec_switch_get(dev_fwnode(dev));
+}
+
struct typec_mux_state {
struct typec_altmode *alt;
unsigned long mode;
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index fa97d7e00f5c..0b570f1b8bc8 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -44,6 +44,7 @@ struct typec_thunderbolt_data {
#define TBT_GEN3_NON_ROUNDED 0
#define TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED 1
+#define TBT_CABLE_ROUNDED BIT(19)
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
@@ -54,6 +55,7 @@ struct typec_thunderbolt_data {
/* TBT3 Device Enter Mode VDO bits */
#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
+#define TBT_ENTER_MODE_UNI_DIR_LSRX BIT(23)
#define TBT_ENTER_MODE_ACTIVE_CABLE BIT(24)
#endif /* __USB_TYPEC_TBT_H */
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5050f502c1ed..4b651065738a 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -49,19 +49,10 @@
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_USB_ULPI)
-struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags);
-
struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags);
#else
-static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags)
-{
- return NULL;
-}
-
static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
diff --git a/include/linux/usb/usbio.h b/include/linux/usb/usbio.h
new file mode 100644
index 000000000000..6c4e7c246d58
--- /dev/null
+++ b/include/linux/usb/usbio.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ */
+
+#ifndef _LINUX_USBIO_H_
+#define _LINUX_USBIO_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/byteorder/generic.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/***********************
+ * USBIO Clients Names *
+ ***********************/
+#define USBIO_GPIO_CLIENT "usbio-gpio"
+#define USBIO_I2C_CLIENT "usbio-i2c"
+
+/****************
+ * USBIO quirks *
+ ****************/
+#define USBIO_QUIRK_BULK_MAXP_63 BIT(0) /* Force bulk endpoint maxp to 63 */
+#define USBIO_QUIRK_I2C_NO_INIT_ACK BIT(8) /* Do not ask for ack on I2C init */
+#define USBIO_QUIRK_I2C_MAX_RW_LEN_52 BIT(9) /* Set i2c-adapter max r/w len to 52 */
+#define USBIO_QUIRK_I2C_USE_CHUNK_LEN BIT(10) /* Send chunk-len for split xfers */
+#define USBIO_QUIRK_I2C_ALLOW_400KHZ BIT(11) /* Override desc, allowing 400 KHz */
+
+/**************************
+ * USBIO Type Definitions *
+ **************************/
+
+/* USBIO Packet Type */
+#define USBIO_PKTTYPE_CTRL 1
+#define USBIO_PKTTYPE_DBG 2
+#define USBIO_PKTTYPE_GPIO 3
+#define USBIO_PKTTYPE_I2C 4
+
+/* USBIO Packet Header */
+struct usbio_packet_header {
+ u8 type;
+ u8 cmd;
+ u8 flags;
+} __packed;
+
+/* USBIO Control Transfer Packet */
+struct usbio_ctrl_packet {
+ struct usbio_packet_header header;
+ u8 len;
+ u8 data[] __counted_by(len);
+} __packed;
+
+/* USBIO Bulk Transfer Packet */
+struct usbio_bulk_packet {
+ struct usbio_packet_header header;
+ __le16 len;
+ u8 data[] __counted_by(len);
+} __packed;
+
+/* USBIO GPIO commands */
+enum usbio_gpio_cmd {
+ USBIO_GPIOCMD_DEINIT,
+ USBIO_GPIOCMD_INIT,
+ USBIO_GPIOCMD_READ,
+ USBIO_GPIOCMD_WRITE,
+ USBIO_GPIOCMD_END
+};
+
+/* USBIO GPIO config */
+enum usbio_gpio_pincfg {
+ USBIO_GPIO_PINCFG_DEFAULT,
+ USBIO_GPIO_PINCFG_PULLUP,
+ USBIO_GPIO_PINCFG_PULLDOWN,
+ USBIO_GPIO_PINCFG_PUSHPULL
+};
+
+#define USBIO_GPIO_PINCFG_SHIFT 2
+#define USBIO_GPIO_PINCFG_MASK (0x3 << USBIO_GPIO_PINCFG_SHIFT)
+#define USBIO_GPIO_SET_PINCFG(pincfg) \
+ (((pincfg) << USBIO_GPIO_PINCFG_SHIFT) & USBIO_GPIO_PINCFG_MASK)
+
+enum usbio_gpio_pinmode {
+ USBIO_GPIO_PINMOD_INVAL,
+ USBIO_GPIO_PINMOD_INPUT,
+ USBIO_GPIO_PINMOD_OUTPUT,
+ USBIO_GPIO_PINMOD_MAXVAL
+};
+
+#define USBIO_GPIO_PINMOD_MASK 0x3
+#define USBIO_GPIO_SET_PINMOD(pin) (pin & USBIO_GPIO_PINMOD_MASK)
+
+/*************************
+ * USBIO GPIO Controller *
+ *************************/
+
+#define USBIO_MAX_GPIOBANKS 5
+#define USBIO_GPIOSPERBANK 32
+
+struct usbio_gpio_bank_desc {
+ u8 id;
+ u8 pins;
+ __le32 bmap;
+} __packed;
+
+struct usbio_gpio_init {
+ u8 bankid;
+ u8 config;
+ u8 pincount;
+ u8 pin;
+} __packed;
+
+struct usbio_gpio_rw {
+ u8 bankid;
+ u8 pincount;
+ u8 pin;
+ __le32 value;
+} __packed;
+
+/* USBIO I2C commands */
+enum usbio_i2c_cmd {
+ USBIO_I2CCMD_UNINIT,
+ USBIO_I2CCMD_INIT,
+ USBIO_I2CCMD_READ,
+ USBIO_I2CCMD_WRITE,
+ USBIO_I2CCMD_END
+};
+
+/************************
+ * USBIO I2C Controller *
+ ************************/
+
+#define USBIO_MAX_I2CBUSES 5
+
+#define USBIO_I2C_BUS_ADDR_CAP_10B BIT(3) /* 10bit address support */
+#define USBIO_I2C_BUS_MODE_CAP_MASK 0x3
+#define USBIO_I2C_BUS_MODE_CAP_SM 0 /* Standard Mode */
+#define USBIO_I2C_BUS_MODE_CAP_FM 1 /* Fast Mode */
+#define USBIO_I2C_BUS_MODE_CAP_FMP 2 /* Fast Mode+ */
+#define USBIO_I2C_BUS_MODE_CAP_HSM 3 /* High-Speed Mode */
+
+struct usbio_i2c_bus_desc {
+ u8 id;
+ u8 caps;
+} __packed;
+
+struct usbio_i2c_uninit {
+ u8 busid;
+ __le16 config;
+} __packed;
+
+struct usbio_i2c_init {
+ u8 busid;
+ __le16 config;
+ __le32 speed;
+} __packed;
+
+struct usbio_i2c_rw {
+ u8 busid;
+ __le16 config;
+ __le16 size;
+ u8 data[] __counted_by(size);
+} __packed;
+
+int usbio_control_msg(struct auxiliary_device *adev, u8 type, u8 cmd,
+ const void *obuf, u16 obuf_len, void *ibuf, u16 ibuf_len);
+
+int usbio_bulk_msg(struct auxiliary_device *adev, u8 type, u8 cmd, bool last,
+ const void *obuf, u16 obuf_len, void *ibuf, u16 ibuf_len);
+
+int usbio_acquire(struct auxiliary_device *adev);
+void usbio_release(struct auxiliary_device *adev);
+void usbio_get_txrxbuf_len(struct auxiliary_device *adev, u16 *txbuf_len, u16 *rxbuf_len);
+unsigned long usbio_get_quirks(struct auxiliary_device *adev);
+void usbio_acpi_bind(struct auxiliary_device *adev, const struct acpi_device_id *hids);
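+
+/*
+ * Illustrative sketch (not part of this patch), using only the
+ * declarations above: read one GPIO pin with a control message,
+ * assuming the device echoes the structure back with 'value' filled in.
+ * Error handling is elided; 'adev' is the client's auxiliary device.
+ *
+ *	struct usbio_gpio_rw rw = {
+ *		.bankid = 0,
+ *		.pincount = 1,
+ *		.pin = 5,
+ *	};
+ *	int ret = usbio_control_msg(adev, USBIO_PKTTYPE_GPIO,
+ *				    USBIO_GPIOCMD_READ,
+ *				    &rw, sizeof(rw), &rw, sizeof(rw));
+ */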
+
+#endif
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9f08a584d707..2945923a8a95 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -14,6 +14,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/usb.h>
+#include <linux/spinlock.h>
/* interface from usbnet core to each USB networking link we handle */
struct usbnet {
@@ -58,7 +59,8 @@ struct usbnet {
unsigned interrupt_count;
struct mutex interrupt_mutex;
struct usb_anchor deferred;
- struct tasklet_struct bh;
+ struct work_struct bh_work;
+ spinlock_t bql_spinlock;
struct work_struct kevent;
unsigned long flags;
@@ -76,8 +78,24 @@ struct usbnet {
# define EVENT_LINK_CHANGE 11
# define EVENT_SET_RX_MODE 12
# define EVENT_NO_IP_ALIGN 13
+# define EVENT_LINK_CARRIER_ON 14
+/* This one is special, as it indicates that the device is going away;
+ * there are cyclic dependencies between tasklet, timer and bh
+ * that must be broken.
+ */
+# define EVENT_UNPLUG 31
};
+static inline bool usbnet_going_away(struct usbnet *ubn)
+{
+ return test_bit(EVENT_UNPLUG, &ubn->flags);
+}
+
+static inline void usbnet_mark_going_away(struct usbnet *ubn)
+{
+ set_bit(EVENT_UNPLUG, &ubn->flags);
+}
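+
+/*
+ * Illustrative sketch (not part of this patch): a bottom-half work
+ * handler would test the flag before rearming itself, breaking the
+ * work/timer cycle once the device is marked as going away:
+ *
+ *	if (usbnet_going_away(dev))
+ *		return;
+ *	schedule_work(&dev->bh_work);
+ */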
+
static inline struct usb_driver *driver_of(struct usb_interface *intf)
{
return to_usb_driver(intf->dev.driver);
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index 88d96095bcb1..22e0dab0809e 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -29,6 +29,31 @@
#define UVC_GUID_EXT_GPIO_CONTROLLER \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+#define UVC_GUID_CHROMEOS_XU \
+ {0x24, 0xe9, 0xd7, 0x74, 0xc9, 0x49, 0x45, 0x4a, \
+ 0x98, 0xa3, 0xc8, 0x07, 0x7e, 0x05, 0x1c, 0xa3}
+#define UVC_GUID_MSXU_1_5 \
+ {0xdc, 0x95, 0x3f, 0x0f, 0x32, 0x26, 0x4e, 0x4c, \
+ 0x92, 0xc9, 0xa0, 0x47, 0x82, 0xf4, 0x3b, 0xc8}
+
+/* https://learn.microsoft.com/en-us/windows-hardware/drivers/stream/uvc-extensions-1-5#222-extension-unit-controls */
+#define UVC_MSXU_CONTROL_FOCUS 0x01
+#define UVC_MSXU_CONTROL_EXPOSURE 0x02
+#define UVC_MSXU_CONTROL_EVCOMPENSATION 0x03
+#define UVC_MSXU_CONTROL_WHITEBALANCE 0x04
+#define UVC_MSXU_CONTROL_FACE_AUTHENTICATION 0x06
+#define UVC_MSXU_CONTROL_CAMERA_EXTRINSICS 0x07
+#define UVC_MSXU_CONTROL_CAMERA_INTRINSICS 0x08
+#define UVC_MSXU_CONTROL_METADATA 0x09
+#define UVC_MSXU_CONTROL_IR_TORCH 0x0a
+#define UVC_MSXU_CONTROL_DIGITALWINDOW 0x0b
+#define UVC_MSXU_CONTROL_DIGITALWINDOW_CONFIG 0x0c
+#define UVC_MSXU_CONTROL_VIDEO_HDR 0x0d
+#define UVC_MSXU_CONTROL_FRAMERATE_THROTTLE 0x0e
+#define UVC_MSXU_CONTROL_FIELDOFVIEW2_CONFIG 0x0f
+#define UVC_MSXU_CONTROL_FIELDOFVIEW2 0x10
+
+#define UVC_CROSXU_CONTROL_IQ_PROFILE 0x04
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
@@ -118,6 +143,9 @@
#define UVC_GUID_FORMAT_Y12I \
{ 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y16I \
+ { 'Y', '1', '6', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_Z16 \
{ 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -140,6 +168,9 @@
#define UVC_GUID_FORMAT_D3DFMT_L8 \
{0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_D3DFMT_R5G6B5 \
+ {0x7b, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
{0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/include/linux/usb/xhci-sideband.h b/include/linux/usb/xhci-sideband.h
new file mode 100644
index 000000000000..005257085dcb
--- /dev/null
+++ b/include/linux/usb/xhci-sideband.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xHCI host controller sideband support
+ *
+ * Copyright (c) 2023-2025, Intel Corporation.
+ *
+ * Author: Mathias Nyman <mathias.nyman@linux.intel.com>
+ */
+#ifndef __LINUX_XHCI_SIDEBAND_H
+#define __LINUX_XHCI_SIDEBAND_H
+
+#include <linux/scatterlist.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#define EP_CTX_PER_DEV 31 /* FIXME defined twice, from xhci.h */
+
+struct xhci_sideband;
+
+enum xhci_sideband_type {
+ XHCI_SIDEBAND_AUDIO,
+ XHCI_SIDEBAND_VENDOR,
+};
+
+enum xhci_sideband_notify_type {
+ XHCI_SIDEBAND_XFER_RING_FREE,
+};
+
+/**
+ * struct xhci_sideband_event - sideband event
+ * @type: notifier type
+ * @evt_data: event data
+ */
+struct xhci_sideband_event {
+ enum xhci_sideband_notify_type type;
+ void *evt_data;
+};
+
+/**
+ * struct xhci_sideband - representation of a sideband accessed usb device.
+ * @xhci: The xhci host controller the usb device is connected to
+ * @vdev: the usb device accessed via sideband
+ * @eps: array of endpoints controlled via sideband
+ * @ir: event handling and buffer for sideband accessed device
+ * @type: xHCI sideband type
+ * @mutex: mutex for sideband operations
+ * @intf: USB sideband client interface
+ * @notify_client: callback for xHCI sideband sequences
+ *
+ * Keeps track of a usb device accessed via sideband.
+ */
+struct xhci_sideband {
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *eps[EP_CTX_PER_DEV];
+ struct xhci_interrupter *ir;
+ enum xhci_sideband_type type;
+
+ /* Synchronizing xHCI sideband operations with client drivers operations */
+ struct mutex mutex;
+
+ struct usb_interface *intf;
+ int (*notify_client)(struct usb_interface *intf,
+ struct xhci_sideband_event *evt);
+};
+
+struct xhci_sideband *
+xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type,
+ int (*notify_client)(struct usb_interface *intf,
+ struct xhci_sideband_event *evt));
+void
+xhci_sideband_unregister(struct xhci_sideband *sb);
+int
+xhci_sideband_add_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep);
+int
+xhci_sideband_remove_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep);
+int
+xhci_sideband_stop_endpoint(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep);
+struct sg_table *
+xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb,
+ struct usb_host_endpoint *host_ep);
+struct sg_table *
+xhci_sideband_get_event_buffer(struct xhci_sideband *sb);
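+
+/*
+ * Illustrative sketch (not part of this patch): a hypothetical client
+ * interface driver registering for sideband access and taking over one
+ * endpoint's transfer ring (my_notify_client is the client's callback):
+ *
+ *	sb = xhci_sideband_register(intf, XHCI_SIDEBAND_VENDOR,
+ *				    my_notify_client);
+ *	if (sb) {
+ *		xhci_sideband_add_endpoint(sb, host_ep);
+ *		sgt = xhci_sideband_get_endpoint_buffer(sb, host_ep);
+ *	}
+ */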
+
+#if IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND)
+bool xhci_sideband_check(struct usb_hcd *hcd);
+#else
+static inline bool xhci_sideband_check(struct usb_hcd *hcd)
+{ return false; }
+#endif /* IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND) */
+
+int
+xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg,
+ bool ip_autoclear, u32 imod_interval, int intr_num);
+void
+xhci_sideband_remove_interrupter(struct xhci_sideband *sb);
+int
+xhci_sideband_interrupter_id(struct xhci_sideband *sb);
+
+#if IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND)
+void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb,
+ unsigned int ep_index);
+#else
+static inline void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb,
+ unsigned int ep_index)
+{ }
+#endif /* IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND) */
+#endif /* __LINUX_XHCI_SIDEBAND_H */
diff --git a/include/linux/user_events.h b/include/linux/user_events.h
index 8afa8c3a0973..57d1ff006090 100644
--- a/include/linux/user_events.h
+++ b/include/linux/user_events.h
@@ -33,7 +33,7 @@ extern void user_event_mm_dup(struct task_struct *t,
extern void user_event_mm_remove(struct task_struct *t);
static inline void user_events_fork(struct task_struct *t,
- unsigned long clone_flags)
+ u64 clone_flags)
{
struct user_event_mm *old_mm;
@@ -68,7 +68,7 @@ static inline void user_events_exit(struct task_struct *t)
}
#else
static inline void user_events_fork(struct task_struct *t,
- unsigned long clone_flags)
+ u64 clone_flags)
{
}
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6030a8235617..9c3be157397e 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -5,8 +5,10 @@
#include <linux/kref.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
+#include <linux/rculist_nulls.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include <linux/rcuref.h>
#include <linux/rwsem.h>
#include <linux/sysctl.h>
#include <linux/err.h>
@@ -21,9 +23,11 @@ struct uid_gid_extent {
};
struct uid_gid_map { /* 64 bytes -- 1 cache line */
- u32 nr_extents;
union {
- struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ struct {
+ struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+ u32 nr_extents;
+ };
struct {
struct uid_gid_extent *forward;
struct uid_gid_extent *reverse;
@@ -113,10 +117,11 @@ struct user_namespace {
} __randomize_layout;
struct ucounts {
- struct hlist_node node;
+ struct hlist_nulls_node node;
struct user_namespace *ns;
kuid_t uid;
- atomic_t count;
+ struct rcu_head rcu;
+ rcuref_t count;
atomic_long_t ucount[UCOUNT_COUNTS];
atomic_long_t rlimit[UCOUNT_RLIMIT_COUNTS];
};
@@ -129,9 +134,15 @@ void retire_userns_sysctls(struct user_namespace *ns);
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
-struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
void put_ucounts(struct ucounts *ucounts);
+static inline struct ucounts * __must_check get_ucounts(struct ucounts *ucounts)
+{
+ if (rcuref_get(&ucounts->count))
+ return ucounts;
+ return NULL;
+}
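+
+/*
+ * Illustrative sketch (not part of this patch): rcuref acquisition fails
+ * once the count has dropped to zero, so callers must handle NULL and
+ * pair each successful get with put_ucounts():
+ *
+ *	struct ucounts *uc = get_ucounts(ucounts);
+ *
+ *	if (uc) {
+ *		...
+ *		put_ucounts(uc);
+ *	}
+ */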
+
static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type type)
{
return atomic_long_read(&ucounts->rlimit[type]);
@@ -139,7 +150,8 @@ static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type ty
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v);
-long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type);
+long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
+ bool override_rlimit);
void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type);
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max);
@@ -154,12 +166,17 @@ static inline void set_userns_rlimit_max(struct user_namespace *ns,
ns->rlimit_max[type] = max <= LONG_MAX ? max : LONG_MAX;
}
+static inline struct user_namespace *to_user_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct user_namespace, ns);
+}
+
#ifdef CONFIG_USER_NS
static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
if (ns)
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
@@ -169,7 +186,7 @@ extern void __put_user_ns(struct user_namespace *ns);
static inline void put_user_ns(struct user_namespace *ns)
{
- if (ns && refcount_dec_and_test(&ns->ns.count))
+ if (ns && ns_ref_put(ns))
__put_user_ns(ns);
}
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 05d59f74fc88..fd5f42765497 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -16,7 +16,7 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>
@@ -30,11 +30,7 @@
* from userfaultfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define UFFD_CLOEXEC O_CLOEXEC
-#define UFFD_NONBLOCK O_NONBLOCK
-
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
-#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
/*
* Start with fault_pending_wqh and fault_wqh so they're more likely
@@ -213,11 +209,14 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
}
static inline bool vma_can_userfault(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
bool wp_async)
{
vm_flags &= __VM_UFFD_FLAGS;
+ if (vma->vm_flags & VM_DROPPABLE)
+ return false;
+
if ((vm_flags & VM_UFFD_MINOR) &&
(!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
return false;
@@ -229,29 +228,37 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
if (wp_async && (vm_flags == VM_UFFD_WP))
return true;
-#ifndef CONFIG_PTE_MARKER_UFFD_WP
/*
* If user requested uffd-wp but not enabled pte markers for
* uffd-wp, then shmem & hugetlbfs are not supported but only
* anonymous.
*/
- if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
+ if (!uffd_supports_wp_marker() && (vm_flags & VM_UFFD_WP) &&
+ !vma_is_anonymous(vma))
return false;
-#endif
/* By default, allow any of anon|shmem|hugetlb */
return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
vma_is_shmem(vma);
}
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+ struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
+
+ return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
+}
+
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);
+void dup_userfaultfd_fail(struct list_head *);
extern void mremap_userfaultfd_prep(struct vm_area_struct *,
struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
+void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *);
extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
@@ -264,6 +271,62 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
+void userfaultfd_reset_ctx(struct vm_area_struct *vma);
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end);
+
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ unsigned long start, unsigned long end,
+ bool wp_async);
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx);
+
+void userfaultfd_release_all(struct mm_struct *mm,
+ struct userfaultfd_ctx *ctx);
+
+static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
+{
+ /* Only wr-protect mode uses pte markers */
+ if (!userfaultfd_wp(vma))
+ return false;
+
+ /* File-based uffd-wp always need markers */
+ if (!vma_is_anonymous(vma))
+ return true;
+
+ /*
+ * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED
+ * enabled (to apply markers on zero pages).
+ */
+ return userfaultfd_wp_unpopulated(vma);
+}
+
+/*
+ * Returns true if this is a swap pte and was uffd-wp wr-protected in either
+ * forms (pte marker or a normal swap pte), false otherwise.
+ */
+static inline bool pte_swp_uffd_wp_any(pte_t pte)
+{
+ if (!uffd_supports_wp_marker())
+ return false;
+
+ if (pte_present(pte))
+ return false;
+
+ if (pte_swp_uffd_wp(pte))
+ return true;
+
+ if (pte_is_uffd_wp_marker(pte))
+ return true;
+
+ return false;
+}
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
@@ -329,6 +392,10 @@ static inline void dup_userfaultfd_complete(struct list_head *l)
{
}
+static inline void dup_userfaultfd_fail(struct list_head *l)
+{
+}
+
static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx *ctx)
{
@@ -341,6 +408,10 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
+static inline void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *ctx)
+{
+}
+
static inline bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
@@ -375,49 +446,14 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
return false;
}
-#endif /* CONFIG_USERFAULTFD */
-
-static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
-{
- /* Only wr-protect mode uses pte markers */
- if (!userfaultfd_wp(vma))
- return false;
-
- /* File-based uffd-wp always need markers */
- if (!vma_is_anonymous(vma))
- return true;
-
- /*
- * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED
- * enabled (to apply markers on zero pages).
- */
- return userfaultfd_wp_unpopulated(vma);
-}
-
-static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
{
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
- return is_pte_marker_entry(entry) &&
- (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
-#else
return false;
-#endif
}
-static inline bool pte_marker_uffd_wp(pte_t pte)
+static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
- swp_entry_t entry;
-
- if (!is_swap_pte(pte))
- return false;
-
- entry = pte_to_swp_entry(pte);
-
- return pte_marker_entry_uffd_wp(entry);
-#else
return false;
-#endif
}
/*
@@ -426,17 +462,7 @@ static inline bool pte_marker_uffd_wp(pte_t pte)
*/
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
-#ifdef CONFIG_PTE_MARKER_UFFD_WP
- if (!is_swap_pte(pte))
- return false;
-
- if (pte_swp_uffd_wp(pte))
- return true;
-
- if (pte_marker_uffd_wp(pte))
- return true;
-#endif
return false;
}
-
+#endif /* CONFIG_USERFAULTFD */
#endif /* _LINUX_USERFAULTFD_K_H */
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
deleted file mode 100644
index ad970416260d..000000000000
--- a/include/linux/usermode_driver.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_USERMODE_DRIVER_H__
-#define __LINUX_USERMODE_DRIVER_H__
-
-#include <linux/umh.h>
-#include <linux/path.h>
-
-struct umd_info {
- const char *driver_name;
- struct file *pipe_to_umh;
- struct file *pipe_from_umh;
- struct path wd;
- struct pid *tgid;
-};
-int umd_load_blob(struct umd_info *info, const void *data, size_t len);
-int umd_unload_blob(struct umd_info *info);
-int fork_usermode_driver(struct umd_info *info);
-void umd_cleanup_helper(struct umd_info *info);
-
-#endif /* __LINUX_USERMODE_DRIVER_H__ */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index 6bb460c3e818..2eb528058d0d 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -2,20 +2,25 @@
#ifndef _LINUX_HELPER_MACROS_H_
#define _LINUX_HELPER_MACROS_H_
+#include <linux/compiler_attributes.h>
#include <linux/math.h>
+#include <linux/typecheck.h>
+#include <linux/stddef.h>
-#define __find_closest(x, a, as, op) \
-({ \
- typeof(as) __fc_i, __fc_as = (as) - 1; \
- typeof(x) __fc_x = (x); \
- typeof(*a) const *__fc_a = (a); \
- for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
- if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
- __fc_a[__fc_i + 1], 2)) \
- break; \
- } \
- (__fc_i); \
-})
+/**
+ * for_each_if - helper for handling conditionals in various for_each macros
+ * @condition: The condition to check
+ *
+ * Typical use::
+ *
+ * #define for_each_foo_bar(x, y) \'
+ * list_for_each_entry(x, y->list, head) \'
+ * for_each_if(x->something == SOMETHING)
+ *
+ * The for_each_if() macro makes the use of for_each_foo_bar() less error
+ * prone.
+ */
+#define for_each_if(condition) if (!(condition)) {} else
/**
* find_closest - locate the closest element in a sorted array
@@ -25,8 +30,27 @@
* @as: Size of 'a'.
*
* Returns the index of the element closest to 'x'.
+ * Note: If using an array of negative numbers (or mixed positive/negative
+ * numbers), then be sure that 'x' is of a signed type to get good results.
*/
-#define find_closest(x, a, as) __find_closest(x, a, as, <=)
+#define find_closest(x, a, as) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ long __fc_mid_x, __fc_x = (x); \
+ long __fc_left, __fc_right; \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
+ __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i + 1]) / 2; \
+ if (__fc_x <= __fc_mid_x) { \
+ __fc_left = __fc_x - __fc_a[__fc_i]; \
+ __fc_right = __fc_a[__fc_i + 1] - __fc_x; \
+ if (__fc_right < __fc_left) \
+ __fc_i++; \
+ break; \
+ } \
+ } \
+ (__fc_i); \
+})
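+
+/*
+ * Illustrative example (not part of this patch): with a sorted array
+ * a[] = { 100, 200, 400, 800 }, find_closest(250, a, 4) stops at i = 1
+ * because 250 <= (200 + 400) / 2, and keeps i = 1 since 250 is nearer
+ * to 200 than to 400.
+ */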
/**
* find_closest_descending - locate the closest element in a sorted array
@@ -36,9 +60,93 @@
* @as: Size of 'a'.
*
* Similar to find_closest() but 'a' is expected to be sorted in descending
- * order.
+ * order. The iteration is done in reverse order, so that the comparison
+ * of '__fc_right' & '__fc_left' also works for unsigned numbers.
*/
-#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+#define find_closest_descending(x, a, as) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ long __fc_mid_x, __fc_x = (x); \
+ long __fc_left, __fc_right; \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = __fc_as; __fc_i >= 1; __fc_i--) { \
+ __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i - 1]) / 2; \
+ if (__fc_x <= __fc_mid_x) { \
+ __fc_left = __fc_x - __fc_a[__fc_i]; \
+ __fc_right = __fc_a[__fc_i - 1] - __fc_x; \
+ if (__fc_right < __fc_left) \
+ __fc_i--; \
+ break; \
+ } \
+ } \
+ (__fc_i); \
+})
+
+/**
+ * PTR_IF - evaluate to @ptr if @cond is true, or to NULL otherwise.
+ * @cond: A conditional, usually in a form of IS_ENABLED(CONFIG_FOO)
+ * @ptr: A pointer to assign if @cond is true.
+ *
+ * PTR_IF(IS_ENABLED(CONFIG_FOO), ptr) evaluates to @ptr if CONFIG_FOO is set
+ * to 'y' or 'm', or to NULL otherwise. The @ptr argument must be a pointer.
+ *
+ * The macro can be very useful in helping the compiler drop dead code.
+ *
+ * For instance, consider the following::
+ *
+ * #ifdef CONFIG_FOO_SUSPEND
+ * static int foo_suspend(struct device *dev)
+ * {
+ * ...
+ * }
+ * #endif
+ *
+ * static struct pm_ops foo_ops = {
+ * #ifdef CONFIG_FOO_SUSPEND
+ * .suspend = foo_suspend,
+ * #endif
+ * };
+ *
+ * While this works, the foo_suspend() function is compiled conditionally,
+ * only when CONFIG_FOO_SUSPEND is set. This is problematic: if there were
+ * a build bug in this function, we would have no way to know unless
+ * the configuration option is set.
+ *
+ * An alternative is to declare foo_suspend() always, but mark it
+ * as __maybe_unused. This works, but the __maybe_unused attribute
+ * is required to instruct the compiler that the function may not
+ * be referenced anywhere, and is safe to remove without making
+ * a fuss about it. This makes the programmer responsible for tagging
+ * the functions that can be garbage-collected.
+ *
+ * With the macro it is possible to write the following::
+ *
+ * static int foo_suspend(struct device *dev)
+ * {
+ * ...
+ * }
+ *
+ * static struct pm_ops foo_ops = {
+ * .suspend = PTR_IF(IS_ENABLED(CONFIG_FOO_SUSPEND), foo_suspend),
+ * };
+ *
+ * The foo_suspend() function will now be automatically dropped by the
+ * compiler, and it does not require any specific attribute.
+ */
+#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
+
+/**
+ * u64_to_user_ptr - cast a pointer passed as u64 from user space to void __user *
+ * @x: The u64 value from user space, usually via IOCTL
+ *
+ * u64_to_user_ptr() simply casts a pointer passed as u64 from user space to void
+ * __user * correctly. Using this lets us get rid of all the tiresome casts.
+ */
+#define u64_to_user_ptr(x) \
+({ \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
+})
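+
+/*
+ * Illustrative sketch (not part of this patch): a hypothetical ioctl
+ * argument struct carrying a user buffer pointer as u64:
+ *
+ *	struct foo_args { __u64 buf_ptr; __u64 buf_len; };
+ *
+ *	void __user *buf = u64_to_user_ptr(args.buf_ptr);
+ *	if (copy_from_user(kbuf, buf, len))
+ *		return -EFAULT;
+ */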
/**
* is_insidevar - check if the @ptr points inside the @var memory range.
diff --git a/include/linux/uts_namespace.h b/include/linux/uts_namespace.h
new file mode 100644
index 000000000000..60f37fec0f4b
--- /dev/null
+++ b/include/linux/uts_namespace.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UTS_NAMESPACE_H
+#define _LINUX_UTS_NAMESPACE_H
+
+#include <linux/ns_common.h>
+#include <uapi/linux/utsname.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+struct uts_namespace {
+ struct new_utsname name;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct ns_common ns;
+} __randomize_layout;
+
+extern struct uts_namespace init_uts_ns;
+
+#ifdef CONFIG_UTS_NS
+static inline struct uts_namespace *to_uts_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct uts_namespace, ns);
+}
+
+static inline void get_uts_ns(struct uts_namespace *ns)
+{
+ ns_ref_inc(ns);
+}
+
+extern struct uts_namespace *copy_utsname(u64 flags,
+ struct user_namespace *user_ns, struct uts_namespace *old_ns);
+extern void free_uts_ns(struct uts_namespace *ns);
+
+static inline void put_uts_ns(struct uts_namespace *ns)
+{
+ if (ns_ref_put(ns))
+ free_uts_ns(ns);
+}
+
+void uts_ns_init(void);
+#else
+static inline void get_uts_ns(struct uts_namespace *ns)
+{
+}
+
+static inline void put_uts_ns(struct uts_namespace *ns)
+{
+}
+
+static inline struct uts_namespace *copy_utsname(u64 flags,
+ struct user_namespace *user_ns, struct uts_namespace *old_ns)
+{
+ if (flags & CLONE_NEWUTS)
+ return ERR_PTR(-EINVAL);
+
+ return old_ns;
+}
+
+static inline void uts_ns_init(void)
+{
+}
+#endif
+
+#endif /* _LINUX_UTS_NAMESPACE_H */
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index bf7613ba412b..547bd4439706 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -7,7 +7,7 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
-#include <uapi/linux/utsname.h>
+#include <linux/uts_namespace.h>
enum uts_proc {
UTS_PROC_ARCH,
@@ -18,57 +18,6 @@ enum uts_proc {
UTS_PROC_DOMAINNAME,
};
-struct user_namespace;
-extern struct user_namespace init_user_ns;
-
-struct uts_namespace {
- struct new_utsname name;
- struct user_namespace *user_ns;
- struct ucounts *ucounts;
- struct ns_common ns;
-} __randomize_layout;
-extern struct uts_namespace init_uts_ns;
-
-#ifdef CONFIG_UTS_NS
-static inline void get_uts_ns(struct uts_namespace *ns)
-{
- refcount_inc(&ns->ns.count);
-}
-
-extern struct uts_namespace *copy_utsname(unsigned long flags,
- struct user_namespace *user_ns, struct uts_namespace *old_ns);
-extern void free_uts_ns(struct uts_namespace *ns);
-
-static inline void put_uts_ns(struct uts_namespace *ns)
-{
- if (refcount_dec_and_test(&ns->ns.count))
- free_uts_ns(ns);
-}
-
-void uts_ns_init(void);
-#else
-static inline void get_uts_ns(struct uts_namespace *ns)
-{
-}
-
-static inline void put_uts_ns(struct uts_namespace *ns)
-{
-}
-
-static inline struct uts_namespace *copy_utsname(unsigned long flags,
- struct user_namespace *user_ns, struct uts_namespace *old_ns)
-{
- if (flags & CLONE_NEWUTS)
- return ERR_PTR(-EINVAL);
-
- return old_ns;
-}
-
-static inline void uts_ns_init(void)
-{
-}
-#endif
-
#ifdef CONFIG_PROC_SYSCTL
extern void uts_proc_notify(enum uts_proc proc);
#else
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..4cf21d6e9cfd 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/virtio.h>
#include <linux/vhost_iotlb.h>
#include <linux/virtio_net.h>
#include <linux/virtio_blk.h>
@@ -70,11 +71,12 @@ struct vdpa_mgmt_dev;
/**
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
- * @dma_dev: the actual device that is performing DMA
+ * @vmap: the metadata passed to upper layer to be used for mapping
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
* @config: the configuration ops for this device.
+ * @map: the map ops for this device
* @cf_lock: Protects get and set access to configuration layout.
* @index: device index
* @features_valid: were features initialized? for legacy guests
@@ -87,9 +89,10 @@ struct vdpa_mgmt_dev;
*/
struct vdpa_device {
struct device dev;
- struct device *dma_dev;
+ union virtio_map vmap;
const char *driver_override;
const struct vdpa_config_ops *config;
+ const struct virtio_map_ops *map;
struct rw_semaphore cf_lock; /* Protects get/set config */
unsigned int index;
bool features_valid;
@@ -352,11 +355,11 @@ struct vdpa_map_file {
* @vdev: vdpa device
* @asid: address space identifier
* Returns integer: success (0) or error (< 0)
- * @get_vq_dma_dev: Get the dma device for a specific
+ * @get_vq_map: Get the map metadata for a specific
* virtqueue (optional)
* @vdev: vdpa device
* @idx: virtqueue index
- * Returns pointer to structure device or error (NULL)
+ *				Returns the map token union or an error (NULL)
* @bind_mm: Bind the device to a specific address space
* so the vDPA framework can use VA when this
* callback is implemented. (optional)
@@ -436,7 +439,7 @@ struct vdpa_config_ops {
int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
unsigned int asid);
- struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
+ union virtio_map (*get_vq_map)(struct vdpa_device *vdev, u16 idx);
int (*bind_mm)(struct vdpa_device *vdev, struct mm_struct *mm);
void (*unbind_mm)(struct vdpa_device *vdev);
@@ -446,6 +449,7 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
+ const struct virtio_map_ops *map,
unsigned int ngroups, unsigned int nas,
size_t size, const char *name,
bool use_va);
@@ -457,6 +461,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
* @member: the name of struct vdpa_device within the @dev_struct
* @parent: the parent device
* @config: the bus operations that is supported by this device
+ * @map: the map operations that are supported by this device
* @ngroups: the number of virtqueue groups supported by this device
* @nas: the number of address spaces
* @name: name of the vdpa device
@@ -464,10 +469,10 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
*
* Return allocated data structure or ERR_PTR upon error
*/
-#define vdpa_alloc_device(dev_struct, member, parent, config, ngroups, nas, \
- name, use_va) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, map, \
+ ngroups, nas, name, use_va) \
container_of((__vdpa_alloc_device( \
- parent, config, ngroups, nas, \
+ parent, config, map, ngroups, nas, \
(sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member))), name, use_va)), \
@@ -520,9 +525,9 @@ static inline void vdpa_set_drvdata(struct vdpa_device *vdev, void *data)
dev_set_drvdata(&vdev->dev, data);
}
-static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
+static inline union virtio_map vdpa_get_map(struct vdpa_device *vdev)
{
- return vdev->dma_dev;
+ return vdev->vmap;
}
static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
@@ -582,11 +587,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
* @dev: vdpa device to remove
* Driver need to remove the specified device by calling
* _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attr after it was created
+ *		@mdev: parent device to use for device
+ *		@dev: vdpa device structure
+ *		@config: attributes to be set for the device.
+ * The driver needs to check the mask of the structure and then set
+ * the related information to the vdpa device. The driver must return 0
+ * if set successfully.
*/
struct vdpa_mgmtdev_ops {
int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name,
const struct vdpa_dev_set_config *config);
void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+ int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev,
+ const struct vdpa_dev_set_config *config);
};
/**
diff --git a/include/linux/vdso_datastore.h b/include/linux/vdso_datastore.h
new file mode 100644
index 000000000000..a91fa24b06e0
--- /dev/null
+++ b/include/linux/vdso_datastore.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VDSO_DATASTORE_H
+#define _LINUX_VDSO_DATASTORE_H
+
+#include <linux/mm_types.h>
+
+extern const struct vm_special_mapping vdso_vvar_mapping;
+struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr);
+
+#endif /* _LINUX_VDSO_DATASTORE_H */
diff --git a/include/linux/verification.h b/include/linux/verification.h
index cb2d47f28091..dec7f2beabfd 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -36,10 +36,9 @@ enum key_being_used_for {
VERIFYING_KEY_SIGNATURE,
VERIFYING_KEY_SELF_SIGNATURE,
VERIFYING_UNSPECIFIED_SIGNATURE,
+ VERIFYING_BPF_SIGNATURE,
NR__KEY_BEING_USED_FOR
};
-extern const char *const key_being_used_for[NR__KEY_BEING_USED_FOR];
-
#ifdef CONFIG_SYSTEM_DATA_VERIFICATION
struct key;
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index a54046bf37e5..335c360d4f9b 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -15,10 +15,10 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT_BUILD
-#define MODULE_VERMAGIC_PREEMPT "preempt "
-#elif defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_PREEMPT_RT
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
+#elif defined(CONFIG_PREEMPT_BUILD)
+#define MODULE_VERMAGIC_PREEMPT "preempt "
#else
#define MODULE_VERMAGIC_PREEMPT ""
#endif
@@ -33,7 +33,6 @@
#define MODULE_VERMAGIC_MODVERSIONS ""
#endif
#ifdef RANDSTRUCT
-#include <generated/randstruct_hash.h>
#define MODULE_RANDSTRUCT "RANDSTRUCT_" RANDSTRUCT_HASHED_SEED
#else
#define MODULE_RANDSTRUCT
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 8b1a29820409..e90859956514 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -21,6 +21,7 @@ struct kvm;
struct iommufd_ctx;
struct iommufd_device;
struct iommufd_access;
+struct vfio_info_cap;
/*
* VFIO devices can be placed in a set, this allows all devices to share this
@@ -64,8 +65,10 @@ struct vfio_device {
struct completion comp;
struct iommufd_access *iommufd_access;
void (*put_kvm)(struct kvm *kvm);
+ struct inode *inode;
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
+ struct ida pasids;
u8 iommufd_attached:1;
#endif
u8 cdev_opened:1;
@@ -90,6 +93,8 @@ struct vfio_device {
* bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
* called.
* @detach_ioas: Opposite of attach_ioas
+ * @pasid_attach_ioas: The pasid variation of attach_ioas
+ * @pasid_detach_ioas: Opposite of pasid_attach_ioas
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
@@ -101,6 +106,9 @@ struct vfio_device {
* @match: Optional device name match callback (return: 0 for no-match, >0 for
* match, -errno for abort (ex. match with insufficient or incorrect
* additional args)
+ * @match_token_uuid: Optional device token match/validation. Return 0
+ * if the uuid is valid for the device, -errno otherwise. uuid is NULL
+ * if none was provided.
* @dma_unmap: Called when userspace unmaps IOVA from the container
* this device is attached to.
* @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl
@@ -114,6 +122,9 @@ struct vfio_device_ops {
void (*unbind_iommufd)(struct vfio_device *vdev);
int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
void (*detach_ioas)(struct vfio_device *vdev);
+ int (*pasid_attach_ioas)(struct vfio_device *vdev, u32 pasid,
+ u32 *pt_id);
+ void (*pasid_detach_ioas)(struct vfio_device *vdev, u32 pasid);
int (*open_device)(struct vfio_device *vdev);
void (*close_device)(struct vfio_device *vdev);
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
@@ -122,9 +133,13 @@ struct vfio_device_ops {
size_t count, loff_t *size);
long (*ioctl)(struct vfio_device *vdev, unsigned int cmd,
unsigned long arg);
+ int (*get_region_info_caps)(struct vfio_device *vdev,
+ struct vfio_region_info *info,
+ struct vfio_info_cap *caps);
int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
void (*request)(struct vfio_device *vdev, unsigned int count);
int (*match)(struct vfio_device *vdev, char *buf);
+ int (*match_token_uuid)(struct vfio_device *vdev, const uuid_t *uuid);
void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
int (*device_feature)(struct vfio_device *device, u32 flags,
void __user *arg, size_t argsz);
@@ -138,6 +153,10 @@ int vfio_iommufd_physical_bind(struct vfio_device *vdev,
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
+int vfio_iommufd_physical_pasid_attach_ioas(struct vfio_device *vdev,
+ u32 pasid, u32 *pt_id);
+void vfio_iommufd_physical_pasid_detach_ioas(struct vfio_device *vdev,
+ u32 pasid);
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
@@ -165,6 +184,10 @@ vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
#define vfio_iommufd_physical_detach_ioas \
((void (*)(struct vfio_device *vdev)) NULL)
+#define vfio_iommufd_physical_pasid_attach_ioas \
+ ((int (*)(struct vfio_device *vdev, u32 pasid, u32 *pt_id)) NULL)
+#define vfio_iommufd_physical_pasid_detach_ioas \
+ ((void (*)(struct vfio_device *vdev, u32 pasid)) NULL)
#define vfio_iommufd_emulated_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
@@ -278,6 +301,8 @@ static inline void vfio_put_device(struct vfio_device *device)
int vfio_register_group_dev(struct vfio_device *device);
int vfio_register_emulated_iommu_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
+bool vfio_device_try_get_registration(struct vfio_device *device);
+void vfio_device_put_registration(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index a2c8b8bba711..706877f998ff 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/irqbypass.h>
+#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/notifier.h>
@@ -26,6 +27,14 @@
struct vfio_pci_core_device;
struct vfio_pci_region;
+struct p2pdma_provider;
+struct dma_buf_phys_vec;
+struct dma_buf_attachment;
+
+struct vfio_pci_eventfd {
+ struct eventfd_ctx *ctx;
+ struct rcu_head rcu;
+};
struct vfio_pci_regops {
ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
@@ -49,9 +58,48 @@ struct vfio_pci_region {
u32 flags;
};
+struct vfio_pci_device_ops {
+ int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
+ struct p2pdma_provider **provider,
+ unsigned int region_index,
+ struct dma_buf_phys_vec *phys_vec,
+ struct vfio_region_dma_range *dma_ranges,
+ size_t nr_ranges);
+};
+
+#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
+int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+ struct vfio_region_dma_range *dma_ranges,
+ size_t nr_ranges, phys_addr_t start,
+ phys_addr_t len);
+int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
+ struct p2pdma_provider **provider,
+ unsigned int region_index,
+ struct dma_buf_phys_vec *phys_vec,
+ struct vfio_region_dma_range *dma_ranges,
+ size_t nr_ranges);
+#else
+static inline int
+vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
+ struct vfio_region_dma_range *dma_ranges,
+ size_t nr_ranges, phys_addr_t start,
+ phys_addr_t len)
+{
+ return -EINVAL;
+}
+static inline int vfio_pci_core_get_dmabuf_phys(
+ struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
+ unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
+ struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
struct vfio_pci_core_device {
struct vfio_device vdev;
struct pci_dev *pdev;
+ const struct vfio_pci_device_ops *pci_ops;
void __iomem *barmap[PCI_STD_NUM_BARS];
bool bar_mmap_supported[PCI_STD_NUM_BARS];
u8 *pci_config_map;
@@ -83,8 +131,8 @@ struct vfio_pci_core_device {
struct pci_saved_state *pci_saved_state;
struct pci_saved_state *pm_save;
int ioeventfds_nr;
- struct eventfd_ctx *err_trigger;
- struct eventfd_ctx *req_trigger;
+ struct vfio_pci_eventfd __rcu *err_trigger;
+ struct vfio_pci_eventfd __rcu *req_trigger;
struct eventfd_ctx *pm_wake_eventfd_ctx;
struct list_head dummy_resources_list;
struct mutex ioeventfds_lock;
@@ -93,9 +141,8 @@ struct vfio_pci_core_device {
struct list_head sriov_pfs_item;
struct vfio_pci_core_device *sriov_pf_core_dev;
struct notifier_block nb;
- struct mutex vma_lock;
- struct list_head vma_list;
struct rw_semaphore memory_lock;
+ struct list_head dmabufs;
};
/* Will be exported for vfio pci drivers usage */
@@ -117,13 +164,21 @@ long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
unsigned long arg);
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
void __user *arg, size_t argsz);
+int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
+ struct vfio_region_info *info,
+ struct vfio_info_cap *caps);
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
size_t count, loff_t *ppos);
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
size_t count, loff_t *ppos);
+vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev,
+ struct vm_fault *vmf, unsigned long pfn,
+ unsigned int order);
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
+int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
+ const uuid_t *uuid);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
@@ -134,28 +189,45 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
size_t x_end, bool iswrite);
+bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
loff_t reg_start, size_t reg_cnt,
loff_t *buf_offset,
size_t *intersect_count,
size_t *register_offset);
-#define VFIO_IOWRITE_DECLATION(size) \
+#define VFIO_IOWRITE_DECLARATION(size) \
int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size val, void __iomem *io);
-VFIO_IOWRITE_DECLATION(8)
-VFIO_IOWRITE_DECLATION(16)
-VFIO_IOWRITE_DECLATION(32)
+VFIO_IOWRITE_DECLARATION(8)
+VFIO_IOWRITE_DECLARATION(16)
+VFIO_IOWRITE_DECLARATION(32)
#ifdef iowrite64
-VFIO_IOWRITE_DECLATION(64)
+VFIO_IOWRITE_DECLARATION(64)
#endif
-#define VFIO_IOREAD_DECLATION(size) \
+#define VFIO_IOREAD_DECLARATION(size) \
int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
bool test_mem, u##size *val, void __iomem *io);
-VFIO_IOREAD_DECLATION(8)
-VFIO_IOREAD_DECLATION(16)
-VFIO_IOREAD_DECLATION(32)
+VFIO_IOREAD_DECLARATION(8)
+VFIO_IOREAD_DECLARATION(16)
+VFIO_IOREAD_DECLARATION(32)
+#ifdef ioread64
+VFIO_IOREAD_DECLARATION(64)
+#endif
+
+static inline bool is_aligned_for_order(struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long pfn,
+ unsigned int order)
+{
+ return !(order && (addr < vma->vm_start ||
+ addr + (PAGE_SIZE << order) > vma->vm_end ||
+ !IS_ALIGNED(pfn, 1 << order)));
+}
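+
+/*
+ * Illustrative sketch (not part of this patch): a huge-fault handler
+ * might fall back to a single-page insertion when the faulting range or
+ * pfn is not suitably aligned for the requested order:
+ *
+ *	if (!is_aligned_for_order(vma, addr, pfn, order))
+ *		order = 0;
+ *	ret = vfio_pci_vmf_insert_pfn(vdev, vmf, pfn, order);
+ */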
+
+int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys);
#endif /* VFIO_PCI_CORE_H */
diff --git a/include/linux/vfsdebug.h b/include/linux/vfsdebug.h
new file mode 100644
index 000000000000..9cf22d3eb9dd
--- /dev/null
+++ b/include/linux/vfsdebug.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_VFS_DEBUG_H
+#define LINUX_VFS_DEBUG_H 1
+
+#include <linux/bug.h>
+
+struct inode;
+
+#ifdef CONFIG_DEBUG_VFS
+void dump_inode(struct inode *inode, const char *reason);
+
+#define VFS_BUG_ON(cond) BUG_ON(cond)
+#define VFS_WARN_ON(cond) (void)WARN_ON(cond)
+#define VFS_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VFS_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VFS_WARN(cond, format...) (void)WARN(cond, format)
+
+#define VFS_BUG_ON_INODE(cond, inode) ({ \
+ if (unlikely(!!(cond))) { \
+ dump_inode(inode, "VFS_BUG_ON_INODE(" #cond")");\
+ BUG_ON(1); \
+ } \
+})
+
+#define VFS_WARN_ON_INODE(cond, inode) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_inode(inode, "VFS_WARN_ON_INODE(" #cond")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#else
+#define VFS_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#define VFS_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+
+#define VFS_BUG_ON_INODE(cond, inode) VFS_BUG_ON(cond)
+#define VFS_WARN_ON_INODE(cond, inode) BUILD_BUG_ON_INVALID(cond)
+#endif /* CONFIG_DEBUG_VFS */
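+
+/*
+ * Illustrative sketch (not part of this patch): these are used like the
+ * plain WARN_ON()/BUG_ON() helpers, with the inode variants dumping the
+ * inode state first, e.g. at a hypothetical call site:
+ *
+ *	VFS_WARN_ON_INODE(inode->i_state & I_FREEING, inode);
+ */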
+
+#endif
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 219037f4c08d..9609cf365e8e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -50,7 +50,7 @@
*
* Author: Bill Dirks <bill@thedirks.org>
* Justin Schoeman
- * Hans Verkuil <hverkuil@xs4all.nl>
+ * Hans Verkuil <hverkuil@kernel.org>
* et al.
*/
#ifndef __LINUX_VIDEODEV2_H
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 96fea920873b..132a474e5914 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,6 +10,8 @@
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/virtio_features.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -39,6 +41,15 @@ struct virtqueue {
void *priv;
};
+struct vduse_iova_domain;
+
+union virtio_map {
+ /* Device that performs DMA */
+ struct device *dma_dev;
+ /* VDUSE specific mapping data */
+ struct vduse_iova_domain *iova_domain;
+};
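+
+/*
+ * Illustrative sketch (not part of this patch): a transport that does
+ * regular DMA would populate the union with its DMA device (pdev here
+ * is a hypothetical PCI device), while VDUSE supplies its IOVA domain
+ * instead:
+ *
+ *	union virtio_map vmap = { .dma_dev = &pdev->dev };
+ */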
+
int virtqueue_add_outbuf(struct virtqueue *vq,
struct scatterlist sg[], unsigned int num,
void *data,
@@ -55,6 +66,17 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
void *ctx,
gfp_t gfp);
+int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp);
+
+int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
+ struct scatterlist *sg, unsigned int num,
+ void *data,
+ gfp_t gfp);
+
int virtqueue_add_sgs(struct virtqueue *vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
@@ -81,8 +103,6 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
-int virtqueue_set_dma_premapped(struct virtqueue *_vq);
-
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -99,9 +119,11 @@ dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *vq);
dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
- void (*recycle)(struct virtqueue *vq, void *buf));
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq));
int virtqueue_reset(struct virtqueue *vq,
- void (*recycle)(struct virtqueue *vq, void *buf));
+ void (*recycle)(struct virtqueue *vq, void *buf),
+ void (*recycle_done)(struct virtqueue *vq));
struct virtio_admin_cmd {
__le16 opcode;
@@ -109,13 +131,18 @@ struct virtio_admin_cmd {
__le64 group_member_id;
struct scatterlist *data_sg;
struct scatterlist *result_sg;
+ struct completion completion;
+ u32 result_sg_size;
+ int ret;
};
/**
* struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
- * @config_enabled: configuration change reporting enabled
+ * @config_core_enabled: configuration change reporting enabled by core
+ * @config_driver_disabled: configuration change reporting disabled by
+ * a driver
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
* @vqs_list_lock: protects @vqs.
@@ -124,7 +151,9 @@ struct virtio_admin_cmd {
* @config: the configuration ops for this device.
* @vringh_config: configuration ops for host vrings.
* @vqs: the list of virtqueues for this device.
- * @features: the features supported by both driver and device.
+ * @features: the 64 lower features supported by both driver and device.
+ * @features_array: the full features space supported by both driver and
+ * device.
* @priv: private pointer for the driver's use.
* @debugfs_dir: debugfs directory entry.
* @debugfs_filter_features: features to be filtered set by debugfs.
@@ -132,7 +161,8 @@ struct virtio_admin_cmd {
struct virtio_device {
int index;
bool failed;
- bool config_enabled;
+ bool config_core_enabled;
+ bool config_driver_disabled;
bool config_change_pending;
spinlock_t config_lock;
spinlock_t vqs_list_lock;
@@ -140,12 +170,14 @@ struct virtio_device {
struct virtio_device_id id;
const struct virtio_config_ops *config;
const struct vringh_config_ops *vringh_config;
+ const struct virtio_map_ops *map;
struct list_head vqs;
- u64 features;
+ VIRTIO_DECLARE_FEATURES(features);
void *priv;
+ union virtio_map vmap;
#ifdef CONFIG_VIRTIO_DEBUG
struct dentry *debugfs_dir;
- u64 debugfs_filter_features;
+ u64 debugfs_filter_features[VIRTIO_FEATURES_U64S];
#endif
};
@@ -163,16 +195,22 @@ void __virtqueue_break(struct virtqueue *_vq);
void __virtqueue_unbreak(struct virtqueue *_vq);
void virtio_config_changed(struct virtio_device *dev);
+
+void virtio_config_driver_disable(struct virtio_device *dev);
+void virtio_config_driver_enable(struct virtio_device *dev);
+
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
void virtio_reset_device(struct virtio_device *dev);
+int virtio_device_reset_prepare(struct virtio_device *dev);
+int virtio_device_reset_done(struct virtio_device *dev);
size_t virtio_max_dma_size(const struct virtio_device *vdev);
#define virtio_device_for_each_vq(vdev, vq) \
- list_for_each_entry(vq, &vdev->vqs, list)
+ list_for_each_entry(vq, &(vdev)->vqs, list)
/**
* struct virtio_driver - operations for a virtio I/O driver
@@ -192,6 +230,12 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* changes; may be called in interrupt context.
* @freeze: optional function to call during suspend/hibernation.
* @restore: optional function to call on resume.
+ * @reset_prepare: optional function to call when a transport specific reset
+ * occurs.
+ * @reset_done: optional function to call after transport specific reset
+ * operation has finished.
+ * @shutdown: synchronize with the device on shutdown. If provided, replaces
+ * the virtio core implementation.
*/
struct virtio_driver {
struct device_driver driver;
@@ -207,12 +251,12 @@ struct virtio_driver {
void (*config_changed)(struct virtio_device *dev);
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
+ int (*reset_prepare)(struct virtio_device *dev);
+ int (*reset_done)(struct virtio_device *dev);
+ void (*shutdown)(struct virtio_device *dev);
};
-static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
-{
- return container_of(drv, struct virtio_driver, driver);
-}
+#define drv_to_virtio(__drv) container_of_const(__drv, struct virtio_driver, driver)
/* use a macro to avoid include chaining to get THIS_MODULE */
#define register_virtio_driver(drv) \
@@ -229,18 +273,41 @@ void unregister_virtio_driver(struct virtio_driver *drv);
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
-dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+
+void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
+ union virtio_map mapping_token,
+ size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp);
+
+void virtqueue_map_free_coherent(struct virtio_device *vdev,
+ union virtio_map mapping_token,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs);
+
+void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
+ dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+
+dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
-void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+void virtqueue_unmap_single_attrs(const struct virtqueue *_vq, dma_addr_t addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs);
-int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr);
-bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
-void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq, dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir);
-void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq, dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index da9b271b54db..69f84ea85d71 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -19,6 +19,20 @@ struct virtio_shm_region {
typedef void vq_callback_t(struct virtqueue *);
/**
+ * struct virtqueue_info - Info for a virtqueue passed to find_vqs().
+ * @name: virtqueue description. Used mainly for debugging, NULL for
+ * a virtqueue unused by the driver.
+ * @callback: A callback to invoke on a used buffer notification.
+ * NULL for a virtqueue that does not need a callback.
+ * @ctx: whether to maintain an extra context per virtqueue.
+ */
+struct virtqueue_info {
+ const char *name;
+ vq_callback_t *callback;
+ bool ctx;
+};
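+
+/*
+ * Illustrative sketch (not part of this patch): a driver with one rx
+ * and one tx queue would describe them as follows (the callbacks are
+ * hypothetical) and hand the array to the transport's find_vqs():
+ *
+ *	struct virtqueue_info vqs_info[] = {
+ *		{ "rx", foo_rx_done, false },
+ *		{ "tx", foo_tx_done, false },
+ *	};
+ */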
+
+/**
* struct virtio_config_ops - operations for configuring a virtio device
* Note: Do not assume that a transport implements all of the operations
* getting/setting a value as a simple read/write! Generally speaking,
@@ -53,10 +67,7 @@ typedef void vq_callback_t(struct virtqueue *);
* vdev: the virtio_device
* nvqs: the number of virtqueues to find
* vqs: on success, includes new virtqueues
- * callbacks: array of callbacks, for each virtqueue
- * include a NULL entry for vqs that do not need a callback
- * names: array of virtqueue names (mainly for debugging)
- * include a NULL entry for vqs unused by driver
+ * vqs_info: array of virtqueue info structures
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
* @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
@@ -66,12 +77,16 @@ typedef void vq_callback_t(struct virtqueue *);
* vdev: the virtio_device
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
- * Returns the first 64 feature bits (all we currently need).
+ * Returns the first 64 feature bits.
+ * @get_extended_features:
+ * vdev: the virtio_device
+ * Returns the first VIRTIO_FEATURES_BITS feature bits (all we currently
+ * need).
* @finalize_features: confirm what device features we'll be using.
* vdev: the virtio_device
* This sends the driver feature bits to the device: it can change
* the dev->feature bits if it wants.
- * Note that despite the name this can be called any number of
+ * Note that despite the name this can be called any number of
* times.
* Returns 0 on success or error status
* @bus_name: return the bus name associated with the device (optional)
@@ -93,8 +108,6 @@ typedef void vq_callback_t(struct virtqueue *);
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
- * @create_avq: create admin virtqueue resource.
- * @destroy_avq: destroy admin virtqueue resource.
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -105,25 +118,97 @@ struct virtio_config_ops {
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
- int (*find_vqs)(struct virtio_device *, unsigned nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
+ int (*find_vqs)(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
void (*synchronize_cbs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
+ void (*get_extended_features)(struct virtio_device *vdev,
+ u64 *features);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
int (*set_vq_affinity)(struct virtqueue *vq,
const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
- int index);
+ int index);
bool (*get_shm_region)(struct virtio_device *vdev,
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
- int (*create_avq)(struct virtio_device *vdev);
- void (*destroy_avq)(struct virtio_device *vdev);
+};
+
+/**
+ * struct virtio_map_ops - operations for mapping buffers for a virtio device
+ * Note: a transport that has its own mapping logic must
+ * implement all of the operations
+ * @map_page: map a buffer to the device
+ * map: metadata for performing mapping
+ * page: the page that will be mapped by the device
+ * offset: the offset in the page for a buffer
+ * size: the buffer size
+ * dir: mapping direction
+ * attrs: mapping attributes
+ * Returns the mapped address
+ * @unmap_page: unmap a buffer from the device
+ *      map: metadata for performing mapping
+ * map_handle: the mapped address
+ * size: the buffer size
+ * dir: mapping direction
+ * attrs: unmapping attributes
+ * @sync_single_for_cpu: sync a single buffer from device to cpu
+ * map: metadata for performing mapping
+ * map_handle: the mapping address to sync
+ * size: the size of the buffer
+ * dir: synchronization direction
+ * @sync_single_for_device: sync a single buffer from cpu to device
+ * map: metadata for performing mapping
+ * map_handle: the mapping address to sync
+ * size: the size of the buffer
+ * dir: synchronization direction
+ * @alloc: alloc a coherent buffer mapping
+ * map: metadata for performing mapping
+ * size: the size of the buffer
+ *      map_handle: on return, the mapping address of the buffer
+ * gfp: allocation flag (GFP_XXX)
+ * Returns virtual address of the allocated buffer
+ * @free: free a coherent buffer mapping
+ * map: metadata for performing mapping
+ * size: the size of the buffer
+ * vaddr: virtual address of the buffer
+ * map_handle: the mapping address that needs to be freed
+ * attrs: unmapping attributes
+ * @need_sync: check whether the buffer needs synchronization
+ * map: metadata for performing mapping
+ * map_handle: the mapped address
+ * Returns whether the buffer needs synchronization
+ * @mapping_error: check whether the mapping address is an error
+ * map: metadata for performing mapping
+ * map_handle: the mapped address
+ * @max_mapping_size: get the maximum buffer size that can be mapped
+ * map: metadata for performing mapping
+ * Returns the maximum buffer size that can be mapped
+ */
+struct virtio_map_ops {
+ dma_addr_t (*map_page)(union virtio_map map, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ void (*unmap_page)(union virtio_map map, dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*sync_single_for_cpu)(union virtio_map map, dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir);
+ void (*sync_single_for_device)(union virtio_map map,
+ dma_addr_t map_handle, size_t size,
+ enum dma_data_direction dir);
+ void *(*alloc)(union virtio_map map, size_t size,
+ dma_addr_t *map_handle, gfp_t gfp);
+ void (*free)(union virtio_map map, size_t size, void *vaddr,
+ dma_addr_t map_handle, unsigned long attrs);
+ bool (*need_sync)(union virtio_map map, dma_addr_t map_handle);
+ int (*mapping_error)(union virtio_map map, dma_addr_t map_handle);
+ size_t (*max_mapping_size)(union virtio_map map);
};
/* If driver didn't advertise the feature, it will never appear. */
@@ -140,13 +225,7 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- return vdev->features & BIT_ULL(fbit);
+ return virtio_features_test_bit(vdev->features_array, fbit);
}
/**
@@ -157,13 +236,7 @@ static inline bool __virtio_test_bit(const struct virtio_device *vdev,
static inline void __virtio_set_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features |= BIT_ULL(fbit);
+ virtio_features_set_bit(vdev->features_array, fbit);
}
/**
@@ -174,13 +247,7 @@ static inline void __virtio_set_bit(struct virtio_device *vdev,
static inline void __virtio_clear_bit(struct virtio_device *vdev,
unsigned int fbit)
{
- /* Did you forget to fix assumptions on max features? */
- if (__builtin_constant_p(fbit))
- BUILD_BUG_ON(fbit >= 64);
- else
- BUG_ON(fbit >= 64);
-
- vdev->features &= ~BIT_ULL(fbit);
+ virtio_features_clear_bit(vdev->features_array, fbit);
}
/**
@@ -197,6 +264,18 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return __virtio_test_bit(vdev, fbit);
}
+static inline void virtio_get_features(struct virtio_device *vdev,
+ u64 *features_out)
+{
+ if (vdev->config->get_extended_features) {
+ vdev->config->get_extended_features(vdev, features_out);
+ return;
+ }
+
+ virtio_features_from_u64(features_out,
+ vdev->config->get_features(vdev));
+}
+
/**
* virtio_has_dma_quirk - determine whether this device has the DMA quirk
* @vdev: the device
@@ -211,38 +290,29 @@ static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
}
static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ struct virtqueue_info vqs_info[],
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, vqs_info, desc);
+}
+
+static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
{
- vq_callback_t *callbacks[] = { c };
- const char *names[] = { n };
+ struct virtqueue_info vqs_info[] = {
+ { n, c },
+ };
struct virtqueue *vq;
- int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
- NULL);
+ int err = virtio_find_vqs(vdev, 1, &vq, vqs_info, NULL);
+
if (err < 0)
return ERR_PTR(err);
return vq;
}
-static inline
-int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[],
- struct irq_affinity *desc)
-{
- return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
-}
-
-static inline
-int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], const bool *ctx,
- struct irq_affinity *desc)
-{
- return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
- desc);
-}
-
/**
* virtio_synchronize_cbs - synchronize with virtqueue callbacks
* @dev: the virtio device
@@ -292,7 +362,7 @@ void virtio_device_ready(struct virtio_device *dev)
* specific set_status() method.
*
* A well behaved device will only notify a virtqueue after
- * DRIVER_OK, this means the device should "see" the coherenct
+ * DRIVER_OK, this means the device should "see" the coherent
* memory write that set vq->broken as false which is done by
* the driver when it sees DRIVER_OK, then the following
* driver's vring_interrupt() will see vq->broken as false so
@@ -314,7 +384,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
* @vq: the virtqueue
* @cpu_mask: the cpu mask
*
- * Pay attention the function are best-effort: the affinity hint may not be set
+ * Note that this function is best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
*
*/
@@ -329,11 +399,11 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
- struct virtio_shm_region *region, u8 id)
+ struct virtio_shm_region *region_out, u8 id)
{
if (!vdev->config->get_shm_region)
return false;
- return vdev->config->get_shm_region(vdev, region, id);
+ return vdev->config->get_shm_region(vdev, region_out, id);
}
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
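
Since find_vqs() now takes struct virtqueue_info, a driver fills one array instead of three parallel callback/name/ctx arrays. A brief sketch (the queue names and callbacks are hypothetical):

	static int demo_setup_vqs(struct virtio_device *vdev,
				  struct virtqueue *vqs[2],
				  vq_callback_t *rx_done, vq_callback_t *tx_done)
	{
		struct virtqueue_info vqs_info[] = {
			{ .name = "rx", .callback = rx_done, .ctx = true },
			{ .name = "tx", .callback = tx_done },
		};

		return virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
	}
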
diff --git a/include/linux/virtio_features.h b/include/linux/virtio_features.h
new file mode 100644
index 000000000000..ea2ad8717882
--- /dev/null
+++ b/include/linux/virtio_features.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_FEATURES_H
+#define _LINUX_VIRTIO_FEATURES_H
+
+#include <linux/bits.h>
+
+#define VIRTIO_FEATURES_U64S 2
+#define VIRTIO_FEATURES_BITS (VIRTIO_FEATURES_U64S * 64)
+
+#define VIRTIO_BIT(b) BIT_ULL((b) & 0x3f)
+#define VIRTIO_U64(b) ((b) >> 6)
+
+#define VIRTIO_DECLARE_FEATURES(name) \
+ union { \
+ u64 name; \
+ u64 name##_array[VIRTIO_FEATURES_U64S];\
+ }
+
+static inline bool virtio_features_chk_bit(unsigned int bit)
+{
+ if (__builtin_constant_p(bit)) {
+		/*
+		 * We don't care about returning the correct value: the build
+		 * will fail before any bad feature access.
+		 */
+ BUILD_BUG_ON(bit >= VIRTIO_FEATURES_BITS);
+ } else {
+ if (WARN_ON_ONCE(bit >= VIRTIO_FEATURES_BITS))
+ return false;
+ }
+ return true;
+}
+
+static inline bool virtio_features_test_bit(const u64 *features,
+ unsigned int bit)
+{
+ return virtio_features_chk_bit(bit) &&
+ !!(features[VIRTIO_U64(bit)] & VIRTIO_BIT(bit));
+}
+
+static inline void virtio_features_set_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_U64(bit)] |= VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_clear_bit(u64 *features,
+ unsigned int bit)
+{
+ if (virtio_features_chk_bit(bit))
+ features[VIRTIO_U64(bit)] &= ~VIRTIO_BIT(bit);
+}
+
+static inline void virtio_features_zero(u64 *features)
+{
+ memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_U64S);
+}
+
+static inline void virtio_features_from_u64(u64 *features, u64 from)
+{
+ virtio_features_zero(features);
+ features[0] = from;
+}
+
+static inline bool virtio_features_equal(const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_U64S; ++i)
+ if (f1[i] != f2[i])
+ return false;
+ return true;
+}
+
+static inline void virtio_features_copy(u64 *to, const u64 *from)
+{
+ memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_U64S);
+}
+
+static inline void virtio_features_andnot(u64 *to, const u64 *f1, const u64 *f2)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_FEATURES_U64S; i++)
+ to[i] = f1[i] & ~f2[i];
+}
+
+#endif /* _LINUX_VIRTIO_FEATURES_H */
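
A short sketch of how these helpers compose; bit 70 is an arbitrary example of a feature beyond the legacy 64-bit range:

	struct demo_dev {
		VIRTIO_DECLARE_FEATURES(features);	/* u64 view + 2-word array */
	};

	static void demo_features(struct demo_dev *d)
	{
		/* Seed from a legacy 64-bit mask; the upper word is zeroed. */
		virtio_features_from_u64(d->features_array, BIT_ULL(40));

		/* Bit 70 lands in word VIRTIO_U64(70) == 1 as BIT_ULL(6). */
		virtio_features_set_bit(d->features_array, 70);

		if (virtio_features_test_bit(d->features_array, 70))
			virtio_features_clear_bit(d->features_array, 70);
	}
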
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 4dfa9b69ca8d..75dabb763c65 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -47,9 +47,9 @@ static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
return 0;
}
-static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
- const struct virtio_net_hdr *hdr,
- bool little_endian)
+static inline int __virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian, u8 hdr_gso_type)
{
unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
@@ -57,8 +57,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
unsigned int p_off = 0;
unsigned int ip_proto;
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (hdr_gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
gso_type = SKB_GSO_TCPV4;
ip_proto = IPPROTO_TCP;
@@ -84,7 +84,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return -EINVAL;
}
- if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ if (hdr_gso_type & VIRTIO_NET_HDR_GSO_ECN)
gso_type |= SKB_GSO_TCP_ECN;
if (hdr->gso_size == 0)
@@ -103,8 +103,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+ if (skb_transport_offset(skb) < nh_min_len)
+ return -EINVAL;
- nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+ nh_min_len = skb_transport_offset(skb);
p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
@@ -120,7 +122,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!protocol)
virtio_net_hdr_set_proto(skb, hdr);
- else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
+ else if (!virtio_net_hdr_match_proto(protocol,
+ hdr_gso_type))
return -EINVAL;
else
skb->protocol = protocol;
@@ -151,7 +154,7 @@ retry:
}
}
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr_gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -171,6 +174,12 @@ retry:
if (gso_type != SKB_GSO_UDP_L4)
return -EINVAL;
break;
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->csum_offset != offsetof(struct tcphdr, check))
+ return -EINVAL;
+ break;
}
/* Kernel has a special handling for GSO_BY_FRAGS. */
@@ -191,6 +200,13 @@ retry:
return 0;
}
+static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
+}
+
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian,
@@ -234,4 +250,183 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
+static inline unsigned int virtio_l3min(bool is_ipv6)
+{
+ return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+}
+
+static inline int
+virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
+ const struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool tnl_csum_negotiated,
+ bool little_endian)
+{
+ const struct virtio_net_hdr *hdr = (const struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th, inner_th;
+ unsigned int inner_l3min, outer_l3min;
+ u8 gso_inner_type, gso_tunnel_type;
+ bool outer_isv6, inner_isv6;
+ int ret;
+
+ gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
+ if (!gso_tunnel_type)
+ return virtio_net_hdr_to_skb(skb, hdr, little_endian);
+
+ /* Tunnel not supported/negotiated, but the hdr asks for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ /* Either ipv4 or ipv6. */
+ if (gso_tunnel_type == VIRTIO_NET_HDR_GSO_UDP_TUNNEL)
+ return -EINVAL;
+
+	/* The UDP tunnel must carry a GSO packet, but not UFO. */
+ gso_inner_type = hdr->gso_type & ~(VIRTIO_NET_HDR_GSO_ECN |
+ VIRTIO_NET_HDR_GSO_UDP_TUNNEL);
+ if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP)
+ return -EINVAL;
+
+ /* Rely on csum being present. */
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+
+ /* Validate offsets. */
+ outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ inner_isv6 = gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);
+
+ inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ inner_nh = le16_to_cpu(vhdr->inner_nh_offset);
+ outer_th = le16_to_cpu(vhdr->outer_th_offset);
+ if (outer_th < outer_l3min ||
+ inner_nh < outer_th + sizeof(struct udphdr) ||
+ inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ ret = __virtio_net_hdr_to_skb(skb, hdr, true,
+ hdr->gso_type & ~gso_tunnel_type);
+ if (ret)
+ return ret;
+
+ /* In case of USO, the inner protocol is still unknown and
+	 * `inner_isv6` is just a guess; additional parsing is needed.
+ * The previous validation ensures that accessing an ipv4 inner
+ * network header is safe.
+ */
+ if (gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4) {
+ struct iphdr *iphdr = (struct iphdr *)(skb->data + inner_nh);
+
+ inner_isv6 = iphdr->version == 6;
+ inner_l3min = virtio_l3min(inner_isv6);
+ if (inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+ }
+
+ skb_set_inner_protocol(skb, inner_isv6 ? htons(ETH_P_IPV6) :
+ htons(ETH_P_IP));
+ if (hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM) {
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ } else {
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+
+ skb->inner_transport_header = inner_th + skb_headroom(skb);
+ skb->inner_network_header = inner_nh + skb_headroom(skb);
+ skb->inner_mac_header = inner_nh + skb_headroom(skb);
+ skb->transport_header = outer_th + skb_headroom(skb);
+ skb->encapsulation = 1;
+ return 0;
+}
+
+/* Checksum-related fields validation for the driver */
+static inline int virtio_net_handle_csum_offload(struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool tnl_csum_negotiated)
+{
+ if (!(hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL)) {
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID))
+ return 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM))
+ return 0;
+
+ /* tunnel csum packets are invalid when the related
+ * feature has not been negotiated
+ */
+ if (!tnl_csum_negotiated)
+ return -EINVAL;
+ skb->csum_level = 1;
+ return 0;
+ }
+
+ /* DATA_VALID is mutually exclusive with NEEDS_CSUM, and GSO
+ * over UDP tunnel requires the latter
+ */
+ if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * vlan_hlen always refers to the outermost MAC header. That also
+ * means it refers to the only MAC header, if the packet does not carry
+ * any encapsulation.
+ */
+static inline int
+virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr_v1_hash_tunnel *vhdr,
+ bool tnl_hdr_negotiated,
+ bool little_endian,
+ int vlan_hlen,
+ bool has_data_valid)
+{
+ struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
+ unsigned int inner_nh, outer_th;
+ int tnl_gso_type;
+ int ret;
+
+ tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM);
+ if (!tnl_gso_type)
+ return virtio_net_hdr_from_skb(skb, hdr, little_endian,
+ has_data_valid, vlan_hlen);
+
+	/* Tunnel support not negotiated, but the skb asks for it. */
+ if (!tnl_hdr_negotiated)
+ return -EINVAL;
+
+ vhdr->hash_hdr.hash_value_lo = 0;
+ vhdr->hash_hdr.hash_value_hi = 0;
+ vhdr->hash_hdr.hash_report = 0;
+ vhdr->hash_hdr.padding = 0;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
+ ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
+ skb_shinfo(skb)->gso_type |= tnl_gso_type;
+ if (ret)
+ return ret;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ else
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ hdr->flags |= VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM;
+
+ inner_nh = skb->inner_network_header - skb_headroom(skb);
+ outer_th = skb->transport_header - skb_headroom(skb);
+ vhdr->inner_nh_offset = cpu_to_le16(inner_nh);
+ vhdr->outer_th_offset = cpu_to_le16(outer_th);
+ return 0;
+}
+
#endif /* _LINUX_VIRTIO_NET_H */
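
The offset checks in virtio_net_hdr_tnl_to_skb() encode the expected UDP-tunnel layout: outer MAC plus IP, then the outer UDP header, then the inner network and transport headers. A standalone restatement of those minima, mirroring the validation above rather than extending it:

	static bool demo_tnl_offsets_ok(u16 outer_th, u16 inner_nh, u16 inner_th,
					bool outer_isv6, bool inner_isv6)
	{
		unsigned int outer_l3min = ETH_HLEN + virtio_l3min(outer_isv6);
		unsigned int inner_l3min = virtio_l3min(inner_isv6);

		return outer_th >= outer_l3min &&		/* outer MAC + IP */
		       inner_nh >= outer_th + sizeof(struct udphdr) && /* outer UDP */
		       inner_th >= inner_nh + inner_l3min;	/* inner L3 header */
	}
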
diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h
index f4a100a0fe2e..dffc92c17ad2 100644
--- a/include/linux/virtio_pci_admin.h
+++ b/include/linux/virtio_pci_admin.h
@@ -20,4 +20,15 @@ int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
u64 *bar_offset);
#endif
+bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev);
+int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 mode);
+int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
+ u32 *obj_id);
+int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id);
+int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
+ u32 id, u8 metadata_type, u32 *out);
+int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
+ u8 get_type, struct scatterlist *res_sg, u32 *res_size);
+int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg);
+
#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */
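
The new declarations suggest a create/get/destroy lifecycle for device-parts objects, as used by migration-oriented drivers. A hedged sketch of that sequence; the zero operation_type and get_type arguments are placeholders, not documented constants:

	static int demo_dev_parts_read(struct pci_dev *pdev, u16 obj_type,
				       struct scatterlist *res_sg, u32 *res_size)
	{
		u32 obj_id;
		int ret;

		if (!virtio_pci_admin_has_dev_parts(pdev))
			return -EOPNOTSUPP;

		ret = virtio_pci_admin_obj_create(pdev, obj_type, 0, &obj_id);
		if (ret)
			return ret;

		ret = virtio_pci_admin_dev_parts_get(pdev, obj_type, obj_id, 0,
						     res_sg, res_size);

		virtio_pci_admin_obj_destroy(pdev, obj_type, obj_id);
		return ret;
	}
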
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index c0b1b1ca1163..9a3f2fc53bd6 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -3,6 +3,7 @@
#define _LINUX_VIRTIO_PCI_MODERN_H
#include <linux/pci.h>
+#include <linux/virtio_config.h>
#include <linux/virtio_pci.h>
/**
@@ -95,10 +96,44 @@ static inline void vp_iowrite64_twopart(u64 val,
vp_iowrite32(val >> 32, hi);
}
-u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
-u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
-void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
- u64 features);
+void
+vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev,
+ u64 *features);
+void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev,
+ const u64 *features);
+
+static inline u64
+vp_modern_get_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_U64S];
+
+ vp_modern_get_extended_features(mdev, features_array);
+ return features_array[0];
+}
+
+static inline u64
+vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
+{
+ u64 features_array[VIRTIO_FEATURES_U64S];
+ int i;
+
+ vp_modern_get_driver_extended_features(mdev, features_array);
+ for (i = 1; i < VIRTIO_FEATURES_U64S; ++i)
+ WARN_ON_ONCE(features_array[i]);
+ return features_array[0];
+}
+
+static inline void
+vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features)
+{
+ u64 features_array[VIRTIO_FEATURES_U64S];
+
+ virtio_features_from_u64(features_array, features);
+ vp_modern_set_extended_features(mdev, features_array);
+}
+
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
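
The legacy u64 entry points survive as the inline shims above, so existing transports keep working unchanged while new code can use the array forms directly. A sketch of negotiation through the extended path (the accepted feature bit is illustrative):

	static void demo_negotiate(struct virtio_pci_modern_device *mdev)
	{
		u64 device_features[VIRTIO_FEATURES_U64S];
		u64 driver_features[VIRTIO_FEATURES_U64S];

		vp_modern_get_extended_features(mdev, device_features);

		/* Accept only bits we understand; just word 0 for brevity. */
		virtio_features_zero(driver_features);
		driver_features[0] = device_features[0] &
				     BIT_ULL(VIRTIO_F_VERSION_1);

		vp_modern_set_extended_features(mdev, driver_features);
	}
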
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 9b33df741b63..c97a12c1cda3 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -3,6 +3,7 @@
#define _LINUX_VIRTIO_RING_H
#include <asm/barrier.h>
+#include <linux/virtio.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>
@@ -79,9 +80,9 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
/*
* Creates a virtqueue and allocates the descriptor ring with per
- * virtqueue DMA device.
+ * virtqueue mapping operations.
*/
-struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
+struct virtqueue *vring_create_virtqueue_map(unsigned int index,
unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -91,7 +92,7 @@ struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name,
- struct device *dma_dev);
+ union virtio_map map);
/*
* Creates a virtqueue with a standard layout but a caller-allocated
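
vring_create_virtqueue_map() keeps the shape of the old vring_create_virtqueue_dma(), with the DMA device generalized into union virtio_map. A hedged call sketch; the three boolean arguments are assumed to be the usual weak_barriers/may_reduce_num/context triple of vring_create_virtqueue():

	static struct virtqueue *demo_create_vq(struct virtio_device *vdev,
						union virtio_map map,
						bool (*notify)(struct virtqueue *),
						void (*cb)(struct virtqueue *))
	{
		return vring_create_virtqueue_map(0, 256, SMP_CACHE_BYTES, vdev,
						  true, true, false,
						  notify, cb, "demo-vq", map);
	}
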
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c82089dee0c8..0c67543a45c8 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -47,31 +47,50 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}
-static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
+static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len)
{
- u32 len;
+ DEBUG_NET_WARN_ON_ONCE(skb->len);
- len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
-
- if (len > 0)
+ if (skb_is_nonlinear(skb))
+ skb->len = len;
+ else
skb_put(skb, len);
}
-static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+static inline struct sk_buff *
+__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
+ unsigned int data_len,
+ gfp_t mask)
{
struct sk_buff *skb;
+ int err;
- if (size < VIRTIO_VSOCK_SKB_HEADROOM)
- return NULL;
-
- skb = alloc_skb(size, mask);
+ skb = alloc_skb_with_frags(header_len, data_len,
+ PAGE_ALLOC_COSTLY_ORDER, &err, mask);
if (!skb)
return NULL;
skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+ skb->data_len = data_len;
return skb;
}
+static inline struct sk_buff *
+virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
+{
+ return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+ return virtio_vsock_alloc_linear_skb(size, mask);
+
+ size -= VIRTIO_VSOCK_SKB_HEADROOM;
+ return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
+ size, mask);
+}
+
static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
@@ -111,7 +130,12 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
return (size_t)(skb_end_pointer(skb) - skb->head);
}
-#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
+/* Dimension the RX SKB so that the entire thing fits exactly into
+ * a single 4KiB page. This avoids wasting memory due to alloc_skb()
+ * rounding up to the next page order and also means that we
+ * don't leave higher-order pages sitting around in the RX queue.
+ */
+#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -133,12 +157,14 @@ struct virtio_vsock_sock {
u32 tx_cnt;
u32 peer_fwd_cnt;
u32 peer_buf_alloc;
+ size_t bytes_unsent;
/* Protected by rx_lock */
u32 fwd_cnt;
u32 last_fwd_cnt;
u32 rx_bytes;
u32 buf_alloc;
+ u32 buf_used;
struct sk_buff_head rx_queue;
u32 msg_count;
};
@@ -193,6 +219,11 @@ s64 virtio_transport_stream_has_data(struct vsock_sock *vsk);
s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk);
+ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk);
+
+void virtio_transport_consume_skb_sent(struct sk_buff *skb,
+ bool consume);
+
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
int
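
virtio_vsock_alloc_skb() now branches on SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER): requests that fit stay linear, larger payloads go into page frags with only the headroom kept linear. An illustrative sketch of the two paths:

	static void demo_alloc_bufs(void)
	{
		/* Default RX buffer: sized to fit a single 4KiB page, linear. */
		struct sk_buff *rx = virtio_vsock_alloc_linear_skb(
				VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE, GFP_KERNEL);

		/* A 64KiB packet exceeds the costly-order limit: page frags. */
		struct sk_buff *tx = virtio_vsock_alloc_skb(
				VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, GFP_KERNEL);

		kfree_skb(rx);
		kfree_skb(tx);
	}
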
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 747943bc8cc2..92f80b4d69a6 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -2,6 +2,8 @@
#ifndef VM_EVENT_ITEM_H_INCLUDED
#define VM_EVENT_ITEM_H_INCLUDED
+#include <linux/thread_info.h>
+
#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
@@ -41,15 +43,18 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
+ PGSTEAL_PROACTIVE,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
+ PGSCAN_PROACTIVE,
PGSCAN_DIRECT_THROTTLE,
PGSCAN_ANON,
PGSCAN_FILE,
PGSTEAL_ANON,
PGSTEAL_FILE,
#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_SUCCESS,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
@@ -104,6 +109,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
+ THP_UNDERUSED_SPLIT_PAGE,
THP_SPLIT_PMD,
THP_SCAN_EXCEED_NONE_PTE,
THP_SCAN_EXCEED_SWAP_PTE,
@@ -132,6 +138,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
+ SWPIN_ZERO,
+ SWPOUT_ZERO,
#ifdef CONFIG_KSM
KSM_SWPIN_COPY,
#endif
@@ -147,6 +155,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_X86
DIRECT_MAP_LEVEL2_SPLIT,
DIRECT_MAP_LEVEL3_SPLIT,
+ DIRECT_MAP_LEVEL2_COLLAPSE,
+ DIRECT_MAP_LEVEL3_COLLAPSE,
#endif
#ifdef CONFIG_PER_VMA_LOCK_STATS
VMA_LOCK_SUCCESS,
@@ -154,6 +164,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMA_LOCK_RETRY,
VMA_LOCK_MISS,
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ KSTACK_1K,
+#if THREAD_SIZE > 1024
+ KSTACK_2K,
+#endif
+#if THREAD_SIZE > 2048
+ KSTACK_4K,
+#endif
+#if THREAD_SIZE > 4096
+ KSTACK_8K,
+#endif
+#if THREAD_SIZE > 8192
+ KSTACK_16K,
+#endif
+#if THREAD_SIZE > 16384
+ KSTACK_32K,
+#endif
+#if THREAD_SIZE > 32768
+ KSTACK_64K,
+#endif
+#if THREAD_SIZE > 65536
+ KSTACK_REST,
+#endif
+#endif /* CONFIG_DEBUG_STACK_USAGE */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e4a631ec430b..e8e94f90d686 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -50,7 +50,11 @@ struct iov_iter; /* in uio.h */
#endif
struct vm_struct {
- struct vm_struct *next;
+ union {
+ struct vm_struct *next; /* Early registration of vm_areas. */
+ struct llist_node llnode; /* Asynchronous freeing on error paths. */
+ };
+
void *addr;
unsigned long size;
unsigned long flags;
@@ -61,6 +65,7 @@ struct vm_struct {
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
+ unsigned long requested_size;
};
struct vmap_area {
@@ -113,6 +118,14 @@ static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, uns
}
#endif
+#ifndef arch_vmap_pte_range_unmap_size
+static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
+ pte_t *ptep)
+{
+ return PAGE_SIZE;
+}
+#endif
+
#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
@@ -134,12 +147,6 @@ extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
-#ifdef CONFIG_MMU
-extern unsigned long vmalloc_nr_pages(void);
-#else
-static inline unsigned long vmalloc_nr_pages(void) { return 0; }
-#endif
-
extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__))
@@ -174,8 +181,13 @@ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_m
int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
-void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
-#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
+void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
+#define vmalloc_huge_node(...) alloc_hooks(vmalloc_huge_node_noprof(__VA_ARGS__))
+
+static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
+{
+ return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
+}
extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
@@ -189,6 +201,16 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
+void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
+ unsigned long align, gfp_t flags, int nid) __realloc_size(2);
+#define vrealloc_node_noprof(_p, _s, _f, _nid) \
+ vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
+#define vrealloc_noprof(_p, _s, _f) \
+ vrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
+#define vrealloc_node_align(...) alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
+#define vrealloc_node(...) alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
@@ -204,21 +226,8 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
-/*
- * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
- * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
- * needs to be called.
- */
-#ifndef ARCH_PAGE_TABLE_SYNC_MASK
-#define ARCH_PAGE_TABLE_SYNC_MASK 0
-#endif
-
-/*
- * There is no default implementation for arch_sync_kernel_mappings(). It is
- * relied upon the compiler to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK
- * is 0.
- */
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
+int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot,
+ struct page **pages, unsigned int page_shift);
/*
* Lowlevel-APIs (not for driver use!)
@@ -262,12 +271,29 @@ static inline bool is_vm_area_hugepages(const void *addr)
#endif
}
+/* for /proc/kcore */
+long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
+
+/*
+ * Internals. Don't use..
+ */
+__init void vm_area_add_early(struct vm_struct *vm);
+__init void vm_area_register_early(struct vm_struct *vm, size_t align);
+
+int register_vmap_purge_notifier(struct notifier_block *nb);
+int unregister_vmap_purge_notifier(struct notifier_block *nb);
+
#ifdef CONFIG_MMU
+#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+
+unsigned long vmalloc_nr_pages(void);
+
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
+
static inline void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);
@@ -275,24 +301,14 @@ static inline void set_vm_flush_reset_perms(void *addr)
if (vm)
vm->flags |= VM_FLUSH_RESET_PERMS;
}
+#else /* !CONFIG_MMU */
+#define VMALLOC_TOTAL 0UL
-#else
-static inline void set_vm_flush_reset_perms(void *addr)
-{
-}
-#endif
-
-/* for /proc/kcore */
-extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
-
-/*
- * Internals. Don't use..
- */
-extern __init void vm_area_add_early(struct vm_struct *vm);
-extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+static inline unsigned long vmalloc_nr_pages(void) { return 0; }
+static inline void set_vm_flush_reset_perms(void *addr) {}
+#endif /* CONFIG_MMU */
-#ifdef CONFIG_SMP
-# ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && defined(CONFIG_SMP)
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align);
@@ -307,26 +323,15 @@ pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
-static inline void
-pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
-{
-}
-# endif
+static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {}
#endif
-#ifdef CONFIG_MMU
-#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-#else
-#define VMALLOC_TOTAL 0UL
-#endif
-
-int register_vmap_purge_notifier(struct notifier_block *nb);
-int unregister_vmap_purge_notifier(struct notifier_block *nb);
-
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif
+unsigned int memalloc_apply_gfp_scope(gfp_t gfp_mask);
+void memalloc_restore_scope(unsigned int flags);
#endif /* _LINUX_VMALLOC_H */
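
The vrealloc_* additions follow the file's alloc_hooks() pattern: one _noprof worker, with macros defaulting the alignment to 1 and the node to NUMA_NO_NODE. A usage sketch, assuming krealloc()-like failure semantics (the old buffer is left intact when vrealloc() returns NULL):

	static void *demo_grow(void *buf, size_t new_size)
	{
		void *p = vrealloc(buf, new_size, GFP_KERNEL);

		if (!p)
			vfree(buf);	/* caller still owns the old buffer */
		return p;
	}
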
diff --git a/include/linux/vmcore_info.h b/include/linux/vmcore_info.h
index e1dec1a6a749..e71518caacdf 100644
--- a/include/linux/vmcore_info.h
+++ b/include/linux/vmcore_info.h
@@ -5,10 +5,10 @@
#include <linux/linkage.h>
#include <linux/elfcore.h>
#include <linux/elf.h>
+#include <uapi/linux/vmcore.h>
-#define CRASH_CORE_NOTE_NAME "CORE"
#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(NN_PRSTATUS), 4)
#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
/*
@@ -78,4 +78,11 @@ extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
+
+#ifdef CONFIG_VMCORE_INFO
+void hwerr_log_error_type(enum hwerr_error_type src);
+#else
+static inline void hwerr_log_error_type(enum hwerr_error_type src) {};
+#endif
+
#endif /* LINUX_VMCORE_INFO_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 735eae6e272c..3398a345bda8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -10,15 +10,8 @@
#include <linux/static_key.h>
#include <linux/mmdebug.h>
-extern int sysctl_stat_interval;
-
#ifdef CONFIG_NUMA
-#define ENABLE_NUMA_STAT 1
-#define DISABLE_NUMA_STAT 0
-extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
-int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
@@ -32,12 +25,16 @@ struct reclaim_stat {
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
unsigned nr_lazyfree_fail;
+ unsigned nr_demoted;
};
-enum writeback_stat_item {
+/* Stat data for system wide items */
+enum vm_stat_item {
NR_DIRTY_THRESHOLD,
NR_DIRTY_BG_THRESHOLD,
- NR_VM_WRITEBACK_STAT_ITEMS,
+ NR_MEMMAP_PAGES, /* page metadata allocated through buddy allocator */
+ NR_MEMMAP_BOOT_PAGES, /* page metadata allocated through boot allocator */
+ NR_VM_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -300,10 +297,6 @@ void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
-struct ctl_table;
-int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
- loff_t *ppos);
-
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
int calculate_pressure_threshold(struct zone *zone);
@@ -514,53 +507,25 @@ static inline const char *lru_list_name(enum lru_list lru)
return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
-static inline const char *writeback_stat_name(enum writeback_stat_item item)
-{
- return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
- NR_VM_NUMA_EVENT_ITEMS +
- NR_VM_NODE_STAT_ITEMS +
- item];
-}
-
-#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
+#if defined(CONFIG_VM_EVENT_COUNTERS)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
NR_VM_NUMA_EVENT_ITEMS +
NR_VM_NODE_STAT_ITEMS +
- NR_VM_WRITEBACK_STAT_ITEMS +
+ NR_VM_STAT_ITEMS +
item];
}
-#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_MEMCG
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
-static inline void mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_lruvec_state(lruvec, idx, val);
- local_irq_restore(flags);
-}
-
-void __lruvec_stat_mod_folio(struct folio *folio,
+void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __lruvec_stat_mod_folio(folio, idx, val);
- local_irq_restore(flags);
-}
-
static inline void mod_lruvec_page_state(struct page *page,
enum node_stat_item idx, int val)
{
@@ -569,24 +534,12 @@ static inline void mod_lruvec_page_state(struct page *page,
#else
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
-}
-
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(folio_pgdat(folio), idx, val);
-}
-
static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
@@ -601,18 +554,6 @@ static inline void mod_lruvec_page_state(struct page *page,
#endif /* CONFIG_MEMCG */
-static inline void __lruvec_stat_add_folio(struct folio *folio,
- enum node_stat_item idx)
-{
- __lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
-}
-
-static inline void __lruvec_stat_sub_folio(struct folio *folio,
- enum node_stat_item idx)
-{
- __lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
@@ -624,4 +565,7 @@ static inline void lruvec_stat_sub_folio(struct folio *folio,
{
lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
+
+void memmap_boot_pages_add(long delta);
+void memmap_pages_add(long delta);
#endif /* _LINUX_VMSTAT_H */
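
memmap_pages_add() and memmap_boot_pages_add() feed the two new vm_stat_item counters introduced above. A trivial hedged sketch of the split (the helper and its flag are hypothetical):

	/* Account page-metadata pages against the matching counter. */
	static void demo_account_memmap(long nr_pages, bool from_bootmem)
	{
		if (from_bootmem)
			memmap_boot_pages_add(nr_pages);  /* NR_MEMMAP_BOOT_PAGES */
		else
			memmap_pages_add(nr_pages);       /* NR_MEMMAP_PAGES */
	}
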
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index f28907345c80..41764a684423 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -35,7 +35,6 @@ int vmci_doorbell_create(struct vmci_handle *handle, u32 flags,
u32 priv_flags,
vmci_callback notify_cb, void *client_data);
int vmci_doorbell_destroy(struct vmci_handle handle);
-int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
u32 vmci_get_context_id(void);
bool vmci_is_context_owner(u32 context_id, kuid_t uid);
int vmci_register_vsock_callback(vmci_vsock_cb callback);
@@ -61,12 +60,6 @@ s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair);
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair);
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair);
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair);
-ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
- const void *buf, size_t buf_size, int mode);
-ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
- void *buf, size_t buf_size, int mode);
-ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
- int mode);
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
struct msghdr *msg, size_t iov_size, int mode);
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 6fb663b36f72..60c9eacd2cf3 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -431,11 +431,11 @@ enum {
((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
/*
- * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
- * we start at sequence 9f. This gives us the same values that our shipping
- * products use, starting at 1951, provided we leave out the direction and
- * structure size. Note that VMMon occupies the block following us, starting
- * at 2001.
+ * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.rst,
+ * and we start at sequence 9f. This gives us the same values that our
+ * shipping products use, starting at 1951, provided we leave out the
+ * direction and structure size. Note that VMMon occupies the block
+ * following us, starting at 2001.
*/
#define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
@@ -453,9 +453,7 @@ enum {
#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
-#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
-#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
-#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+/*IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)*/
#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */
/*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index c3a8117dabe8..49e7cbc9697a 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -175,9 +175,6 @@ int vringh_complete_multi_user(struct vringh *vrh,
const struct vring_used_elem used[],
unsigned num_used);
-/* Pretend we've never seen descriptor (for easy error handling). */
-void vringh_abandon_user(struct vringh *vrh, unsigned int num);
-
/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);
@@ -235,10 +232,6 @@ int vringh_getdesc_kern(struct vringh *vrh,
u16 *head,
gfp_t gfp);
-ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
-ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
- const void *src, size_t len);
-void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);
bool vringh_notify_enable_kern(struct vringh *vrh);
@@ -319,13 +312,8 @@ ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
struct vringh_kiov *wiov,
const void *src, size_t len);
-void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);
-
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);
-bool vringh_notify_enable_iotlb(struct vringh *vrh);
-void vringh_notify_disable_iotlb(struct vringh *vrh);
-
int vringh_need_notify_iotlb(struct vringh *vrh);
#endif /* CONFIG_VHOST_IOTLB */
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index 919d999a8c1d..b6eeb8cb6070 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -28,45 +28,21 @@
#ifndef VT_BUF_HAVE_MEMSETW
static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- count /= 2;
- while (count--)
- scr_writew(c, s++);
-#else
memset16(s, c, count / 2);
-#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMCPYW
static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- count /= 2;
- while (count--)
- scr_writew(scr_readw(s++), d++);
-#else
memcpy(d, s, count);
-#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMMOVEW
static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
{
-#ifdef VT_BUF_HAVE_RW
- if (d < s)
- scr_memcpyw(d, s, count);
- else {
- count /= 2;
- d += count;
- s += count;
- while (count--)
- scr_writew(scr_readw(--s), --d);
- }
-#else
memmove(d, s, count);
-#endif
}
#endif
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 9a2a0ef39018..064805bfae3f 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -85,7 +85,8 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
*
* @data: the first parameter in all the functions below
*
- * @read_bit: Sample the line level @return the level read (0 or 1)
+ * @read_bit: Sample the line level
+ * @return the level read (0 or 1)
*
* @write_bit: Sets the line level
*
@@ -95,7 +96,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
* touch_bit(1) = write-1 / read cycle
* @return the bit read (0 or 1)
*
- * @read_byte: Reads a bytes. Same as 8 touch_bit(1) calls.
+ * @read_byte: Reads a byte. Same as 8 touch_bit(1) calls.
* @return the byte read
*
* @write_byte: Writes a byte. Same as 8 touch_bit(x) calls.
@@ -114,7 +115,7 @@ typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
* @set_pullup: Put out a strong pull-up pulse of the specified duration.
* @return -1=Error, 0=completed
*
- * @search: Really nice hardware can handles the different types of ROM search
+ * @search: Really nice hardware can handle the different types of ROM search
* w1_master* is passed to the slave found callback.
* u8 is search_type, W1_SEARCH or W1_ALARM_SEARCH
*
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 8aa3372f21a0..f648044466d5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -164,6 +164,8 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head,
+ struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
@@ -221,6 +223,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
+#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
@@ -315,6 +318,9 @@ extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
} \
\
cmd; \
+ \
+ if (condition) \
+ break; \
} \
finish_wait(&wq_head, &__wq_entry); \
__out: __ret; \
@@ -541,8 +547,8 @@ do { \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
- hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
- HRTIMER_MODE_REL); \
+ hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
+ HRTIMER_MODE_REL); \
if ((timeout) != KTIME_MAX) { \
hrtimer_set_expires_range_ns(&__t.timer, timeout, \
current->timer_slack_ns); \
@@ -959,6 +965,18 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_state_exclusive(wq, condition, state) \
+ ___wait_event(wq, condition, state, 1, 0, schedule())
+
+#define wait_event_state_exclusive(wq, condition, state) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_state_exclusive(wq, condition, state); \
+ __ret; \
+})
+
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
@@ -1206,14 +1224,16 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
-#define init_wait(wait) \
+#define init_wait_func(wait, function) \
do { \
(wait)->private = current; \
- (wait)->func = autoremove_wake_function; \
+ (wait)->func = function; \
INIT_LIST_HEAD(&(wait)->entry); \
(wait)->flags = 0; \
} while (0)
+#define init_wait(wait) init_wait_func(wait, autoremove_wake_function)
+
typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
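
wait_event_state_exclusive() pairs wait_event_state() semantics with an exclusive waiter, so one wake_up() releases a single sleeper; init_wait_func() likewise generalizes init_wait() to arbitrary wake functions. A hedged sketch of an exclusive interruptible wait (the condition is illustrative):

	static int demo_wait_ready(struct wait_queue_head *wq, bool *ready)
	{
		/* Exclusive waiter: each wake_up() releases one sleeper. */
		return wait_event_state_exclusive(*wq, READ_ONCE(*ready),
						  TASK_INTERRUPTIBLE);
	}
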
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7725b7579b78..9e29d79fc790 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -8,7 +8,7 @@
#include <linux/wait.h>
struct wait_bit_key {
- void *flags;
+ unsigned long *flags;
int bit_nr;
unsigned long timeout;
};
@@ -23,14 +23,14 @@ struct wait_bit_queue_entry {
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
-void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
+void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
-void wake_up_bit(void *word, int bit);
-int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
-int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
-int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
-struct wait_queue_head *bit_waitqueue(void *word, int bit);
+void wake_up_bit(unsigned long *word, int bit);
+int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
+int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
+int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode);
+struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit);
extern void __init wait_bit_init(void);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
@@ -49,23 +49,24 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
extern int bit_wait(struct wait_bit_key *key, int mode);
extern int bit_wait_io(struct wait_bit_key *key, int mode);
extern int bit_wait_timeout(struct wait_bit_key *key, int mode);
-extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode);
/**
* wait_on_bit - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
*
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit.
- * For instance, if one were to have waiters on a bitflag, one would
- * call wait_on_bit() in threads waiting for the bit to clear.
- * One uses wait_on_bit() where one is waiting for the bit to clear,
- * but has no intention of setting it.
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the target bit, even
+ * if other processes on the same queue are waiting for other bits.
+ *
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
@@ -80,17 +81,20 @@ wait_on_bit(unsigned long *word, int bit, unsigned mode)
/**
* wait_on_bit_io - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), but calls
- * io_schedule() instead of schedule() for the actual waiting.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
*
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * This is similar to wait_on_bit(), but calls io_schedule() instead of
+ * schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
@@ -104,19 +108,24 @@ wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
}
/**
- * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @mode: the task state to sleep in
* @timeout: timeout, in jiffies
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), except also takes a
- * timeout parameter.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The
+ * clearing of the bit must be signalled with wake_up_bit(), often as
+ * clear_and_wake_up_bit().
+ *
+ * This is similar to wait_on_bit(), except it also takes a timeout
+ * parameter.
*
- * Returned value will be zero if the bit was cleared before the
- * @timeout elapsed, or non-zero if the @timeout elapsed or process
- * received a signal and the mode permitted wakeup on that signal.
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or %-EINTR if the process received a
+ * signal and the mode permitted wake up on that signal, or %-EAGAIN if the
+ * timeout elapsed.
*/
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
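
Building on the hypothetical my_obj above, a sketch of handling the three
documented return values:

	static int my_wait_idle_timeout(struct my_obj *obj)
	{
		int err;

		err = wait_on_bit_timeout(&obj->flags, MY_FLAG_BUSY,
					  TASK_INTERRUPTIBLE, HZ);
		switch (err) {
		case 0:			/* bit cleared; ACQUIRE semantics */
			return 0;
		case -EAGAIN:		/* the one-second timeout elapsed */
			return -ETIMEDOUT;
		default:		/* -EINTR: a signal was received */
			return err;
		}
	}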
@@ -132,19 +141,21 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
/**
* wait_on_bit_action - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * @word: the address containing the bit being waited on
+ * @bit: the bit at that address being waited on
* @action: the function used to sleep, which may take special actions
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared, and allow the waiting action to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
+ * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP())
+ * to be cleared. The clearing of the bit must be signalled with
+ * wake_up_bit(), often as clear_and_wake_up_bit().
+ *
+ * This is similar to wait_on_bit(), but calls @action() instead of
+ * schedule() for the actual waiting.
*
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
+ * Returned value will be zero if the bit was cleared in which case the
+ * call has ACQUIRE semantics, or the error code returned by @action if
+ * that call returned non-zero.
*/
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
@@ -157,23 +168,22 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
}
/**
- * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @mode: the task state to sleep in
*
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit
- * when one intends to set it, for instance, trying to lock bitflags.
- * For instance, if one were to have waiters trying to set bitflag
- * and waiting for it to clear before setting it, one would call
- * wait_on_bit() in threads waiting to be able to set the bit.
- * One uses wait_on_bit_lock() where one is waiting for the bit to
- * clear with the intention of setting it, and when done, clearing it.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
+ * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
+ * soon as it is clear, atomically set it and return.
*
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
+ * This is similar to wait_on_bit(), but sets the bit before returning.
+ *
+ * Returned value will be zero if the bit was successfully set in which
+ * case the call has the same memory sequencing semantics as
+ * test_and_clear_bit(), or %-EINTR if the process received a signal and
+ * the mode permitted wake up on that signal.
*/
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
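
The classic use is a sleeping bit lock; a sketch with the hypothetical
names from above, pairing wait_on_bit_lock() with clear_and_wake_up_bit():

	static void my_obj_lock(struct my_obj *obj)
	{
		/* TASK_UNINTERRUPTIBLE cannot be interrupted, so this returns 0. */
		wait_on_bit_lock(&obj->flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
	}

	static void my_obj_unlock(struct my_obj *obj)
	{
		/* Clear with RELEASE semantics, then wake the next waiter. */
		clear_and_wake_up_bit(MY_FLAG_BUSY, &obj->flags);
	}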
@@ -185,15 +195,18 @@ wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
}
/**
- * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock_io - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to atomically set it. This is similar
- * to wait_on_bit(), but calls io_schedule() instead of schedule()
- * for the actual waiting.
+ * Wait for the given bit in an unsigned long or bitmap (see
+ * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be
+ * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As
+ * soon as it is clear, atomically set it and return.
+ *
+ * This is similar to wait_on_bit_lock(), but calls io_schedule() instead
+ * of schedule().
*
* Returns zero if the bit was (eventually) found to be clear and was
* set. Returns non-zero if a signal was delivered to the process and
@@ -209,21 +222,19 @@ wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
}
/**
- * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
+ * wait_on_bit_lock_action - wait for a bit to be cleared, then set it
+ * @word: the address containing the bit being waited on
+ * @bit: the bit of the word being waited on and set
* @action: the function used to sleep, which may take special actions
* @mode: the task state to sleep in
*
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to set it, and allow the waiting action
- * to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
+ * This is similar to wait_on_bit_lock(), but calls @action() instead of
+ * schedule() for the actual waiting.
*
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
+ * Returned value will be zero if the bit was successfully set in which
+ * case the call has the same memory sequencing semantics as
+ * test_and_clear_bit(), or the error code returned by @action if that
+ * call returned non-zero.
*/
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
@@ -269,7 +280,26 @@ __out: __ret; \
#define __wait_var_event(var, condition) \
___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
schedule())
+#define __wait_var_event_io(var, condition) \
+ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ io_schedule())
+/**
+ * wait_var_event - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true, only re-checking when a wake up is
+ * received for the given @var (an arbitrary kernel address which need
+ * not be directly related to the given condition, but usually is).
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the given address.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event(var, condition) \
do { \
might_sleep(); \
@@ -278,10 +308,56 @@ do { \
__wait_var_event(var, condition); \
} while (0)
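
A waiter sketch; struct my_work and its result pointer are invented, and
the matching waker is sketched with store_release_wake_up() further down:

	struct my_work {
		void *result;	/* written by a worker, then woken */
	};

	static void my_wait_for_result(struct my_work *w)
	{
		/*
		 * The wake up is sent for the address &w->result; the
		 * smp_load_acquire() in the condition pairs with the waker's
		 * release store, so earlier writes are visible on return.
		 */
		wait_var_event(&w->result, smp_load_acquire(&w->result) != NULL);
	}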
+/**
+ * wait_var_event_io - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for an IO related @condition to be true, only re-checking when a
+ * wake up is received for the given @var (an arbitrary kernel address
+ * which need not be directly related to the given condition, but
+ * usually is).
+ *
+ * The process will wait on a waitqueue selected by hash from a shared
+ * pool. It will only be woken on a wake_up for the given address.
+ *
+ * This is similar to wait_var_event(), but calls io_schedule() instead
+ * of schedule().
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
+#define wait_var_event_io(var, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __wait_var_event_io(var, condition); \
+} while (0)
+
#define __wait_var_event_killable(var, condition) \
___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
schedule())
+/**
+ * wait_var_event_killable - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true or a fatal signal to be received,
+ * only re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is
+ * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal
+ * was received.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_killable(var, condition) \
({ \
int __ret = 0; \
@@ -296,6 +372,26 @@ do { \
TASK_UNINTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
+/**
+ * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ * @timeout: maximum time to wait in jiffies
+ *
+ * Wait for a @condition to be true or a timeout to expire, only
+ * re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is 0 if
+ * the timeout expired and the condition was still false, or the
+ * remaining time left in the timeout (but at least 1) if the condition
+ * was found to be true.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_timeout(var, condition, timeout) \
({ \
long __ret = timeout; \
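
Reusing the hypothetical my_work above, a sketch of the return-value
convention:

	static int my_wait_for_result_timeout(struct my_work *w)
	{
		long left;

		left = wait_var_event_timeout(&w->result,
					      smp_load_acquire(&w->result) != NULL,
					      5 * HZ);
		if (!left)
			return -ETIMEDOUT;	/* condition still false */
		return 0;			/* true; >= 1 jiffy remained */
	}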
@@ -309,6 +405,23 @@ do { \
___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \
schedule())
+/**
+ * wait_var_event_interruptible - wait for a variable to be updated and notified
+ * @var: the address of variable being waited on
+ * @condition: the condition to wait for
+ *
+ * Wait for a @condition to be true or a signal to be received, only
+ * re-checking the condition when a wake up is received for the given
+ * @var (an arbitrary kernel address which need not be directly related
+ * to the given condition, but usually is).
+ *
+ * This is similar to wait_var_event() but returns a value which is 0 if
+ * the condition became true, or %-ERESTARTSYS if a signal was received.
+ *
+ * The condition should normally use smp_load_acquire() or a similarly
+ * ordered access to ensure that any changes to memory made before the
+ * condition became true will be visible after the wait completes.
+ */
#define wait_var_event_interruptible(var, condition) \
({ \
int __ret = 0; \
@@ -319,15 +432,122 @@ do { \
})
/**
- * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ * wait_var_event_any_lock - wait for a variable to be updated under a lock
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the object that is locked to protect updates to the variable
+ * @type: prefix on lock and unlock operations
+ * @state: waiting state, %TASK_UNINTERRUPTIBLE etc.
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a lock. The variables assessed in the condition will normally be updated
+ * under the same lock, and the wake up should be signalled with
+ * wake_up_var_locked() under the same lock.
+ *
+ * This is similar to wait_var_event(), but assumes a lock is held
+ * while calling this function and while updating the variable.
*
+ * This must be called while the given lock is held and the lock will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again. The functions used to
+ * unlock and lock the object are constructed by appending _unlock and _lock
+ * to @type.
+ *
+ * Return %-ERESTARTSYS if a signal arrives which is allowed to interrupt
+ * the wait according to @state.
+ */
+#define wait_var_event_any_lock(var, condition, lock, type, state) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = ___wait_var_event(var, condition, state, 0, 0, \
+ type ## _unlock(lock); \
+ schedule(); \
+ type ## _lock(lock)); \
+ __ret; \
+})
+
+/**
+ * wait_var_event_spinlock - wait for a variable to be updated under a spinlock
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the spinlock which protects updates to the variable
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a spinlock. The variables assessed in the condition will normally be updated
+ * under the same spinlock, and the wake up should be signalled with
+ * wake_up_var_locked() under the same spinlock.
+ *
+ * This is similar to wait_var_event(), but assumes a spinlock is held
+ * while calling this function and while updating the variable.
+ *
+ * This must be called while the given lock is held and the lock will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again.
+ */
+#define wait_var_event_spinlock(var, condition, lock) \
+ wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE)
+
+/**
+ * wait_var_event_mutex - wait for a variable to be updated under a mutex
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @mutex: the mutex which protects updates to the variable
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a mutex. The variables assessed in the condition will normally be
+ * updated under the same mutex, and the wake up should be signalled
+ * with wake_up_var_locked() under the same mutex.
+ *
+ * This is similar to wait_var_event(), but assumes a mutex is held
+ * while calling this function and while updating the variable.
+ *
+ * This must be called while the given mutex is held and the mutex will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again.
+ */
+#define wait_var_event_mutex(var, condition, lock) \
+ wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE)
+
+/**
+ * wake_up_var_protected - wake up waiters for a variable asserting that it is safe
+ * @var: the address of the variable being waited on
+ * @cond: the condition which affirms this is safe
+ *
+ * When waking waiters which use wait_var_event_any_lock() the waker must be
+ * holding the relevant lock to avoid races. This version of wake_up_var()
+ * asserts that the relevant lock is held and so no barrier is needed.
+ * The @cond is only tested when CONFIG_LOCKDEP is enabled.
+ */
+#define wake_up_var_protected(var, cond) \
+do { \
+ lockdep_assert(cond); \
+ wake_up_var(var); \
+} while (0)
+
+/**
+ * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex
+ * @var: the address of the variable being waited on
+ * @lock: The spinlock or mutex that protects the variable
+ *
+ * Send a wake up for the given variable which should be waited for with
+ * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(),
+ * no extra barriers are needed as the locking provides sufficient sequencing.
+ */
+#define wake_up_var_locked(var, lock) \
+ wake_up_var_protected(var, lockdep_is_held(lock))
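
A sketch pairing wait_var_event_spinlock() with wake_up_var_locked();
struct my_pool and its in_flight counter are invented for illustration:

	struct my_pool {
		spinlock_t	lock;
		unsigned int	in_flight;
	};

	static void my_pool_wait_empty(struct my_pool *p)
	{
		spin_lock(&p->lock);
		/* The lock is dropped around schedule() and re-taken to re-check. */
		wait_var_event_spinlock(&p->in_flight, p->in_flight == 0, &p->lock);
		spin_unlock(&p->lock);
	}

	static void my_pool_put(struct my_pool *p)
	{
		spin_lock(&p->lock);
		if (!--p->in_flight)
			/* The lock orders the update and the wake: no barrier needed. */
			wake_up_var_locked(&p->in_flight, &p->lock);
		spin_unlock(&p->lock);
	}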
+
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
* @bit: the bit of the word being waited on
- * @word: the word being waited on, a kernel virtual address
+ * @word: the address containing the bit being waited on
*
- * You can use this helper if bitflags are manipulated atomically rather than
- * non-atomically under a lock.
+ * The designated bit is cleared and any tasks waiting in wait_on_bit()
+ * or similar will be woken. This call has RELEASE semantics so that
+ * any changes to memory made before this call are guaranteed to be visible
+ * after the corresponding wait_on_bit() completes.
*/
-static inline void clear_and_wake_up_bit(int bit, void *word)
+static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
{
clear_bit_unlock(bit, word);
/* See wake_up_bit() for which memory barrier you need to use. */
@@ -335,4 +555,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
wake_up_bit(word, bit);
}
+/**
+ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
+ * @bit: the bit of the word being waited on
+ * @word: the address of memory containing that bit
+ *
+ * If the bit is set and can be atomically cleared, any tasks waiting in
+ * wait_on_bit() or similar will be woken. This call has the same
+ * complete ordering semantics as test_and_clear_bit(). Any changes to
+ * memory made before this call are guaranteed to be visible after the
+ * corresponding wait_on_bit() completes.
+ *
+ * Returns %true if the bit was successfully cleared and the wake up was sent.
+ */
+static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
+{
+ if (!test_and_clear_bit(bit, word))
+ return false;
+ /* no extra barrier required */
+ wake_up_bit(word, bit);
+ return true;
+}
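
A sketch in which several tasks may race to complete the same (invented)
MY_FLAG_PENDING bit, but only the task that actually clears it wakes waiters:

	#define MY_FLAG_PENDING	1	/* hypothetical bit number */

	static bool my_complete_pending(struct my_obj *obj)
	{
		/* Returns false if another task already cleared the bit. */
		return test_and_clear_wake_up_bit(MY_FLAG_PENDING, &obj->flags);
	}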
+
+/**
+ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
+ * @var: the atomic variable to decrement and test
+ *
+ * Decrements the atomic variable and, if it reaches zero, sends a wake_up
+ * to any processes waiting on the variable.
+ *
+ * This function has the same complete ordering semantics as
+ * atomic_dec_and_test().
+ *
+ * Returns %true if the variable reached zero and the wake up was sent.
+ */
+static inline bool atomic_dec_and_wake_up(atomic_t *var)
+{
+ if (!atomic_dec_and_test(var))
+ return false;
+ /* No extra barrier required */
+ wake_up_var(var);
+ return true;
+}
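
A sketch of draining a reference count with wait_var_event(); struct my_ref
is invented for illustration:

	struct my_ref {
		atomic_t refs;
	};

	static void my_ref_put(struct my_ref *r)
	{
		/* The final put sends a wake up for the address &r->refs. */
		atomic_dec_and_wake_up(&r->refs);
	}

	static void my_ref_wait_all_dropped(struct my_ref *r)
	{
		/* atomic_dec_and_test() is fully ordered, so a plain read suffices. */
		wait_var_event(&r->refs, atomic_read(&r->refs) == 0);
	}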
+
+/**
+ * store_release_wake_up - update a variable and send a wake_up
+ * @var: the address of the variable to be updated and woken
+ * @val: the value to store in the variable.
+ *
+ * Store the given value in the variable and send a wake up to any tasks
+ * waiting on the variable. All necessary barriers are included to ensure
+ * the task calling wait_var_event() sees the new value and all values
+ * written to memory before this call.
+ */
+#define store_release_wake_up(var, val) \
+do { \
+ smp_store_release(var, val); \
+ smp_mb(); \
+ wake_up_var(var); \
+} while (0)
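
The waker side of the earlier wait_var_event() sketch; my_publish_result()
is an invented helper:

	static void my_publish_result(struct my_work *w, void *result)
	{
		/*
		 * The release store plus the internal smp_mb() guarantee that
		 * the waiter's smp_load_acquire() condition sees @result and
		 * every store made before this call.
		 */
		store_release_wake_up(&w->result, result);
	}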
+
#endif /* _LINUX_WAIT_BIT_H */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 99660197a36c..8c60687a3e55 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -9,14 +9,18 @@
#ifndef _LINUX_WATCHDOG_H
#define _LINUX_WATCHDOG_H
-
#include <linux/bitops.h>
-#include <linux/cdev.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/notifier.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+
#include <uapi/linux/watchdog.h>
+struct attribute_group;
+struct device;
+struct module;
+
struct watchdog_ops;
struct watchdog_device;
struct watchdog_core_data;
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index e6e34d74dda0..03e5d3fe226d 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -21,8 +21,7 @@ struct compat_iw_point {
__u16 length;
__u16 flags;
};
-#endif
-#ifdef CONFIG_COMPAT
+
struct __compat_iw_event {
__u16 len; /* Real length of this stuff */
__u16 cmd; /* Wireless IOCTL */
@@ -49,5 +48,5 @@ struct __compat_iw_event {
#define IW_EV_COMPAT_POINT_LEN \
(IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \
IW_EV_COMPAT_POINT_OFF)
-#endif
+#endif /* CONFIG_COMPAT */
#endif /* _LINUX_WIRELESS_H */
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 63cca3b58d6d..665ea7dc8a92 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -16,12 +16,16 @@
* struct wmi_device - WMI device structure
* @dev: Device associated with this WMI device
* @setable: True for devices implementing the Set Control Method
+ * @driver_override: Driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
* This represents WMI devices discovered by the WMI driver core.
*/
struct wmi_device {
struct device dev;
bool setable;
+ const char *driver_override;
};
/**
@@ -30,15 +34,12 @@ struct wmi_device {
*
* Cast a struct device to a struct wmi_device.
*/
-#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+#define to_wmi_device(device) container_of_const(device, struct wmi_device, dev)
-extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
- u8 instance, u32 method_id,
- const struct acpi_buffer *in,
- struct acpi_buffer *out);
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id,
+ const struct acpi_buffer *in, struct acpi_buffer *out);
-extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
- u8 instance);
+union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance);
acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in);
@@ -52,6 +53,7 @@ u8 wmidev_instance_count(struct wmi_device *wdev);
* @no_singleton: Driver can be instantiated multiple times
* @probe: Callback for device binding
* @remove: Callback for device unbinding
+ * @shutdown: Callback for device shutdown
* @notify: Callback for receiving WMI events
*
* This represents WMI drivers which handle WMI devices.
@@ -64,12 +66,21 @@ struct wmi_driver {
int (*probe)(struct wmi_device *wdev, const void *context);
void (*remove)(struct wmi_device *wdev);
+ void (*shutdown)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
};
-extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
- struct module *owner);
-extern void wmi_driver_unregister(struct wmi_driver *driver);
+/**
+ * to_wmi_driver() - Helper macro to cast a driver to a wmi_driver
+ * @drv: driver struct
+ *
+ * Cast a struct device_driver to a struct wmi_driver.
+ */
+#define to_wmi_driver(drv) container_of_const(drv, struct wmi_driver, driver)
+
+int __must_check __wmi_driver_register(struct wmi_driver *driver, struct module *owner);
+
+void wmi_driver_unregister(struct wmi_driver *driver);
/**
* wmi_driver_register() - Helper macro to register a WMI driver
diff --git a/include/linux/wordpart.h b/include/linux/wordpart.h
index 4ca1ba66d2f0..5a7b97bb7c95 100644
--- a/include/linux/wordpart.h
+++ b/include/linux/wordpart.h
@@ -39,6 +39,14 @@
*/
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+/**
+ * REPEAT_BYTE_U32 - repeat the value @x multiple times as a u32 value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
+#define REPEAT_BYTE_U32(x) lower_32_bits(REPEAT_BYTE(x))
+
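
Worked values, assuming a 64-bit build (the asserts below are purely
illustrative):

	#include <linux/build_bug.h>
	#include <linux/wordpart.h>

	#if BITS_PER_LONG == 64
	/* REPEAT_BYTE() tiles one byte across the whole word... */
	static_assert(REPEAT_BYTE(0x2a) == 0x2a2a2a2a2a2a2a2aUL);
	/* ...while REPEAT_BYTE_U32() keeps only the low 32 bits. */
	static_assert(REPEAT_BYTE_U32(0x2a) == 0x2a2a2a2aU);
	/* A value above 0xff bleeds into neighbouring bytes: "odd results". */
	static_assert(REPEAT_BYTE(0x100) == 0x0101010101010100UL);
	#endif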
/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fb3993894536..dabc351cc127 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,13 +6,14 @@
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
+#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/rcupdate.h>
#include <linux/workqueue_types.h>
@@ -95,7 +96,7 @@ enum wq_misc_consts {
WORK_BUSY_RUNNING = 1 << 1,
/* maximum string length for set_worker_desc() */
- WORKER_DESC_LEN = 24,
+ WORKER_DESC_LEN = 32,
};
/* Convenience constants - of type 'unsigned long', not 'enum'! */
@@ -316,7 +317,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
- __init_timer(&(_work)->timer, \
+ __timer_init(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -324,7 +325,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- __init_timer_on_stack(&(_work)->timer, \
+ __timer_init_on_stack(&(_work)->timer, \
delayed_work_timer_fn, \
(_tflags) | TIMER_IRQSAFE); \
} while (0)
@@ -401,6 +402,7 @@ enum wq_flags {
* http://thread.gmane.org/gmane.linux.kernel/1480396
*/
WQ_POWER_EFFICIENT = 1 << 7,
+ WQ_PERCPU = 1 << 8, /* bound to a specific cpu */
__WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
@@ -408,11 +410,11 @@ enum wq_flags {
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
/* BH wq only allows the following flags */
- __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI,
+ __WQ_BH_ALLOWS = WQ_BH | WQ_HIGHPRI | WQ_PERCPU,
};
enum wq_consts {
- WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_ACTIVE = 2048, /* I like 2048, better ideas? */
WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
@@ -427,38 +429,40 @@ enum wq_consts {
/*
* System-wide workqueues which are always present.
*
- * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
* Multi-CPU multi-threaded. There are users which expect relatively
* short queue flush time. Don't queue works which can run for too
* long.
*
- * system_highpri_wq is similar to system_wq but for work items which
+ * system_highpri_wq is similar to system_percpu_wq but for work items which
* require WQ_HIGHPRI.
*
- * system_long_wq is similar to system_wq but may host long running
+ * system_long_wq is similar to system_percpu_wq but may host long running
* works. Queue flushing might take relatively long.
*
- * system_unbound_wq is unbound workqueue. Workers are not bound to
+ * system_dfl_wq is unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
*
- * system_freezable_wq is equivalent to system_wq except that it's
+ * system_freezable_wq is equivalent to system_percpu_wq except that it's
* freezable.
*
* *_power_efficient_wq are inclined towards saving power and converted
* into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
* they are same as their non-power-efficient counterparts - e.g.
- * system_power_efficient_wq is identical to system_wq if
+ * system_power_efficient_wq is identical to system_percpu_wq if
* 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
*
* system_bh[_highpri]_wq are convenience interface to softirq. BH work items
* are executed in the queueing CPU's BH context in the queueing order.
*/
-extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */
+extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
@@ -480,7 +484,7 @@ void workqueue_softirq_dead(unsigned int cpu);
* executing at most one work item for the workqueue.
*
* For unbound workqueues, @max_active limits the number of in-flight work items
- * for the whole system. e.g. @max_active of 16 indicates that that there can be
+ * for the whole system. e.g. @max_active of 16 indicates that there can be
* at most 16 work items executing for the workqueue in the whole system.
*
* As sharing the same active counter for an unbound workqueue across multiple
@@ -498,14 +502,56 @@ void workqueue_softirq_dead(unsigned int cpu);
* min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
* that the sum of per-node max_active's may be larger than @max_active.
*
- * For detailed information on %WQ_* flags, please refer to
+ * For detailed information on %WQ_\* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
__printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+ struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
+ alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+ 1, lockdep_map, ##args))
+#endif
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
@@ -524,7 +570,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
- alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_PERCPU, 1, (name))
#define create_freezable_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
WQ_MEM_RECLAIM, 1, (name))
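
A sketch of the modern allocation style these legacy wrappers expand to;
the workqueue name and flag choice are invented for illustration:

	static struct workqueue_struct *my_wq;

	static int my_init(void)
	{
		/* max_active of 0 selects the default (WQ_DFL_ACTIVE). */
		my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}

	static void my_exit(void)
	{
		destroy_workqueue(my_wq);
	}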
@@ -536,7 +582,9 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
extern void destroy_workqueue(struct workqueue_struct *wq);
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
@@ -660,7 +708,7 @@ static inline bool mod_delayed_work(struct workqueue_struct *wq,
*/
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
- return queue_work_on(cpu, system_wq, work);
+ return queue_work_on(cpu, system_percpu_wq, work);
}
/**
@@ -679,7 +727,7 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
*/
static inline bool schedule_work(struct work_struct *work)
{
- return queue_work(system_wq, work);
+ return queue_work(system_percpu_wq, work);
}
/**
@@ -722,21 +770,21 @@ extern void __warn_flushing_systemwide_wq(void)
#define flush_scheduled_work() \
({ \
__warn_flushing_systemwide_wq(); \
- __flush_workqueue(system_wq); \
+ __flush_workqueue(system_percpu_wq); \
})
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
\
- if ((__builtin_constant_p(_wq == system_wq) && \
- _wq == system_wq) || \
+ if ((__builtin_constant_p(_wq == system_percpu_wq) && \
+ _wq == system_percpu_wq) || \
(__builtin_constant_p(_wq == system_highpri_wq) && \
_wq == system_highpri_wq) || \
(__builtin_constant_p(_wq == system_long_wq) && \
_wq == system_long_wq) || \
- (__builtin_constant_p(_wq == system_unbound_wq) && \
- _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_dfl_wq) && \
+ _wq == system_dfl_wq) || \
(__builtin_constant_p(_wq == system_freezable_wq) && \
_wq == system_freezable_wq) || \
(__builtin_constant_p(_wq == system_power_efficient_wq) && \
@@ -759,7 +807,7 @@ extern void __warn_flushing_systemwide_wq(void)
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+ return queue_delayed_work_on(cpu, system_percpu_wq, dwork, delay);
}
/**
@@ -773,7 +821,7 @@ static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
static inline bool schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
- return queue_delayed_work(system_wq, dwork, delay);
+ return queue_delayed_work(system_percpu_wq, dwork, delay);
}
#ifndef CONFIG_SMP
@@ -799,19 +847,6 @@ long work_on_cpu_key(int cpu, long (*fn)(void *),
work_on_cpu_key(_cpu, _fn, _arg, &__key); \
})
-long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
- void *arg, struct lock_class_key *key);
-
-/*
- * A new key is defined for each caller to make sure the work
- * associated with the function doesn't share its locking class.
- */
-#define work_on_cpu_safe(_cpu, _fn, _arg) \
-({ \
- static struct lock_class_key __key; \
- \
- work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
-})
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 112d806ddbe4..f48e8ccffe81 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -59,7 +59,6 @@ struct writeback_control {
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
- unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
@@ -72,13 +71,6 @@ struct writeback_control {
*/
unsigned no_cgroup_owner:1;
- /* To enable batching of swap writes to non-block-device backends,
- * "plug" can be set point to a 'struct swap_iocb *'. When all swap
- * writes have been submitted, if with swap_iocb is not NULL,
- * swap_write_unplug() should be called.
- */
- struct swap_iocb **swap_plug;
-
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
@@ -197,10 +189,11 @@ void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
void inode_wait_for_writeback(struct inode *inode);
void inode_io_list_del(struct inode *inode);
-/* writeback.h requires fs.h; it, too, is not included from here. */
-static inline void wait_on_inode(struct inode *inode)
+static inline xa_mark_t wbc_to_tag(struct writeback_control *wbc)
{
- wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+ return PAGECACHE_TAG_TOWRITE;
+ return PAGECACHE_TAG_DIRTY;
}
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -209,15 +202,12 @@ static inline void wait_on_inode(struct inode *inode)
#include <linux/bio.h>
void __inode_attach_wb(struct inode *inode, struct folio *folio);
-void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock);
void wbc_detach_inode(struct writeback_control *wbc);
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
size_t bytes);
int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
enum wb_reason reason, struct wb_completion *done);
-void cgroup_writeback_umount(void);
+void cgroup_writeback_umount(struct super_block *sb);
bool cleanup_offline_cgwb(struct bdi_writeback *wb);
/**
@@ -244,28 +234,14 @@ static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
- WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
+ WARN_ON_ONCE(!(inode_state_read_once(inode) & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
}
-/**
- * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
- * @wbc: writeback_control of interest
- * @inode: target inode
- *
- * This function is to be used by __filemap_fdatawrite_range(), which is an
- * alternative entry point into writeback code, and first ensures @inode is
- * associated with a bdi_writeback and attaches it to @wbc.
- */
-static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
- struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode_attach_wb(inode, NULL);
- wbc_attach_and_unlock_inode(wbc, inode);
-}
+void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+ struct inode *inode);
/**
 * wbc_init_bio - writeback specific initialization of bio
@@ -289,6 +265,8 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
+void inode_switch_wbs_work_fn(struct work_struct *work);
+
#else /* CONFIG_CGROUP_WRITEBACK */
static inline void inode_attach_wb(struct inode *inode, struct folio *folio)
@@ -299,13 +277,6 @@ static inline void inode_detach_wb(struct inode *inode)
{
}
-static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock)
-{
- spin_unlock(&inode->i_lock);
-}
-
static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
struct inode *inode)
{
@@ -320,11 +291,11 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
}
static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
- struct page *page, size_t bytes)
+ struct folio *folio, size_t bytes)
{
}
-static inline void cgroup_writeback_umount(void)
+static inline void cgroup_writeback_umount(struct super_block *sb)
{
}
@@ -333,6 +304,30 @@ static inline void cgroup_writeback_umount(void)
/*
* mm/page-writeback.c
*/
+/* consolidated parameters for balance_dirty_pages() and its subroutines */
+struct dirty_throttle_control {
+#ifdef CONFIG_CGROUP_WRITEBACK
+ struct wb_domain *dom;
+ struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */
+#endif
+ struct bdi_writeback *wb;
+ struct fprop_local_percpu *wb_completions;
+
+ unsigned long avail; /* dirtyable */
+ unsigned long dirty; /* file_dirty + write + nfs */
+ unsigned long thresh; /* dirty threshold */
+ unsigned long bg_thresh; /* dirty background threshold */
+ unsigned long limit; /* hard dirty limit */
+
+ unsigned long wb_dirty; /* per-wb counterparts */
+ unsigned long wb_thresh;
+ unsigned long wb_bg_thresh;
+
+ unsigned long pos_ratio;
+ bool freerun;
+ bool dirty_exceeded;
+};
+
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_timer_fn(struct timer_list *t);
@@ -347,12 +342,8 @@ extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
-extern unsigned int dirtytime_expire_interval;
extern int laptop_mode;
-int dirtytime_interval_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
unsigned long cgwb_calc_thresh(struct bdi_writeback *wb);
@@ -371,12 +362,6 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb);
struct folio *writeback_iter(struct address_space *mapping,
struct writeback_control *wbc, struct folio *folio, int *error);
-typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
- void *data);
-
-int write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
@@ -389,4 +374,9 @@ bool redirty_page_for_writepage(struct writeback_control *, struct page *);
void sb_mark_inode_writeback(struct inode *inode);
void sb_clear_inode_writeback(struct inode *inode);
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
+
#endif /* WRITEBACK_H */
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index bb763085479a..45ff6f7a872b 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
+ /**
+ * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
+ *
+ * lockdep requires the lockdep_map for the first locked ww_mutex
+ * in a ww transaction to remain in memory until all ww_mutexes of
+ * the transaction have been unlocked. Ensure this by keeping a
+ * fake locked ww_mutex lockdep map between ww_acquire_init() and
+ * ww_acquire_fini().
+ */
+ struct lockdep_map first_lock_dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
+ lockdep_init_map_wait(&ctx->first_lock_dep_map, ww_class->mutex_name,
+ &ww_class->mutex_key, 0, LD_WAIT_SLEEP);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+ mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
#ifdef DEBUG_WW_MUTEXES
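
A sketch of the transaction lifetime that the fake lockdep map brackets;
the ww class and both mutexes are invented, and -EDEADLK backoff is
omitted for brevity:

	static DEFINE_WW_CLASS(my_ww_class);

	static void my_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;

		ww_acquire_init(&ctx, &my_ww_class);	/* fake map "acquired" */
		ww_mutex_lock(a, &ctx);
		ww_mutex_lock(b, &ctx);
		ww_acquire_done(&ctx);

		/* ... work under both locks ... */

		ww_mutex_unlock(b);
		ww_mutex_unlock(a);
		ww_acquire_fini(&ctx);			/* fake map released */
	}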
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 170fdee6339c..a4d6cc0c9f68 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -17,6 +17,8 @@
* @WWAN_PORT_FIREHOSE: XML based command protocol
* @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
* @WWAN_PORT_FASTBOOT: Fastboot protocol control
+ * @WWAN_PORT_ADB: ADB protocol control
+ * @WWAN_PORT_MIPC: MTK MIPC diagnostic interface
*
* @WWAN_PORT_MAX: Highest supported port types
* @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -30,6 +32,8 @@ enum wwan_port_type {
WWAN_PORT_FIREHOSE,
WWAN_PORT_XMMRPC,
WWAN_PORT_FASTBOOT,
+ WWAN_PORT_ADB,
+ WWAN_PORT_MIPC,
/* Add new port types above this line */
@@ -93,7 +97,7 @@ struct wwan_port_caps {
*
* This function must be balanced with a call to wwan_remove_port().
*
- * Returns a valid pointer to wwan_port on success or PTR_ERR on failure
+ * Returns: a valid pointer to wwan_port on success or PTR_ERR on failure
*/
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 0b618ec04115..be850174e802 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -965,10 +965,12 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
* in xa_init_flags().
*
+ * Note that callers interested in whether wrapping has occurred should
+ * use __xa_alloc_cyclic() instead.
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
- * Return: 0 if the allocation succeeded without wrapping. 1 if the
- * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
@@ -981,7 +983,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock(xa);
- return err;
+ return err < 0 ? err : 0;
}
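
A sketch of a caller that still wants the wrap notification, calling
__xa_alloc_cyclic() directly under the lock; my_assign_id() is invented:

	static int my_assign_id(struct xarray *xa, void *entry, u32 *id, u32 *next)
	{
		int err;

		xa_lock(xa);
		/* __xa_alloc_cyclic() may drop and retake xa_lock to allocate. */
		err = __xa_alloc_cyclic(xa, id, entry, xa_limit_32b, next,
					GFP_KERNEL);
		xa_unlock(xa);

		if (err == 1)
			pr_debug("id space wrapped\n");	/* wrapped, still a success */
		return err < 0 ? err : 0;
	}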
/**
@@ -1002,10 +1004,12 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
* in xa_init_flags().
*
+ * Note that callers interested in whether wrapping has occurred should
+ * use __xa_alloc_cyclic() instead.
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
- * Return: 0 if the allocation succeeded without wrapping. 1 if the
- * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
@@ -1018,7 +1022,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_bh(xa);
- return err;
+ return err < 0 ? err : 0;
}
/**
@@ -1039,10 +1043,12 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
* in xa_init_flags().
*
+ * Note that callers interested in whether wrapping has occurred should
+ * use __xa_alloc_cyclic() instead.
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
- * Return: 0 if the allocation succeeded without wrapping. 1 if the
- * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
@@ -1055,7 +1061,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
xa_unlock_irq(xa);
- return err;
+ return err < 0 ? err : 0;
}
/**
@@ -1555,6 +1561,8 @@ int xa_get_order(struct xarray *, unsigned long index);
int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
+void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
+unsigned int xas_try_split_min_order(unsigned int order);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
@@ -1576,6 +1584,17 @@ static inline void xas_split_alloc(struct xa_state *xas, void *entry,
unsigned int order, gfp_t gfp)
{
}
+
+static inline void xas_try_split(struct xa_state *xas, void *entry,
+ unsigned int order)
+{
+}
+
+static inline unsigned int xas_try_split_min_order(unsigned int order)
+{
+ return 0;
+}
+
#endif
/**
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d20051865800..64e9afe7d647 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -19,6 +19,10 @@
#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
+/* List of all xattr_args "versions". */
+#define XATTR_ARGS_SIZE_VER0 16 /* sizeof first published struct */
+#define XATTR_ARGS_SIZE_LATEST XATTR_ARGS_SIZE_VER0
+
struct inode;
struct dentry;
@@ -81,12 +85,12 @@ int __vfs_setxattr_noperm(struct mnt_idmap *, struct dentry *,
const char *, const void *, size_t, int);
int __vfs_setxattr_locked(struct mnt_idmap *, struct dentry *,
const char *, const void *, size_t, int,
- struct inode **);
+ struct delegated_inode *);
int vfs_setxattr(struct mnt_idmap *, struct dentry *, const char *,
const void *, size_t, int);
int __vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
int __vfs_removexattr_locked(struct mnt_idmap *, struct dentry *,
- const char *, struct inode **);
+ const char *, struct delegated_inode *);
int vfs_removexattr(struct mnt_idmap *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
index df42511438d0..587122e2c29c 100644
--- a/include/linux/xxhash.h
+++ b/include/linux/xxhash.h
@@ -141,21 +141,7 @@ static inline unsigned long xxhash(const void *input, size_t length,
*/
/**
- * struct xxh32_state - private xxh32 state, do not use members directly
- */
-struct xxh32_state {
- uint32_t total_len_32;
- uint32_t large_len;
- uint32_t v1;
- uint32_t v2;
- uint32_t v3;
- uint32_t v4;
- uint32_t mem32[4];
- uint32_t memsize;
-};
-
-/**
- * struct xxh32_state - private xxh64 state, do not use members directly
+ * struct xxh64_state - private xxh64 state, do not use members directly
*/
struct xxh64_state {
uint64_t total_len;
@@ -168,42 +154,6 @@ struct xxh64_state {
};
/**
- * xxh32_reset() - reset the xxh32 state to start a new hashing operation
- *
- * @state: The xxh32 state to reset.
- * @seed: Initialize the hash state with this seed.
- *
- * Call this function on any xxh32_state to prepare for a new hashing operation.
- */
-void xxh32_reset(struct xxh32_state *state, uint32_t seed);
-
-/**
- * xxh32_update() - hash the data given and update the xxh32 state
- *
- * @state: The xxh32 state to update.
- * @input: The data to hash.
- * @length: The length of the data to hash.
- *
- * After calling xxh32_reset() call xxh32_update() as many times as necessary.
- *
- * Return: Zero on success, otherwise an error code.
- */
-int xxh32_update(struct xxh32_state *state, const void *input, size_t length);
-
-/**
- * xxh32_digest() - produce the current xxh32 hash
- *
- * @state: Produce the current xxh32 hash of this state.
- *
- * A hash value can be produced at any time. It is still possible to continue
- * inserting input into the hash state after a call to xxh32_digest(), and
- * generate new hashes later on, by calling xxh32_digest() again.
- *
- * Return: The xxh32 hash stored in the state.
- */
-uint32_t xxh32_digest(const struct xxh32_state *state);
-
-/**
* xxh64_reset() - reset the xxh64 state to start a new hashing operation
*
* @state: The xxh64 state to reset.
@@ -236,24 +186,4 @@ int xxh64_update(struct xxh64_state *state, const void *input, size_t length);
*/
uint64_t xxh64_digest(const struct xxh64_state *state);
-/*-**************************
- * Utils
- ***************************/
-
-/**
- * xxh32_copy_state() - copy the source state into the destination state
- *
- * @src: The source xxh32 state.
- * @dst: The destination xxh32 state.
- */
-void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src);
-
-/**
- * xxh64_copy_state() - copy the source state into the destination state
- *
- * @src: The source xxh64 state.
- * @dst: The destination xxh64 state.
- */
-void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src);
-
#endif /* XXHASH_H */
diff --git a/include/linux/xz.h b/include/linux/xz.h
index 7285ca5d56e9..58ae1d746c6f 100644
--- a/include/linux/xz.h
+++ b/include/linux/xz.h
@@ -1,11 +1,10 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* XZ decompressor
*
* Authors: Lasse Collin <lasse.collin@tukaani.org>
* Igor Pavlov <https://7-zip.org/>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef XZ_H
@@ -19,11 +18,6 @@
# include <stdint.h>
#endif
-/* In Linux, this is used to make extern functions static when needed. */
-#ifndef XZ_EXTERN
-# define XZ_EXTERN extern
-#endif
-
/**
* enum xz_mode - Operation mode
*
@@ -143,7 +137,7 @@ struct xz_buf {
size_t out_size;
};
-/**
+/*
* struct xz_dec - Opaque type to hold the XZ decoder state
*/
struct xz_dec;
@@ -191,7 +185,7 @@ struct xz_dec;
* ready to be used with xz_dec_run(). If memory allocation fails,
* xz_dec_init() returns NULL.
*/
-XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
/**
* xz_dec_run() - Run the XZ decoder
@@ -211,7 +205,7 @@ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
* get that amount valid data from the beginning of the stream. You must use
* the multi-call decoder if you don't want to uncompress the whole stream.
*/
-XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
/**
* xz_dec_reset() - Reset an already allocated decoder state
@@ -224,32 +218,38 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
* xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
* multi-call mode.
*/
-XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+void xz_dec_reset(struct xz_dec *s);
/**
* xz_dec_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_init(). If s is NULL,
* this function does nothing.
*/
-XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+void xz_dec_end(struct xz_dec *s);
-/*
- * Decompressor for MicroLZMA, an LZMA variant with a very minimal header.
- * See xz_dec_microlzma_alloc() below for details.
+/**
+ * DOC: MicroLZMA decompressor
+ *
+ * This MicroLZMA header format was created for use in EROFS but may be used
+ * by others too. **In most cases one needs the XZ APIs above instead.**
*
- * These functions aren't used or available in preboot code and thus aren't
- * marked with XZ_EXTERN. This avoids warnings about static functions that
- * are never defined.
+ * The compressed format supported by this decoder is a raw LZMA stream
+ * whose first byte (always 0x00) has been replaced with bitwise-negation
+ * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
+ * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
+ * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
+ * marker must not be used. The unused values are reserved for future use.
*/
-/**
+
+/*
* struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state
*/
struct xz_dec_microlzma;
/**
* xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder
- * @mode XZ_SINGLE or XZ_PREALLOC
- * @dict_size LZMA dictionary size. This must be at least 4 KiB and
+ * @mode: XZ_SINGLE or XZ_PREALLOC
+ * @dict_size: LZMA dictionary size. This must be at least 4 KiB and
* at most 3 GiB.
*
* In contrast to xz_dec_init(), this function only allocates the memory
@@ -262,40 +262,30 @@ struct xz_dec_microlzma;
* On success, xz_dec_microlzma_alloc() returns a pointer to
* struct xz_dec_microlzma. If memory allocation fails or
* dict_size is invalid, NULL is returned.
- *
- * The compressed format supported by this decoder is a raw LZMA stream
- * whose first byte (always 0x00) has been replaced with bitwise-negation
- * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is
- * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00.
- * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream
- * marker must not be used. The unused values are reserved for future use.
- * This MicroLZMA header format was created for use in EROFS but may be used
- * by others too.
*/
-extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
- uint32_t dict_size);
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size);
/**
* xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state
- * @s Decoder state allocated using xz_dec_microlzma_alloc()
- * @comp_size Compressed size of the input stream
- * @uncomp_size Uncompressed size of the input stream. A value smaller
+ * @s: Decoder state allocated using xz_dec_microlzma_alloc()
+ * @comp_size: Compressed size of the input stream
+ * @uncomp_size: Uncompressed size of the input stream. A value smaller
* than the real uncompressed size of the input stream can
* be specified if uncomp_size_is_exact is set to false.
* uncomp_size can never be set to a value larger than the
* expected real uncompressed size because it would eventually
* result in XZ_DATA_ERROR.
- * @uncomp_size_is_exact This is an int instead of bool to avoid
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
* requiring stdbool.h. This should normally be set to true.
* When this is set to false, error detection is weaker.
*/
-extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
- uint32_t comp_size, uint32_t uncomp_size,
- int uncomp_size_is_exact);
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact);
/**
* xz_dec_microlzma_run() - Run the MicroLZMA decoder
- * @s Decoder state initialized using xz_dec_microlzma_reset()
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
* @b: Input and output buffers
*
* This works similarly to xz_dec_run() with a few important differences.
@@ -329,15 +319,14 @@ extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
* may be changed normally like with XZ_PREALLOC. This way input data can be
* provided from non-contiguous memory.
*/
-extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
- struct xz_buf *b);
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
* xz_dec_microlzma_end() - Free the memory allocated for the decoder state
* @s: Decoder state allocated using xz_dec_microlzma_alloc().
* If s is NULL, this function does nothing.
*/
-extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
/*
* Standalone build (userspace build or in-kernel build for boot time use)
@@ -358,13 +347,13 @@ extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
* This must be called before any other xz_* function to initialize
* the CRC32 lookup table.
*/
-XZ_EXTERN void xz_crc32_init(void);
+void xz_crc32_init(void);
/*
* Update CRC32 value using the polynomial from IEEE-802.3. To start a new
* calculation, the third argument must be zero. To continue the calculation,
* the previously returned value is passed as the third argument.
*/
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif
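For orientation, a minimal sketch of the single-call MicroLZMA path described in the DOC block above. The demo_microlzma() wrapper, the 64 KiB dictionary size, and the errno mapping are illustrative assumptions, not part of the patch:

#include <linux/xz.h>
#include <linux/errno.h>

/* Decode one complete MicroLZMA stream in a single call (XZ_SINGLE). */
static int demo_microlzma(const uint8_t *in, size_t in_size,
                          uint8_t *out, size_t out_size)
{
        struct xz_dec_microlzma *s;
        struct xz_buf b = {
                .in = in, .in_pos = 0, .in_size = in_size,
                .out = out, .out_pos = 0, .out_size = out_size,
        };
        enum xz_ret ret;

        /* dict_size must be at least 4 KiB; 64 KiB is a demo value. */
        s = xz_dec_microlzma_alloc(XZ_SINGLE, 64 * 1024);
        if (!s)
                return -ENOMEM;

        /* uncomp_size is exact here, so pass 1 for strongest checking. */
        xz_dec_microlzma_reset(s, in_size, out_size, 1);
        ret = xz_dec_microlzma_run(s, &b);
        xz_dec_microlzma_end(s);

        return ret == XZ_STREAM_END ? 0 : -EINVAL;
}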
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index db7416ed6057..f36c8d39553d 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -52,7 +52,7 @@ struct zorro_driver {
struct device_driver driver;
};
-#define to_zorro_driver(drv) container_of(drv, struct zorro_driver, driver)
+#define to_zorro_driver(drv) container_of_const(drv, struct zorro_driver, driver)
#define zorro_for_each_dev(dev) \
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
deleted file mode 100644
index a67d62b79698..000000000000
--- a/include/linux/zpool.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * zpool memory storage api
- *
- * Copyright (C) 2014 Dan Streetman
- *
- * This is a common frontend for the zbud and zsmalloc memory
- * storage pool implementations. Typically, this is used to
- * store compressed memory.
- */
-
-#ifndef _ZPOOL_H_
-#define _ZPOOL_H_
-
-struct zpool;
-
-/*
- * Control how a handle is mapped. It will be ignored if the
- * implementation does not support it. Its use is optional.
- * Note that this does not refer to memory protection, it
- * refers to how the memory will be copied in/out if copying
- * is necessary during mapping; read-write is the safest as
- * it copies the existing memory in on map, and copies the
- * changed memory back out on unmap. Write-only does not copy
- * in the memory and should only be used for initialization.
- * If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
- */
-enum zpool_mapmode {
- ZPOOL_MM_RW, /* normal read-write mapping */
- ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
- ZPOOL_MM_WO, /* write-only (no copy-in at map time) */
-
- ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
-};
-
-bool zpool_has_pool(char *type);
-
-struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
-
-const char *zpool_get_type(struct zpool *pool);
-
-void zpool_destroy_pool(struct zpool *pool);
-
-bool zpool_malloc_support_movable(struct zpool *pool);
-
-int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
-
-void zpool_free(struct zpool *pool, unsigned long handle);
-
-void *zpool_map_handle(struct zpool *pool, unsigned long handle,
- enum zpool_mapmode mm);
-
-void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
-
-u64 zpool_get_total_pages(struct zpool *pool);
-
-
-/**
- * struct zpool_driver - driver implementation for zpool
- * @type: name of the driver.
- * @list: entry in the list of zpool drivers.
- * @create: create a new pool.
- * @destroy: destroy a pool.
- * @malloc: allocate mem from a pool.
- * @free: free mem from a pool.
- * @sleep_mapped: whether zpool driver can sleep during map.
- * @map: map a handle.
- * @unmap: unmap a handle.
- * @total_size: get total size of a pool.
- *
- * This is created by a zpool implementation and registered
- * with zpool.
- */
-struct zpool_driver {
- char *type;
- struct module *owner;
- atomic_t refcount;
- struct list_head list;
-
- void *(*create)(const char *name, gfp_t gfp);
- void (*destroy)(void *pool);
-
- bool malloc_support_movable;
- int (*malloc)(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle);
- void (*free)(void *pool, unsigned long handle);
-
- bool sleep_mapped;
- void *(*map)(void *pool, unsigned long handle,
- enum zpool_mapmode mm);
- void (*unmap)(void *pool, unsigned long handle);
-
- u64 (*total_pages)(void *pool);
-};
-
-void zpool_register_driver(struct zpool_driver *driver);
-
-int zpool_unregister_driver(struct zpool_driver *driver);
-
-bool zpool_can_sleep_mapped(struct zpool *pool);
-
-#endif
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index a48cd0ffe57d..f3ccff2d966c 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -16,23 +16,6 @@
#include <linux/types.h>
-/*
- * zsmalloc mapping modes
- *
- * NOTE: These only make a difference when a mapped object spans pages.
- */
-enum zs_mapmode {
- ZS_MM_RW, /* normal read-write mapping */
- ZS_MM_RO, /* read-only (no copy-out at unmap time) */
- ZS_MM_WO /* write-only (no copy-in at map time) */
- /*
- * NOTE: ZS_MM_WO should only be used for initializing new
- * (uninitialized) allocations. Partial writes to already
- * initialized allocations should use ZS_MM_RW to preserve the
- * existing data.
- */
-};
-
struct zs_pool_stats {
/* How many pages were migrated (freed) */
atomic_long_t pages_compacted;
@@ -43,19 +26,26 @@ struct zs_pool;
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
-unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags,
+ const int nid);
void zs_free(struct zs_pool *pool, unsigned long obj);
size_t zs_huge_class_size(struct zs_pool *pool);
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- enum zs_mapmode mm);
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-
unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);
unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size);
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
+
+void *zs_obj_read_begin(struct zs_pool *pool, unsigned long handle,
+ void *local_copy);
+void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem);
+void zs_obj_write(struct zs_pool *pool, unsigned long handle,
+ void *handle_mem, size_t mem_len);
+
+extern const struct movable_operations zsmalloc_mops;
+
#endif
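As a rough sketch of the replacement API: objects are now written in one shot with zs_obj_write() and read under a zs_obj_read_begin()/zs_obj_read_end() pair, instead of being mapped and unmapped. The zs_demo() helper, buffer size, and GFP flags below are assumptions for illustration:

#include <linux/zsmalloc.h>
#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/err.h>

static int zs_demo(struct zs_pool *pool, void *src, size_t len)
{
        /* Scratch buffer used by zs_obj_read_begin() when the object
         * spans two pages; it must be at least @len bytes. */
        char local_copy[128];
        unsigned long handle;
        void *mem;

        if (len > sizeof(local_copy))
                return -EINVAL;

        /* NUMA_NO_NODE lets zsmalloc pick the node for the new nid arg. */
        handle = zs_malloc(pool, len, GFP_KERNEL, NUMA_NO_NODE);
        if (IS_ERR_VALUE(handle))
                return PTR_ERR((void *)handle);

        /* One-shot write; no map/unmap bracket is needed anymore. */
        zs_obj_write(pool, handle, src, len);

        /* Read-side access is bracketed instead of mapped. */
        mem = zs_obj_read_begin(pool, handle, local_copy);
        /* ... read mem[0 .. len - 1] here ... */
        zs_obj_read_end(pool, handle, mem);

        zs_free(pool, handle);
        return 0;
}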

diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 113408eef6ec..2f2a3c8b8a33 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -77,6 +77,30 @@ int zstd_min_clevel(void);
*/
int zstd_max_clevel(void);
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * struct zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * struct zstd_dict_content_type - Dictionary content type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
/* ====== Parameter Selection ====== */
/**
@@ -136,9 +160,32 @@ typedef ZSTD_parameters zstd_parameters;
zstd_parameters zstd_get_params(int level,
unsigned long long estimated_src_size);
-/* ====== Single-pass Compression ====== */
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+ unsigned long long estimated_src_size, size_t dict_size);
typedef ZSTD_CCtx zstd_cctx;
+typedef ZSTD_cParameter zstd_cparameter;
+
+/**
+ * zstd_cctx_set_param() - sets a compression parameter
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @param: The parameter to set.
+ * @value: The value to set the parameter to.
+ *
+ * Return: Zero or an error, which can be checked using zstd_is_error().
+ */
+size_t zstd_cctx_set_param(zstd_cctx *cctx, zstd_cparameter param, int value);
+
+/* ====== Single-pass Compression ====== */
/**
* zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
@@ -154,6 +201,20 @@ typedef ZSTD_CCtx zstd_cctx;
size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
/**
+ * zstd_cctx_workspace_bound_with_ext_seq_prod() - max memory needed to
+ * initialize a zstd_cctx when using the block-level external sequence
+ * producer API.
+ * @parameters: The compression parameters to be used.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * this function for each set of parameters and use the maximum size.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cctx().
+ */
+size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *parameters);
+
+/**
* zstd_init_cctx() - initialize a zstd compression context
* @workspace: The workspace to emplace the context into. It must outlive
* the returned context.
@@ -180,6 +241,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size, const zstd_parameters *parameters);
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx: Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx* cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @dict_load_method: Dictionary load method.
+ * @dict_content_type: Dictionary content type.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it must not be
+ * freed before the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ * otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+ zstd_compression_parameters cparams,
+ zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict* cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+ size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_cdict *cdict);
+
/* ====== Single-pass Decompression ====== */
typedef ZSTD_DCtx zstd_dctx;
@@ -220,6 +346,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
const void *src, size_t src_size);
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so it must not be
+ * freed before the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ * otherwise.
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+ zstd_custom_mem custom_mem);
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict: Pointer to the dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx: Pointer to decompression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+ void *dst, size_t dst_capacity, const void *src, size_t src_size,
+ const zstd_ddict *ddict);
+
+
/* ====== Streaming Buffers ====== */
/**
@@ -258,6 +449,16 @@ typedef ZSTD_CStream zstd_cstream;
size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
/**
+ * zstd_cstream_workspace_bound_with_ext_seq_prod() - memory needed to initialize
+ * a zstd_cstream when using the block-level external sequence producer API.
+ * @cparams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * zstd_init_cstream().
+ */
+size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *cparams);
+
+/**
* zstd_init_cstream() - initialize a zstd streaming compression context
* @parameters The zstd parameters to use for compression.
* @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
@@ -417,6 +618,18 @@ size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
/**
+ * zstd_register_sequence_producer() - exposes the zstd library function
+ * ZSTD_registerSequenceProducer(). This is used for the block-level external
+ * sequence producer API. See upstream zstd.h for detailed documentation.
+ */
+typedef ZSTD_sequenceProducer_F zstd_sequence_producer_f;
+void zstd_register_sequence_producer(
+ zstd_cctx *cctx,
+ void* sequence_producer_state,
+ zstd_sequence_producer_f sequence_producer
+);
+
+/**
* struct zstd_frame_params - zstd frame parameters stored in the frame header
* @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
* present.
@@ -429,7 +642,7 @@ size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
*
* See zstd_lib.h.
*/
-typedef ZSTD_frameHeader zstd_frame_header;
+typedef ZSTD_FrameHeader zstd_frame_header;
/**
* zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
@@ -444,4 +657,35 @@ typedef ZSTD_frameHeader zstd_frame_header;
size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
size_t src_size);
+/**
+ * struct zstd_sequence - a sequence of literals or a match
+ *
+ * @offset: The offset of the match
+ * @litLength: The literal length of the sequence
+ * @matchLength: The match length of the sequence
+ * @rep: Represents which repeat offset is used
+ */
+typedef ZSTD_Sequence zstd_sequence;
+
+/**
+ * zstd_compress_sequences_and_literals() - compress an array of zstd_sequence and literals
+ *
+ * @cctx: The zstd compression context.
+ * @dst: The buffer to compress the data into.
+ * @dst_capacity: The size of the destination buffer.
+ * @in_seqs: The array of zstd_sequence to compress.
+ * @in_seqs_size: The number of sequences in in_seqs.
+ * @literals: The literals associated with the sequences to be compressed.
+ * @lit_size: The size of the literals in the literals buffer.
+ * @lit_capacity: The size of the literals buffer.
+ * @decompressed_size: The size of the input data.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity,
+ const zstd_sequence *in_seqs, size_t in_seqs_size,
+ const void* literals, size_t lit_size, size_t lit_capacity,
+ size_t decompressed_size);
+
#endif /* LINUX_ZSTD_H */
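To show how the new entry points compose, here is a hedged sketch of dictionary-based compression with a custom allocator, combining zstd_create_cctx_advanced(), zstd_get_cparams(), zstd_create_cdict_byreference() and zstd_compress_using_cdict(). The demo_* names, the kvmalloc-backed allocator, and compression level 3 are assumptions, and error handling is abbreviated:

#include <linux/zstd.h>
#include <linux/errno.h>
#include <linux/slab.h>

static void *demo_zstd_alloc(void *opaque, size_t size)
{
        return kvmalloc(size, GFP_KERNEL);
}

static void demo_zstd_free(void *opaque, void *address)
{
        kvfree(address);
}

static int demo_compress_with_dict(const void *dict, size_t dict_size,
                                   void *dst, size_t dst_capacity,
                                   const void *src, size_t src_size)
{
        zstd_custom_mem mem = {
                .customAlloc = demo_zstd_alloc,
                .customFree = demo_zstd_free,
        };
        zstd_compression_parameters cparams;
        zstd_cctx *cctx;
        zstd_cdict *cdict;
        int err = -ENOMEM;

        cparams = zstd_get_cparams(3, src_size, dict_size);

        cctx = zstd_create_cctx_advanced(mem);
        /* @dict is used by reference: it must stay live until the
         * cdict is freed. */
        cdict = zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
        if (cctx && cdict) {
                size_t ret = zstd_compress_using_cdict(cctx, dst, dst_capacity,
                                                       src, src_size, cdict);
                err = zstd_is_error(ret) ? -EIO : (int)ret;
        }

        zstd_free_cdict(cdict);         /* both free helpers accept NULL */
        zstd_free_cctx(cctx);
        return err;
}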
diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
index 58b6dd45a969..c307fb011132 100644
--- a/include/linux/zstd_errors.h
+++ b/include/linux/zstd_errors.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -12,13 +13,18 @@
#define ZSTD_ERRORS_H_398273423
-/*===== dependency =====*/
-#include <linux/types.h> /* size_t */
+/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
+#define ZSTDERRORLIB_VISIBLE
+#ifndef ZSTDERRORLIB_HIDDEN
+# if (__GNUC__ >= 4) && !defined(__MINGW32__)
+# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden")))
+# else
+# define ZSTDERRORLIB_HIDDEN
+# endif
+#endif
-/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
-#define ZSTDERRORLIB_VISIBILITY
-#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
/*-*********************************************
* Error codes list
@@ -43,14 +49,18 @@ typedef enum {
ZSTD_error_frameParameter_windowTooLarge = 16,
ZSTD_error_corruption_detected = 20,
ZSTD_error_checksum_wrong = 22,
+ ZSTD_error_literals_headerWrong = 24,
ZSTD_error_dictionary_corrupted = 30,
ZSTD_error_dictionary_wrong = 32,
ZSTD_error_dictionaryCreation_failed = 34,
ZSTD_error_parameter_unsupported = 40,
+ ZSTD_error_parameter_combination_unsupported = 41,
ZSTD_error_parameter_outOfBound = 42,
ZSTD_error_tableLog_tooLarge = 44,
ZSTD_error_maxSymbolValue_tooLarge = 46,
ZSTD_error_maxSymbolValue_tooSmall = 48,
+ ZSTD_error_cannotProduce_uncompressedBlock = 49,
+ ZSTD_error_stabilityCondition_notRespected = 50,
ZSTD_error_stage_wrong = 60,
ZSTD_error_init_missing = 62,
ZSTD_error_memory_allocation = 64,
@@ -58,18 +68,18 @@ typedef enum {
ZSTD_error_dstSize_tooSmall = 70,
ZSTD_error_srcSize_wrong = 72,
ZSTD_error_dstBuffer_null = 74,
+ ZSTD_error_noForwardProgress_destFull = 80,
+ ZSTD_error_noForwardProgress_inputEmpty = 82,
/* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
ZSTD_error_frameIndex_tooLarge = 100,
ZSTD_error_seekableIO = 102,
ZSTD_error_dstBuffer_wrong = 104,
ZSTD_error_srcBuffer_wrong = 105,
+ ZSTD_error_sequenceProducer_failed = 106,
+ ZSTD_error_externalSequences_invalid = 107,
ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;
-/*! ZSTD_getErrorCode() :
- convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
- which can be used to compare with enum list published above */
-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
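The relocated ZSTD_getErrorCode() (now declared in zstd_lib.h, see the next diff) pairs with ZSTD_getErrorString() as in this small sketch; the demo_report() name is illustrative:

#include <linux/zstd.h>         /* zstd_is_error(), ZSTD_getErrorCode() */
#include <linux/zstd_errors.h>  /* ZSTD_ErrorCode, ZSTD_getErrorString() */
#include <linux/printk.h>

/* Turn a size_t result from a zstd call into a readable diagnostic. */
static void demo_report(size_t result)
{
        if (zstd_is_error(result)) {
                ZSTD_ErrorCode code = ZSTD_getErrorCode(result);

                pr_err("zstd failed: %s (code %d)\n",
                       ZSTD_getErrorString(code), (int)code);
        }
}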
diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
index 79d55465d5c1..e295d4125dde 100644
--- a/include/linux/zstd_lib.h
+++ b/include/linux/zstd_lib.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
@@ -11,23 +12,47 @@
#ifndef ZSTD_H_235446
#define ZSTD_H_235446
-/* ====== Dependency ======*/
-#include <linux/limits.h> /* INT_MAX */
+
+/* ====== Dependencies ======*/
#include <linux/types.h> /* size_t */
+#include <linux/zstd_errors.h> /* list of errors */
+#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#include <linux/limits.h> /* INT_MAX */
+#endif /* ZSTD_STATIC_LINKING_ONLY */
+
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
-#ifndef ZSTDLIB_VISIBLE
+#define ZSTDLIB_VISIBLE
+
+#ifndef ZSTDLIB_HIDDEN
# if (__GNUC__ >= 4) && !defined(__MINGW32__)
-# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
# else
-# define ZSTDLIB_VISIBLE
# define ZSTDLIB_HIDDEN
# endif
#endif
+
#define ZSTDLIB_API ZSTDLIB_VISIBLE
+/* Deprecation warnings :
+ * Should these warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
+ * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
+ */
+#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
+# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__)
+# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif (__GNUC__ >= 3)
+# define ZSTD_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
+# define ZSTD_DEPRECATED(message)
+# endif
+#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
+
/* *****************************************************************************
Introduction
@@ -65,7 +90,7 @@
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 5
-#define ZSTD_VERSION_RELEASE 2
+#define ZSTD_VERSION_RELEASE 7
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
@@ -103,11 +128,12 @@ ZSTDLIB_API const char* ZSTD_versionString(void);
/* *************************************
-* Simple API
+* Simple Core API
***************************************/
/*! ZSTD_compress() :
* Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ * enough space to successfully compress the data.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
@@ -115,47 +141,55 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
int compressionLevel);
/*! ZSTD_decompress() :
- * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
- * `dstCapacity` is an upper bound of originalSize to regenerate.
- * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
- * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ * Multiple compressed frames can be decompressed at once with this method.
+ * The result will be the concatenation of all decompressed frames, back to back.
+ * `dstCapacity` is an upper bound of originalSize to regenerate.
+ * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize().
+ * If maximum upper bound isn't known, prefer using streaming mode to decompress data.
+ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
+
+/*====== Decompression helper functions ======*/
+
/*! ZSTD_getFrameContentSize() : requires v1.3.0+
- * `src` should point to the start of a ZSTD encoded frame.
- * `srcSize` must be at least as large as the frame header.
- * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
- * @return : - decompressed size of `src` frame content, if known
- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
- * note 1 : a 0 return value means the frame is valid but "empty".
- * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
- * In which case, it's necessary to use streaming mode to decompress data.
- * Optionally, application can rely on some implicit limit,
- * as ZSTD_decompress() only needs an upper bound of decompressed size.
- * (For example, data could be necessarily cut into blocks <= 16 KB).
- * note 3 : decompressed size is always present when compression is completed using single-pass functions,
- * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
- * note 4 : decompressed size can be very large (64-bits value),
- * potentially larger than what local system can handle as a single memory segment.
- * In which case, it's necessary to use streaming mode to decompress data.
- * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
- * Always ensure return value fits within application's authorized limits.
- * Each application can set its own limits.
- * note 6 : This function replaces ZSTD_getDecompressedSize() */
+ * `src` should point to the start of a ZSTD encoded frame.
+ * `srcSize` must be at least as large as the frame header.
+ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ * @return : - decompressed size of `src` frame content, if known
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ * note 1 : a 0 return value means the frame is valid but "empty".
+ * When invoking this method on a skippable frame, it will return 0.
+ * note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode).
+ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * Optionally, application can rely on some implicit limit,
+ * as ZSTD_decompress() only needs an upper bound of decompressed size.
+ * (For example, data could be necessarily cut into blocks <= 16 KB).
+ * note 3 : decompressed size is always present when compression is completed using single-pass functions,
+ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
+ * note 4 : decompressed size can be very large (64-bits value),
+ * potentially larger than what local system can handle as a single memory segment.
+ * In which case, it's necessary to use streaming mode to decompress data.
+ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ * Always ensure return value fits within application's authorized limits.
+ * Each application can set its own limits.
+ * note 6 : This function replaces ZSTD_getDecompressedSize() */
#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
-/*! ZSTD_getDecompressedSize() :
- * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+/*! ZSTD_getDecompressedSize() (obsolete):
+ * This function is now obsolete, in favor of ZSTD_getFrameContentSize().
* Both functions work the same way, but ZSTD_getDecompressedSize() blends
* "empty", "unknown" and "error" results to the same return value (0),
* while ZSTD_getFrameContentSize() gives them separate return values.
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
+ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
@@ -163,18 +197,50 @@ ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t
* `srcSize` must be >= first frame size
* @return : the compressed size of the first frame starting at `src`,
* suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
- * or an error code if input is invalid */
+ * or an error code if input is invalid
+ * Note 1: this method is called _find*() because it's not enough to read the header,
+ * it may have to scan through the frame's content, to reach its end.
+ * Note 2: this method also works with Skippable Frames. In which case,
+ * it returns the size of the complete skippable frame,
+ * which is always equal to its content size + 8 bytes for headers. */
ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
-/*====== Helper functions ======*/
-#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
-ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
-ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
-ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
-ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
+/*====== Compression helper functions ======*/
+
+/*! ZSTD_compressBound() :
+ * maximum compressed size in worst case single-pass scenario.
+ * When invoking `ZSTD_compress()`, or any other one-pass compression function,
+ * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
+ * as it eliminates one potential failure scenario,
+ * aka not enough room in dst buffer to write the compressed frame.
+ * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE .
+ * In which case, ZSTD_compressBound() will return an error code
+ * which can be tested using ZSTD_isError().
+ *
+ * ZSTD_COMPRESSBOUND() :
+ * same as ZSTD_compressBound(), but as a macro.
+ * It can be used to produce constants, which can be useful for static allocation,
+ * for example to size a static array on stack.
+ * Will produce constant value 0 if srcSize is too large.
+ */
+#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U)
+#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+
+
+/*====== Error helper functions ======*/
+/* ZSTD_isError() :
+ * Most ZSTD_* functions returning a size_t value can be tested for error,
+ * using ZSTD_isError().
+ * @return 1 if error, 0 otherwise
+ */
+ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */
+ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
+ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */
+ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */
/* *************************************
@@ -182,25 +248,25 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres
***************************************/
/*= Compression context
* When compressing many times,
- * it is recommended to allocate a context just once,
- * and re-use it for each successive compression operation.
- * This will make workload friendlier for system's memory.
+ * it is recommended to allocate a compression context just once,
+ * and reuse it for each successive compression operation.
+ * This will make the workload easier for system's memory.
* Note : re-using context is just a speed / resource optimization.
* It doesn't change the compression ratio, which remains identical.
- * Note 2 : In multi-threaded environments,
- * use one different context per thread for parallel execution.
+ * Note 2: For parallel execution in multi-threaded environments,
+ * use one different context per thread.
*/
typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
-ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */
+ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */
/*! ZSTD_compressCCtx() :
* Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
- * Important : in order to behave similarly to `ZSTD_compress()`,
- * this function compresses at requested compression level,
- * __ignoring any other parameter__ .
+ * Important : in order to mirror `ZSTD_compress()` behavior,
+ * this function compresses at the requested compression level,
+ * __ignoring any other advanced parameter__ .
* If any advanced parameter was set using the advanced API,
- * they will all be reset. Only `compressionLevel` remains.
+ * they will all be reset. Only @compressionLevel remains.
*/
ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
@@ -210,7 +276,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
/*= Decompression context
* When decompressing many times,
* it is recommended to allocate a context only once,
- * and re-use it for each successive compression operation.
+ * and reuse it for each successive decompression operation.
* This will make workload friendlier for system's memory.
* Use one context per thread for parallel execution. */
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
@@ -220,7 +286,7 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer *
/*! ZSTD_decompressDCtx() :
* Same as ZSTD_decompress(),
* requires an allocated ZSTD_DCtx.
- * Compatible with sticky parameters.
+ * Compatible with sticky parameters (see below).
*/
ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
@@ -236,12 +302,12 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
* using ZSTD_CCtx_set*() functions.
* Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
* "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
- * __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
+ * __They do not apply to one-shot variants such as ZSTD_compressCCtx()__ .
*
* It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
*
* This API supersedes all other "advanced" API entry points in the experimental section.
- * In the future, we expect to remove from experimental API entry points which are redundant with this API.
+ * In the future, we expect to remove API entry points from experimental which are redundant with this API.
*/
@@ -324,6 +390,19 @@ typedef enum {
* The higher the value of selected strategy, the more complex it is,
* resulting in stronger and slower compression.
* Special: value 0 means "use default strategy". */
+
+ ZSTD_c_targetCBlockSize=130, /* v1.5.6+
+ * Attempts to fit compressed block size into approximately targetCBlockSize.
+ * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX.
+ * Note that it's not a guarantee, just a convergence target (default:0).
+ * No target when targetCBlockSize == 0.
+ * This is helpful in low bandwidth streaming environments to improve end-to-end latency,
+ * when a client can make use of partial documents (a prominent example being Chrome).
+ * Note: this parameter is stable since v1.5.6.
+ * It was present as an experimental parameter in earlier versions,
+ * but it's not recommended using it with earlier library versions
+ * due to massive performance regressions.
+ */
/* LDM mode parameters */
ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
* This parameter is designed to improve compression ratio
@@ -403,15 +482,18 @@ typedef enum {
* ZSTD_c_forceMaxWindow
* ZSTD_c_forceAttachDict
* ZSTD_c_literalCompressionMode
- * ZSTD_c_targetCBlockSize
* ZSTD_c_srcSizeHint
* ZSTD_c_enableDedicatedDictSearch
* ZSTD_c_stableInBuffer
* ZSTD_c_stableOutBuffer
* ZSTD_c_blockDelimiters
* ZSTD_c_validateSequences
- * ZSTD_c_useBlockSplitter
+ * ZSTD_c_blockSplitterLevel
+ * ZSTD_c_splitAfterSequences
* ZSTD_c_useRowMatchFinder
+ * ZSTD_c_prefetchCDictTables
+ * ZSTD_c_enableSeqProducerFallback
+ * ZSTD_c_maxBlockSize
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly;
* also, the enums values themselves are unstable and can still change.
@@ -421,7 +503,7 @@ typedef enum {
ZSTD_c_experimentalParam3=1000,
ZSTD_c_experimentalParam4=1001,
ZSTD_c_experimentalParam5=1002,
- ZSTD_c_experimentalParam6=1003,
+ /* was ZSTD_c_experimentalParam6=1003; is now ZSTD_c_targetCBlockSize */
ZSTD_c_experimentalParam7=1004,
ZSTD_c_experimentalParam8=1005,
ZSTD_c_experimentalParam9=1006,
@@ -430,7 +512,12 @@ typedef enum {
ZSTD_c_experimentalParam12=1009,
ZSTD_c_experimentalParam13=1010,
ZSTD_c_experimentalParam14=1011,
- ZSTD_c_experimentalParam15=1012
+ ZSTD_c_experimentalParam15=1012,
+ ZSTD_c_experimentalParam16=1013,
+ ZSTD_c_experimentalParam17=1014,
+ ZSTD_c_experimentalParam18=1015,
+ ZSTD_c_experimentalParam19=1016,
+ ZSTD_c_experimentalParam20=1017
} ZSTD_cParameter;
typedef struct {
@@ -493,7 +580,7 @@ typedef enum {
* They will be used to compress next frame.
* Resetting session never fails.
* - The parameters : changes all parameters back to "default".
- * This removes any reference to any dictionary too.
+ * This also removes any reference to any dictionary or external sequence producer.
* Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
* otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
* - Both : similar to resetting the session, followed by resetting parameters.
@@ -502,11 +589,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
/*! ZSTD_compress2() :
* Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
+ * (note that this entry point doesn't even expose a compression level parameter).
* ZSTD_compress2() always starts a new frame.
* Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - The function is always blocking, returns when compression is completed.
- * Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ * NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
+ * enough space to successfully compress the data, though it is possible it fails for other reasons.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()).
*/
@@ -543,13 +632,17 @@ typedef enum {
* ZSTD_d_stableOutBuffer
* ZSTD_d_forceIgnoreChecksum
* ZSTD_d_refMultipleDDicts
+ * ZSTD_d_disableHuffmanAssembly
+ * ZSTD_d_maxBlockSize
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
* note : never ever use experimentalParam? names directly
*/
ZSTD_d_experimentalParam1=1000,
ZSTD_d_experimentalParam2=1001,
ZSTD_d_experimentalParam3=1002,
- ZSTD_d_experimentalParam4=1003
+ ZSTD_d_experimentalParam4=1003,
+ ZSTD_d_experimentalParam5=1004,
+ ZSTD_d_experimentalParam6=1005
} ZSTD_dParameter;
@@ -604,14 +697,14 @@ typedef struct ZSTD_outBuffer_s {
* A ZSTD_CStream object is required to track streaming operation.
* Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
* ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
-* It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
+* It is recommended to reuse ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
*
* For parallel execution, use one separate ZSTD_CStream per thread.
*
* note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
*
* Parameters are sticky : when starting a new compression on the same context,
-* it will re-use the same sticky parameters as previous compression session.
+* it will reuse the same sticky parameters as previous compression session.
* When in doubt, it's recommended to fully initialize the context before usage.
* Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
* ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
@@ -700,6 +793,11 @@ typedef enum {
* only ZSTD_e_end or ZSTD_e_flush operations are allowed.
* Before starting a new compression job, or changing compression parameters,
* it is required to fully flush internal buffers.
+ * - note: if an operation ends with an error, it may leave @cctx in an undefined state.
+ * Therefore, it's UB to invoke ZSTD_compressStream2() or ZSTD_compressStream() on such a state.
+ * In order to be re-employed after an error, a state must be reset,
+ * which can be done explicitly (ZSTD_CCtx_reset()),
+ * or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())
*/
ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
ZSTD_outBuffer* output,
@@ -728,8 +826,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
* This following is a legacy streaming API, available since v1.0+ .
* It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
* It is redundant, but remains fully supported.
- * Streaming in combination with advanced parameters and dictionary compression
- * can only be used through the new API.
******************************************************************************/
/*!
@@ -738,6 +834,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
+ *
+ * Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
+ * to compress with a dictionary.
*/
ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
/*!
@@ -758,7 +857,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
*
* A ZSTD_DStream object is required to track streaming operations.
* Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
-* ZSTD_DStream objects can be re-used multiple times.
+* ZSTD_DStream objects can be re-employed multiple times.
*
* Use ZSTD_initDStream() to start a new decompression operation.
* @return : recommended first input size
@@ -768,16 +867,21 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
* The function will update both `pos` fields.
* If `input.pos < input.size`, some input has not been consumed.
* It's up to the caller to present again remaining data.
+*
* The function tries to flush all data decoded immediately, respecting output buffer size.
* If `output.pos < output.size`, decoder has flushed everything it could.
-* But if `output.pos == output.size`, there might be some data left within internal buffers.,
+*
+* However, when `output.pos == output.size`, it's more difficult to know.
+* If @return > 0, the frame is not complete, meaning
+* either there is still some data left to flush within internal buffers,
+* or there is more input to read to complete the frame (or both).
* In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
* Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
* @return : 0 when a frame is completely decoded and fully flushed,
* or an error code, which can be tested using ZSTD_isError(),
* or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
* the return value is a suggested next input size (just a hint for better latency)
-* that will never request more than the remaining frame size.
+* that will never request more than the remaining content of the compressed frame.
* *******************************************************************************/
typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */
@@ -788,13 +892,38 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer
/*===== Streaming decompression functions =====*/
-/* This function is redundant with the advanced API and equivalent to:
+/*! ZSTD_initDStream() :
+ * Initialize/reset DStream state for new decompression operation.
+ * Call before new decompression operation using same DStream.
*
+ * Note : This function is redundant with the advanced API and equivalent to:
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
* ZSTD_DCtx_refDDict(zds, NULL);
*/
ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+/*! ZSTD_decompressStream() :
+ * Streaming decompression function.
+ * Call repetitively to consume full input updating it as necessary.
+ * Function will update both input and output `pos` fields exposing current state via these fields:
+ * - `input.pos < input.size`, some input remaining and caller should provide remaining input
+ * on the next call.
+ * - `output.pos < output.size`, decoder flushed internal output buffer.
+ * - `output.pos == output.size`, unflushed data potentially present in the internal buffers,
+ * check ZSTD_decompressStream() @return value,
+ * if > 0, invoke it again to flush remaining data to output.
+ * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
+ *
+ * @return : 0 when a frame is completely decoded and fully flushed,
+ * or an error code, which can be tested using ZSTD_isError(),
+ * or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
+ *
+ * Note: when an operation returns with an error code, the @zds state may be left in undefined state.
+ * It's UB to invoke `ZSTD_decompressStream()` on such a state.
+ * In order to re-use such a state, it must first be reset,
+ * which can be done explicitly (`ZSTD_DCtx_reset()`),
+ * or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)
+ */
ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
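A minimal loop honoring the contract spelled out above might look like the following sketch. The demo_decompress() name, the single pre-sized output buffer, and the early exits are assumptions; a real caller would drain the output buffer and present more input instead of breaking:

static size_t demo_decompress(ZSTD_DStream *zds,
                              const void *src, size_t src_size,
                              void *dst, size_t dst_capacity)
{
        ZSTD_inBuffer input = { src, src_size, 0 };
        ZSTD_outBuffer output = { dst, dst_capacity, 0 };
        size_t ret = ZSTD_initDStream(zds);

        while (!ZSTD_isError(ret)) {
                ret = ZSTD_decompressStream(zds, &output, &input);
                if (ret == 0)
                        break;  /* frame fully decoded and flushed */
                if (ZSTD_isError(ret))
                        break;  /* state undefined: reset before reuse */
                if (output.pos == output.size)
                        break;  /* output full; real code drains dst here */
                if (input.pos == input.size)
                        break;  /* needs more input than this sketch has */
        }

        return ret;     /* 0, a size hint, or an error (ZSTD_isError()) */
}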
@@ -913,7 +1042,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
* If @return == 0, the dictID could not be decoded.
* This could for one of the following reasons :
* - The frame does not require a dictionary to be decoded (most common case).
- * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
+ * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.
* Note : this use case also happens when using a non-conformant dictionary.
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
* - This is not a Zstandard frame.
@@ -925,9 +1054,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Advanced dictionary and prefix API (Requires v1.4.0+)
*
* This API allows dictionaries to be used with ZSTD_compress2(),
- * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
- * only reset with the context is reset with ZSTD_reset_parameters or
- * ZSTD_reset_session_and_parameters. Prefixes are single-use.
+ * ZSTD_compressStream2(), and ZSTD_decompressDCtx().
+ * Dictionaries are sticky, they remain valid when same context is reused,
+ * they only reset when the context is reset
+ * with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
+ * In contrast, Prefixes are single-use.
******************************************************************************/
@@ -937,8 +1068,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
* meaning "return to no-dictionary mode".
- * Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
- * To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
+ * Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
+ * until parameters are reset, a new dictionary is loaded, or the dictionary
+ * is explicitly invalidated by loading a NULL dictionary.
* Note 2 : Loading a dictionary involves building tables.
* It's also a CPU consuming operation, with non-negligible impact on latency.
* Tables are dependent on compression parameters, and for this reason,
@@ -947,11 +1079,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
* In such a case, dictionary buffer must outlive its users.
* Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
- * to precisely select how dictionary content must be interpreted. */
+ * to precisely select how dictionary content must be interpreted.
+ * Note 5 : This method does not benefit from LDM (long distance mode).
+ * If you want to employ LDM on some large dictionary content,
+ * prefer employing ZSTD_CCtx_refPrefix() described below.
+ */
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
- * Reference a prepared dictionary, to be used for all next compressed frames.
+ * Reference a prepared dictionary, to be used for all future compressed frames.
* Note that compression parameters are enforced from within CDict,
* and supersede any compression parameter previously set within CCtx.
* The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
@@ -970,6 +1106,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Decompression will need same prefix to properly regenerate data.
* Compressing with a prefix is similar in outcome as performing a diff and compressing it,
* but performs much faster, especially during decompression (compression speed is tunable with compression level).
+ * This method is compatible with LDM (long distance mode).
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
* Note 1 : Prefix buffer is referenced. It **must** outlive compression.
@@ -986,9 +1123,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
const void* prefix, size_t prefixSize);
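
As a sketch of the delta-compression use case described above (an illustration, not part of the patch): reference the previous version of a document as prefix, then compress the new version. Since prefixes are single-use, ZSTD_CCtx_refPrefix() must be called again before every frame; the same prefix must be supplied on the decompression side.

#include <zstd.h>

/* One delta frame: `prev` acts as shared history for compressing `next`.
 * `prev` must outlive the compression (the prefix buffer is referenced, not copied). */
static size_t compress_delta(ZSTD_CCtx* cctx,
                             void* dst, size_t dstCapacity,
                             const void* prev, size_t prevSize,
                             const void* next, size_t nextSize)
{
    size_t const r = ZSTD_CCtx_refPrefix(cctx, prev, prevSize); /* single-use */
    if (ZSTD_isError(r)) return r;
    return ZSTD_compress2(cctx, dst, dstCapacity, next, nextSize);
}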
/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
- * Create an internal DDict from dict buffer,
- * to be used to decompress next frames.
- * The dictionary remains valid for all future frames, until explicitly invalidated.
+ * Create an internal DDict from dict buffer, to be used to decompress all future frames.
+ * The dictionary remains valid for all future frames,
+ * until explicitly invalidated or a new dictionary is loaded.
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
* meaning "return to no-dictionary mode".
@@ -1012,9 +1149,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s
* The memory for the table is allocated on the first call to refDDict, and can be
* freed with ZSTD_freeDCtx().
*
+ * If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary
+ * will be managed, and referencing a new dictionary effectively "discards" any previous one.
+ *
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
- * Note 1 : Currently, only one dictionary can be managed.
- * Referencing a new dictionary effectively "discards" any previous one.
* Special: referencing a NULL DDict means "return to no-dictionary mode".
* Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
*/
@@ -1051,6 +1189,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
#endif /* ZSTD_H_235446 */
@@ -1066,29 +1205,12 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
/* This can be overridden externally to hide static symbols. */
#ifndef ZSTDLIB_STATIC_API
#define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
#endif
-/* Deprecation warnings :
- * Should these warnings be a problem, it is generally possible to disable them,
- * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
- * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
- */
-#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
-# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */
-#else
-# if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
-# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message)))
-# elif (__GNUC__ >= 3)
-# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated))
-# else
-# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
-# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API
-# endif
-#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
-
/* **************************************************************************************
* experimental API (static linking only)
****************************************************************************************
@@ -1123,6 +1245,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
#define ZSTD_STRATEGY_MIN ZSTD_fast
#define ZSTD_STRATEGY_MAX ZSTD_btultra2
+#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */
#define ZSTD_OVERLAPLOG_MIN 0
@@ -1146,7 +1269,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
/* Advanced parameter bounds */
-#define ZSTD_TARGETCBLOCKSIZE_MIN 64
+#define ZSTD_TARGETCBLOCKSIZE_MIN 1340 /* suitable to fit into an Ethernet / WiFi / 4G transport frame */
#define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX
#define ZSTD_SRCSIZEHINT_MIN 0
#define ZSTD_SRCSIZEHINT_MAX INT_MAX
@@ -1188,7 +1311,7 @@ typedef struct {
*
* Note: This field is optional. ZSTD_generateSequences() will calculate the value of
* 'rep', but repeat offsets do not necessarily need to be calculated from an external
- * sequence provider's perspective. For example, ZSTD_compressSequences() does not
+ * sequence provider perspective. For example, ZSTD_compressSequences() does not
* use this 'rep' field at all (as of now).
*/
} ZSTD_Sequence;
@@ -1293,17 +1416,18 @@ typedef enum {
} ZSTD_literalCompressionMode_e;
typedef enum {
- /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
- * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
- * or ZSTD_ps_disable allow for a force enable/disable the feature.
+ /* Note: This enum controls features which are conditionally beneficial.
+ * Zstd can decide on its own whether or not to enable the feature (ZSTD_ps_auto),
+ * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable forces the feature on or off.
*/
ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
ZSTD_ps_enable = 1, /* Force-enable the feature */
ZSTD_ps_disable = 2 /* Do not use the feature */
-} ZSTD_paramSwitch_e;
+} ZSTD_ParamSwitch_e;
+#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */
/* *************************************
-* Frame size functions
+* Frame header and size functions
***************************************/
/*! ZSTD_findDecompressedSize() :
@@ -1345,34 +1469,130 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src,
ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
/*! ZSTD_frameHeaderSize() :
- * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
+ * srcSize must be large enough, i.e. >= ZSTD_FRAMEHEADERSIZE_PREFIX.
* @return : size of the Frame Header,
* or an error code (if srcSize is too small) */
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e;
+#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */
+typedef struct {
+ unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+ unsigned long long windowSize; /* can be very large, up to frameContentSize */
+ unsigned blockSizeMax;
+ ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+ unsigned headerSize;
+ unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */
+ unsigned checksumFlag;
+ unsigned _reserved1;
+ unsigned _reserved2;
+} ZSTD_FrameHeader;
+#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */
+
+/*! ZSTD_getFrameHeader() :
+ * decode Frame Header into `zfhPtr`, or signal that a larger `srcSize` is required.
+ * @return : 0 => header is complete, `zfhPtr` is correctly filled,
+ * >0 => `srcSize` is too small, the return value is the minimum `srcSize` required, `zfhPtr` is not filled,
+ * or an error code, which can be tested using ZSTD_isError() */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize);
+/*! ZSTD_getFrameHeader_advanced() :
+ * same as ZSTD_getFrameHeader(),
+ * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
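
A minimal sketch (not part of the patch) of the incremental pattern ZSTD_getFrameHeader() supports: when it returns a positive value, that is the total number of header bytes to accumulate before retrying. The `fill()` routine is a hypothetical caller-supplied input function, and `(size_t)-1` is a sketch convention for local failures.

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_getFrameHeader() belongs to the static-only API */
#include <zstd.h>

extern size_t fill(void* buf, size_t have, size_t want);  /* hypothetical: appends input, returns bytes buffered */

/* Sketch: accumulate just enough input to decode a frame header. */
static size_t read_header(ZSTD_FrameHeader* fh, void* buf, size_t bufSize)
{
    size_t have = 0;
    for (;;) {
        size_t const r = ZSTD_getFrameHeader(fh, buf, have);
        if (ZSTD_isError(r)) return r;      /* malformed header */
        if (r == 0) return 0;               /* success: *fh is filled */
        if (r > bufSize) return (size_t)-1; /* sketch convention: buffer too small */
        have = fill(buf, have, r);          /* accumulate `r` bytes total, then retry */
        if (have < r) return (size_t)-1;    /* truncated input */
    }
}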
+
+/*! ZSTD_decompressionMargin() :
+ * Zstd supports in-place decompression, where the input and output buffers overlap.
+ * In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
+ * and the input buffer must be at the end of the output buffer.
+ *
+ * _______________________ Output Buffer ________________________
+ * | |
+ * | ____ Input Buffer ____|
+ * | | |
+ * v v v
+ * |---------------------------------------|-----------|----------|
+ * ^ ^ ^
+ * |___________________ Output_Size ___________________|_ Margin _|
+ *
+ * NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
+ * NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
+ * ZSTD_decompressDCtx().
+ * NOTE: This function supports multi-frame input.
+ *
+ * @param src The compressed frame(s)
+ * @param srcSize The size of the compressed frame(s)
+ * @returns The decompression margin or an error that can be checked with ZSTD_isError().
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
+
+/*! ZSTD_DECOMPRESSION_MARGIN() :
+ * Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
+ * the compressed frame, compute it from the original size and the blockSizeLog.
+ * See ZSTD_decompressionMargin() for details.
+ *
+ * WARNING: This macro does not support multi-frame input, the input must be a single
+ * zstd frame. If you need that support use the function, or implement it yourself.
+ *
+ * @param originalSize The original uncompressed size of the data.
+ * @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
+ * Unless you explicitly set the windowLog smaller than
+ * ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.
+ */
+#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \
+ ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \
+ 4 /* checksum */ + \
+ ((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / (blockSize))) /* 3 bytes per block */ + \
+ (blockSize) /* One block of margin */ \
+ ))
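
A sketch of in-place decompression following the buffer layout pictured above (an illustration, not part of the patch): the compressed bytes are copied to the tail of the buffer that also receives the output. It assumes `originalSize` is known to the caller; malloc stands in for whatever allocation the embedder actually uses, and `(size_t)-1` is a sketch convention for allocation failure.

#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

/* Sketch: decompress `src` in place, per the margin rule above. */
static size_t decompress_in_place(const void* src, size_t srcSize,
                                  size_t originalSize /* assumed known by caller */)
{
    size_t const margin = ZSTD_decompressionMargin(src, srcSize);
    size_t dstSize, ret;
    char* buf;
    if (ZSTD_isError(margin)) return margin;
    dstSize = originalSize + margin;           /* output buffer >= Margin + Output_Size */
    buf = (char*)malloc(dstSize);
    if (buf == NULL) return (size_t)-1;        /* sketch convention: generic failure */
    memcpy(buf + dstSize - srcSize, src, srcSize); /* input must sit at the end of the output buffer */
    ret = ZSTD_decompress(buf, dstSize, buf + dstSize - srcSize, srcSize);
    /* on success, ret == originalSize and buf[0..ret) holds the decompressed data */
    free(buf);
    return ret;
}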
+
typedef enum {
- ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
- ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
-} ZSTD_sequenceFormat_e;
+ ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */
+ ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */
+} ZSTD_SequenceFormat_e;
+#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */
+
+/*! ZSTD_sequenceBound() :
+ * `srcSize` : size of the input buffer
+ * @return : upper-bound for the number of sequences that can be generated
+ * from a buffer of srcSize bytes
+ *
+ * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
/*! ZSTD_generateSequences() :
- * Generate sequences using ZSTD_compress2, given a source buffer.
+ * WARNING: This function is meant for debugging and informational purposes ONLY!
+ * Its implementation is flawed, and it will be deleted in a future version.
+ * It is not guaranteed to succeed, as there are several cases where it will give
+ * up and fail. You should NOT use this function in production code.
+ *
+ * Generate sequences using ZSTD_compress2(), given a source buffer.
+ *
+ * @param zc The compression context to be used for ZSTD_compress2(). Set any
+ * compression parameters you need on this context.
+ * @param outSeqs The output sequences buffer, of capacity @p outSeqsCapacity
+ * @param outSeqsCapacity The size of the output sequences buffer.
+ * ZSTD_sequenceBound(srcSize) is an upper bound on the number
+ * of sequences that can be generated.
+ * @param src The source buffer to generate sequences from, of size @p srcSize.
+ * @param srcSize The size of the source buffer.
*
* Each block will end with a dummy sequence
* with offset == 0, matchLength == 0, and litLength == length of last literals.
* litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
* simply acts as a block delimiter.
*
- * zc can be used to insert custom compression params.
- * This function invokes ZSTD_compress2
- *
- * The output of this function can be fed into ZSTD_compressSequences() with CCtx
- * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
- * @return : number of sequences generated
+ * @returns The number of sequences generated, necessarily no greater than
+ * ZSTD_sequenceBound(srcSize), or an error code that can be checked
+ * with ZSTD_isError().
*/
-
-ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
- size_t outSeqsSize, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()")
+ZSTDLIB_STATIC_API size_t
+ZSTD_generateSequences(ZSTD_CCtx* zc,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize);
/*! ZSTD_mergeBlockDelimiters() :
* Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
@@ -1388,8 +1608,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o
ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
/*! ZSTD_compressSequences() :
- * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
- * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
+ * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
+ * @src contains the entire input (not just the literals).
+ * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals.
+ * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.).
* The entire source is compressed into a single frame.
*
* The compression behavior changes based on cctx params. In particular:
@@ -1398,11 +1620,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* the block size derived from the cctx, and sequences may be split. This is the default setting.
*
* If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
- * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
+ *
+ * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide whether to generate
+ * repcodes using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though
+ * the benefit can vary greatly depending on the Sequences. On the other hand, repcode resolution is an expensive
+ * operation. By default, it's disabled at low (<10) compression levels, and enabled at or above the threshold (>=10).
+ * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction.
*
- * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
- * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for
- * specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
+ * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined
+ * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for
+ * specifics regarding offset/matchlength requirements) and bail out, returning an error.
*
* In addition to the two adjustable experimental params, there are other important cctx params.
* - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
@@ -1410,14 +1638,42 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
* - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
* is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
*
- * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
- * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
- * and cannot emit an RLE block that disagrees with the repcode history
- * @return : final compressed size or a ZSTD error.
- */
-ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
- const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
- const void* src, size_t srcSize);
+ * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence.rep is effectively unused.
+ * Dev Note: Once the ability to ingest repcodes becomes available, the explicit block delims mode must respect those repcodes exactly,
+ * and cannot emit an RLE block that disagrees with the repcode history.
+ * @return : final compressed size, or a ZSTD error code.
+ */
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequences(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize);
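
To make the trailing-literals rule above concrete, here is a deliberately trivial sketch (an illustration, not part of the patch): with ZSTD_sf_noBlockDelimiters, any bytes not covered by sequences are treated as literals, so passing zero sequences should emit the whole input as literals. That an empty sequence array is accepted is an assumption based on the rule stated above; a real caller would fill `inSeqs` from its own match finder.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: emit `src` as a valid zstd frame made purely of literals. */
static size_t compress_all_literals(ZSTD_CCtx* cctx,
                                    void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    ZSTD_Sequence noSeqs[1];  /* valid pointer, zero entries used */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_noBlockDelimiters);
    return ZSTD_compressSequences(cctx, dst, dstCapacity,
                                  noSeqs, 0,   /* no sequences: all srcSize bytes become literals */
                                  src, srcSize);
}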
+
+
+/*! ZSTD_compressSequencesAndLiterals() :
+ * This is a variant of ZSTD_compressSequences() which,
+ * instead of receiving (src,srcSize) as input parameters, receives (literals,litSize),
+ * aka all the literals, already extracted and laid out into a single continuous buffer.
+ * This can be useful if the process generating the sequences also happens to generate the buffer of literals,
+ * thus skipping an extraction + caching stage.
+ * It's a speed optimization, useful when the right conditions are met,
+ * but it also features the following limitations:
+ * - Only supports explicit delimiter mode
+ * - Currently does not support Sequence validation (so input Sequences are trusted)
+ * - Not compatible with frame checksum, which must be disabled
+ * - If any block is incompressible, will fail and return an error
+ * - @litSize must be == the sum of all .litLength fields in @inSeqs. Any discrepancy will generate an error.
+ * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals.
+ * @litBufCapacity must be at least 8 bytes larger than @litSize.
+ * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error.
+ * @return : final compressed size, or a ZSTD error code.
+ */
+ZSTDLIB_STATIC_API size_t
+ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t nbSequences,
+ const void* literals, size_t litSize, size_t litBufCapacity,
+ size_t decompressedSize);
/*! ZSTD_writeSkippableFrame() :
@@ -1425,8 +1681,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* ds
*
* Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
* ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
- * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
- * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
+ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used,
+ * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
*
* Returns an error if destination buffer is not large enough, if the source size is not representable
* with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
@@ -1434,26 +1690,28 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* ds
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
- const void* src, size_t srcSize, unsigned magicVariant);
+ const void* src, size_t srcSize,
+ unsigned magicVariant);
/*! ZSTD_readSkippableFrame() :
- * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
+ * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer.
*
- * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
- * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
- * in the magicVariant.
+ * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START.
+ * This can be NULL if the caller is not interested in the magicVariant.
*
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
*
* @return : number of bytes written or a ZSTD error.
*/
-ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
- const void* src, size_t srcSize);
+ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity,
+ unsigned* magicVariant,
+ const void* src, size_t srcSize);
/*! ZSTD_isSkippableFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
*/
-ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
+ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
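
A small round-trip sketch for the three skippable-frame helpers above (an illustration, not part of the patch), e.g. to embed a metadata blob alongside compressed payloads. The `meta` content and the magic variant 7 are illustrative.

#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    const char meta[] = "app-metadata-v1";   /* illustrative payload */
    char frame[64];
    char roundtrip[sizeof(meta)];
    unsigned variant = 0;
    size_t const written = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                    meta, sizeof(meta),
                                                    7 /* magicVariant in [0,15] */);
    if (ZSTD_isError(written)) return 1;
    if (!ZSTD_isSkippableFrame(frame, written)) return 1;
    {   size_t const got = ZSTD_readSkippableFrame(roundtrip, sizeof(roundtrip),
                                                   &variant, frame, written);
        if (ZSTD_isError(got) || variant != 7) return 1;
        printf("read back %zu bytes, magic variant %u\n", got, variant);
    }
    return 0;
}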
@@ -1464,48 +1722,59 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
/*! ZSTD_estimate*() :
* These functions make it possible to estimate memory usage
* of a future {D,C}Ctx, before its creation.
+ * This is useful in combination with ZSTD_initStatic(),
+ * which makes it possible to employ a static buffer for ZSTD_CCtx* state.
*
* ZSTD_estimateCCtxSize() will provide a memory budget large enough
- * for any compression level up to selected one.
- * Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
- * does not include space for a window buffer.
- * Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
+ * to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()
+ * associated with any compression level up to the specified maximum.
* The estimate will assume the input may be arbitrarily large,
* which is the worst case.
*
+ * Note that the size estimation is specific for one-shot compression,
+ * it is not valid for streaming (see ZSTD_estimateCStreamSize*())
+ * nor other potential ways of using a ZSTD_CCtx* state.
+ *
* When srcSize can be bound by a known and rather "small" value,
- * this fact can be used to provide a tighter estimation
- * because the CCtx compression context will need less memory.
- * This tighter estimation can be provided by more advanced functions
+ * this knowledge can be used to provide a tighter budget estimation
+ * because the ZSTD_CCtx* state will need less memory for small inputs.
+ * This tighter estimation can be provided by employing more advanced functions
* ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
* and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
* Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
*
- * Note 2 : only single-threaded compression is supported.
+ * Note : only single-threaded compression is supported.
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
*/
-ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int maxCompressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
/*! ZSTD_estimateCStreamSize() :
- * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
- * It will also consider src size to be arbitrarily "large", which is worst case.
+ * ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression
+ * using any compression level up to the max specified one.
+ * It will also consider src size to be arbitrarily "large", which is the worst-case scenario.
* If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
* ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
 * ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter().
* Note : CStream size estimation is only correct for single-threaded compression.
- * ZSTD_DStream memory budget depends on window Size.
+ * ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
+ * Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
+ * Size estimates assume that no external sequence producer is registered.
+ *
+ * ZSTD_DStream memory budget depends on frame's window Size.
* This information can be passed manually, using ZSTD_estimateDStreamSize,
 * or deduced from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame().
+ * Any frame requesting a window size larger than the specified maximum will be rejected.
 * Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
 * an internal ?Dict will be created, whose additional size is not estimated here.
- * In this case, get total size by adding ZSTD_estimate?DictSize */
-ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ * In this case, get total size by adding ZSTD_estimate?DictSize
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int maxCompressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
-ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t maxWindowSize);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
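
A sketch of the estimate-then-initStatic pattern these functions enable (an illustration, not part of the patch): size a workspace for one-shot compression up to a chosen max level, then build a ZSTD_CCtx inside it with ZSTD_initStaticCCtx(). Here malloc (which returns suitably aligned memory) stands in for whatever static buffer or arena the embedder actually uses.

#define ZSTD_STATIC_LINKING_ONLY
#include <stdlib.h>
#include <zstd.h>

/* Sketch: returns a statically-placed CCtx, or NULL; *workspaceOut receives
 * the backing buffer, which the caller frees (no ZSTD_freeCCtx() for static ctx). */
static ZSTD_CCtx* make_static_cctx(int maxLevel, void** workspaceOut)
{
    size_t const need = ZSTD_estimateCCtxSize(maxLevel); /* one-shot budget, any input size */
    void* const ws = malloc(need);                       /* stand-in for a static buffer */
    ZSTD_CCtx* const cctx = ws ? ZSTD_initStaticCCtx(ws, need) : NULL;
    if (cctx == NULL) { free(ws); return NULL; }
    *workspaceOut = ws;
    return cctx;
}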
/*! ZSTD_estimate?DictSize() :
@@ -1568,7 +1837,15 @@ typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
static
__attribute__((__unused__))
+
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
+#endif
ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */
+#if defined(__clang__) && __clang_major__ >= 5
+#pragma clang diagnostic pop
+#endif
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
@@ -1649,22 +1926,45 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
* This function never fails (wide contract) */
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+/*! ZSTD_CCtx_setCParams() :
+ * Set all parameters provided within @p cparams into the working @p cctx.
+ * Note : if modifying parameters during compression (MT mode only),
+ * changes to the .windowLog parameter will be ignored.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ * On failure, no parameters are updated.
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
+
+/*! ZSTD_CCtx_setFParams() :
+ * Set all parameters provided within @p fparams into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
+
+/*! ZSTD_CCtx_setParams() :
+ * Set all parameters provided within @p params into the working @p cctx.
+ * @return 0 on success, or an error code (can be checked with ZSTD_isError()).
+ */
+ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
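
A sketch of the new bulk setters (an illustration, not part of the patch): derive cParams with ZSTD_getCParams(), tweak one field, and apply them all at once with ZSTD_CCtx_setCParams() instead of a loop of ZSTD_CCtx_setParameter() calls. The windowLog override of 24 is illustrative.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: level-derived parameters with one manual override. */
static size_t tune_cctx(ZSTD_CCtx* cctx, int level, unsigned long long srcSizeHint)
{
    ZSTD_compressionParameters cparams = ZSTD_getCParams(level, srcSizeHint, 0 /* no dict */);
    cparams.windowLog = 24;                     /* illustrative override */
    return ZSTD_CCtx_setCParams(cctx, cparams); /* 0 on success; on failure nothing is updated */
}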
+
/*! ZSTD_compress_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2")
+ZSTDLIB_STATIC_API
size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
- void* dst, size_t dstCapacity,
- const void* src, size_t srcSize,
- const void* dict,size_t dictSize,
- ZSTD_parameters params);
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ ZSTD_parameters params);
/*! ZSTD_compress_usingCDict_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
+ZSTDLIB_STATIC_API
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
@@ -1725,7 +2025,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* See the comments on that enum for an explanation of the feature. */
#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
-/* Controlled with ZSTD_paramSwitch_e enum.
+/* Controlled with ZSTD_ParamSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never compress literals.
* Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
@@ -1737,11 +2037,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
*/
#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
-/* Tries to fit compressed block size to be around targetCBlockSize.
- * No target when targetCBlockSize == 0.
- * There is no guarantee on compressed block size (default:0) */
-#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
-
/* User's best guess of source size.
* Hint is not valid when srcSizeHint == 0.
* There is no guarantee that hint is close to actual source size,
@@ -1808,13 +2103,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* Experimental parameter.
* Default is 0 == disabled. Set to 1 to enable.
*
- * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
- * between calls, except for the modifications that zstd makes to pos (the
- * caller must not modify pos). This is checked by the compressor, and
- * compression will fail if it ever changes. This means the only flush
- * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
- * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
- * MUST not be modified during compression or you will get data corruption.
+ * Tells the compressor that input data presented with ZSTD_inBuffer
+ * will ALWAYS be the same between calls.
+ * Technically, the @src pointer must never be changed,
+ * and the @pos field can only be updated by zstd.
+ * However, it's possible to increase the @size field,
+ * allowing scenarios where more data can be appended after compression starts.
+ * These conditions are checked by the compressor,
+ * and compression will fail if they are not respected.
+ * Also, data in the ZSTD_inBuffer within the range [src, src + pos)
+ * MUST not be modified during compression or it will result in data corruption.
*
* When this flag is enabled zstd won't allocate an input window buffer,
* because the user guarantees it can reference the ZSTD_inBuffer until
@@ -1822,18 +2120,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
* avoid the memcpy() from the input buffer to the input window buffer.
*
- * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
- * That means this flag cannot be used with ZSTD_compressStream().
- *
* NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
* this flag is ALWAYS memory safe, and will never access out-of-bounds
- * memory. However, compression WILL fail if you violate the preconditions.
+ * memory. However, compression WILL fail if conditions are not respected.
*
- * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST
- * not be modified during compression or you will get data corruption. This
- * is because zstd needs to reference data in the ZSTD_inBuffer to find
+ * WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
+ * not be modified during compression or it will result in data corruption.
+ * This is because zstd needs to reference data in the ZSTD_inBuffer to find
* matches. Normally zstd maintains its own window buffer for this purpose,
- * but passing this flag tells zstd to use the user provided buffer.
+ * but passing this flag tells zstd to rely on the user-provided buffer instead.
*/
#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
@@ -1871,22 +2166,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
/* ZSTD_c_validateSequences
* Default is 0 == disabled. Set to 1 to enable sequence validation.
*
- * For use with sequence compression API: ZSTD_compressSequences().
- * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
+ * For use with sequence compression API: ZSTD_compressSequences*().
+ * Designates whether or not provided sequences are validated within ZSTD_compressSequences*()
* during function execution.
*
- * Without validation, providing a sequence that does not conform to the zstd spec will cause
- * undefined behavior, and may produce a corrupted block.
+ * When Sequence validation is disabled (default), Sequences are compressed as-is,
+ * so they must be correct, otherwise it would result in a corruption error.
*
- * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for
+ * Sequence validation adds some protection, by ensuring that all values respect boundary conditions.
+ * If a Sequence is detected invalid (see doc/zstd_compression_format.md for
* specifics regarding offset/matchlength requirements) then the function will bail out and
* return an error.
- *
*/
#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
-/* ZSTD_c_useBlockSplitter
- * Controlled with ZSTD_paramSwitch_e enum.
+/* ZSTD_c_blockSplitterLevel
+ * note: this parameter only influences the first splitter stage,
+ * which is active before producing the sequences.
+ * ZSTD_c_splitAfterSequences controls the next splitter stage,
+ * which is active after sequence production.
+ * Note that both can be combined.
+ * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included.
+ * 0 means "auto", which will select a value depending on current ZSTD_c_strategy.
+ * 1 means no splitting.
+ * Then, values from 2 to 6 are sorted in increasing CPU load order.
+ *
+ * Note that currently the first block is never split,
+ * to ensure expansion guarantees in presence of incompressible data.
+ */
+#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6
+#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20
+
+/* ZSTD_c_splitAfterSequences
+ * This is a stronger splitter algorithm,
+ * based on actual sequences previously produced by the selected parser.
+ * It's also slower, and as a consequence, mostly used for high compression levels.
+ * While the post-splitter does overlap with the pre-splitter,
+ * both can nonetheless be combined,
+ * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX,
+ * resulting in higher compression ratio than just one of them.
+ *
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use block splitter.
* Set to ZSTD_ps_enable to always use block splitter.
@@ -1894,10 +2213,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
* block splitting based on the compression parameters.
*/
-#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13
+#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13
/* ZSTD_c_useRowMatchFinder
- * Controlled with ZSTD_paramSwitch_e enum.
+ * Controlled with ZSTD_ParamSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use row-based matchfinder.
* Set to ZSTD_ps_enable to force usage of row-based matchfinder.
@@ -1928,6 +2247,80 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
*/
#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
+/* ZSTD_c_prefetchCDictTables
+ * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto.
+ *
+ * In some situations, zstd uses CDict tables in-place rather than copying them
+ * into the working context. (See docs on ZSTD_dictAttachPref_e above for details).
+ * In such situations, compression speed is seriously impacted when CDict tables are
+ * "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables
+ * when they are used in-place.
+ *
+ * For sufficiently small inputs, the cost of the prefetch will outweigh the benefit.
+ * For sufficiently large inputs, zstd will by default memcpy() CDict tables
+ * into the working context, so there is no need to prefetch. This parameter is
+ * targeted at a middle range of input sizes, where a prefetch is cheap enough to be
+ * useful but memcpy() is too expensive. The exact range of input sizes where this
+ * makes sense is best determined by careful experimentation.
+ *
+ * Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable,
+ * but in the future zstd may conditionally enable this feature via an auto-detection
+ * heuristic for cold CDicts.
+ * Use ZSTD_ps_disable to opt out of prefetching under any circumstances.
+ */
+#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16
+
+/* ZSTD_c_enableSeqProducerFallback
+ * Allowed values are 0 (disable) and 1 (enable). The default setting is 0.
+ *
+ * Controls whether zstd will fall back to an internal sequence producer if an
+ * external sequence producer is registered and returns an error code. This fallback
+ * is block-by-block: the internal sequence producer will only be called for blocks
+ * where the external sequence producer returns an error code. Fallback parsing will
+ * follow any other cParam settings, such as compression level, the same as in a
+ * normal (fully-internal) compression operation.
+ *
+ * The user is strongly encouraged to read the full Block-Level Sequence Producer API
+ * documentation (below) before setting this parameter. */
+#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17
+
+/* ZSTD_c_maxBlockSize
+ * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
+ * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
+ *
+ * This parameter can be used to set an upper bound on the blocksize
+ * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
+ * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (which would make
+ * compressBound() inaccurate). Currently only meant to be used for testing.
+ */
+#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
+
+/* ZSTD_c_repcodeResolution
+ * This parameter only has an effect if ZSTD_c_blockDelimiters is
+ * set to ZSTD_sf_explicitBlockDelimiters (may change in the future).
+ *
+ * This parameter affects how zstd parses external sequences,
+ * provided via the ZSTD_compressSequences*() API
+ * or from an external block-level sequence producer.
+ *
+ * If set to ZSTD_ps_enable, the library will check for repeated offsets within
+ * external sequences, even if those repcodes are not explicitly indicated in
+ * the "rep" field. Note that this is the only way to exploit repcode matches
+ * while using compressSequences*() or an external sequence producer, since zstd
+ * currently ignores the "rep" field of external sequences.
+ *
+ * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in
+ * external sequences, regardless of whether the "rep" field has been set. This
+ * reduces sequence compression overhead by about 25% while sacrificing some
+ * compression ratio.
+ *
+ * The default value is ZSTD_ps_auto, for which the library will enable/disable
+ * based on compression level (currently: level<10 disables, level>=10 enables).
+ */
+#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19
+#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */
+
+
/*! ZSTD_CCtx_getParameter() :
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
* and store it into int* value.
@@ -2084,7 +2477,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
* in the range [dst, dst + pos) MUST not be modified during decompression
* or you will get data corruption.
*
- * When this flags is enabled zstd won't allocate an output buffer, because
+ * When this flag is enabled zstd won't allocate an output buffer, because
* it can write directly to the ZSTD_outBuffer, but it will still allocate
* an input buffer large enough to fit any compressed block. This will also
* avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
@@ -2137,6 +2530,33 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
*/
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
+/* ZSTD_d_disableHuffmanAssembly
+ * Set to 1 to disable the Huffman assembly implementation.
+ * The default value is 0, which allows zstd to use the Huffman assembly
+ * implementation if available.
+ *
+ * This parameter can be used to disable Huffman assembly at runtime.
+ * If you want to disable it at compile time you can define the macro
+ * ZSTD_DISABLE_ASM.
+ */
+#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5
+
+/* ZSTD_d_maxBlockSize
+ * Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
+ * The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
+ *
+ * Forces the decompressor to reject blocks whose content size is
+ * larger than the configured maxBlockSize. When maxBlockSize is
+ * larger than the windowSize, the windowSize is used instead.
+ * This saves memory on the decoder when you know all blocks are small.
+ *
+ * This option is typically used in conjunction with ZSTD_c_maxBlockSize.
+ *
+ * WARNING: This causes the decoder to reject otherwise valid frames
+ * that have block sizes larger than the configured maxBlockSize.
+ */
+#define ZSTD_d_maxBlockSize ZSTD_d_experimentalParam6
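
A sketch of the intended pairing mentioned above (an illustration, not part of the patch): cap block sizes on the compression side with ZSTD_c_maxBlockSize, and enforce the same cap on the decompression side with ZSTD_d_maxBlockSize so the decoder can shrink its buffers. The 16 KB figure is illustrative.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

/* Sketch: matching block-size caps for a cooperating encoder/decoder pair. */
static void cap_block_size(ZSTD_CCtx* cctx, ZSTD_DCtx* dctx)
{
    int const cap = 16 * 1024;  /* illustrative; must be within [1KB, ZSTD_BLOCKSIZE_MAX] */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, cap);
    /* the decoder now rejects frames carrying blocks larger than `cap`,
     * and can size its internal buffers accordingly */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_maxBlockSize, cap);
}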
+
/*! ZSTD_DCtx_setFormat() :
* This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
@@ -2145,6 +2565,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
 * such as ZSTD_f_zstd1_magicless for example.
* @return : 0, or an error code (which can be tested using ZSTD_isError()). */
ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
+ZSTDLIB_STATIC_API
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
/*! ZSTD_decompressStream_simpleArgs() :
@@ -2181,6 +2602,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
int compressionLevel,
unsigned long long pledgedSrcSize);
@@ -2198,17 +2620,15 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
int compressionLevel);
/*! ZSTD_initCStream_advanced() :
- * This function is DEPRECATED, and is approximately equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd parameter and leave the rest as-is.
- * for ((param, value) : params) {
- * ZSTD_CCtx_setParameter(zcs, param, value);
- * }
+ * ZSTD_CCtx_setParams(zcs, params);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
@@ -2218,6 +2638,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
const void* dict, size_t dictSize,
ZSTD_parameters params,
@@ -2232,15 +2653,13 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
/*! ZSTD_initCStream_usingCDict_advanced() :
- * This function is DEPRECATED, and is approximately equivalent to:
+ * This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
- * // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
- * for ((fParam, value) : fParams) {
- * ZSTD_CCtx_setParameter(zcs, fParam, value);
- * }
+ * ZSTD_CCtx_setFParams(zcs, fParams);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
@@ -2250,6 +2669,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
const ZSTD_CDict* cdict,
ZSTD_frameParameters fParams,
@@ -2264,7 +2684,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
* explicitly specified.
*
 * start a new frame, using the same parameters as the previous frame.
- * This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
+ * This is typically useful to skip the dictionary loading stage, since it will reuse it in-place.
* Note that zcs must be init at least once before using ZSTD_resetCStream().
* If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
* If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
@@ -2274,6 +2694,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
* This prototype will generate compilation warnings.
*/
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
+ZSTDLIB_STATIC_API
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
@@ -2319,8 +2740,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
*
* note: no dictionary will be used if dict == NULL or dictSize < 8
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
/*!
@@ -2330,8 +2751,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo
* ZSTD_DCtx_refDDict(zds, ddict);
*
* note : ddict is referenced, it must outlive decompression session
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
*/
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
/*!
@@ -2339,18 +2760,202 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z
*
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
*
- * re-use decompression parameters from previous init; saves dictionary loading
- * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
+ * reuse decompression parameters from previous init; saves dictionary loading
*/
+ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
+/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API *********************
+ *
+ * *** OVERVIEW ***
+ * The Block-Level Sequence Producer API allows users to provide their own custom
+ * sequence producer which libzstd invokes to process each block. The produced list
+ * of sequences (literals and matches) is then post-processed by libzstd to produce
+ * valid compressed blocks.
+ *
+ * This block-level offload API is a more granular complement of the existing
+ * frame-level offload API compressSequences() (introduced in v1.5.1). It offers
+ * an easier migration story for applications already integrated with libzstd: the
+ * user application continues to invoke the same compression functions
+ * ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits
+ * from the specific advantages of the external sequence producer. For example,
+ * the sequence producer could be tuned to take advantage of known characteristics
+ * of the input, to offer better speed / ratio, or could leverage hardware
+ * acceleration not available within libzstd itself.
+ *
+ * See contrib/externalSequenceProducer for an example program employing the
+ * Block-Level Sequence Producer API.
+ *
+ * *** USAGE ***
+ * The user is responsible for implementing a function of type
+ * ZSTD_sequenceProducer_F. For each block, zstd will pass the following
+ * arguments to the user-provided function:
+ *
+ * - sequenceProducerState: a pointer to a user-managed state for the sequence
+ * producer.
+ *
+ * - outSeqs, outSeqsCapacity: an output buffer for the sequence producer.
+ * outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory
+ * backing outSeqs is managed by the CCtx.
+ *
+ * - src, srcSize: an input buffer for the sequence producer to parse.
+ * srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX.
+ *
+ * - dict, dictSize: a history buffer, which may be empty, which the sequence
+ * producer may reference as it parses the src buffer. Currently, zstd will
+ * always pass dictSize == 0 into external sequence producers, but this will
+ * change in the future.
+ *
+ * - compressionLevel: a signed integer representing the zstd compression level
+ * set by the user for the current operation. The sequence producer may choose
+ * to use this information to change its compression strategy and speed/ratio
+ * tradeoff. Note: the compression level does not reflect zstd parameters set
+ * through the advanced API.
+ *
+ * - windowSize: a size_t representing the maximum allowed offset for external
+ * sequences. Note that sequence offsets are sometimes allowed to exceed the
+ * windowSize if a dictionary is present, see doc/zstd_compression_format.md
+ * for details.
+ *
+ * The user-provided function shall return a size_t representing the number of
+ * sequences written to outSeqs. This return value will be treated as an error
+ * code if it is greater than outSeqsCapacity. The return value must be non-zero
+ * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided
+ * for convenience, but any value greater than outSeqsCapacity will be treated as
+ * an error code.
+ *
+ * If the user-provided function does not return an error code, the sequences
+ * written to outSeqs must be a valid parse of the src buffer. Data corruption may
+ * occur if the parse is not valid. A parse is defined to be valid if the
+ * following conditions hold:
+ * - The sum of matchLengths and literalLengths must equal srcSize.
+ * - All sequences in the parse, except for the final sequence, must have
+ * matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have
+ * matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0.
+ * - All offsets must respect the windowSize parameter as specified in
+ * doc/zstd_compression_format.md.
+ * - If the final sequence has matchLength == 0, it must also have offset == 0.
+ *
+ * zstd will only validate these conditions (and fail compression if they do not
+ * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence
+ * validation has a performance cost.
+ *
+ * If the user-provided function returns an error, zstd will either fall back
+ * to an internal sequence producer or fail the compression operation. The user can
+ * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback
+ * cParam. Fallback compression will follow any other cParam settings, such as
+ * compression level, the same as in a normal compression operation.
+ *
+ * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F
+ * function by calling
+ * ZSTD_registerSequenceProducer(cctx,
+ * sequenceProducerState,
+ * sequenceProducer)
+ * This setting will persist until the next parameter reset of the CCtx.
+ *
+ * The sequenceProducerState must be initialized by the user before calling
+ * ZSTD_registerSequenceProducer(). The user is responsible for destroying the
+ * sequenceProducerState.
+ *
+ * *** LIMITATIONS ***
+ * This API is compatible with all zstd compression APIs which respect advanced parameters.
+ * However, there are three limitations:
+ *
+ * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported.
+ * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level
+ * external sequence producer.
+ * - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some
+ * cases (see its documentation for details). Users must explicitly set
+ * ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external
+ * sequence producer is registered.
+ * - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default
+ * whenever the window size (1 << ZSTD_c_windowLog) is below 128MB, but that's subject to change. Users should
+ * check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence
+ * Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog).
+ *
+ * Second, history buffers are not currently supported. Concretely, zstd will always pass
+ * dictSize == 0 to the external sequence producer (for now). This has two implications:
+ * - Dictionaries are not currently supported. Compression will *not* fail if the user
+ * references a dictionary, but the dictionary won't have any effect.
+ * - Stream history is not currently supported. All advanced compression APIs, including
+ * streaming APIs, work with external sequence producers, but each block is treated as
+ * an independent chunk without history from previous blocks.
+ *
+ * Third, multi-threading within a single compression is not currently supported. In other words,
+ * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered.
+ * Multi-threading across compressions is fine: simply create one CCtx per thread.
+ *
+ * Long-term, we plan to overcome all three limitations. There is no technical blocker to
+ * overcoming them. It is purely a question of engineering effort.
+ */
+
+#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1))
+
+typedef size_t (*ZSTD_sequenceProducer_F) (
+ void* sequenceProducerState,
+ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel,
+ size_t windowSize
+);
+
+/*! ZSTD_registerSequenceProducer() :
+ * Instruct zstd to use a block-level external sequence producer function.
+ *
+ * The sequenceProducerState must be initialized by the caller, and the caller is
+ * responsible for managing its lifetime. This parameter is sticky across
+ * compressions. It will remain set until the user explicitly resets compression
+ * parameters.
+ *
+ * Sequence producer registration is considered to be an "advanced parameter",
+ * part of the "advanced API". This means it will only have an effect on compression
+ * APIs which respect advanced parameters, such as compress2() and compressStream2().
+ * Older compression APIs such as compressCCtx(), which predate the introduction of
+ * "advanced parameters", will ignore any external sequence producer setting.
+ *
+ * The sequence producer can be "cleared" by registering a NULL function pointer. This
+ * removes all limitations described above in the "LIMITATIONS" section of the API docs.
+ *
+ * The user is strongly encouraged to read the full API documentation (above) before
+ * calling this function. */
+ZSTDLIB_STATIC_API void
+ZSTD_registerSequenceProducer(
+ ZSTD_CCtx* cctx,
+ void* sequenceProducerState,
+ ZSTD_sequenceProducer_F sequenceProducer
+);
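
A minimal usage sketch, assuming preallocated src/dst buffers and the
nullSeqProducer stub above:

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t ret;

    /* Let zstd fall back to its internal producer when ours fails. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
    ZSTD_registerSequenceProducer(cctx, NULL /* producer state */,
                                  nullSeqProducer);
    ret = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    /* check ZSTD_isError(ret) before using the output */
    ZSTD_freeCCtx(cctx);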
+
+/*! ZSTD_CCtxParams_registerSequenceProducer() :
+ * Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.
+ * This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),
+ * which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().
+ *
+ * If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()
+ * is required, then this function is for you. Otherwise, you probably don't need it.
+ *
+ * See tests/zstreamtest.c for example usage. */
+ZSTDLIB_STATIC_API void
+ZSTD_CCtxParams_registerSequenceProducer(
+ ZSTD_CCtx_params* params,
+ void* sequenceProducerState,
+ ZSTD_sequenceProducer_F sequenceProducer
+);
+
+
/* *******************************************************************
-* Buffer-less and synchronous inner streaming functions
+* Buffer-less and synchronous inner streaming functions (DEPRECATED)
+*
+* This API is deprecated, and will be removed in a future version.
+* It allows streaming (de)compression with user allocated buffers.
+* However, it is hard to use, and not as well tested as the rest of
+* our API.
*
-* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
-* But it's also a complex one, with several restrictions, documented below.
-* Prefer normal streaming API for an easier experience.
+* Please use the normal streaming API instead: ZSTD_compressStream2,
+* and ZSTD_decompressStream.
+* If there is functionality that you need but the normal API doesn't provide,
+* please open an issue on our GitHub.
********************************************************************* */
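
A minimal sketch of the recommended replacement, assuming preallocated
buffers (sized with ZSTD_compressBound() so the loop completes in one pass):

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer  input  = { src, srcSize, 0 };
    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
    size_t remaining;

    do {
        /* ZSTD_e_end compresses and finishes the frame in one call. */
        remaining = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end);
        /* a real caller checks ZSTD_isError(remaining) and drains output */
    } while (remaining != 0);
    ZSTD_freeCCtx(cctx);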
/*
@@ -2358,11 +2963,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
- ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+ ZSTD_CCtx object can be reused multiple times within successive compression operations.
Start by initializing a context.
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
- It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
@@ -2380,39 +2984,49 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
- `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+ `ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */
-ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
+ZSTDLIB_STATIC_API
+size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
+ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
+ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
/*
Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
- A ZSTD_DCtx object can be re-used multiple times.
+ A ZSTD_DCtx object can be reused multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
- @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
- >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+ result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+ >0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+ It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
@@ -2428,7 +3042,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
The most memory efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
- which can @return an error code if required value is too large for current system (in 32-bits mode).
+ which can return an error code if required value is too large for current system (in 32-bits mode).
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
@@ -2448,7 +3062,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
- @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+ result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
@@ -2471,27 +3085,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
*/
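
The loop described above, sketched with input reads and buffer management
elided (same caveat: this API is deprecated in favor of
ZSTD_decompressStream()):

    ZSTD_decompressBegin(dctx);
    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        size_t regenerated;

        if (toRead == 0)
            break;  /* frame fully decoded */
        /* read exactly toRead compressed bytes into src ... */
        regenerated = ZSTD_decompressContinue(dctx, dst, dstCapacity,
                                              src, toRead);
        if (ZSTD_isError(regenerated))
            break;  /* handle the error */
        /* regenerated bytes were written to dst (0 for metadata items) */
    }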
/*===== Buffer-less streaming decompression functions =====*/
-typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
-typedef struct {
- unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
- unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
- unsigned blockSizeMax;
- ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
- unsigned headerSize;
- unsigned dictID;
- unsigned checksumFlag;
-} ZSTD_frameHeader;
-/*! ZSTD_getFrameHeader() :
- * decode Frame Header, or requires larger `srcSize`.
- * @return : 0, `zfhPtr` is correctly filled,
- * >0, `srcSize` is too small, value is wanted `srcSize` amount,
- * or an error code, which can be tested using ZSTD_isError() */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */
-/*! ZSTD_getFrameHeader_advanced() :
- * same as ZSTD_getFrameHeader(),
- * with added capability to select a format (like ZSTD_f_zstd1_magicless) */
-ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
@@ -2502,6 +3096,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* misc */
+ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
@@ -2509,11 +3104,23 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
-/* ============================ */
-/* Block level API */
-/* ============================ */
+/* ========================================= */
+/* Block level API (DEPRECATED) */
+/* ========================================= */
/*!
+
+ This API is deprecated in favor of the regular compression API.
+ You can get the frame header down to 2 bytes by setting (a sketch follows this comment block):
+ - ZSTD_c_format = ZSTD_f_zstd1_magicless
+ - ZSTD_c_contentSizeFlag = 0
+ - ZSTD_c_checksumFlag = 0
+ - ZSTD_c_dictIDFlag = 0
+
+ This API is not as well tested as our normal API, so we recommend not using it.
+ We will be removing it in a future version. If the normal API doesn't provide
+ the functionality you need, please open a GitHub issue.
+
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users will have to manage the metadata needed to regenerate data, such as compressed and content sizes.
@@ -2524,7 +3131,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
- It is necessary to init context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
- + copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
@@ -2541,11 +3147,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
*/
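
A sketch of the magicless-frame settings listed above; note that the
decompressor must opt in to the same format:

    /* compression side */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);
    /* decompression side */
    ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);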
/*===== Raw zstd block functions =====*/
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
-
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 2a85b941db97..30c193a1207e 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -7,35 +7,34 @@
struct lruvec;
-extern atomic_t zswap_stored_pages;
+extern atomic_long_t zswap_stored_pages;
#ifdef CONFIG_ZSWAP
struct zswap_lruvec_state {
/*
- * Number of pages in zswap that should be protected from the shrinker.
- * This number is an estimate of the following counts:
+ * Number of pages swapped in from disk, i.e. not found in the zswap pool.
*
- * a) Recent page faults.
- * b) Recent insertion to the zswap LRU. This includes new zswap stores,
- * as well as recent zswap LRU rotations.
- *
- * These pages are likely to be warm, and might incur IO if the are written
- * to swap.
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+ * LRU active/protected and not written them back, we would not have had to
+ * swap them in.
*/
- atomic_long_t nr_zswap_protected;
+ atomic_long_t nr_disk_swapins;
};
unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
-bool zswap_load(struct folio *folio);
+int zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
void zswap_folio_swapin(struct folio *folio);
-bool is_zswap_enabled(void);
+bool zswap_is_enabled(void);
+bool zswap_never_enabled(void);
#else
struct zswap_lruvec_state {};
@@ -45,9 +44,9 @@ static inline bool zswap_store(struct folio *folio)
return false;
}
-static inline bool zswap_load(struct folio *folio)
+static inline int zswap_load(struct folio *folio)
{
- return false;
+ return -ENOENT;
}
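
A hypothetical caller sketch based only on the conventions visible here
(0 means the folio was filled from the zswap pool, -ENOENT means it is not
in zswap and must be read from the backing device; the disk path name is
made up):

    int err = zswap_load(folio);

    if (!err)
        folio_mark_uptodate(folio);    /* served from the zswap pool */
    else if (err == -ENOENT)
        read_from_swap_device(folio);  /* hypothetical disk swap-in path */
    else
        return err;                    /* real error, propagate */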
static inline void zswap_invalidate(swp_entry_t swp) {}
@@ -60,11 +59,16 @@ static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}
-static inline bool is_zswap_enabled(void)
+static inline bool zswap_is_enabled(void)
{
return false;
}
+static inline bool zswap_never_enabled(void)
+{
+ return true;
+}
+
#endif
#endif /* _LINUX_ZSWAP_H */
diff --git a/include/media/cadence/cdns-csi2rx.h b/include/media/cadence/cdns-csi2rx.h
new file mode 100644
index 000000000000..782d03fc36d1
--- /dev/null
+++ b/include/media/cadence/cdns-csi2rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _CDNS_CSI2RX_H
+#define _CDNS_CSI2RX_H
+
+#include <media/v4l2-subdev.h>
+
+/**
+ * cdns_csi2rx_negotiate_ppc - Negotiate pixel-per-clock on output interface
+ *
+ * @subdev: pointer to &struct v4l2_subdev
+ * @pad: pad number of the source pad
+ * @ppc: pointer to requested pixel-per-clock value
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
+ u8 *ppc);
+
+#endif
diff --git a/include/media/cec.h b/include/media/cec.h
index d131514032f2..0c8e86115b6f 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -10,7 +10,6 @@
#include <linux/poll.h>
#include <linux/fs.h>
-#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
@@ -66,6 +65,8 @@ struct cec_data {
struct list_head xfer_list;
struct cec_adapter *adap;
struct cec_msg msg;
+ u8 match_len;
+ u8 match_reply[5];
struct cec_fh *fh;
struct delayed_work work;
struct completion c;
@@ -296,6 +297,37 @@ struct cec_adapter {
char input_phys[40];
};
+static inline int cec_get_device(struct cec_adapter *adap)
+{
+ struct cec_devnode *devnode = &adap->devnode;
+
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+ * return -ENODEV if the cec device has already been removed
+ * or if it is no longer registered.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENODEV;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+static inline void cec_put_device(struct cec_adapter *adap)
+{
+ put_device(&adap->devnode.dev);
+}
+
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
{
return adap->priv;
diff --git a/include/media/drv-intf/cx25840.h b/include/media/drv-intf/cx25840.h
index ba69bc525382..8b455d9dd5ca 100644
--- a/include/media/drv-intf/cx25840.h
+++ b/include/media/drv-intf/cx25840.h
@@ -3,7 +3,7 @@
/*
* cx25840.h - definition for cx25840/1/2/3 inputs
*
- * Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ * Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _CX25840_H_
diff --git a/include/media/drv-intf/msp3400.h b/include/media/drv-intf/msp3400.h
index d6dfae104a6f..853258ee6bbd 100644
--- a/include/media/drv-intf/msp3400.h
+++ b/include/media/drv-intf/msp3400.h
@@ -2,7 +2,7 @@
/*
msp3400.h - definition for msp3400 inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/drv-intf/saa7146_vv.h b/include/media/drv-intf/saa7146_vv.h
index 55c7d70b9feb..f66f4dfccf14 100644
--- a/include/media/drv-intf/saa7146_vv.h
+++ b/include/media/drv-intf/saa7146_vv.h
@@ -130,7 +130,8 @@ struct saa7146_ext_vv
/* pointer to the saa7146 core ops */
const struct v4l2_ioctl_ops *core_ops;
- struct v4l2_file_operations vbi_fops;
+ ssize_t (*vbi_write)(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos);
};
struct saa7146_use_ops {
diff --git a/include/media/i2c/bt819.h b/include/media/i2c/bt819.h
index 70aa46bd5182..2277a7eb9548 100644
--- a/include/media/i2c/bt819.h
+++ b/include/media/i2c/bt819.h
@@ -2,7 +2,7 @@
/*
bt819.h - bt819 notifications
- Copyright (C) 2009 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2009 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/cs5345.h b/include/media/i2c/cs5345.h
index d41e4dca3fcc..39e1cf6c1a2f 100644
--- a/include/media/i2c/cs5345.h
+++ b/include/media/i2c/cs5345.h
@@ -2,7 +2,7 @@
/*
cs5345.h - definition for cs5345 inputs and outputs
- Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/cs53l32a.h b/include/media/i2c/cs53l32a.h
index 52ceb2f916d3..777f667855cb 100644
--- a/include/media/i2c/cs53l32a.h
+++ b/include/media/i2c/cs53l32a.h
@@ -2,7 +2,7 @@
/*
cs53l32a.h - definition for cs53l32a inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/m52790.h b/include/media/i2c/m52790.h
index 3f214fa9bc64..cedaaf215273 100644
--- a/include/media/i2c/m52790.h
+++ b/include/media/i2c/m52790.h
@@ -2,7 +2,7 @@
/*
m52790.h - definition for m52790 inputs and outputs
- Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
deleted file mode 100644
index f933cd0be8e5..000000000000
--- a/include/media/i2c/mt9p031.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MT9P031_H
-#define MT9P031_H
-
-struct v4l2_subdev;
-
-/*
- * struct mt9p031_platform_data - MT9P031 platform data
- * @ext_freq: Input clock frequency
- * @target_freq: Pixel clock frequency
- */
-struct mt9p031_platform_data {
- unsigned int pixclk_pol:1;
- int ext_freq;
- int target_freq;
-};
-
-#endif
diff --git a/include/media/i2c/mt9v011.h b/include/media/i2c/mt9v011.h
index 41c00b3e7184..552839756e64 100644
--- a/include/media/i2c/mt9v011.h
+++ b/include/media/i2c/mt9v011.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* mt9v011 sensor
*
- * Copyright (C) 2011 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2011 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef __MT9V011_H__
diff --git a/include/media/i2c/mt9v022.h b/include/media/i2c/mt9v022.h
deleted file mode 100644
index 6966eb538165..000000000000
--- a/include/media/i2c/mt9v022.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * mt9v022 sensor
- */
-
-#ifndef __MT9V022_H__
-#define __MT9V022_H__
-
-struct mt9v022_platform_data {
- unsigned short y_skip_top; /* Lines to skip at the top */
-};
-
-#endif
diff --git a/include/media/i2c/mt9v032.h b/include/media/i2c/mt9v032.h
deleted file mode 100644
index 83a37ccfb649..000000000000
--- a/include/media/i2c/mt9v032.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MEDIA_MT9V032_H
-#define _MEDIA_MT9V032_H
-
-struct mt9v032_platform_data {
- unsigned int clk_pol:1;
-
- const s64 *link_freqs;
- s64 link_def_freq;
-};
-
-#endif
diff --git a/include/media/i2c/saa7115.h b/include/media/i2c/saa7115.h
index 0cd6080d7cb1..a607c91ef5f3 100644
--- a/include/media/i2c/saa7115.h
+++ b/include/media/i2c/saa7115.h
@@ -2,7 +2,7 @@
/*
saa7115.h - definition for saa7111/3/4/5 inputs and frequency flags
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/saa7127.h b/include/media/i2c/saa7127.h
index 53ee999e6090..c81ee1743df1 100644
--- a/include/media/i2c/saa7127.h
+++ b/include/media/i2c/saa7127.h
@@ -2,7 +2,7 @@
/*
saa7127.h - definition for saa7126/7/8/9 inputs/outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index fee2818c558d..7eda467b6725 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -5,7 +5,7 @@
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
*
* Contributors:
- * Hans Verkuil <hans.verkuil@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*/
diff --git a/include/media/i2c/tvaudio.h b/include/media/i2c/tvaudio.h
index 42cd3206fb6c..206f42ed4e69 100644
--- a/include/media/i2c/tvaudio.h
+++ b/include/media/i2c/tvaudio.h
@@ -2,7 +2,7 @@
/*
tvaudio.h - definition for tvaudio inputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/upd64031a.h b/include/media/i2c/upd64031a.h
index b6570abc84ef..d39b2b7f0cf3 100644
--- a/include/media/i2c/upd64031a.h
+++ b/include/media/i2c/upd64031a.h
@@ -2,7 +2,7 @@
/*
* upd64031a - NEC Electronics Ghost Reduction input defines
*
- * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ * 2006 by Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _UPD64031A_H_
diff --git a/include/media/i2c/upd64083.h b/include/media/i2c/upd64083.h
index 17fb7b5201cc..72cf547c25fc 100644
--- a/include/media/i2c/upd64083.h
+++ b/include/media/i2c/upd64083.h
@@ -2,7 +2,7 @@
/*
* upd6408x - NEC Electronics 3-Dimensional Y/C separation input defines
*
- * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ * 2006 by Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _UPD64083_H_
diff --git a/include/media/i2c/wm8775.h b/include/media/i2c/wm8775.h
index 6ccdeb3817ab..a02695ee3a58 100644
--- a/include/media/i2c/wm8775.h
+++ b/include/media/i2c/wm8775.h
@@ -2,7 +2,7 @@
/*
wm8775.h - definition for wm8775 inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
index 783bda6d5cc3..16fac765456e 100644
--- a/include/media/ipu-bridge.h
+++ b/include/media/ipu-bridge.h
@@ -3,6 +3,7 @@
#ifndef __IPU_BRIDGE_H
#define __IPU_BRIDGE_H
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/types.h>
#include <media/v4l2-fwnode.h>
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 0393b23129eb..b91ff6f8c3bb 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -627,7 +627,7 @@ static inline bool media_entity_enum_intersects(
* @gobj: Pointer to the struct &media_gobj graph object
*/
#define gobj_to_entity(gobj) \
- container_of(gobj, struct media_entity, graph_obj)
+ container_of_const(gobj, struct media_entity, graph_obj)
/**
* gobj_to_pad - returns the struct &media_pad pointer from the
@@ -636,7 +636,7 @@ static inline bool media_entity_enum_intersects(
* @gobj: Pointer to the struct &media_gobj graph object
*/
#define gobj_to_pad(gobj) \
- container_of(gobj, struct media_pad, graph_obj)
+ container_of_const(gobj, struct media_pad, graph_obj)
/**
* gobj_to_link - returns the struct &media_link pointer from the
@@ -645,7 +645,7 @@ static inline bool media_entity_enum_intersects(
* @gobj: Pointer to the struct &media_gobj graph object
*/
#define gobj_to_link(gobj) \
- container_of(gobj, struct media_link, graph_obj)
+ container_of_const(gobj, struct media_link, graph_obj)
/**
* gobj_to_intf - returns the struct &media_interface pointer from the
@@ -654,7 +654,7 @@ static inline bool media_entity_enum_intersects(
* @gobj: Pointer to the struct &media_gobj graph object
*/
#define gobj_to_intf(gobj) \
- container_of(gobj, struct media_interface, graph_obj)
+ container_of_const(gobj, struct media_interface, graph_obj)
/**
* intf_to_devnode - returns the struct media_intf_devnode pointer from the
@@ -663,7 +663,7 @@ static inline bool media_entity_enum_intersects(
* @intf: Pointer to struct &media_intf_devnode
*/
#define intf_to_devnode(intf) \
- container_of(intf, struct media_intf_devnode, intf)
+ container_of_const(intf, struct media_intf_devnode, intf)
/**
* media_gobj_create - Initialize a graph object
@@ -1143,10 +1143,10 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
/**
* media_pipeline_start - Mark a pipeline as streaming
- * @pad: Starting pad
+ * @origin: Starting pad
* @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
- * Mark all pads connected to a given pad through enabled links, either
+ * Mark all pads connected to pad @origin through enabled links, either
* directly or indirectly, as streaming. The given pipeline object is assigned
* to every pad in the pipeline and stored in the media_pad pipe field.
*
@@ -1155,17 +1155,17 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph);
* pipeline pointer must be identical for all nested calls to
* media_pipeline_start().
*/
-__must_check int media_pipeline_start(struct media_pad *pad,
+__must_check int media_pipeline_start(struct media_pad *origin,
struct media_pipeline *pipe);
/**
* __media_pipeline_start - Mark a pipeline as streaming
*
- * @pad: Starting pad
+ * @origin: Starting pad
* @pipe: Media pipeline to be assigned to all pads in the pipeline.
*
* ..note:: This is the non-locking version of media_pipeline_start()
*/
-__must_check int __media_pipeline_start(struct media_pad *pad,
+__must_check int __media_pipeline_start(struct media_pad *origin,
struct media_pipeline *pipe);
/**
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 3cd25a2717ce..bb500b2f9da4 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -5,7 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2018 Intel Corporation
*
- * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Hans Verkuil <hverkuil@kernel.org>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 803349599c27..35c7a0546f02 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -57,11 +57,11 @@ enum rc_filter_type {
* struct lirc_fh - represents an open lirc file
* @list: list of open file handles
* @rc: rcdev for this lirc chardev
- * @carrier_low: when setting the carrier range, first the low end must be
- * set with an ioctl and then the high end with another ioctl
* @rawir: queue for incoming raw IR
* @scancodes: queue for incoming decoded scancodes
* @wait_poll: poll struct for lirc device
+ * @carrier_low: when setting the carrier range, first the low end must be
+ * set with an ioctl and then the high end with another ioctl
* @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or
* LIRC_MODE_PULSE
* @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or
@@ -70,10 +70,10 @@ enum rc_filter_type {
struct lirc_fh {
struct list_head list;
struct rc_dev *rc;
- int carrier_low;
DECLARE_KFIFO_PTR(rawir, unsigned int);
DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode);
wait_queue_head_t wait_poll;
+ u32 carrier_low;
u8 send_mode;
u8 rec_mode;
};
@@ -82,6 +82,12 @@ struct lirc_fh {
* struct rc_dev - represents a remote control device
* @dev: driver model's view of this device
* @managed_alloc: devm_rc_allocate_device was used to create rc_dev
+ * @registered: set to true by rc_register_device(), false by
+ * rc_unregister_device
+ * @idle: used to keep track of RX state
+ * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
+ * wakeup protocols is the set of all raw encoders
+ * @minor: unique minor remote control device number
* @sysfs_groups: sysfs attribute groups
* @device_name: name of the rc child device
* @input_phys: physical path to the input child device
@@ -91,13 +97,10 @@ struct lirc_fh {
* @rc_map: current scan/key table
* @lock: used to ensure we've filled in all protocol details before
* anyone can call show_protocols or store_protocols
- * @minor: unique minor remote control device number
* @raw: additional data for raw pulse/space devices
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
- * @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- * wakeup protocols is the set of all raw encoders
+ * @users: number of current users of the device
* @allowed_protocols: bitmask with the supported RC_PROTO_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_PROTO_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_PROTO_BIT_* wakeup
@@ -111,30 +114,26 @@ struct lirc_fh {
* anything with it. Yet, as the same keycode table can be used with other
* devices, a mask is provided to allow its usage. Drivers should generally
* leave this field in blank
- * @users: number of current users of the device
* @priv: driver-specific data
* @keylock: protects the remaining members of the struct
* @keypressed: whether a key is currently pressed
+ * @last_toggle: toggle value of last command
+ * @last_keycode: keycode of last keypress
+ * @last_protocol: protocol of last keypress
+ * @last_scancode: scancode of last keypress
* @keyup_jiffies: time (in jiffies) when the current keypress should be released
* @timer_keyup: timer for releasing a keypress
* @timer_repeat: timer for autorepeat events. This is needed for CEC, which
* has non-standard repeats.
- * @last_keycode: keycode of last keypress
- * @last_protocol: protocol of last keypress
- * @last_scancode: scancode of last keypress
- * @last_toggle: toggle value of last command
* @timeout: optional time after which device stops sending data
* @min_timeout: minimum timeout supported by device
* @max_timeout: maximum timeout supported by device
* @rx_resolution : resolution (in us) of input sampler
- * @tx_resolution: resolution (in us) of output sampler
* @lirc_dev: lirc device
* @lirc_cdev: lirc char cdev
* @gap_start: start time for gap after timeout if non-zero
* @lirc_fh_lock: protects lirc_fh list
* @lirc_fh: list of open files
- * @registered: set to true by rc_register_device(), false by
- * rc_unregister_device
* @change_protocol: allow changing the protocol used on hardware decoders
* @open: callback to allow drivers to enable polling/irq when IR input device
* is opened.
@@ -158,6 +157,10 @@ struct lirc_fh {
struct rc_dev {
struct device dev;
bool managed_alloc;
+ bool registered;
+ bool idle;
+ bool encode_wakeup;
+ unsigned int minor;
const struct attribute_group *sysfs_groups[5];
const char *device_name;
const char *input_phys;
@@ -166,12 +169,10 @@ struct rc_dev {
const char *map_name;
struct rc_map rc_map;
struct mutex lock;
- unsigned int minor;
struct ir_raw_event_ctrl *raw;
struct input_dev *input_dev;
enum rc_driver_type driver_type;
- bool idle;
- bool encode_wakeup;
+ u32 users;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@@ -179,22 +180,20 @@ struct rc_dev {
struct rc_scancode_filter scancode_filter;
struct rc_scancode_filter scancode_wakeup_filter;
u32 scancode_mask;
- u32 users;
void *priv;
spinlock_t keylock;
bool keypressed;
- unsigned long keyup_jiffies;
- struct timer_list timer_keyup;
- struct timer_list timer_repeat;
+ u8 last_toggle;
u32 last_keycode;
enum rc_proto last_protocol;
u64 last_scancode;
- u8 last_toggle;
+ unsigned long keyup_jiffies;
+ struct timer_list timer_keyup;
+ struct timer_list timer_repeat;
u32 timeout;
u32 min_timeout;
u32 max_timeout;
u32 rx_resolution;
- u32 tx_resolution;
#ifdef CONFIG_LIRC
struct device lirc_dev;
struct cdev lirc_cdev;
@@ -202,7 +201,6 @@ struct rc_dev {
spinlock_t lirc_fh_lock;
struct list_head lirc_fh;
#endif
- bool registered;
int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto);
int (*open)(struct rc_dev *dev);
void (*close)(struct rc_dev *dev);
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 4676545ffd8f..d90e4611b066 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -290,6 +290,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
+#define RC_MAP_MYGICA_UTV3 "rc-mygica-utv3"
#define RC_MAP_NEBULA "rc-nebula"
#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs"
#define RC_MAP_NORWOOD "rc-norwood"
@@ -312,6 +313,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_RC6_MCE "rc-rc6-mce"
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
#define RC_MAP_REDDO "rc-reddo"
+#define RC_MAP_SIEMENS_GIGASET_RC20 "rc-siemens-gigaset-rc20"
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
#define RC_MAP_SU3000 "rc-su3000"
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index 179240fb163b..6ac9be9f675e 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -18,6 +18,7 @@ void rcar_fcp_put(struct rcar_fcp_device *fcp);
struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp);
int rcar_fcp_enable(struct rcar_fcp_device *fcp);
void rcar_fcp_disable(struct rcar_fcp_device *fcp);
+int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp);
#else
static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
{
@@ -33,6 +34,10 @@ static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0;
}
static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { }
+static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp)
+{
+ return 0;
+}
#endif
#endif /* __MEDIA_RCAR_FCP_H__ */
diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h
index df76ac8e658c..c79b773f750c 100644
--- a/include/media/tuner-types.h
+++ b/include/media/tuner-types.h
@@ -168,7 +168,7 @@ struct tuner_params {
u16 iffreq;
unsigned int count;
- struct tuner_range *ranges;
+ const struct tuner_range *ranges;
};
/**
@@ -189,7 +189,7 @@ struct tuner_params {
struct tunertype {
char *name;
unsigned int count;
- struct tuner_params *params;
+ const struct tuner_params *params;
u16 min;
u16 max;
@@ -199,7 +199,7 @@ struct tunertype {
u8 *sleepdata;
};
-extern struct tunertype tuners[];
+extern const struct tunertype tuners[];
extern unsigned const int tuner_count;
#endif
diff --git a/include/media/tuner.h b/include/media/tuner.h
index a7796e0a3659..c5fd6faabfd6 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -133,6 +133,7 @@
#define TUNER_SONY_BTF_PK467Z 90 /* NTSC_JP */
#define TUNER_SONY_BTF_PB463Z 91 /* NTSC */
#define TUNER_SI2157 92
+#define TUNER_TENA_TNF_931D_DFDR1 93
/* tv card specific */
#define TDA9887_PRESENT (1<<0)
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 63ad36f04f72..f8b1faced79c 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -7,7 +7,7 @@
Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal
define,
- Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -97,6 +97,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl,
/* ------------------------------------------------------------------------- */
+struct clk;
struct v4l2_device;
struct v4l2_subdev;
struct v4l2_subdev_ops;
@@ -390,38 +391,72 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
unsigned int salign);
/**
- * v4l2_find_nearest_size - Find the nearest size among a discrete
- * set of resolutions contained in an array of a driver specific struct.
+ * v4l2_find_nearest_size_conditional - Find the nearest size among a discrete
+ * set of resolutions contained in an array of a driver specific struct,
+ * with conditional exclusion of certain modes
*
* @array: a driver specific array of image sizes
* @array_size: the length of the driver specific array of image sizes
* @width_field: the name of the width field in the driver specific struct
* @height_field: the name of the height field in the driver specific struct
- * @width: desired width.
- * @height: desired height.
+ * @width: desired width
+ * @height: desired height
+ * @func: a mode is ignored if this function returns false for it
+ * @context: context for the function
*
* Finds the closest resolution to minimize the width and height differences
 * between what is requested and the supported resolutions. The size of the width
 * and height fields in the driver specific struct must equal that of u32, i.e. four
- * bytes.
+ * bytes. @func is called for each mode considered, a mode is ignored if @func
+ * returns false for it.
*
* Returns the best match or NULL if the length of the array is zero.
*/
-#define v4l2_find_nearest_size(array, array_size, width_field, height_field, \
- width, height) \
+#define v4l2_find_nearest_size_conditional(array, array_size, width_field, \
+ height_field, width, height, \
+ func, context) \
({ \
BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
sizeof((array)->height_field) != sizeof(u32)); \
- (typeof(&(array)[0]))__v4l2_find_nearest_size( \
+ (typeof(&(array)[0]))__v4l2_find_nearest_size_conditional( \
(array), array_size, sizeof(*(array)), \
offsetof(typeof(*(array)), width_field), \
offsetof(typeof(*(array)), height_field), \
- width, height); \
+ width, height, func, context); \
})
const void *
-__v4l2_find_nearest_size(const void *array, size_t array_size,
- size_t entry_size, size_t width_offset,
- size_t height_offset, s32 width, s32 height);
+__v4l2_find_nearest_size_conditional(const void *array, size_t array_size,
+ size_t entry_size, size_t width_offset,
+ size_t height_offset, s32 width,
+ s32 height,
+ bool (*func)(const void *array,
+ size_t index,
+ const void *context),
+ const void *context);
+
+/**
+ * v4l2_find_nearest_size - Find the nearest size among a discrete set of
+ * resolutions contained in an array of a driver specific struct
+ *
+ * @array: a driver specific array of image sizes
+ * @array_size: the length of the driver specific array of image sizes
+ * @width_field: the name of the width field in the driver specific struct
+ * @height_field: the name of the height field in the driver specific struct
+ * @width: desired width
+ * @height: desired height
+ *
+ * Finds the closest resolution to minimize the width and height differences
+ * between what is requested and the supported resolutions. The size of the width
+ * and height fields in the driver specific struct must equal that of u32, i.e. four
+ * bytes.
+ *
+ * Returns the best match or NULL if the length of the array is zero.
+ */
+#define v4l2_find_nearest_size(array, array_size, width_field, \
+ height_field, width, height) \
+ v4l2_find_nearest_size_conditional(array, array_size, width_field, \
+ height_field, width, height, NULL, \
+ NULL)
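
An illustrative sketch of the conditional variant (the driver struct, size
table, and predicate are hypothetical):

    struct my_framesize { u32 width; u32 height; bool hdr_only; };

    static const struct my_framesize sizes[] = {
        {  640,  480, false },
        { 1280,  720, false },
        { 1920, 1080, true  },
    };

    static bool my_size_is_sdr(const void *array, size_t index,
                               const void *context)
    {
        const struct my_framesize *fs = array;

        return !fs[index].hdr_only;  /* skip HDR-only modes */
    }

    /* closest non-HDR mode to 1600x900 */
    const struct my_framesize *best =
        v4l2_find_nearest_size_conditional(sizes, ARRAY_SIZE(sizes),
                                           width, height, 1600, 900,
                                           my_size_is_sdr, NULL);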
/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
@@ -525,25 +560,49 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
/**
* v4l2_get_link_freq - Get link rate from transmitter
*
- * @handler: The transmitter's control handler
+ * @pad: The transmitter's media pad
* @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
* D-PHY, samples per clock on parallel. 0 otherwise.
* @div: The divisor between pixel rate and link frequency. Number of data lanes
* times two on D-PHY, 1 on parallel. 0 otherwise.
*
- * This function is intended for obtaining the link frequency from the
- * transmitter sub-devices. It returns the link rate, either from the
- * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value
- * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter.
+ * This function obtains and returns the link frequency from the transmitter
+ * sub-device's pad. The link frequency is retrieved using the get_mbus_config
+ * sub-device pad operation. If this fails, the function falls back to obtaining
+ * the frequency either directly from the V4L2_CID_LINK_FREQ control if
+ * implemented by the transmitter, or by calculating it from the pixel rate
+ * obtained from the V4L2_CID_PIXEL_RATE control.
*
* Return:
* * >0: Link frequency
* * %-ENOENT: Link frequency or pixel rate control not found
* * %-EINVAL: Invalid link frequency value
*/
-s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
+#ifdef CONFIG_MEDIA_CONTROLLER
+s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul,
unsigned int div);
+/**
+ * v4l2_get_active_data_lanes - Get number of active data lanes from driver
+ *
+ * @pad: The transmitter's media pad.
+ * @max_data_lanes: The maximum number of active data lanes supported by
+ * the MIPI CSI link in hardware.
+ *
+ * This function is intended for obtaining the number of data lanes that are
+ * actively being used by the driver for a MIPI CSI-2 device on a given media pad.
+ * This information is derived from a mbus_config fetched from a device driver
+ * using the get_mbus_config v4l2_subdev pad op.
+ *
+ * Return:
+ * * >0: Number of active data lanes
+ * * %-EINVAL: Number of active data lanes is invalid, as it exceeds the maximum
+ * supported data lanes.
+ */
+int v4l2_get_active_data_lanes(const struct media_pad *pad,
+ unsigned int max_data_lanes);
+#endif
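
A hedged sketch of receiver-side use (how remote_pad is obtained is
driver-specific and assumed here); for RAW10 over D-PHY, mul is 10 bits per
pixel and div is twice the number of active data lanes:

    int lanes = v4l2_get_active_data_lanes(remote_pad, 4 /* hw max */);
    s64 freq;

    if (lanes < 0)
        return lanes;

    freq = v4l2_get_link_freq(remote_pad, 10, 2 * lanes);
    if (freq < 0)
        return dev_err_probe(dev, (int)freq, "failed to get link freq\n");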
+
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
unsigned int n_terms, unsigned int threshold);
u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator);
@@ -573,6 +632,79 @@ int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
unsigned int num_of_driver_link_freqs,
unsigned long *bitmap);
+struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id,
+ bool legacy, bool fixed_rate,
+ unsigned long clk_rate);
+
+/**
+ * devm_v4l2_sensor_clk_get - lookup and obtain a reference to a clock producer
+ * for a camera sensor
+ *
+ * @dev: device for v4l2 sensor clock "consumer"
+ * @id: clock consumer ID
+ *
+ * This function behaves the same way as devm_clk_get() except where there
+ * is no clock producer like in ACPI-based platforms.
+ *
+ * For ACPI-based platforms, the function will read the "clock-frequency"
+ * ACPI _DSD property and register a fixed-clock with the frequency indicated
+ * in the property.
+ *
+ * This function also handles the special ACPI-based system case where:
+ *
+ * * The clock-frequency _DSD property is present.
+ * * A reference to the clock producer is present, where the clock is provided
+ * by a camera sensor PMIC driver (e.g. int3472/tps68470.c)
+ *
+ * In this case, the function tries to set the clock-frequency value on the provided clock.
+ *
+ * As the name indicates, this function may only be used on camera sensor
+ * devices. This is because generally only camera sensors do need a clock to
+ * query the frequency from, due to the requirement to configure the PLL for a
+ * given CSI-2 interface frequency where the sensor's external clock frequency
+ * is a factor. Additionally, the clock frequency tends to be available on ACPI
+ * firmware based systems for camera sensors specifically (if e.g. DisCo for
+ * Imaging compliant).
+ *
+ * Returns a pointer to a struct clk on success or an error pointer on failure.
+ */
+static inline struct clk *
+devm_v4l2_sensor_clk_get(struct device *dev, const char *id)
+{
+ return __devm_v4l2_sensor_clk_get(dev, id, false, false, 0);
+}
+
+/**
+ * devm_v4l2_sensor_clk_get_legacy - lookup and obtain a reference to a clock
+ * producer for a camera sensor.
+ *
+ * @dev: device for v4l2 sensor clock "consumer"
+ * @id: clock consumer ID
+ * @fixed_rate: interpret the @clk_rate as a fixed rate or default rate
+ * @clk_rate: the clock rate
+ *
+ * This function behaves the same way as devm_v4l2_sensor_clk_get() except that
+ * it extends the behaviour on ACPI platforms to all platforms.
+ *
+ * The function also provides the ability to set the clock rate to a fixed
+ * frequency by setting @fixed_rate to true and specifying the fixed frequency
+ * in @clk_rate, or to use a default clock rate when the "clock-frequency"
+ * property is absent by setting @fixed_rate to false and specifying the default
+ * frequency in @clk_rate. Setting @fixed_rate to true and @clk_rate to 0 is an
+ * error.
+ *
+ * This function is meant to support legacy behaviour in existing drivers only.
+ * It must not be used in any new driver.
+ *
+ * Returns a pointer to a struct clk on success or an error pointer on failure.
+ */
+static inline struct clk *
+devm_v4l2_sensor_clk_get_legacy(struct device *dev, const char *id,
+ bool fixed_rate, unsigned long clk_rate)
+{
+ return __devm_v4l2_sensor_clk_get(dev, id, true, fixed_rate, clk_rate);
+}
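
A sketch of typical use in a sensor probe (identifier names assumed):

    struct clk *xclk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
    unsigned long xclk_rate;

    if (IS_ERR(xclk))
        return dev_err_probe(&client->dev, PTR_ERR(xclk),
                             "failed to get the sensor clock\n");

    /* works on DT (real clock) and ACPI (registered fixed clock) alike */
    xclk_rate = clk_get_rate(xclk);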
+
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
{
/*
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 59679a42b3e7..31fc1bee3797 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -2,7 +2,7 @@
/*
* V4L2 controls support header.
*
- * Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2010 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_CTRLS_H
@@ -56,6 +56,7 @@ struct video_device;
* @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure.
* @p_av1_frame: Pointer to an AV1 frame structure.
* @p_av1_film_grain: Pointer to an AV1 film grain structure.
+ * @p_rect: Pointer to a rectangle.
* @p: Pointer to a compound value.
* @p_const: Pointer to a constant compound value.
*/
@@ -89,6 +90,7 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
struct v4l2_ctrl_av1_frame *p_av1_frame;
struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
+ struct v4l2_rect *p_rect;
void *p;
const void *p_const;
};
@@ -131,6 +133,8 @@ struct v4l2_ctrl_ops {
*
* @equal: return true if all ctrl->elems array elements are equal.
* @init: initialize the value for array elements from from_idx to ctrl->elems.
+ * @minimum: set the value to the minimum value of the control.
+ * @maximum: set the value to the maximum value of the control.
* @log: log the value.
* @validate: validate the value for ctrl->new_elems array elements.
* Return 0 on success and a negative value otherwise.
@@ -140,6 +144,10 @@ struct v4l2_ctrl_type_ops {
union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2);
void (*init)(const struct v4l2_ctrl *ctrl, u32 from_idx,
union v4l2_ctrl_ptr ptr);
+ void (*minimum)(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr);
+ void (*maximum)(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr);
void (*log)(const struct v4l2_ctrl *ctrl);
int (*validate)(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr);
};
@@ -245,6 +253,12 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* @p_def: The control's default value represented via a union which
* provides a standard way of accessing control types
* through a pointer (for compound controls only).
+ * @p_min: The control's minimum value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer (for compound controls only).
+ * @p_max: The control's maximum value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer (for compound controls only).
* @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
@@ -304,6 +318,8 @@ struct v4l2_ctrl {
} cur;
union v4l2_ctrl_ptr p_def;
+ union v4l2_ctrl_ptr p_min;
+ union v4l2_ctrl_ptr p_max;
union v4l2_ctrl_ptr p_new;
union v4l2_ctrl_ptr p_cur;
};
@@ -423,6 +439,8 @@ struct v4l2_ctrl_handler {
* @step: The control's step value for non-menu controls.
* @def: The control's default value.
* @p_def: The control's default value for compound controls.
+ * @p_min: The control's minimum value for compound controls.
+ * @p_max: The control's maximum value for compound controls.
* @dims: The size of each dimension.
* @elem_size: The size in bytes of the control.
* @flags: The control's flags.
@@ -452,6 +470,8 @@ struct v4l2_ctrl_config {
u64 step;
s64 def;
union v4l2_ctrl_ptr p_def;
+ union v4l2_ctrl_ptr p_min;
+ union v4l2_ctrl_ptr p_max;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 elem_size;
u32 flags;
@@ -559,8 +579,10 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
* @hdl: The control handler.
*
* Does nothing if @hdl == NULL.
+ *
+ * Return: @hdl's error field or 0 if @hdl is NULL.
*/
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
/**
* v4l2_ctrl_lock() - Helper function to lock the handler
@@ -721,17 +743,25 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
* @ops: The control ops.
* @id: The control ID.
* @p_def: The control's default value.
+ * @p_min: The control's minimum value.
+ * @p_max: The control's maximum value.
*
- * Sames as v4l2_ctrl_new_std(), but with support to compound controls, thanks
- * to the @p_def field. Use v4l2_ctrl_ptr_create() to create @p_def from a
- * pointer. Use v4l2_ctrl_ptr_create(NULL) if the default value of the
- * compound control should be all zeroes.
+ * Same as v4l2_ctrl_new_std(), but with support for compound controls.
+ * To fill in the @p_def, @p_min and @p_max fields, use v4l2_ctrl_ptr_create()
+ * to convert a pointer to a const union v4l2_ctrl_ptr.
+ * Use v4l2_ctrl_ptr_create(NULL) if you want the default, minimum or maximum
+ * value of the compound control to be all zeroes.
+ * If the compound control does not set the ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX``
+ * flag, then it does not have minimum and maximum values. In that case just use
+ * v4l2_ctrl_ptr_create(NULL) for the @p_min and @p_max arguments.
*
*/
struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id,
- const union v4l2_ctrl_ptr p_def);
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max);
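
A sketch of creating a rectangle control with explicit default, minimum and
maximum values (the control ID, ops and geometry are illustrative):

    static struct v4l2_rect def_rect = { .width = 1920, .height = 1080 };
    static struct v4l2_rect min_rect = { .width =   16, .height =   16 };
    static struct v4l2_rect max_rect = { .width = 4096, .height = 2160 };

    v4l2_ctrl_new_std_compound(hdl, &my_ctrl_ops,
                               V4L2_CID_UVC_REGION_OF_INTEREST_RECT,
                               v4l2_ctrl_ptr_create(&def_rect),
                               v4l2_ctrl_ptr_create(&min_rect),
                               v4l2_ctrl_ptr_create(&max_rect));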
/**
* v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
@@ -1283,13 +1313,13 @@ void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
* v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl
*
* @file: pointer to struct file
- * @fh: unused. Kept just to be compatible to the arguments expected by
+ * @priv: unused. Kept just to be compatible with the arguments expected by
* &struct v4l2_ioctl_ops.vidioc_log_status.
*
* Can be used as a vidioc_log_status function that just dumps all controls
* associated with the filehandle.
*/
-int v4l2_ctrl_log_status(struct file *file, void *fh);
+int v4l2_ctrl_log_status(struct file *file, void *priv);
/**
* v4l2_ctrl_subscribe_event - Subscribes to an event
@@ -1405,6 +1435,18 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc);
/**
+ * v4l2_query_ext_ctrl_to_v4l2_queryctrl - Convert a v4l2_query_ext_ctrl to a v4l2_queryctrl.
+ *
+ * @to: The v4l2_queryctrl to write to.
+ * @from: The v4l2_query_ext_ctrl to read from.
+ *
+ * This function is a helper to convert a v4l2_query_ext_ctrl into a
+ * v4l2_queryctrl.
+ */
+void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to,
+ const struct v4l2_query_ext_ctrl *from);
+
+/**
* v4l2_query_ext_ctrl - Helper function to implement
* :ref:`VIDIOC_QUERY_EXT_CTRL <vidioc_queryctrl>` ioctl
*
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index d82dfdbf6e58..2e0f6d2e6a78 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -62,6 +62,7 @@ struct v4l2_ioctl_callbacks;
struct video_device;
struct v4l2_device;
struct v4l2_ctrl_handler;
+struct dentry;
/**
* enum v4l2_video_device_flags - Flags used by &struct video_device
@@ -73,7 +74,7 @@ struct v4l2_ctrl_handler;
* @V4L2_FL_USES_V4L2_FH:
* indicates that file->private_data points to &struct v4l2_fh.
* This flag is set by the core when v4l2_fh_init() is called.
- * All new drivers should use it.
+ * All drivers must use it.
* @V4L2_FL_QUIRK_INVERTED_CROP:
* some old M2M drivers use g/s_crop/cropcap incorrectly: crop and
* compose are swapped. If this flag is set, then the selection
@@ -312,10 +313,16 @@ struct video_device {
* media_entity_to_video_device - Returns a &struct video_device from
* the &struct media_entity embedded on it.
*
- * @__entity: pointer to &struct media_entity
+ * @__entity: pointer to &struct media_entity, may be NULL
*/
-#define media_entity_to_video_device(__entity) \
- container_of(__entity, struct video_device, entity)
+#define media_entity_to_video_device(__entity) \
+({ \
+ typeof(__entity) __me_vdev_ent = __entity; \
+ \
+ __me_vdev_ent ? \
+ container_of_const(__me_vdev_ent, struct video_device, \
+ entity) : NULL; \
+})
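With the NULL check folded into the macro, callers can pass a possibly-NULL
entity straight through; a one-line hedged example, where link is a
hypothetical &struct media_link:

	struct video_device *vdev =
		media_entity_to_video_device(link->sink->entity);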
/**
* to_video_device - Returns a &struct video_device from the
@@ -323,7 +330,7 @@ struct video_device {
*
* @cd: pointer to &struct device
*/
-#define to_video_device(cd) container_of(cd, struct video_device, dev)
+#define to_video_device(cd) container_of_const(cd, struct video_device, dev)
/**
* __video_register_device - register video4linux devices
@@ -539,6 +546,20 @@ static inline int video_is_registered(struct video_device *vdev)
return test_bit(V4L2_FL_REGISTERED, &vdev->flags);
}
+/**
+ * v4l2_debugfs_root - returns the dentry of the top-level "v4l2" debugfs dir
+ *
+ * If this directory does not yet exist, then it will be created.
+ */
+#ifdef CONFIG_DEBUG_FS
+struct dentry *v4l2_debugfs_root(void);
+#else
+static inline struct dentry *v4l2_debugfs_root(void)
+{
+ return NULL;
+}
+#endif
+
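A short usage sketch, with "mydrv" as a hypothetical driver name:

	/*
	 * Create a per-driver subdirectory under the shared "v4l2" debugfs
	 * root. debugfs_create_dir() is designed so that its result does not
	 * need error checking.
	 */
	struct dentry *dir = debugfs_create_dir("mydrv", v4l2_debugfs_root());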
#if defined(CONFIG_MEDIA_CONTROLLER)
/**
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index dd897a362f36..25f69b1b8db0 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -2,7 +2,7 @@
/*
V4L2 device support header.
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 8fa963326bf6..2b42e5d81f9e 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -8,6 +8,7 @@
#ifndef __V4L2_DV_TIMINGS_H
#define __V4L2_DV_TIMINGS_H
+#include <linux/debugfs.h>
#include <linux/videodev2.h>
/**
@@ -146,15 +147,18 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
* @polarities: the horizontal and vertical polarities (same as struct
* v4l2_bt_timings polarities).
* @interlaced: if this flag is true, it indicates interlaced format
+ * @cap: the v4l2_dv_timings_cap capabilities.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid CVT format. If so, then it will return true, and fmt will be filled
* in with the found CVT timings.
*/
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- unsigned active_width, u32 polarities, bool interlaced,
- struct v4l2_dv_timings *fmt);
+bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq,
+ unsigned int vsync, unsigned int active_width,
+ u32 polarities, bool interlaced,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *fmt);
/**
* v4l2_detect_gtf - detect if the given timings follow the GTF standard
@@ -170,15 +174,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
* image height, so it has to be passed explicitly. Usually
* the native screen aspect ratio is used for this. If it
* is not filled in correctly, then 16:9 will be assumed.
+ * @cap: the v4l2_dv_timings_cap capabilities.
* @fmt: the resulting timings.
*
* This function will attempt to detect if the given values correspond to a
* valid GTF format. If so, then it will return true, and fmt will be filled
* in with the found GTF timings.
*/
-bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, bool interlaced, struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt);
+bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq,
+ unsigned int vsync, u32 polarities, bool interlaced,
+ struct v4l2_fract aspect,
+ const struct v4l2_dv_timings_cap *cap,
+ struct v4l2_dv_timings *fmt);
/**
* v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
@@ -245,10 +252,59 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
const struct hdmi_vendor_infoframe *hdmi,
unsigned int height);
+unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks);
u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
unsigned int *offset);
void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+/* Add support for exporting InfoFrames to debugfs */
+
+/*
+ * HDMI InfoFrames start with a 3 byte header, then a checksum,
+ * followed by the actual IF payload.
+ *
+ * The payload length is limited to 30 bytes according to the HDMI spec,
+ * but since the length is encoded in 5 bits, it can be 31 bytes theoretically.
+ * So set the max length as 31 + 3 (header) + 1 (checksum) = 35.
+ */
+#define V4L2_DEBUGFS_IF_MAX_LEN (35)
+
+#define V4L2_DEBUGFS_IF_AVI BIT(0)
+#define V4L2_DEBUGFS_IF_AUDIO BIT(1)
+#define V4L2_DEBUGFS_IF_SPD BIT(2)
+#define V4L2_DEBUGFS_IF_HDMI BIT(3)
+#define V4L2_DEBUGFS_IF_DRM BIT(4)
+
+typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv,
+ struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos);
+
+struct v4l2_debugfs_if {
+ struct dentry *if_dir;
+ void *priv;
+
+ v4l2_debugfs_if_read_t if_read;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
+ void *priv,
+ v4l2_debugfs_if_read_t if_read);
+void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes);
+#else
+static inline
+struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
+ void *priv,
+ v4l2_debugfs_if_read_t if_read)
+{
+ return NULL;
+}
+
+static inline void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes)
+{
+}
+#endif
+
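A minimal sketch of a receiver wiring this up; struct my_state, its cached AVI
InfoFrame fields and the debugfs root dentry are assumptions for illustration:

	static ssize_t my_if_read(u32 type, void *priv, struct file *filp,
				  char __user *ubuf, size_t count, loff_t *ppos)
	{
		struct my_state *state = priv;

		/* Simplified: serve the cached AVI InfoFrame regardless of
		 * 'type'; a real driver would switch on the requested type. */
		return simple_read_from_buffer(ubuf, count, ppos,
					       state->avi_if, state->avi_if_len);
	}

	/* At probe time: */
	state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_root,
						  V4L2_DEBUGFS_IF_AVI |
						  V4L2_DEBUGFS_IF_AUDIO,
						  state, my_if_read);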
#endif
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index b5b3e00c8e6a..aad4b3689d7e 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -3,7 +3,7 @@
* v4l2-fh.h
*
* V4L2 file handle. Store per file handle data for the V4L2
- * framework. Using file handles is optional for the drivers.
+ * framework. Using file handles is mandatory for drivers.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
@@ -57,6 +57,20 @@ struct v4l2_fh {
};
/**
+ * file_to_v4l2_fh - Return the v4l2_fh associated with a struct file
+ *
+ * @filp: pointer to &struct file
+ *
+ * This function should be used by drivers to retrieve the &struct v4l2_fh
+ * instance pointer stored in the file private_data instead of accessing the
+ * private_data field directly.
+ */
+static inline struct v4l2_fh *file_to_v4l2_fh(struct file *filp)
+{
+ return filp->private_data;
+}
+
+/**
* v4l2_fh_init - Initialise the file handle.
*
* @fh: pointer to &struct v4l2_fh
@@ -73,11 +87,14 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev);
* v4l2_fh_add - Add the fh to the list of file handles on a video_device.
*
* @fh: pointer to &struct v4l2_fh
+ * @filp: pointer to &struct file associated with @fh
+ *
+ * The function sets filp->private_data to point to @fh.
*
* .. note::
* The @fh file handle must be initialised first.
*/
-void v4l2_fh_add(struct v4l2_fh *fh);
+void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp);
/**
* v4l2_fh_open - Ancillary routine that can be used as the open\(\) op
@@ -87,6 +104,9 @@ void v4l2_fh_add(struct v4l2_fh *fh);
*
* It allocates a v4l2_fh and inits and adds it to the &struct video_device
* associated with the file pointer.
+ *
+ * On error filp->private_data will be %NULL, otherwise it will point to
+ * the &struct v4l2_fh.
*/
int v4l2_fh_open(struct file *filp);
@@ -94,15 +114,15 @@ int v4l2_fh_open(struct file *filp);
* v4l2_fh_del - Remove file handle from the list of file handles.
*
* @fh: pointer to &struct v4l2_fh
+ * @filp: pointer to &struct file associated with @fh
*
- * On error filp->private_data will be %NULL, otherwise it will point to
- * the &struct v4l2_fh.
+ * The function resets filp->private_data to %NULL.
*
* .. note::
* Must be called in v4l2_file_operations->release\(\) handler if the driver
* uses &struct v4l2_fh.
*/
-void v4l2_fh_del(struct v4l2_fh *fh);
+void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp);
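Taken together, the new signatures give the following open/release lifecycle;
this is a hedged sketch, with struct my_fh (embedding the &struct v4l2_fh) as
a driver-side assumption:

	static int my_open(struct file *filp)
	{
		struct video_device *vdev = video_devdata(filp);
		struct my_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

		if (!fh)
			return -ENOMEM;
		v4l2_fh_init(&fh->fh, vdev);
		v4l2_fh_add(&fh->fh, filp);	/* sets filp->private_data */
		return 0;
	}

	static int my_release(struct file *filp)
	{
		struct v4l2_fh *fh = file_to_v4l2_fh(filp);

		v4l2_fh_del(fh, filp);		/* clears filp->private_data */
		v4l2_fh_exit(fh);
		kfree(container_of(fh, struct my_fh, fh));
		return 0;
	}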
/**
* v4l2_fh_exit - Release resources related to a file handle.
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index bdbb7e542321..6f7a58350441 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -193,14 +193,8 @@ struct v4l2_fh;
* :ref:`VIDIOC_G_OUTPUT <vidioc_g_output>` ioctl
* @vidioc_s_output: pointer to the function that implements
* :ref:`VIDIOC_S_OUTPUT <vidioc_g_output>` ioctl
- * @vidioc_queryctrl: pointer to the function that implements
- * :ref:`VIDIOC_QUERYCTRL <vidioc_queryctrl>` ioctl
* @vidioc_query_ext_ctrl: pointer to the function that implements
* :ref:`VIDIOC_QUERY_EXT_CTRL <vidioc_queryctrl>` ioctl
- * @vidioc_g_ctrl: pointer to the function that implements
- * :ref:`VIDIOC_G_CTRL <vidioc_g_ctrl>` ioctl
- * @vidioc_s_ctrl: pointer to the function that implements
- * :ref:`VIDIOC_S_CTRL <vidioc_g_ctrl>` ioctl
* @vidioc_g_ext_ctrls: pointer to the function that implements
* :ref:`VIDIOC_G_EXT_CTRLS <vidioc_g_ext_ctrls>` ioctl
* @vidioc_s_ext_ctrls: pointer to the function that implements
@@ -299,144 +293,144 @@ struct v4l2_ioctl_ops {
/* ioctl callbacks */
/* VIDIOC_QUERYCAP handler */
- int (*vidioc_querycap)(struct file *file, void *fh,
+ int (*vidioc_querycap)(struct file *file, void *priv,
struct v4l2_capability *cap);
/* VIDIOC_ENUM_FMT handlers */
- int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
- int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
- int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
- int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *priv,
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* Buffer handlers */
- int (*vidioc_reqbufs)(struct file *file, void *fh,
+ int (*vidioc_reqbufs)(struct file *file, void *priv,
struct v4l2_requestbuffers *b);
- int (*vidioc_querybuf)(struct file *file, void *fh,
+ int (*vidioc_querybuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_qbuf)(struct file *file, void *fh,
+ int (*vidioc_qbuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_expbuf)(struct file *file, void *fh,
+ int (*vidioc_expbuf)(struct file *file, void *priv,
struct v4l2_exportbuffer *e);
- int (*vidioc_dqbuf)(struct file *file, void *fh,
+ int (*vidioc_dqbuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_create_bufs)(struct file *file, void *fh,
+ int (*vidioc_create_bufs)(struct file *file, void *priv,
struct v4l2_create_buffers *b);
- int (*vidioc_prepare_buf)(struct file *file, void *fh,
+ int (*vidioc_prepare_buf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_remove_bufs)(struct file *file, void *fh,
+ int (*vidioc_remove_bufs)(struct file *file, void *priv,
struct v4l2_remove_buffers *d);
- int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i);
- int (*vidioc_g_fbuf)(struct file *file, void *fh,
+ int (*vidioc_overlay)(struct file *file, void *priv, unsigned int i);
+ int (*vidioc_g_fbuf)(struct file *file, void *priv,
struct v4l2_framebuffer *a);
- int (*vidioc_s_fbuf)(struct file *file, void *fh,
+ int (*vidioc_s_fbuf)(struct file *file, void *priv,
const struct v4l2_framebuffer *a);
/* Stream on/off */
- int (*vidioc_streamon)(struct file *file, void *fh,
+ int (*vidioc_streamon)(struct file *file, void *priv,
enum v4l2_buf_type i);
- int (*vidioc_streamoff)(struct file *file, void *fh,
+ int (*vidioc_streamoff)(struct file *file, void *priv,
enum v4l2_buf_type i);
/*
@@ -444,141 +438,135 @@ struct v4l2_ioctl_ops {
*
* Note: ENUMSTD is handled by videodev.c
*/
- int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm);
- int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm);
- int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a);
+ int (*vidioc_g_std)(struct file *file, void *priv, v4l2_std_id *norm);
+ int (*vidioc_s_std)(struct file *file, void *priv, v4l2_std_id norm);
+ int (*vidioc_querystd)(struct file *file, void *priv, v4l2_std_id *a);
/* Input handling */
- int (*vidioc_enum_input)(struct file *file, void *fh,
+ int (*vidioc_enum_input)(struct file *file, void *priv,
struct v4l2_input *inp);
- int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_input)(struct file *file, void *priv, unsigned int *i);
+ int (*vidioc_s_input)(struct file *file, void *priv, unsigned int i);
/* Output handling */
- int (*vidioc_enum_output)(struct file *file, void *fh,
+ int (*vidioc_enum_output)(struct file *file, void *priv,
struct v4l2_output *a);
- int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_output)(struct file *file, void *priv, unsigned int *i);
+ int (*vidioc_s_output)(struct file *file, void *priv, unsigned int i);
/* Control handling */
- int (*vidioc_queryctrl)(struct file *file, void *fh,
- struct v4l2_queryctrl *a);
- int (*vidioc_query_ext_ctrl)(struct file *file, void *fh,
+ int (*vidioc_query_ext_ctrl)(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *a);
- int (*vidioc_g_ctrl)(struct file *file, void *fh,
- struct v4l2_control *a);
- int (*vidioc_s_ctrl)(struct file *file, void *fh,
- struct v4l2_control *a);
- int (*vidioc_g_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_g_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_s_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_s_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_try_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_try_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_querymenu)(struct file *file, void *fh,
+ int (*vidioc_querymenu)(struct file *file, void *priv,
struct v4l2_querymenu *a);
/* Audio ioctls */
- int (*vidioc_enumaudio)(struct file *file, void *fh,
+ int (*vidioc_enumaudio)(struct file *file, void *priv,
struct v4l2_audio *a);
- int (*vidioc_g_audio)(struct file *file, void *fh,
+ int (*vidioc_g_audio)(struct file *file, void *priv,
struct v4l2_audio *a);
- int (*vidioc_s_audio)(struct file *file, void *fh,
+ int (*vidioc_s_audio)(struct file *file, void *priv,
const struct v4l2_audio *a);
/* Audio out ioctls */
- int (*vidioc_enumaudout)(struct file *file, void *fh,
+ int (*vidioc_enumaudout)(struct file *file, void *priv,
struct v4l2_audioout *a);
- int (*vidioc_g_audout)(struct file *file, void *fh,
+ int (*vidioc_g_audout)(struct file *file, void *priv,
struct v4l2_audioout *a);
- int (*vidioc_s_audout)(struct file *file, void *fh,
+ int (*vidioc_s_audout)(struct file *file, void *priv,
const struct v4l2_audioout *a);
- int (*vidioc_g_modulator)(struct file *file, void *fh,
+ int (*vidioc_g_modulator)(struct file *file, void *priv,
struct v4l2_modulator *a);
- int (*vidioc_s_modulator)(struct file *file, void *fh,
+ int (*vidioc_s_modulator)(struct file *file, void *priv,
const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_g_pixelaspect)(struct file *file, void *fh,
+ int (*vidioc_g_pixelaspect)(struct file *file, void *priv,
int buf_type, struct v4l2_fract *aspect);
- int (*vidioc_g_selection)(struct file *file, void *fh,
+ int (*vidioc_g_selection)(struct file *file, void *priv,
struct v4l2_selection *s);
- int (*vidioc_s_selection)(struct file *file, void *fh,
+ int (*vidioc_s_selection)(struct file *file, void *priv,
struct v4l2_selection *s);
/* Compression ioctls */
- int (*vidioc_g_jpegcomp)(struct file *file, void *fh,
+ int (*vidioc_g_jpegcomp)(struct file *file, void *priv,
struct v4l2_jpegcompression *a);
- int (*vidioc_s_jpegcomp)(struct file *file, void *fh,
+ int (*vidioc_s_jpegcomp)(struct file *file, void *priv,
const struct v4l2_jpegcompression *a);
- int (*vidioc_g_enc_index)(struct file *file, void *fh,
+ int (*vidioc_g_enc_index)(struct file *file, void *priv,
struct v4l2_enc_idx *a);
- int (*vidioc_encoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_encoder_cmd)(struct file *file, void *priv,
struct v4l2_encoder_cmd *a);
- int (*vidioc_try_encoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_try_encoder_cmd)(struct file *file, void *priv,
struct v4l2_encoder_cmd *a);
- int (*vidioc_decoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_decoder_cmd)(struct file *file, void *priv,
struct v4l2_decoder_cmd *a);
- int (*vidioc_try_decoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_try_decoder_cmd)(struct file *file, void *priv,
struct v4l2_decoder_cmd *a);
/* Stream type-dependent parameter ioctls */
- int (*vidioc_g_parm)(struct file *file, void *fh,
+ int (*vidioc_g_parm)(struct file *file, void *priv,
struct v4l2_streamparm *a);
- int (*vidioc_s_parm)(struct file *file, void *fh,
+ int (*vidioc_s_parm)(struct file *file, void *priv,
struct v4l2_streamparm *a);
/* Tuner ioctls */
- int (*vidioc_g_tuner)(struct file *file, void *fh,
+ int (*vidioc_g_tuner)(struct file *file, void *priv,
struct v4l2_tuner *a);
- int (*vidioc_s_tuner)(struct file *file, void *fh,
+ int (*vidioc_s_tuner)(struct file *file, void *priv,
const struct v4l2_tuner *a);
- int (*vidioc_g_frequency)(struct file *file, void *fh,
+ int (*vidioc_g_frequency)(struct file *file, void *priv,
struct v4l2_frequency *a);
- int (*vidioc_s_frequency)(struct file *file, void *fh,
+ int (*vidioc_s_frequency)(struct file *file, void *priv,
const struct v4l2_frequency *a);
- int (*vidioc_enum_freq_bands)(struct file *file, void *fh,
+ int (*vidioc_enum_freq_bands)(struct file *file, void *priv,
struct v4l2_frequency_band *band);
/* Sliced VBI cap */
- int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_sliced_vbi_cap *a);
/* Log status ioctl */
- int (*vidioc_log_status)(struct file *file, void *fh);
+ int (*vidioc_log_status)(struct file *file, void *priv);
- int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh,
+ int (*vidioc_s_hw_freq_seek)(struct file *file, void *priv,
const struct v4l2_hw_freq_seek *a);
/* Debugging ioctls */
#ifdef CONFIG_VIDEO_ADV_DEBUG
- int (*vidioc_g_register)(struct file *file, void *fh,
+ int (*vidioc_g_register)(struct file *file, void *priv,
struct v4l2_dbg_register *reg);
- int (*vidioc_s_register)(struct file *file, void *fh,
+ int (*vidioc_s_register)(struct file *file, void *priv,
const struct v4l2_dbg_register *reg);
- int (*vidioc_g_chip_info)(struct file *file, void *fh,
+ int (*vidioc_g_chip_info)(struct file *file, void *priv,
struct v4l2_dbg_chip_info *chip);
#endif
- int (*vidioc_enum_framesizes)(struct file *file, void *fh,
+ int (*vidioc_enum_framesizes)(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize);
- int (*vidioc_enum_frameintervals)(struct file *file, void *fh,
+ int (*vidioc_enum_frameintervals)(struct file *file, void *priv,
struct v4l2_frmivalenum *fival);
/* DV Timings IOCTLs */
- int (*vidioc_s_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_s_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_g_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_g_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_query_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_query_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_enum_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_enum_dv_timings)(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings);
- int (*vidioc_dv_timings_cap)(struct file *file, void *fh,
+ int (*vidioc_dv_timings_cap)(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap);
- int (*vidioc_g_edid)(struct file *file, void *fh,
+ int (*vidioc_g_edid)(struct file *file, void *priv,
struct v4l2_edid *edid);
- int (*vidioc_s_edid)(struct file *file, void *fh,
+ int (*vidioc_s_edid)(struct file *file, void *priv,
struct v4l2_edid *edid);
int (*vidioc_subscribe_event)(struct v4l2_fh *fh,
@@ -587,7 +575,7 @@ struct v4l2_ioctl_ops {
const struct v4l2_event_subscription *sub);
/* For other private ioctls */
- long (*vidioc_default)(struct file *file, void *fh,
+ long (*vidioc_default)(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *arg);
};
@@ -691,6 +679,7 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
#endif
unsigned int v4l2_compat_translate_cmd(unsigned int cmd);
+unsigned int v4l2_translate_cmd(unsigned int cmd);
int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_get_array_args(struct file *file, void *mbuf,
diff --git a/include/media/v4l2-isp.h b/include/media/v4l2-isp.h
new file mode 100644
index 000000000000..f3a6d0edcb24
--- /dev/null
+++ b/include/media/v4l2-isp.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Video4Linux2 generic ISP parameters and statistics support
+ *
+ * Copyright (C) 2025 Ideas On Board Oy
+ * Author: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
+ */
+
+#ifndef _V4L2_ISP_H_
+#define _V4L2_ISP_H_
+
+#include <linux/media/v4l2-isp.h>
+
+struct device;
+struct vb2_buffer;
+
+/**
+ * v4l2_isp_params_buffer_size - Calculate size of v4l2_isp_params_buffer
+ * @max_params_size: The total size of the ISP configuration blocks
+ *
+ * Users of the v4l2 extensible parameters will have differently sized data
+ * arrays depending on their specific parameter buffers. Drivers and userspace will
+ * need to be able to calculate the appropriate size of the struct to
+ * accommodate all ISP configuration blocks provided by the platform.
+ * This macro provides a convenient tool for the calculation.
+ */
+#define v4l2_isp_params_buffer_size(max_params_size) \
+ (offsetof(struct v4l2_isp_params_buffer, data) + (max_params_size))
+
+/**
+ * v4l2_isp_params_validate_buffer_size - Validate a V4L2 ISP buffer's size
+ * @dev: the driver's device pointer
+ * @vb: the videobuf2 buffer
+ * @max_size: the maximum allowed buffer size
+ *
+ * This function performs validation of the size of a V4L2 ISP parameters buffer
+ * before the driver can access the actual data buffer content.
+ *
+ * After the sizes validation, drivers should copy the buffer content to a
+ * kernel-only memory area to prevent userspace from modifying it,
+ * before completing validation using v4l2_isp_params_validate_buffer().
+ *
+ * The @vb buffer, as received from the vb2 .buf_prepare() operation, is checked
+ * against @max_size and validated to be large enough to accommodate at least
+ * one ISP configuration block.
+ */
+int v4l2_isp_params_validate_buffer_size(struct device *dev,
+ struct vb2_buffer *vb,
+ size_t max_size);
+
+/**
+ * struct v4l2_isp_params_block_type_info - V4L2 ISP per-block-type info
+ * @size: the block type expected size
+ *
+ * The v4l2_isp_params_block_type_info structure collects information about
+ * the ISP configuration block types for validation purposes. It currently
+ * only contains the expected block type size.
+ *
+ * Drivers shall prepare an array of block type info entries, indexed by block
+ * type, one for each supported ISP block type, and populate each entry with
+ * the expected block size.
+ */
+struct v4l2_isp_params_block_type_info {
+ size_t size;
+};
+
+/**
+ * v4l2_isp_params_validate_buffer - Validate a V4L2 ISP parameters buffer
+ * @dev: the driver's device pointer
+ * @vb: the videobuf2 buffer
+ * @buffer: the V4L2 ISP parameters buffer
+ * @type_info: the array of per-block-type validation info
+ * @num_block_types: the number of block types in the type_info array
+ *
+ * This function completes the validation of a V4L2 ISP parameters buffer,
+ * verifying the correctness of each configuration block before the driver can
+ * use them to program the hardware.
+ *
+ * Drivers should use this function after having validated the correctness of
+ * the vb2 buffer sizes by using the v4l2_isp_params_validate_buffer_size()
+ * helper first. Once the buffer size has been validated, drivers should
+ * perform a copy of the user provided buffer into a kernel-only memory buffer
+ * to prevent userspace from modifying its content after it has been submitted
+ * to the driver, and then call this function to complete validation.
+ */
+int v4l2_isp_params_validate_buffer(struct device *dev, struct vb2_buffer *vb,
+ const struct v4l2_isp_params_buffer *buffer,
+ const struct v4l2_isp_params_block_type_info *type_info,
+ size_t num_block_types);
+
+#endif /* _V4L2_ISP_H_ */
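To make the two-step validation flow concrete, here is a hedged .buf_prepare()
sketch; my_dev, its params_shadow copy area, my_block_info and
MY_ISP_MAX_PARAMS are all hypothetical driver definitions:

	static int my_isp_params_buf_prepare(struct vb2_buffer *vb)
	{
		struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
		const struct v4l2_isp_params_buffer *buf;
		int ret;

		/* Step 1: size checks only. */
		ret = v4l2_isp_params_validate_buffer_size(dev->dev, vb,
			v4l2_isp_params_buffer_size(MY_ISP_MAX_PARAMS));
		if (ret)
			return ret;

		/* Copy to kernel-only memory so userspace cannot change the
		 * content between validation and hardware programming. */
		memcpy(dev->params_shadow, vb2_plane_vaddr(vb, 0),
		       vb2_get_plane_payload(vb, 0));
		buf = dev->params_shadow;

		/* Step 2: per-block validation against the expected sizes. */
		return v4l2_isp_params_validate_buffer(dev->dev, vb, buf,
						       my_block_info,
						       ARRAY_SIZE(my_block_info));
	}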
diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h
index 2dba843ce3bd..62dda1560275 100644
--- a/include/media/v4l2-jpeg.h
+++ b/include/media/v4l2-jpeg.h
@@ -14,6 +14,30 @@
#define V4L2_JPEG_MAX_COMPONENTS 4
#define V4L2_JPEG_MAX_TABLES 4
+/*
+ * Prefixes used to generate huffman table class and destination identifiers as
+ * described below:
+ *
+ * V4L2_JPEG_LUM_HT | V4L2_JPEG_DC_HT : Prefix for Luma DC coefficients
+ * huffman table
+ * V4L2_JPEG_LUM_HT | V4L2_JPEG_AC_HT : Prefix for Luma AC coefficients
+ * huffman table
+ * V4L2_JPEG_CHR_HT | V4L2_JPEG_DC_HT : Prefix for Chroma DC coefficients
+ * huffman table
+ * V4L2_JPEG_CHR_HT | V4L2_JPEG_AC_HT : Prefix for Chroma AC coefficients
+ * huffman table
+ */
+#define V4L2_JPEG_LUM_HT 0x00
+#define V4L2_JPEG_CHR_HT 0x01
+#define V4L2_JPEG_DC_HT 0x00
+#define V4L2_JPEG_AC_HT 0x10
+
+/* Length of reference huffman tables as provided in Table K.3 of ITU-T.81 */
+#define V4L2_JPEG_REF_HT_AC_LEN 178
+#define V4L2_JPEG_REF_HT_DC_LEN 28
+
+/* Array size for an 8x8 block of samples or DCT coefficients */
+#define V4L2_JPEG_PIXELS_IN_BLOCK 64
/**
* struct v4l2_jpeg_reference - reference into the JPEG buffer
@@ -145,13 +169,12 @@ struct v4l2_jpeg_header {
int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out);
-int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
- struct v4l2_jpeg_frame_header *frame_header);
-int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
- struct v4l2_jpeg_scan_header *scan_header);
-int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
- struct v4l2_jpeg_reference *q_tables);
-int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
- struct v4l2_jpeg_reference *huffman_tables);
+extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK];
+extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
+extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
+extern const u8 v4l2_jpeg_ref_table_luma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN];
+extern const u8 v4l2_jpeg_ref_table_luma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN];
+extern const u8 v4l2_jpeg_ref_table_chroma_dc_ht[V4L2_JPEG_REF_HT_DC_LEN];
+extern const u8 v4l2_jpeg_ref_table_chroma_ac_ht[V4L2_JPEG_REF_HT_AC_LEN];
#endif
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index ed0a44b6eada..1837c9fd78cf 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -178,6 +178,9 @@ void v4l2_pipeline_pm_put(struct media_entity *entity);
* @flags: New link flags that will be applied
* @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 5bce6e423e94..24c738cd7894 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -74,6 +74,24 @@
#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8
/**
+ * enum v4l2_mbus_csi2_cphy_line_orders_type - CSI-2 C-PHY line order
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC: C-PHY line order ABC (default)
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB: C-PHY line order ACB
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC: C-PHY line order BAC
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA: C-PHY line order BCA
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB: C-PHY line order CAB
+ * @V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA: C-PHY line order CBA
+ */
+enum v4l2_mbus_csi2_cphy_line_orders_type {
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ABC,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_ACB,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BAC,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_BCA,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CAB,
+ V4L2_MBUS_CSI2_CPHY_LINE_ORDER_CBA,
+};
+
+/**
* struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration
* @flags: media bus (V4L2_MBUS_*) flags
* @data_lanes: an array of physical data lane indexes
@@ -81,6 +99,8 @@
* @num_data_lanes: number of data lanes
* @lane_polarities: polarity of the lanes. The order is the same of
* the physical lanes.
+ * @line_orders: line order of the data lanes. The order is the same as that
+ * of the physical lanes.
*/
struct v4l2_mbus_config_mipi_csi2 {
unsigned int flags;
@@ -88,6 +108,7 @@ struct v4l2_mbus_config_mipi_csi2 {
unsigned char clock_lane;
unsigned char num_data_lanes;
bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES];
+ enum v4l2_mbus_csi2_cphy_line_orders_type line_orders[V4L2_MBUS_CSI2_MAX_DATA_LANES];
};
/**
@@ -148,6 +169,7 @@ enum v4l2_mbus_type {
/**
* struct v4l2_mbus_config - media bus configuration
* @type: interface type
+ * @link_freq: The link frequency. See also V4L2_CID_LINK_FREQ control.
* @bus: bus configuration data structure
* @bus.parallel: embedded &struct v4l2_mbus_config_parallel.
* Used if the bus is parallel or BT.656.
@@ -162,6 +184,7 @@ enum v4l2_mbus_type {
*/
struct v4l2_mbus_config {
enum v4l2_mbus_type type;
+ u64 link_freq;
union {
struct v4l2_mbus_config_parallel parallel;
struct v4l2_mbus_config_mipi_csi1 mipi_csi1;
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0af330cf91c3..bf6a09a04dcf 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -153,6 +153,9 @@ void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
* @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
+ *
+ * This function returns the capture queue when @type is a capture type, and the
+ * output queue otherwise. It never returns a NULL pointer.
*/
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
@@ -192,8 +195,7 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
* other instances to take control of the device.
*
* This function has to be called only after &v4l2_m2m_ops->device_run
- * callback has been called on the driver. To prevent recursion, it should
- * not be called directly from the &v4l2_m2m_ops->device_run callback though.
+ * callback has been called on the driver.
*/
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
@@ -843,19 +845,13 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
*
* @out_vb: the output buffer that is the source of the metadata.
* @cap_vb: the capture buffer that will receive the metadata.
- * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
*
* This helper function copies the timestamp, timecode (if the TIMECODE
- * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
- * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
- *
- * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
- * flags are not copied. This is typically needed for encoders that
- * set this bits explicitly.
+ * buffer flag was set), field, and the TIMECODE and TSTAMP_SRC_MASK flags from
+ * @out_vb to @cap_vb.
*/
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
- struct vb2_v4l2_buffer *cap_vb,
- bool copy_frame_flags);
+ struct vb2_v4l2_buffer *cap_vb);
/* v4l2 request helper */
@@ -864,34 +860,34 @@ void v4l2_m2m_request_queue(struct media_request *req);
/* v4l2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb);
-int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
- struct v4l2_create_buffers *create);
+ struct v4l2_requestbuffers *rb);
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
struct v4l2_remove_buffers *d);
-int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
- struct v4l2_exportbuffer *eb);
-int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb);
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type);
-int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type);
-int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec);
-int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
-int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec);
-int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
-int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index e30c463d90e5..a37d9a847196 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -2,7 +2,7 @@
/*
* V4L2 sub-device support header.
*
- * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_SUBDEV_H
@@ -36,6 +36,7 @@ struct v4l2_event_subscription;
struct v4l2_fh;
struct v4l2_subdev;
struct v4l2_subdev_fh;
+struct v4l2_subdev_stream_config;
struct tuner_setup;
struct v4l2_mbus_frame_desc;
struct led_classdev;
@@ -450,7 +451,15 @@ enum v4l2_subdev_pre_streamon_flags {
* already started or stopped subdev. Also see call_s_stream wrapper in
* v4l2-subdev.c.
*
- * @g_pixelaspect: callback to return the pixelaspect ratio.
+ * This callback is DEPRECATED. New drivers should instead implement
+ * &v4l2_subdev_pad_ops.enable_streams and
+ * &v4l2_subdev_pad_ops.disable_streams operations, and use
+ * v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream
+ * operation to support legacy users.
+ *
+ * Drivers should also not call the .s_stream() subdev operation directly,
+ * but use the v4l2_subdev_enable_streams() and
+ * v4l2_subdev_disable_streams() helpers.
*
* @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
* can adjust @size to a lower value and must not write more data to the
@@ -481,7 +490,6 @@ struct v4l2_subdev_video_ops {
int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
- int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf,
unsigned int *size);
int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags);
@@ -677,30 +685,6 @@ struct v4l2_subdev_pad_config {
};
/**
- * struct v4l2_subdev_stream_config - Used for storing stream configuration.
- *
- * @pad: pad number
- * @stream: stream number
- * @enabled: has the stream been enabled with v4l2_subdev_enable_stream()
- * @fmt: &struct v4l2_mbus_framefmt
- * @crop: &struct v4l2_rect to be used for crop
- * @compose: &struct v4l2_rect to be used for compose
- * @interval: frame interval
- *
- * This structure stores configuration for a stream.
- */
-struct v4l2_subdev_stream_config {
- u32 pad;
- u32 stream;
- bool enabled;
-
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
- struct v4l2_rect compose;
- struct v4l2_fract interval;
-};
-
-/**
* struct v4l2_subdev_stream_configs - A collection of stream configs.
*
* @num_configs: number of entries in @config.
@@ -812,7 +796,9 @@ struct v4l2_subdev_state {
* possible configuration from the remote end, likely calling
* this operation as close as possible to stream on time. The
* operation shall fail if the pad index it has been called on
- * is not valid or in case of unrecoverable failures.
+ * is not valid or in case of unrecoverable failures. The
+ * config argument has been memset to 0 just before calling
+ * the op.
*
* @set_routing: Enable or disable data connection routes described in the
* subdevice routing table. Subdevs that implement this operation
@@ -824,11 +810,19 @@ struct v4l2_subdev_state {
* v4l2_subdev_init_finalize() at initialization time). Do not call
* directly, use v4l2_subdev_enable_streams() instead.
*
+ * Drivers that support only a single stream without setting the
+ * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask
+ * argument.
+ *
* @disable_streams: Disable the streams defined in streams_mask on the given
* source pad. Subdevs that implement this operation must use the active
* state management provided by the subdev core (enabled through a call to
* v4l2_subdev_init_finalize() at initialization time). Do not call
* directly, use v4l2_subdev_disable_streams() instead.
+ *
+ * Drivers that support only a single stream without setting the
+ * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask
+ * argument.
*/
struct v4l2_subdev_pad_ops {
int (*enum_mbus_code)(struct v4l2_subdev *sd,
@@ -1041,10 +1035,11 @@ struct v4l2_subdev_platform_data {
* @active_state: Active state for the subdev (NULL for subdevs tracking the
* state internally). Initialized by calling
* v4l2_subdev_init_finalize().
- * @enabled_streams: Bitmask of enabled streams used by
- * v4l2_subdev_enable_streams() and
- * v4l2_subdev_disable_streams() helper functions for fallback
- * cases.
+ * @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams()
+ * and v4l2_subdev_disable_streams() helper functions for
+ * fallback cases.
+ * @s_stream_enabled: Tracks whether streaming has been enabled with s_stream.
+ * This is only for call_s_stream() internal use.
*
* Each instance of a subdev driver should create this struct, either
* stand-alone or embedded in a larger struct.
@@ -1092,7 +1087,8 @@ struct v4l2_subdev {
* doesn't support it.
*/
struct v4l2_subdev_state *active_state;
- u64 enabled_streams;
+ u64 enabled_pads;
+ bool s_stream_enabled;
};
@@ -1107,7 +1103,7 @@ struct v4l2_subdev {
typeof(ent) __me_sd_ent = (ent); \
\
__me_sd_ent ? \
- container_of(__me_sd_ent, struct v4l2_subdev, entity) : \
+ container_of_const(__me_sd_ent, struct v4l2_subdev, entity) : \
NULL; \
})
@@ -1239,6 +1235,12 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
* calls v4l2_subdev_link_validate_default() to ensure that
* width, height and the media bus pixel code are equal on both
* source and sink of the link.
+ *
+ * The function can be used as a drop-in &media_entity_ops.link_validate
+ * implementation for v4l2_subdev instances. It supports all links between
+ * subdevs, as well as links between subdevs and video devices, provided that
+ * the video devices also implement their &media_entity_ops.link_validate
+ * operation.
*/
int v4l2_subdev_link_validate(struct media_link *link);
@@ -1326,6 +1328,16 @@ void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \
__v4l2_subdev_state_get_ ## NAME ## ARG
+/*
+ * A macro to constify the return value of the state accessors when the state
+ * parameter is const.
+ */
+#define __v4l2_subdev_state_constify_ret(state, value) \
+ _Generic(state, \
+ const struct v4l2_subdev_state *: (const typeof(*(value)) *)(value), \
+ struct v4l2_subdev_state *: (value) \
+ )
+
/**
* v4l2_subdev_state_get_format() - Get pointer to a stream format
* @state: subdevice state
@@ -1340,16 +1352,21 @@ void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
*/
/*
* Wrap v4l2_subdev_state_get_format(), allowing the function to be called with
- * two or three arguments. The purpose of the __v4l2_subdev_state_get_format()
- * macro below is to come up with the name of the function or macro to call,
- * using the last two arguments (_stream and _pad). The selected function or
- * macro is then called using the arguments specified by the caller. A similar
- * arrangement is used for v4l2_subdev_state_crop() and
- * v4l2_subdev_state_compose() below.
- */
-#define v4l2_subdev_state_get_format(state, pad, ...) \
- __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \
- (state, pad, ##__VA_ARGS__)
+ * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call()
+ * macro is to come up with the name of the function or macro to call, using
+ * the last two arguments (_stream and _pad). The selected function or macro is
+ * then called using the arguments specified by the caller. The
+ * __v4l2_subdev_state_constify_ret() macro constifies the returned pointer
+ * when the state is const, allowing the state accessors to guarantee
+ * const-correctness in all cases.
+ *
+ * A similar arrangement is used for v4l2_subdev_state_crop(),
+ * v4l2_subdev_state_compose() and v4l2_subdev_state_get_interval() below.
+ */
+#define v4l2_subdev_state_get_format(state, pad, ...) \
+ __v4l2_subdev_state_constify_ret(state, \
+ __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \
+ ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__))
#define __v4l2_subdev_state_get_format_pad(state, pad) \
__v4l2_subdev_state_get_format(state, pad, 0)
struct v4l2_mbus_framefmt *
@@ -1368,9 +1385,10 @@ __v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
* For stream-unaware drivers the crop rectangle for the corresponding pad is
* returned. If the pad does not exist, NULL is returned.
*/
-#define v4l2_subdev_state_get_crop(state, pad, ...) \
- __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \
- (state, pad, ##__VA_ARGS__)
+#define v4l2_subdev_state_get_crop(state, pad, ...) \
+ __v4l2_subdev_state_constify_ret(state, \
+ __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \
+ ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__))
#define __v4l2_subdev_state_get_crop_pad(state, pad) \
__v4l2_subdev_state_get_crop(state, pad, 0)
struct v4l2_rect *
@@ -1389,9 +1407,10 @@ __v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
* For stream-unaware drivers the compose rectangle for the corresponding pad is
* returned. If the pad does not exist, NULL is returned.
*/
-#define v4l2_subdev_state_get_compose(state, pad, ...) \
- __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \
- (state, pad, ##__VA_ARGS__)
+#define v4l2_subdev_state_get_compose(state, pad, ...) \
+ __v4l2_subdev_state_constify_ret(state, \
+ __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \
+ ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__))
#define __v4l2_subdev_state_get_compose_pad(state, pad) \
__v4l2_subdev_state_get_compose(state, pad, 0)
struct v4l2_rect *
@@ -1410,9 +1429,10 @@ __v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
* For stream-unaware drivers the frame interval for the corresponding pad is
* returned. If the pad does not exist, NULL is returned.
*/
-#define v4l2_subdev_state_get_interval(state, pad, ...) \
- __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \
- (state, pad, ##__VA_ARGS__)
+#define v4l2_subdev_state_get_interval(state, pad, ...) \
+ __v4l2_subdev_state_constify_ret(state, \
+ __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \
+ ((struct v4l2_subdev_state *)state, pad, ##__VA_ARGS__))
#define __v4l2_subdev_state_get_interval_pad(state, pad) \
__v4l2_subdev_state_get_interval(state, pad, 0)
struct v4l2_fract *
@@ -1641,6 +1661,8 @@ int v4l2_subdev_routing_validate(struct v4l2_subdev *sd,
* function implements a best-effort compatibility by calling the .s_stream()
* operation, limited to subdevs that have a single source pad.
*
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ *
* Return:
* * 0: Success
* * -EALREADY: One of the streams in streams_mask is already enabled
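For a stream-unaware subdev with a single source pad, the calls collapse to a
fixed mask; a hedged sketch (MY_SOURCE_PAD is hypothetical):

	ret = v4l2_subdev_enable_streams(sd, MY_SOURCE_PAD, BIT_ULL(0));
	...
	v4l2_subdev_disable_streams(sd, MY_SOURCE_PAD, BIT_ULL(0));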
@@ -1671,6 +1693,8 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
* function implements a best-effort compatibility by calling the .s_stream()
* operation, limited to subdevs that have a single source pad.
*
+ * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0).
+ *
* Return:
* * 0: Success
* * -EALREADY: One of the streams in streams_mask is not enabled
@@ -1915,19 +1939,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
*
* Note: only legacy non-MC drivers may need this macro.
*/
-#define v4l2_subdev_call_state_try(sd, o, f, args...) \
- ({ \
- int __result; \
- static struct lock_class_key __key; \
- const char *name = KBUILD_BASENAME \
- ":" __stringify(__LINE__) ":state->lock"; \
- struct v4l2_subdev_state *state = \
- __v4l2_subdev_state_alloc(sd, name, &__key); \
- v4l2_subdev_lock_state(state); \
- __result = v4l2_subdev_call(sd, o, f, state, ##args); \
- v4l2_subdev_unlock_state(state); \
- __v4l2_subdev_state_free(state); \
- __result; \
+#define v4l2_subdev_call_state_try(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":state->lock"; \
+ struct v4l2_subdev_state *state = \
+ __v4l2_subdev_state_alloc(sd, name, &__key); \
+ if (IS_ERR(state)) { \
+ __result = PTR_ERR(state); \
+ } else { \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ v4l2_subdev_unlock_state(state); \
+ __v4l2_subdev_state_free(state); \
+ } \
+ __result; \
})
/**
@@ -1954,4 +1982,17 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
const struct v4l2_event *ev);
+/**
+ * v4l2_subdev_is_streaming() - Returns if the subdevice is streaming
+ * @sd: The subdevice
+ *
+ * v4l2_subdev_is_streaming() tells if the subdevice is currently streaming.
+ * "Streaming" here means that .s_stream() or .enable_streams() has been
+ * successfully called, and streaming has not yet been disabled.
+ *
+ * If the subdevice implements .enable_streams() this function must be called
+ * while holding the active state lock.
+ */
+bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd);
+
#endif /* _V4L2_SUBDEV_H */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 955237ac503d..9b02aeba4108 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -154,6 +154,8 @@ struct vb2_mem_ops {
* @mem_priv: private data with this plane.
* @dbuf: dma_buf - shared buffer object.
* @dbuf_mapped: flag to show whether dbuf is mapped or not
+ * @dbuf_duplicated: boolean to show whether dbuf is duplicated with a
+ * previous plane of the buffer.
* @bytesused: number of bytes occupied by data in the plane (payload).
* @length: size of this plane (NOT the payload) in bytes. The maximum
* valid size is MAX_UINT - PAGE_SIZE.
@@ -179,6 +181,7 @@ struct vb2_plane {
void *mem_priv;
struct dma_buf *dbuf;
unsigned int dbuf_mapped;
+ bool dbuf_duplicated;
unsigned int bytesused;
unsigned int length;
unsigned int min_length;
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 48f4a5023d81..d9b91ff02761 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -14,6 +14,11 @@
#include <linux/videodev2.h>
struct device;
+struct vsp1_dl_list;
+
+/* -----------------------------------------------------------------------------
+ * VSP1 DU interface
+ */
int vsp1_du_init(struct device *dev);
@@ -52,6 +57,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index,
* @alpha: alpha value (0: fully transparent, 255: fully opaque)
* @zpos: Z position of the plane (from 0 to number of planes minus 1)
* @premult: true for premultiplied alpha
+ * @color_encoding: color encoding (valid for YUV formats only)
+ * @color_range: color range (valid for YUV formats only)
*/
struct vsp1_du_atomic_config {
u32 pixelformat;
@@ -62,6 +69,8 @@ struct vsp1_du_atomic_config {
unsigned int alpha;
unsigned int zpos;
bool premult;
+ enum v4l2_ycbcr_encoding color_encoding;
+ enum v4l2_quantization color_range;
};
/**
@@ -117,4 +126,88 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
+/* -----------------------------------------------------------------------------
+ * VSP1 ISP interface
+ */
+
+/**
+ * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX
+ * @size: Byte size of the buffer allocated by VSPX
+ * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX
+ * @dma_addr: bus address of a buffer allocated by VSPX
+ */
+struct vsp1_isp_buffer_desc {
+ size_t size;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request
+ * @config: ConfigDMA buffer descriptor
+ * @config.pairs: number of reg-value pairs in the ConfigDMA buffer
+ * @config.mem: bus address of the ConfigDMA buffer
+ * @img: RAW image buffer descriptor
+ * @img.fmt: RAW image format
+ * @img.mem: bus address of the RAW image buffer
+ * @dl: pointer to the display list populated by the VSPX driver in the
+ * vsp1_isp_job_prepare() function
+ *
+ * Describe a transfer request for the VSPX to perform on behalf of the ISP.
+ * The job descriptor contains an optional ConfigDMA buffer and one RAW image
+ * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred.
+ * The minimum number of config.pairs that can be written using ConfigDMA is 17.
+ * A number of pairs < 16 corrupts the output image. A number of pairs == 16
+ * freezes the VSPX operation. If the ISP driver has to write less than 17 pairs
+ * it shall pad the buffer with writes directed to registers that have no effect
+ * or avoid using ConfigDMA at all for such small write sequences.
+ *
+ * The ISP driver shall pass an instance of this type to the
+ * vsp1_isp_job_prepare() function, which will populate the display list
+ * pointer @dl using the @config
+ * and @img descriptors. When the job has to be run on the VSPX, the descriptor
+ * shall be passed to vsp1_isp_job_run() which consumes the display list.
+ *
+ * Job descriptors not yet run shall be released with a call to
+ * vsp1_isp_job_release() when stopping the streaming in order to properly
+ * release the resources acquired by vsp1_isp_job_prepare().
+ */
+struct vsp1_isp_job_desc {
+ struct {
+ unsigned int pairs;
+ dma_addr_t mem;
+ } config;
+ struct {
+ struct v4l2_pix_format_mplane fmt;
+ dma_addr_t mem;
+ } img;
+ struct vsp1_dl_list *dl;
+};
+
+/**
+ * struct vsp1_vspx_frame_end - VSPX frame end callback data
+ * @vspx_frame_end: Frame end callback. Called after a transfer job has been
+ * completed. If the job includes both a ConfigDMA and a
+ * RAW image, the callback is called after both have been
+ * transferred
+ * @frame_end_data: Frame end callback data, passed to vspx_frame_end
+ */
+struct vsp1_vspx_frame_end {
+ void (*vspx_frame_end)(void *data);
+ void *frame_end_data;
+};
+
+int vsp1_isp_init(struct device *dev);
+struct device *vsp1_isp_get_bus_master(struct device *dev);
+int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+void vsp1_isp_free_buffer(struct device *dev,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+int vsp1_isp_start_streaming(struct device *dev,
+ struct vsp1_vspx_frame_end *frame_end);
+void vsp1_isp_stop_streaming(struct device *dev);
+int vsp1_isp_job_prepare(struct device *dev,
+ struct vsp1_isp_job_desc *job);
+int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job);
+void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job);
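+
+A hedged sketch of the prepare/run flow described in the vsp1_isp_job_desc
+comment; my_frame_end() and the pre-filled descriptor are assumptions, not part
+of this interface:
+
+        static void my_frame_end(void *data)
+        {
+                /* Hypothetical: signal the ISP driver that the job completed. */
+        }
+
+        static int my_run_job(struct device *vspx, struct vsp1_isp_job_desc *job)
+        {
+                struct vsp1_vspx_frame_end cb = {
+                        .vspx_frame_end = my_frame_end,
+                        .frame_end_data = NULL,
+                };
+                int ret;
+
+                ret = vsp1_isp_start_streaming(vspx, &cb);
+                if (ret)
+                        return ret;
+
+                /* job->config and job->img are assumed filled in already;
+                 * remember the minimum of 17 ConfigDMA reg-value pairs.
+                 */
+                ret = vsp1_isp_job_prepare(vspx, job);  /* populates job->dl */
+                if (ret)
+                        goto stop;
+
+                ret = vsp1_isp_job_run(vspx, job);      /* consumes job->dl */
+        stop:
+                if (ret)
+                        vsp1_isp_stop_streaming(vspx);
+                return ret;
+        }
+
+On a stop path, a descriptor that was prepared but never run would instead go
+through vsp1_isp_job_release(), as the comment above requires.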
+
#endif /* __MEDIA_VSP1_H__ */
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index b8fa30fd6b50..53663c4e5ae3 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -61,12 +61,14 @@ enum rpcif_type {
RPCIF_RCAR_GEN3,
RPCIF_RCAR_GEN4,
RPCIF_RZ_G2L,
+ XSPI_RZ_G3E,
};
struct rpcif {
struct device *dev;
void __iomem *dirmap;
size_t size;
+ bool xspi;
};
int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
@@ -75,5 +77,7 @@ void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs,
size_t *len);
int rpcif_manual_xfer(struct device *dev);
ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf);
+ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len,
+ const void *buf);
#endif // __RENESAS_RPC_IF_H
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
deleted file mode 100644
index 2251da7f32d9..000000000000
--- a/include/misc/cxl-base.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#ifndef _MISC_CXL_BASE_H
-#define _MISC_CXL_BASE_H
-
-#ifdef CONFIG_CXL_BASE
-
-#define CXL_IRQ_RANGES 4
-
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
-
-extern atomic_t cxl_use_count;
-
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
-
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
-
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
-
-struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
-void cxl_afu_put(struct cxl_afu *afu);
-void cxl_slbia(struct mm_struct *mm);
-
-#else /* CONFIG_CXL_BASE */
-
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
-static inline void cxl_afu_put(struct cxl_afu *afu) {}
-static inline void cxl_slbia(struct mm_struct *mm) {}
-
-#endif /* CONFIG_CXL_BASE */
-
-#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
deleted file mode 100644
index d8044299d654..000000000000
--- a/include/misc/cxl.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef _MISC_CXL_H
-#define _MISC_CXL_H
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <uapi/misc/cxl.h>
-
-/*
- * This documents the in kernel API for driver to use CXL. It allows kernel
- * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
- * configuration record.
- *
- * This API enables control over AFU and contexts which can't be part of the
- * generic PCI API. This API is agnostic to the actual AFU.
- */
-
-/* Get the AFU associated with a pci_dev */
-struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
-
-/* Get the AFU conf record number associated with a pci_dev */
-unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
-
-
-/*
- * Context lifetime overview:
- *
- * An AFU context may be inited and then started and stopped multiple times
- * before it's released. ie.
- * - cxl_dev_context_init()
- * - cxl_start_context()
- * - cxl_stop_context()
- * - cxl_start_context()
- * - cxl_stop_context()
- * ...repeat...
- * - cxl_release_context()
- * Once released, a context can't be started again.
- *
- * One context is inited by the cxl driver for every pci_dev. This is to be
- * used as a default kernel context. cxl_get_context() will get this
- * context. This context will be released by PCI hot unplug, so doesn't need to
- * be released explicitly by drivers.
- *
- * Additional kernel contexts may be inited using cxl_dev_context_init().
- * These must be released using cxl_context_detach().
- *
- * Once a context has been inited, IRQs may be configured. Firstly these IRQs
- * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
- * specific handlers (cxl_map_afu_irq()).
- *
- * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
- * (cxl_free_afu_irqs()).
- *
- * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
- * hardware to lose track of all contexts. It's upto the caller of
- * cxl_afu_reset() to restart these contexts.
- */
-
-/*
- * On pci_enabled_device(), the cxl driver will init a single cxl context for
- * use by the driver. It doesn't start this context (as that will likely
- * generate DMA traffic for most AFUs).
- *
- * This gets the default context associated with this pci_dev. This context
- * doesn't need to be released as this will be done by the PCI subsystem on hot
- * unplug.
- */
-struct cxl_context *cxl_get_context(struct pci_dev *dev);
-/*
- * Allocate and initalise a context associated with a AFU PCI device. This
- * doesn't start the context in the AFU.
- */
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
-/*
- * Release and free a context. Context should be stopped before calling.
- */
-int cxl_release_context(struct cxl_context *ctx);
-
-/*
- * Set and get private data associated with a context. Allows drivers to have a
- * back pointer to some useful structure.
- */
-int cxl_set_priv(struct cxl_context *ctx, void *priv);
-void *cxl_get_priv(struct cxl_context *ctx);
-
-/*
- * Allocate AFU interrupts for this context. num=0 will allocate the default
- * for this AFU as given in the AFU descriptor. This number doesn't include the
- * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
- * used must map a handler with cxl_map_afu_irq.
- */
-int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
-/* Free allocated interrupts */
-void cxl_free_afu_irqs(struct cxl_context *cxl);
-
-/*
- * Map a handler for an AFU interrupt associated with a particular context. AFU
- * IRQS numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
- * is private data is that will be provided to the interrupt handler.
- */
-int cxl_map_afu_irq(struct cxl_context *cxl, int num,
- irq_handler_t handler, void *cookie, char *name);
-/* unmap mapped IRQ handlers */
-void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-
-/*
- * Start work on the AFU. This starts an cxl context and associates it with a
- * task. task == NULL will make it a kernel context.
- */
-int cxl_start_context(struct cxl_context *ctx, u64 wed,
- struct task_struct *task);
-/*
- * Stop a context and remove it from the PSL
- */
-int cxl_stop_context(struct cxl_context *ctx);
-
-/* Reset the AFU */
-int cxl_afu_reset(struct cxl_context *ctx);
-
-/*
- * Set a context as a master context.
- * This sets the default problem space area mapped as the full space, rather
- * than just the per context area (for slaves).
- */
-void cxl_set_master(struct cxl_context *ctx);
-
-/*
- * Map and unmap the AFU Problem Space area. The amount and location mapped
- * depends on if this context is a master or slave.
- */
-void __iomem *cxl_psa_map(struct cxl_context *ctx);
-void cxl_psa_unmap(void __iomem *addr);
-
-/* Get the process element for this context */
-int cxl_process_element(struct cxl_context *ctx);
-
-/*
- * These calls allow drivers to create their own file descriptors and make them
- * identical to the cxl file descriptor user API. An example use case:
- *
- * struct file_operations cxl_my_fops = {};
- * ......
- * // Init the context
- * ctx = cxl_dev_context_init(dev);
- * if (IS_ERR(ctx))
- * return PTR_ERR(ctx);
- * // Create and attach a new file descriptor to my file ops
- * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
- * // Start context
- * rc = cxl_start_work(ctx, &work.work);
- * if (rc) {
- * fput(file);
- * put_unused_fd(fd);
- * return -ENODEV;
- * }
- * // No error paths after installing the fd
- * fd_install(fd, file);
- * return fd;
- *
- * This inits a context, and gets a file descriptor and associates some file
- * ops to that file descriptor. If the file ops are blank, the cxl driver will
- * fill them in with the default ones that mimic the standard user API. Once
- * completed, the file descriptor can be installed. Once the file descriptor is
- * installed, it's visible to the user so no errors must occur past this point.
- *
- * If cxl_fd_release() file op call is installed, the context will be stopped
- * and released when the fd is released. Hence the driver won't need to manage
- * this itself.
- */
-
-/*
- * Take a context and associate it with my file ops. Returns the associated
- * file and file descriptor. Any file ops which are blank are filled in by the
- * cxl driver with the default ops to mimic the standard API.
- */
-struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
- int *fd);
-/* Get the context associated with this file */
-struct cxl_context *cxl_fops_get_context(struct file *file);
-/*
- * Start a context associated a struct cxl_ioctl_start_work used by the
- * standard cxl user API.
- */
-int cxl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work *work);
-/*
- * Export all the existing fops so drivers can use them
- */
-int cxl_fd_open(struct inode *inode, struct file *file);
-int cxl_fd_release(struct inode *inode, struct file *file);
-long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
-ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
- loff_t *off);
-
-/*
- * For EEH, a driver may want to assert a PERST will reload the same image
- * from flash into the FPGA.
- *
- * This is a property of the entire adapter, not a single AFU, so drivers
- * should set this property with care!
- */
-void cxl_perst_reloads_same_image(struct cxl_afu *afu,
- bool perst_reloads_same_image);
-
-/*
- * Read the VPD for the card where the AFU resides
- */
-ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
-
-/*
- * AFU driver ops allow an AFU driver to create their own events to pass to
- * userspace through the file descriptor as a simpler alternative to overriding
- * the read() and poll() calls that works with the generic cxl events. These
- * events are given priority over the generic cxl events, so they will be
- * delivered first if multiple types of events are pending.
- *
- * The AFU driver must call cxl_context_events_pending() to notify the cxl
- * driver that new events are ready to be delivered for a specific context.
- * cxl_context_events_pending() will adjust the current count of AFU driver
- * events for this context, and wake up anyone waiting on the context wait
- * queue.
- *
- * The cxl driver will then call fetch_event() to get a structure defining
- * the size and address of the driver specific event data. The cxl driver
- * will build a cxl header with type and process_element fields filled in,
- * and header.size set to sizeof(struct cxl_event_header) + data_size.
- * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
- *
- * fetch_event() is called with a spin lock held, so it must not sleep.
- *
- * The cxl driver will then deliver the event to userspace, and finally
- * call event_delivered() to return the status of the operation, identified
- * by cxl context and AFU driver event data pointers.
- * 0 Success
- * -EFAULT copy_to_user() has failed
- * -EINVAL Event data pointer is NULL, or event size is greater than
- * CXL_READ_MIN_SIZE.
- */
-struct cxl_afu_driver_ops {
- struct cxl_event_afu_driver_reserved *(*fetch_event) (
- struct cxl_context *ctx);
- void (*event_delivered) (struct cxl_context *ctx,
- struct cxl_event_afu_driver_reserved *event,
- int rc);
-};
-
-/*
- * Associate the above driver ops with a specific context.
- * Reset the current count of AFU driver events.
- */
-void cxl_set_driver_ops(struct cxl_context *ctx,
- struct cxl_afu_driver_ops *ops);
-
-/* Notify cxl driver that new events are ready to be delivered for context */
-void cxl_context_events_pending(struct cxl_context *ctx,
- unsigned int new_events);
-
-#endif /* _MISC_CXL_H */
diff --git a/include/misc/cxllib.h b/include/misc/cxllib.h
deleted file mode 100644
index eacc417288fc..000000000000
--- a/include/misc/cxllib.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2017 IBM Corp.
- */
-
-#ifndef _MISC_CXLLIB_H
-#define _MISC_CXLLIB_H
-
-#include <linux/pci.h>
-#include <asm/reg.h>
-
-/*
- * cxl driver exports a in-kernel 'library' API which can be called by
- * other drivers to help interacting with an IBM XSL.
- */
-
-/*
- * tells whether capi is supported on the PCIe slot where the
- * device is seated
- *
- * Input:
- * dev: device whose slot needs to be checked
- * flags: 0 for the time being
- */
-bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Returns the configuration parameters to be used by the XSL or device
- *
- * Input:
- * dev: device, used to find PHB
- * Output:
- * struct cxllib_xsl_config:
- * version
- * capi BAR address, i.e. 0x2000000000000-0x2FFFFFFFFFFFF
- * capi BAR size
- * data send control (XSL_DSNCTL)
- * dummy read address (XSL_DRA)
- */
-#define CXL_XSL_CONFIG_VERSION1 1
-struct cxllib_xsl_config {
- u32 version; /* format version for register encoding */
- u32 log_bar_size;/* log size of the capi_window */
- u64 bar_addr; /* address of the start of capi window */
- u64 dsnctl; /* matches definition of XSL_DSNCTL */
- u64 dra; /* real address that can be used for dummy read */
-};
-
-int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg);
-
-
-/*
- * Activate capi for the pci host bridge associated with the device.
- * Can be extended to deactivate once we know how to do it.
- * Device must be ready to accept messages from the CAPP unit and
- * respond accordingly (TLB invalidates, ...)
- *
- * PHB is switched to capi mode through calls to skiboot.
- * CAPP snooping is activated
- *
- * Input:
- * dev: device whose PHB should switch mode
- * mode: mode to switch to i.e. CAPI or PCI
- * flags: options related to the mode
- */
-enum cxllib_mode {
- CXL_MODE_CXL,
- CXL_MODE_PCI,
-};
-
-#define CXL_MODE_NO_DMA 0
-#define CXL_MODE_DMA_TVT0 1
-#define CXL_MODE_DMA_TVT1 2
-
-int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
- unsigned long flags);
-
-
-/*
- * Set the device for capi DMA.
- * Define its dma_ops and dma offset so that allocations will be using TVT#1
- *
- * Input:
- * dev: device to set
- * flags: options. CXL_MODE_DMA_TVT1 should be used
- */
-int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Get the Process Element structure for the given thread
- *
- * Input:
- * task: task_struct for the context of the translation
- * translation_mode: whether addresses should be translated
- * Output:
- * attr: attributes to fill up the Process Element structure from CAIA
- */
-struct cxllib_pe_attributes {
- u64 sr;
- u32 lpid;
- u32 tid;
- u32 pid;
-};
-#define CXL_TRANSLATED_MODE 0
-#define CXL_REAL_MODE 1
-
-int cxllib_get_PE_attributes(struct task_struct *task,
- unsigned long translation_mode, struct cxllib_pe_attributes *attr);
-
-
-/*
- * Handle memory fault.
- * Fault in all the pages of the specified buffer for the permissions
- * provided in ‘flags’
- *
- * Shouldn't be called from interrupt context
- *
- * Input:
- * mm: struct mm for the thread faulting the pages
- * addr: base address of the buffer to page in
- * size: size of the buffer to page in
- * flags: permission requested (DSISR_ISSTORE...)
- */
-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags);
-
-
-#endif /* _MISC_CXLLIB_H */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 4f785098c67a..838a94218b59 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -16,6 +16,12 @@
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
+/* DEFAULT MSIZE = 32 pages worth of payload + P9_HDRSZ +
+ * room for write (16 extra) or read (11 extra) operands.
+ */
+
+#define DEFAULT_MSIZE ((128 * 1024) + P9_IOHDRSZ)
+
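+For the arithmetic: at the common 4 KiB page size, 32 pages of payload is
+32 * 4096 = 131072 = 128 * 1024 bytes; P9_IOHDRSZ then adds the header room the
+comment describes (the base header plus the larger of the write/read operand
+sets).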
/** enum p9_proto_versions - 9P protocol versions
* @p9_proto_legacy: 9P Legacy mode, pre-9P2000.u
* @p9_proto_2000u: 9P2000.u extension
@@ -127,6 +133,96 @@ struct p9_client {
};
/**
+ * struct p9_client_opts - holds client options during parsing
+ * @msize: maximum data size negotiated by protocol
+ * @proto_version: 9P protocol version to use
+ * @trans_mod: module API instantiated with this client
+ *
+ * These parsed options get transferred into the client in
+ * apply_client_options().
+ */
+struct p9_client_opts {
+ unsigned int msize;
+ unsigned char proto_version;
+ struct p9_trans_module *trans_mod;
+};
+
+/**
+ * struct p9_fd_opts - per-transport options for fd transport
+ * @rfd: file descriptor for reading (trans=fd)
+ * @wfd: file descriptor for writing (trans=fd)
+ * @port: port to connect to (trans=tcp)
+ * @privport: port is privileged
+ */
+struct p9_fd_opts {
+ int rfd;
+ int wfd;
+ u16 port;
+ bool privport;
+};
+
+/**
+ * struct p9_rdma_opts - Collection of mount options for rdma transport
+ * @port: port of connection
+ * @privport: Whether a privileged port may be used
+ * @sq_depth: The requested depth of the SQ. This really doesn't need
+ * to be any deeper than the number of threads used in the client
+ * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
+ * @timeout: Time to wait in msecs for CM events
+ */
+struct p9_rdma_opts {
+ short port;
+ bool privport;
+ int sq_depth;
+ int rq_depth;
+ long timeout;
+};
+
+/**
+ * struct p9_session_opts - holds parsed options for v9fs_session_info
+ * @flags: session options of type &p9_session_flags
+ * @nodev: set to 1 to disable device mapping
+ * @debug: debug level
+ * @afid: authentication handle
+ * @cache: cache mode of type &p9_cache_bits
+ * @cachetag: the tag of the cache associated with this session
+ * @uname: string user name to mount hierarchy as
+ * @aname: mount specifier for remote hierarchy
+ * @dfltuid: default numeric userid to mount hierarchy as
+ * @dfltgid: default numeric groupid to mount hierarchy as
+ * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy
+ * @session_lock_timeout: retry interval for blocking locks
+ *
+ * This structure holds options which are parsed and will be transferred
+ * to the v9fs_session_info structure when mounted, and therefore largely
+ * duplicates struct v9fs_session_info.
+ */
+struct p9_session_opts {
+ unsigned int flags;
+ unsigned char nodev;
+ unsigned short debug;
+ unsigned int afid;
+ unsigned int cache;
+#ifdef CONFIG_9P_FSCACHE
+ char *cachetag;
+#endif
+ char *uname;
+ char *aname;
+ kuid_t dfltuid;
+ kgid_t dfltgid;
+ kuid_t uid;
+ long session_lock_timeout;
+};
+
+/* Used by mount API to store parsed mount options */
+struct v9fs_context {
+ struct p9_client_opts client_opts;
+ struct p9_fd_opts fd_opts;
+ struct p9_rdma_opts rdma_opts;
+ struct p9_session_opts session_opts;
+};
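+
+For illustration, a hedged sketch of an aggregate context populated with
+plausible defaults (the values are assumptions; constants such as P9_FD_PORT
+come from the transport.h changes below) before apply_client_options()
+transfers them into the client:
+
+        struct v9fs_context ctx = {
+                .client_opts = {
+                        .msize          = DEFAULT_MSIZE,
+                        .proto_version  = p9_proto_2000L,
+                        .trans_mod      = NULL, /* resolved to a default later */
+                },
+                .fd_opts = {
+                        .rfd    = ~0,
+                        .wfd    = ~0,
+                        .port   = P9_FD_PORT,
+                },
+                .rdma_opts = {
+                        .port           = P9_RDMA_PORT,
+                        .sq_depth       = P9_RDMA_SQ_DEPTH,
+                        .rq_depth       = P9_RDMA_RQ_DEPTH,
+                        .timeout        = P9_RDMA_TIMEOUT,
+                },
+        };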
+
+/**
* struct p9_fid - file system entity handle
* @clnt: back pointer to instantiating &p9_client
* @fid: numeric identifier for this handle
@@ -183,7 +279,7 @@ int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid,
const char *name);
int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
struct p9_fid *newdirfid, const char *new_name);
-struct p9_client *p9_client_create(const char *dev_name, char *options);
+struct p9_client *p9_client_create(struct fs_context *fc);
void p9_client_destroy(struct p9_client *clnt);
void p9_client_disconnect(struct p9_client *clnt);
void p9_client_begin_disconnect(struct p9_client *clnt);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 766ec07c9599..a912bbaa862f 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -14,6 +14,13 @@
#define P9_DEF_MIN_RESVPORT (665U)
#define P9_DEF_MAX_RESVPORT (1023U)
+#define P9_FD_PORT 564
+
+#define P9_RDMA_PORT 5640
+#define P9_RDMA_SQ_DEPTH 32
+#define P9_RDMA_RQ_DEPTH 32
+#define P9_RDMA_TIMEOUT 30000 /* 30 seconds */
+
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
@@ -24,6 +31,9 @@
* we're less flexible when choosing the response message
* size in this case
* @def: set if this transport should be considered the default
+ * @supports_vmalloc: set if this transport can work with vmalloc'd buffers
+ * (non-physically contiguous memory). Transports requiring
+ * DMA should leave this as false.
* @create: member function to create a new connection on this transport
* @close: member function to discard a connection on this transport
* @request: member function to issue a request to the transport
@@ -43,10 +53,11 @@ struct p9_trans_module {
char *name; /* name of transport */
int maxsize; /* max message size of transport */
bool pooled_rbuffers;
- int def; /* this transport should be default */
+ bool def; /* this transport should be default */
+ bool supports_vmalloc; /* can work with vmalloc'd buffers */
struct module *owner;
int (*create)(struct p9_client *client,
- const char *devname, char *args);
+ struct fs_context *fc);
void (*close)(struct p9_client *client);
int (*request)(struct p9_client *client, struct p9_req_t *req);
int (*cancel)(struct p9_client *client, struct p9_req_t *req);
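
A sketch of what the reworked interface implies for a transport module; the
"mytrans" skeleton is hypothetical, but it shows the two booleans and the
fs_context-based create():

        static int mytrans_create(struct p9_client *client, struct fs_context *fc)
        {
                /* Options now come from fc instead of a devname/args string. */
                return 0;
        }

        static struct p9_trans_module p9_mytrans = {
                .name                   = "mytrans",
                .maxsize                = 64 * 1024,
                .def                    = false,
                .supports_vmalloc       = true, /* no DMA, vmalloc'd buffers OK */
                .owner                  = THIS_MODULE,
                .create                 = mytrans_create,
        };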
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 77ee0c657e2c..91a24b5e0b93 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -33,7 +33,10 @@ struct tc_action {
struct tcf_t tcfa_tm;
struct gnet_stats_basic_sync tcfa_bstats;
struct gnet_stats_basic_sync tcfa_bstats_hw;
- struct gnet_stats_queue tcfa_qstats;
+
+ atomic_t tcfa_drops;
+ atomic_t tcfa_overlimits;
+
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct gnet_stats_basic_sync __percpu *cpu_bstats;
@@ -53,7 +56,6 @@ struct tc_action {
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
-#define tcf_qstats common.tcfa_qstats
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
@@ -76,19 +78,24 @@ static inline void tcf_lastuse_update(struct tcf_t *tm)
{
unsigned long now = jiffies;
- if (tm->lastuse != now)
- tm->lastuse = now;
- if (unlikely(!tm->firstuse))
- tm->firstuse = now;
+ if (READ_ONCE(tm->lastuse) != now)
+ WRITE_ONCE(tm->lastuse, now);
+ if (unlikely(!READ_ONCE(tm->firstuse)))
+ WRITE_ONCE(tm->firstuse, now);
}
static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
{
- dtm->install = jiffies_to_clock_t(jiffies - stm->install);
- dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
- dtm->firstuse = stm->firstuse ?
- jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
- dtm->expires = jiffies_to_clock_t(stm->expires);
+ unsigned long firstuse, now = jiffies;
+
+ dtm->install = jiffies_to_clock_t(now - READ_ONCE(stm->install));
+ dtm->lastuse = jiffies_to_clock_t(now - READ_ONCE(stm->lastuse));
+
+ firstuse = READ_ONCE(stm->firstuse);
+ dtm->firstuse = firstuse ?
+ jiffies_to_clock_t(now - firstuse) : 0;
+
+ dtm->expires = jiffies_to_clock_t(READ_ONCE(stm->expires));
}
static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
@@ -170,14 +177,12 @@ static inline void tc_action_net_exit(struct list_head *net_list,
{
struct net *net;
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct tc_action_net *tn = net_generic(net, id);
tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
kfree(tn->idrinfo);
}
- rtnl_unlock();
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
@@ -219,7 +224,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref, bool terse);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
static inline void tcf_action_update_bstats(struct tc_action *a,
struct sk_buff *skb)
@@ -239,9 +243,7 @@ static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_drop_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
+ atomic_inc(&a->tcfa_drops);
}
static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
@@ -250,9 +252,7 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_overlimit_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
+ atomic_inc(&a->tcfa_overlimits);
}
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 62a407db1bf5..78e8b877fb25 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -37,10 +37,14 @@ struct prefix_info {
struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
__u8 onlink : 1,
- autoconf : 1,
- reserved : 6;
+ autoconf : 1,
+ routeraddr : 1,
+ preferpd : 1,
+ reserved : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 4,
+ preferpd : 1,
+ routeraddr : 1,
autoconf : 1,
onlink : 1;
#else
@@ -84,6 +88,23 @@ struct ifa6_config {
u16 scope;
};
+enum addr_type_t {
+ UNICAST_ADDR,
+ MULTICAST_ADDR,
+ ANYCAST_ADDR,
+};
+
+struct inet6_fill_args {
+ u32 portid;
+ u32 seq;
+ int event;
+ unsigned int flags;
+ int netnsid;
+ int ifindex;
+ enum addr_type_t type;
+ bool force_rt_scope_universe;
+};
+
int addrconf_init(void);
void addrconf_cleanup(void);
@@ -183,10 +204,12 @@ static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
return 0;
}
+#define INFINITY_LIFE_TIME 0xFFFFFFFF
+
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
{
- if (timeout == 0xffffffff)
+ if (timeout == INFINITY_LIFE_TIME)
return ~0UL;
/*
@@ -324,10 +347,20 @@ static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
return rcu_dereference_rtnl(dev->ip6_ptr);
}
+static inline struct inet6_dev *in6_dev_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->ip6_ptr);
+}
+
+static inline struct inet6_dev *__in6_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip6_ptr);
+}
+
/**
* __in6_dev_stats_get - get inet6_dev pointer for stats
* @dev: network device
- * @skb: skb for original incoming interface if neeeded
+ * @skb: skb for original incoming interface if needed
*
* Caller must hold rcu_read_lock or RTNL, because this function
* does not take a reference on the inet6_dev.
@@ -519,4 +552,11 @@ int if6_proc_init(void);
void if6_proc_exit(void);
#endif
+int inet6_fill_ifmcaddr(struct sk_buff *skb,
+ const struct ifmcaddr6 *ifmca,
+ struct inet6_fill_args *args);
+
+int inet6_fill_ifacaddr(struct sk_buff *skb,
+ const struct ifacaddr6 *ifaca,
+ struct inet6_fill_args *args);
#endif
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 0754c463224a..0fb4c41c9bbf 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -16,6 +16,7 @@ struct sock;
struct socket;
struct rxrpc_call;
struct rxrpc_peer;
+struct krb5_buffer;
enum rxrpc_abort_reason;
enum rxrpc_interruptibility {
@@ -24,23 +25,33 @@ enum rxrpc_interruptibility {
RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
};
+enum rxrpc_oob_type {
+ RXRPC_OOB_CHALLENGE, /* Security challenge for a connection */
+};
+
/*
* Debug ID counter for tracing.
*/
extern atomic_t rxrpc_debug_id;
+/*
+ * Operations table for rxrpc to call out to a kernel application (e.g. kAFS).
+ */
+struct rxrpc_kernel_ops {
+ void (*notify_new_call)(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID);
+ void (*discard_new_call)(struct rxrpc_call *call, unsigned long user_call_ID);
+ void (*user_attach_call)(struct rxrpc_call *call, unsigned long user_call_ID);
+ void (*notify_oob)(struct sock *sk, struct sk_buff *oob);
+};
+
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
typedef void (*rxrpc_notify_end_tx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
-typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
- unsigned long);
-typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
-typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
-void rxrpc_kernel_new_call_notification(struct socket *,
- rxrpc_notify_new_call_t,
- rxrpc_discard_new_call_t);
+void rxrpc_kernel_set_notifications(struct socket *sock,
+ const struct rxrpc_kernel_ops *app_ops);
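+
+A hedged sketch of how a kernel application such as kAFS might adopt the ops
+table; the handler bodies are placeholders:
+
+        static void my_notify_new_call(struct sock *sk, struct rxrpc_call *call,
+                                       unsigned long user_call_ID)
+        {
+                /* Hypothetical: queue the incoming call for the application. */
+        }
+
+        static void my_discard_new_call(struct rxrpc_call *call,
+                                        unsigned long user_call_ID)
+        {
+                /* Hypothetical: drop application state attached to the call. */
+        }
+
+        static const struct rxrpc_kernel_ops my_rxrpc_ops = {
+                .notify_new_call        = my_notify_new_call,
+                .discard_new_call       = my_discard_new_call,
+        };
+
+        /* Replaces the old rxrpc_kernel_new_call_notification() pair. */
+        static void my_set_ops(struct socket *sock)
+        {
+                rxrpc_kernel_set_notifications(sock, &my_rxrpc_ops);
+        }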
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
struct rxrpc_peer *peer,
struct key *key,
@@ -69,17 +80,36 @@ struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned long rxrpc_kernel_set_peer_data(struct rxrpc_peer *peer, unsigned long app_data);
+unsigned long rxrpc_kernel_get_peer_data(const struct rxrpc_peer *peer);
unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
-int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
- rxrpc_user_attach_call_t, unsigned long, gfp_t,
- unsigned int);
+int rxrpc_kernel_charge_accept(struct socket *sock, rxrpc_notify_rx_t notify_rx,
+ unsigned long user_call_ID, gfp_t gfp,
+ unsigned int debug_id);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
-u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
- unsigned long);
int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
int rxrpc_sock_set_security_keyring(struct sock *, struct key *);
+int rxrpc_sock_set_manage_response(struct sock *sk, bool set);
+
+enum rxrpc_oob_type rxrpc_kernel_query_oob(struct sk_buff *oob,
+ struct rxrpc_peer **_peer,
+ unsigned long *_peer_appdata);
+struct sk_buff *rxrpc_kernel_dequeue_oob(struct socket *sock,
+ enum rxrpc_oob_type *_type);
+void rxrpc_kernel_free_oob(struct sk_buff *oob);
+void rxrpc_kernel_query_challenge(struct sk_buff *challenge,
+ struct rxrpc_peer **_peer,
+ unsigned long *_peer_appdata,
+ u16 *_service_id, u8 *_security_index);
+int rxrpc_kernel_reject_challenge(struct sk_buff *challenge, u32 abort_code,
+ int error, enum rxrpc_abort_reason why);
+int rxkad_kernel_respond_to_challenge(struct sk_buff *challenge);
+u32 rxgk_kernel_query_challenge(struct sk_buff *challenge);
+int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge,
+ struct krb5_buffer *appdata);
+u8 rxrpc_kernel_query_call_security(struct rxrpc_call *call,
+ u16 *_service_id, u32 *_enctype);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b6eedf7650da..34f53dde65ce 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -2,11 +2,15 @@
#ifndef __LINUX_NET_AFUNIX_H
#define __LINUX_NET_AFUNIX_H
-#include <linux/socket.h>
-#include <linux/un.h>
+#include <linux/atomic.h>
#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/path.h>
#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
#include <net/sock.h>
+#include <uapi/linux/un.h>
#if IS_ENABLED(CONFIG_UNIX)
struct unix_sock *unix_get_socket(struct file *filp);
@@ -17,61 +21,17 @@ static inline struct unix_sock *unix_get_socket(struct file *filp)
}
#endif
-extern unsigned int unix_tot_inflight;
-void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver);
-void unix_del_edges(struct scm_fp_list *fpl);
-void unix_update_edges(struct unix_sock *receiver);
-int unix_prepare_fpl(struct scm_fp_list *fpl);
-void unix_destroy_fpl(struct scm_fp_list *fpl);
-void unix_gc(void);
-void wait_for_unix_gc(struct scm_fp_list *fpl);
-
-struct unix_vertex {
- struct list_head edges;
- struct list_head entry;
- struct list_head scc_entry;
- unsigned long out_degree;
- unsigned long index;
- unsigned long scc_index;
-};
-
-struct unix_edge {
- struct unix_sock *predecessor;
- struct unix_sock *successor;
- struct list_head vertex_entry;
- struct list_head stack_entry;
-};
-
-struct sock *unix_peer_get(struct sock *sk);
-
-#define UNIX_HASH_MOD (256 - 1)
-#define UNIX_HASH_SIZE (256 * 2)
-#define UNIX_HASH_BITS 8
-
struct unix_address {
refcount_t refcnt;
int len;
struct sockaddr_un name[];
};
-struct unix_skb_parms {
- struct pid *pid; /* Skb credentials */
- kuid_t uid;
- kgid_t gid;
- struct scm_fp_list *fp; /* Passed files */
-#ifdef CONFIG_SECURITY_NETWORK
- u32 secid; /* Security ID */
-#endif
- u32 consumed;
-} __randomize_layout;
-
struct scm_stat {
atomic_t nr_fds;
unsigned long nr_unix_fds;
};
-#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -84,8 +44,11 @@ struct unix_sock {
struct unix_vertex *vertex;
spinlock_t lock;
struct socket_wq peer_wq;
+#define peer_wait peer_wq.wait
wait_queue_entry_t peer_wake;
struct scm_stat scm_stat;
+ int inq_len;
+ bool recvmsg_inq;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
struct sk_buff *oob_skb;
#endif
@@ -96,47 +59,5 @@ struct unix_sock {
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-enum unix_socket_lock_class {
- U_LOCK_NORMAL,
- U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
- U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
- U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
- * candidates to close a small race window.
- */
-};
-static inline void unix_state_lock_nested(struct sock *sk,
- enum unix_socket_lock_class subclass)
-{
- spin_lock_nested(&unix_sk(sk)->lock, subclass);
-}
-
-#define peer_wait peer_wq.wait
-
-long unix_inq_len(struct sock *sk);
-long unix_outq_len(struct sock *sk);
-
-int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
- int flags);
-int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
- int flags);
-#ifdef CONFIG_SYSCTL
-int unix_sysctl_register(struct net *net);
-void unix_sysctl_unregister(struct net *net);
-#else
-static inline int unix_sysctl_register(struct net *net) { return 0; }
-static inline void unix_sysctl_unregister(struct net *net) {}
-#endif
-
-#ifdef CONFIG_BPF_SYSCALL
-extern struct proto unix_dgram_proto;
-extern struct proto unix_stream_proto;
-
-int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
-int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
-void __init unix_bpf_build_proto(void);
-#else
-static inline void __init unix_bpf_build_proto(void)
-{}
-#endif
#endif
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 535701efc1e5..d40e978126e3 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -169,6 +169,9 @@ struct vsock_transport {
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
+ /* SIOCOUTQ ioctl */
+ ssize_t (*unsent_bytes)(struct vsock_sock *vsk);
+
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
@@ -218,6 +221,7 @@ void vsock_for_each_connected_socket(struct vsock_transport *transport,
void (*fn)(struct sock *sk));
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
bool vsock_find_cid(unsigned int cid);
+void vsock_linger(struct sock *sk);
/**** TAP ****/
@@ -230,13 +234,17 @@ struct vsock_tap {
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
+int __vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags);
int vsock_connectible_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
+int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t len, int flags);
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-#ifdef CONFIG_BPF_SYSCALL
extern struct proto vsock_proto;
+#ifdef CONFIG_BPF_SYSCALL
int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void __init vsock_bpf_build_proto(void);
#else
diff --git a/include/net/aligned_data.h b/include/net/aligned_data.h
new file mode 100644
index 000000000000..e1a1c8aedc79
--- /dev/null
+++ b/include/net/aligned_data.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _NET_ALIGNED_DATA_H
+#define _NET_ALIGNED_DATA_H
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+
+/* Structure holding cacheline aligned fields on SMP builds.
+ * Each field or group should have an ____cacheline_aligned_in_smp
+ * attribute to ensure no accidental false sharing can happen.
+ */
+struct net_aligned_data {
+ atomic64_t net_cookie ____cacheline_aligned_in_smp;
+#if defined(CONFIG_INET)
+ atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;
+ atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
+#endif
+};
+
+extern struct net_aligned_data net_aligned_data;
+
+#endif /* _NET_ALIGNED_DATA_H */
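+
+A consumer sketch (the surrounding helper is assumed): each member sits on its
+own cache line, so a hot-path update cannot falsely share a line with the
+TCP/UDP memory counters:
+
+        static u64 my_next_net_cookie(void)
+        {
+                /* net_cookie is cacheline-aligned; this atomic update does not
+                 * contend with tcp_memory_allocated/udp_memory_allocated.
+                 */
+                return atomic64_inc_return(&net_aligned_data.net_cookie);
+        }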
diff --git a/include/net/ax25.h b/include/net/ax25.h
index cb622d84cd0c..a7bba42dde15 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -231,6 +231,7 @@ typedef struct ax25_dev {
#endif
refcount_t refcount;
bool device_up;
+ struct rcu_head rcu;
} ax25_dev;
typedef struct ax25_cb {
@@ -290,9 +291,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev)
static inline void ax25_dev_put(ax25_dev *ax25_dev)
{
- if (refcount_dec_and_test(&ax25_dev->refcount)) {
- kfree(ax25_dev);
- }
+ if (refcount_dec_and_test(&ax25_dev->refcount))
+ kfree_rcu(ax25_dev, rcu);
}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -335,9 +335,9 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *);
extern spinlock_t ax25_dev_lock;
#if IS_ENABLED(CONFIG_AX25)
-static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
+static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev)
{
- return dev->ax25_ptr;
+ return rcu_dereference_rtnl(dev->ax25_ptr);
}
#endif
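
With ax25_dev now freed via kfree_rcu() and fetched with rcu_dereference_rtnl(),
a lookup would follow the usual RCU pattern; a sketch (the helper is assumed):

        static ax25_dev *my_get_ax25_dev(struct net_device *dev)
        {
                ax25_dev *adev;

                rcu_read_lock();
                adev = ax25_dev_ax25dev(dev);
                if (adev)
                        ax25_dev_hold(adev);    /* pin beyond the RCU section */
                rcu_read_unlock();

                return adev;    /* caller does ax25_dev_put() when done */
        }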
@@ -418,7 +418,6 @@ void ax25_rt_device_down(struct net_device *);
int ax25_rt_ioctl(unsigned int, void __user *);
extern const struct seq_operations ax25_rt_seqops;
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-int ax25_rt_autobind(ax25_cb *, ax25_address *);
struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
ax25_address *, ax25_digi *);
void ax25_rt_free(void);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index b3228bd6cd6b..d46ed9011ee5 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,6 +29,7 @@
#include <linux/poll.h>
#include <net/sock.h>
#include <linux/seq_file.h>
+#include <linux/ethtool.h>
#define BT_SUBSYS_VERSION 2
#define BT_SUBSYS_REVISION 22
@@ -123,6 +124,7 @@ struct bt_voice {
#define BT_VOICE_TRANSPARENT 0x0003
#define BT_VOICE_CVSD_16BIT 0x0060
+#define BT_VOICE_TRANSPARENT_16BIT 0x0063
#define BT_SNDMTU 12
#define BT_RCVMTU 13
@@ -155,6 +157,7 @@ struct bt_voice {
#define BT_PKT_STATUS 16
#define BT_SCM_PKT_STATUS 0x03
+#define BT_SCM_ERROR 0x04
#define BT_ISO_QOS 17
@@ -241,6 +244,12 @@ struct bt_codecs {
#define BT_ISO_BASE 20
+/* Socket option value 21 reserved */
+
+#define BT_PKT_SEQNUM 22
+
+#define BT_SCM_PKT_SEQNUM 0x05
+
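+
+BT_PKT_SEQNUM presumably pairs with BT_SCM_PKT_SEQNUM the way BT_PKT_STATUS
+pairs with BT_SCM_PKT_STATUS: opt in via setsockopt(fd, SOL_BLUETOOTH,
+BT_PKT_SEQNUM, ...), then read the per-packet value from ancillary data after
+recvmsg(). A userspace sketch; the u16 payload size is an assumption based on
+the pkt_seqnum field added to bt_skb_cb below:
+
+        static int get_pkt_seqnum(struct msghdr *msg, uint16_t *seqnum)
+        {
+                struct cmsghdr *cmsg;
+
+                for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+                     cmsg = CMSG_NXTHDR(msg, cmsg)) {
+                        if (cmsg->cmsg_level == SOL_BLUETOOTH &&
+                            cmsg->cmsg_type == BT_SCM_PKT_SEQNUM) {
+                                memcpy(seqnum, CMSG_DATA(cmsg), sizeof(*seqnum));
+                                return 0;
+                        }
+                }
+                return -1;
+        }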
__printf(1, 2)
void bt_info(const char *fmt, ...);
__printf(1, 2)
@@ -263,7 +272,8 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
-#define BT_DBG(fmt, ...) bt_dbg(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) \
+ bt_dbg("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
#else
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
#endif
@@ -388,7 +398,8 @@ struct bt_sock {
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
- BT_SK_PKT_STATUS
+ BT_SK_PKT_STATUS,
+ BT_SK_PKT_SEQNUM,
};
struct bt_sock_list {
@@ -403,6 +414,7 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+bool bt_sock_linked(struct bt_sock_list *l, struct sock *s);
struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
struct proto *prot, int proto, gfp_t prio, int kern);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
@@ -441,6 +453,13 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb);
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb);
+
+int hci_ethtool_ts_info(unsigned int index, int sk_proto,
+ struct kernel_ethtool_ts_info *ts_info);
+
#define HCI_REQ_START BIT(0)
#define HCI_REQ_SKB BIT(1)
@@ -464,6 +483,7 @@ struct bt_skb_cb {
u8 pkt_type;
u8 force_active;
u16 expect;
+ u16 pkt_seqnum;
u8 incoming:1;
u8 pkt_status:2;
union {
@@ -477,6 +497,7 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status
+#define hci_skb_pkt_seqnum(skb) bt_cb((skb))->pkt_seqnum
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
@@ -585,15 +606,6 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
-static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
- sockptr_t src, size_t src_size)
-{
- if (dst_size > src_size)
- return -EINVAL;
-
- return copy_from_sockptr(dst, src, dst_size);
-}
-
int bt_to_errno(u16 code);
__u8 bt_status(int err);
@@ -636,7 +648,7 @@ static inline void sco_exit(void)
#if IS_ENABLED(CONFIG_BT_LE)
int iso_init(void);
int iso_exit(void);
-bool iso_enabled(void);
+bool iso_inited(void);
#else
static inline int iso_init(void)
{
@@ -648,7 +660,7 @@ static inline int iso_exit(void)
return 0;
}
-static inline bool iso_enabled(void)
+static inline bool iso_inited(void)
{
return false;
}
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index fe932ca3bc8c..a27cd3626b87 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -29,6 +29,7 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_ISO_SIZE 251
+#define HCI_MAX_ISO_BIS 31
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
@@ -67,6 +68,7 @@
#define HCI_I2C 8
#define HCI_SMD 9
#define HCI_VIRTIO 10
+#define HCI_IPC 11
/* HCI device quirks */
enum {
@@ -206,14 +208,24 @@ enum {
*/
HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- /* When this quirk is set, the controller has validated that
- * LE states reported through the HCI_LE_READ_SUPPORTED_STATES are
- * valid. This mechanism is necessary as many controllers have
- * been seen has having trouble initiating a connectable
- * advertisement despite the state combination being reported as
- * supported.
+ /* When this quirk is set, consider Sync Flow Control as supported by
+ * the driver.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED,
+
+ /* When this quirk is set, the LE states reported through the
+ * HCI_LE_READ_SUPPORTED_STATES are invalid/broken.
+ *
+ * This mechanism is necessary as many controllers have been seen as
+ * having trouble initiating a connectable advertisement despite the
+ * state combination being reported as supported.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
*/
- HCI_QUIRK_VALID_LE_STATES,
+ HCI_QUIRK_BROKEN_LE_STATES,
/* When this quirk is set, then erroneous data reporting
* is ignored. This is mainly due to the fact that the HCI
@@ -297,6 +309,20 @@ enum {
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+ /*
+ * When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
+ * disabled. This is required for the Actions Semiconductor ATS2851
+ * based controllers, which erroneously claims to support it.
+ * based controllers, which erroneously claim to support it.
+ HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
+
+ /*
+ * When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
+ * skipped. This is required for the Actions Semiconductor ATS2851
+ * based controllers, due to a race condition in the pairing process.
+ */
+ HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
+
/* When this quirk is set, MSFT extension monitor tracking by
* address filter is supported. Since tracking quantity of each
* pattern is limited, this feature supports tracking multiple
@@ -324,6 +350,35 @@ enum {
* claim to support it.
*/
HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,
+
+ /*
+ * When this quirk is set, the reserved bits of Primary/Secondary_PHY
+ * inside the LE Extended Advertising Report events are discarded.
+ * This is required for some Apple/Broadcom controllers which
+ * abuse these reserved bits for unrelated flags.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,
+
+ /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_VOICE_SETTING,
+
+ /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is
+ * skipped. This is required for a subset of the CSR controller clones
+ * which erroneously claim to support it.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE,
+
+ __HCI_NUM_QUIRKS,
};
/* HCI device flags */
@@ -379,6 +434,7 @@ enum {
HCI_USER_CHANNEL,
HCI_EXT_CONFIGURED,
HCI_LE_ADV,
+ HCI_LE_ADV_0,
HCI_LE_PER_ADV,
HCI_LE_SCAN,
HCI_SSP_ENABLED,
@@ -402,13 +458,13 @@ enum {
HCI_WIDEBAND_SPEECH_ENABLED,
HCI_EVENT_FILTER_CONFIGURED,
HCI_PA_SYNC,
+ HCI_SCO_FLOWCTL,
HCI_DUT_MODE,
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
- HCI_ENABLE_LL_PRIVACY,
HCI_CMD_PENDING,
HCI_FORCE_NO_MITM,
HCI_QUALITY_REPORT,
@@ -433,6 +489,7 @@ enum {
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+#define HCI_ISO_TX_TIMEOUT usecs_to_jiffies(0x7fffff) /* 8388607 usecs */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -441,6 +498,7 @@ enum {
#define HCI_EVENT_PKT 0x04
#define HCI_ISODATA_PKT 0x05
#define HCI_DIAG_PKT 0xf0
+#define HCI_DRV_PKT 0xf1
#define HCI_VENDOR_PKT 0xff
/* HCI packet types */
@@ -504,7 +562,9 @@ enum {
#define ESCO_LINK 0x02
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
-#define ISO_LINK 0x82
+#define CIS_LINK 0x82
+#define BIS_LINK 0x83
+#define PA_LINK 0x84
#define INVALID_LINK 0xff
/* LMP features */
@@ -587,10 +647,13 @@ enum {
#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_PERIODIC_ADV 0x20
#define HCI_LE_CHAN_SEL_ALG2 0x40
+#define HCI_LE_PAST_SENDER 0x01
+#define HCI_LE_PAST_RECEIVER 0x02
#define HCI_LE_CIS_CENTRAL 0x10
#define HCI_LE_CIS_PERIPHERAL 0x20
#define HCI_LE_ISO_BROADCASTER 0x40
#define HCI_LE_ISO_SYNC_RECEIVER 0x80
+#define HCI_LE_LL_EXT_FEATURE 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -654,7 +717,7 @@ enum {
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
#define HCI_ERROR_LOCAL_HOST_TERM 0x16
#define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
-#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e
+#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
@@ -669,6 +732,7 @@ enum {
#define HCI_RSSI_INVALID 127
#define HCI_SYNC_HANDLE_INVALID 0xffff
+#define HCI_SID_INVALID 0xff
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -825,6 +889,11 @@ struct hci_cp_remote_name_req_cancel {
bdaddr_t bdaddr;
} __packed;
+struct hci_rp_remote_name_req_cancel {
+ __u8 status;
+ bdaddr_t bdaddr;
+} __packed;
+
#define HCI_OP_READ_REMOTE_FEATURES 0x041b
struct hci_cp_read_remote_features {
__le16 handle;
@@ -1498,6 +1567,11 @@ struct hci_rp_read_tx_power {
__s8 tx_power;
} __packed;
+#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f
+struct hci_cp_write_sync_flowctl {
+ __u8 enable;
+} __packed;
+
#define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46
struct hci_rp_read_page_scan_type {
__u8 status;
@@ -1867,6 +1941,8 @@ struct hci_cp_le_pa_create_sync {
__u8 sync_cte_type;
} __packed;
+#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045
+
#define HCI_OP_LE_PA_TERM_SYNC 0x2046
struct hci_cp_le_pa_term_sync {
__le16 handle;
@@ -1995,6 +2071,44 @@ struct hci_cp_le_set_privacy_mode {
__u8 mode;
} __packed;
+#define HCI_OP_LE_PAST 0x205a
+struct hci_cp_le_past {
+ __le16 handle;
+ __le16 service_data;
+ __le16 sync_handle;
+} __packed;
+
+struct hci_rp_le_past {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_PAST_SET_INFO 0x205b
+struct hci_cp_le_past_set_info {
+ __le16 handle;
+ __le16 service_data;
+ __u8 adv_handle;
+} __packed;
+
+struct hci_rp_le_past_set_info {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_PAST_PARAMS 0x205c
+struct hci_cp_le_past_params {
+ __le16 handle;
+ __u8 mode;
+ __le16 skip;
+ __le16 sync_timeout;
+ __u8 cte_type;
+} __packed;
+
+struct hci_rp_le_past_params {
+ __u8 status;
+ __le16 handle;
+} __packed;
+
#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
struct hci_rp_le_read_buffer_size_v2 {
__u8 status;
@@ -2142,6 +2256,19 @@ struct hci_cp_le_set_host_feature {
__u8 bit_value;
} __packed;
+#define HCI_OP_LE_READ_ALL_LOCAL_FEATURES 0x2087
+struct hci_rp_le_read_all_local_features {
+ __u8 status;
+ __u8 page;
+ __u8 features[248];
+} __packed;
+
+#define HCI_OP_LE_READ_ALL_REMOTE_FEATURES 0x2088
+struct hci_cp_le_read_all_remote_features {
+ __le16 handle;
+ __u8 pages;
+} __packed;
+
/* ---- HCI Events ---- */
struct hci_ev_status {
__u8 status;
@@ -2564,6 +2691,7 @@ struct hci_ev_le_conn_complete {
#define LE_EXT_ADV_DIRECT_IND 0x0004
#define LE_EXT_ADV_SCAN_RSP 0x0008
#define LE_EXT_ADV_LEGACY_PDU 0x0010
+#define LE_EXT_ADV_DATA_STATUS_MASK 0x0060
#define LE_EXT_ADV_EVT_TYPE_MASK 0x007f
#define ADDR_LE_DEV_PUBLIC 0x00
@@ -2709,6 +2837,11 @@ struct hci_ev_le_per_adv_report {
__u8 data[];
} __packed;
+#define HCI_EV_LE_PA_SYNC_LOST 0x10
+struct hci_ev_le_pa_sync_lost {
+ __le16 handle;
+} __packed;
+
#define LE_PA_DATA_COMPLETE 0x00
#define LE_PA_DATA_MORE_TO_COME 0x01
#define LE_PA_DATA_TRUNCATED 0x02
@@ -2721,6 +2854,20 @@ struct hci_evt_le_ext_adv_set_term {
__u8 num_evts;
} __packed;
+#define HCI_EV_LE_PAST_RECEIVED 0x18
+struct hci_ev_le_past_received {
+ __u8 status;
+ __le16 handle;
+ __le16 service_data;
+ __le16 sync_handle;
+ __u8 sid;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 phy;
+ __le16 interval;
+ __u8 clock_accuracy;
+} __packed;
+
#define HCI_EVT_LE_CIS_ESTABLISHED 0x19
struct hci_evt_le_cis_established {
__u8 status;
@@ -2766,8 +2913,8 @@ struct hci_evt_le_create_big_complete {
__le16 bis_handle[];
} __packed;
-#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d
-struct hci_evt_le_big_sync_estabilished {
+#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d
+struct hci_evt_le_big_sync_established {
__u8 status;
__u8 handle;
__u8 latency[3];
@@ -2781,6 +2928,12 @@ struct hci_evt_le_big_sync_estabilished {
__le16 bis[];
} __packed;
+#define HCI_EVT_LE_BIG_SYNC_LOST 0x1e
+struct hci_evt_le_big_sync_lost {
+ __u8 handle;
+ __u8 reason;
+} __packed;
+
#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22
struct hci_evt_le_big_info_adv_report {
__le16 sync_handle;
@@ -2798,6 +2951,15 @@ struct hci_evt_le_big_info_adv_report {
__u8 encryption;
} __packed;
+#define HCI_EVT_LE_ALL_REMOTE_FEATURES_COMPLETE 0x2b
+struct hci_evt_le_read_all_remote_features_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 max_pages;
+ __u8 valid_pages;
+ __u8 features[248];
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
@@ -2887,6 +3049,11 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
return (struct hci_sco_hdr *) skb->data;
}
+static inline struct hci_iso_hdr *hci_iso_hdr(const struct sk_buff *skb)
+{
+ return (struct hci_iso_hdr *)skb->data;
+}
+
/* Command opcode pack/unpack */
#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
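For reference, the pack/unpack helpers split a 16-bit opcode into a 6-bit OGF and a 10-bit OCF, which lines up with the LE opcodes defined above:

/* Worked example:
 * hci_opcode_pack(0x08, 0x045) == (0x045 & 0x03ff) | (0x08 << 10)
 *                              == 0x0045 | 0x2000
 *                              == 0x2045  (HCI_OP_LE_PA_CREATE_SYNC_CANCEL)
 * hci_opcode_ogf(0x2045)       == 0x08   (LE Controller commands)
 * and the low 10 bits recover the OCF, 0x045.
 */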
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9231396fe96f..4263e71a23ef 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -29,8 +29,11 @@
#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_drv.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>
@@ -91,9 +94,8 @@ struct discovery_state {
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
- unsigned long scan_start;
- unsigned long scan_duration;
unsigned long name_resolve_timeout;
+ spinlock_t lock;
};
#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
@@ -127,7 +129,9 @@ struct hci_conn_hash {
struct list_head list;
unsigned int acl_num;
unsigned int sco_num;
- unsigned int iso_num;
+ unsigned int cis_num;
+ unsigned int bis_num;
+ unsigned int pa_num;
unsigned int le_num;
unsigned int le_num_peripheral;
};
@@ -159,8 +163,10 @@ struct bdaddr_list_with_irk {
/* Bitmask of connection flags */
enum hci_conn_flags {
- HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
- HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
+ HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0),
+ HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1),
+ HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2),
+ HCI_CONN_FLAG_PAST = BIT(3),
};
typedef u8 hci_conn_flags_t;
@@ -188,7 +194,6 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[16];
};
@@ -198,7 +203,6 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
- u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -213,7 +217,6 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
- u8 link_type;
u8 val[16];
};
@@ -221,8 +224,6 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
- u8 bdaddr_type;
- u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -244,9 +245,11 @@ struct adv_info {
bool enabled;
bool pending;
bool periodic;
+ bool periodic_enabled;
__u8 mesh;
__u8 instance;
__u8 handle;
+ __u8 sid;
__u32 flags;
__u16 timeout;
__u16 remaining_time;
@@ -267,6 +270,12 @@ struct adv_info {
struct delayed_work rpa_expired_cb;
};
+struct tx_queue {
+ struct sk_buff_head queue;
+ unsigned int extra;
+ unsigned int tracked;
+};
+
#define HCI_MAX_ADV_INSTANCES 5
#define HCI_DEFAULT_ADV_DURATION 2
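struct tx_queue backs the hci_conn_tx_queue()/hci_conn_tx_dequeue() helpers declared later in this header. The field semantics are inferred from the names, so treat this as a sketch only: skbs that need completion tracking (e.g. TX timestamping) are queued and counted in tracked, while untracked ones only bump extra:

static void tx_queue_track(struct tx_queue *q, struct sk_buff *skb,
			   bool wants_tracking)
{
	if (wants_tracking) {
		skb_queue_tail(&q->queue, skb);	/* completed on dequeue */
		q->tracked++;
	} else {
		q->extra++;			/* accounted, not queued */
	}
}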
@@ -345,6 +354,7 @@ struct adv_monitor {
struct hci_dev {
struct list_head list;
+ struct srcu_struct srcu;
struct mutex lock;
struct ida unset_handle_ida;
@@ -368,7 +378,7 @@ struct hci_dev {
__u8 minor_class;
__u8 max_page;
__u8 features[HCI_MAX_PAGES][8];
- __u8 le_features[8];
+ __u8 le_features[248];
__u8 le_accept_list_size;
__u8 le_resolv_list_size;
__u8 le_num_of_adv_sets;
@@ -460,7 +470,7 @@ struct hci_dev {
unsigned int auto_accept_delay;
- unsigned long quirks;
+ DECLARE_BITMAP(quirk_flags, __HCI_NUM_QUIRKS);
atomic_t cmd_cnt;
unsigned int acl_cnt;
@@ -478,8 +488,8 @@ struct hci_dev {
unsigned int iso_pkts;
unsigned long acl_last_tx;
- unsigned long sco_last_tx;
unsigned long le_last_tx;
+ unsigned long iso_last_tx;
__u8 le_tx_def_phys;
__u8 le_rx_def_phys;
@@ -530,7 +540,6 @@ struct hci_dev {
struct discovery_state discovery;
- int discovery_old_state;
bool discovery_paused;
int advertising_old_state;
bool advertising_paused;
@@ -547,6 +556,7 @@ struct hci_dev {
struct hci_conn_hash conn_hash;
struct list_head mesh_pending;
+ struct mutex mgmt_pending_lock;
struct list_head mgmt_pending;
struct list_head reject_list;
struct list_head accept_list;
@@ -615,6 +625,8 @@ struct hci_dev {
struct list_head monitored_devices;
bool advmon_pend_notify;
+ struct hci_drv *hci_drv;
+
#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
#endif
@@ -641,7 +653,6 @@ struct hci_dev {
int (*post_init)(struct hci_dev *hdev);
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
- void (*cmd_timeout)(struct hci_dev *hdev);
void (*reset)(struct hci_dev *hdev);
bool (*wakeup)(struct hci_dev *hdev);
int (*set_quality_report)(struct hci_dev *hdev, bool enable);
@@ -649,8 +660,13 @@ struct hci_dev {
int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
struct bt_codec *codec, __u8 *vnd_len,
__u8 **vnd_data);
+ u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
};
+#define hci_set_quirk(hdev, nr) set_bit((nr), (hdev)->quirk_flags)
+#define hci_clear_quirk(hdev, nr) clear_bit((nr), (hdev)->quirk_flags)
+#define hci_test_quirk(hdev, nr) test_bit((nr), (hdev)->quirk_flags)
+
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
enum conn_reasons {
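These helpers replace open-coded bit operations on the old quirks word, and the rest of this patch converts callers accordingly (see the hci_test_quirk() conversions further down). The pattern, sketched:

/* old:  if (test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks)) ...
 * new:  if (hci_test_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN)) ...
 *
 * and in a driver probe path:
 * old:  set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 * new:  hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
 */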
@@ -676,6 +692,7 @@ struct hci_conn {
__u8 adv_instance;
__u16 handle;
__u16 sync_handle;
+ __u8 sid;
__u16 state;
__u16 mtu;
__u8 mode;
@@ -685,6 +702,7 @@ struct hci_conn {
__u8 attempt;
__u8 dev_class[3];
__u8 features[HCI_MAX_PAGES][8];
+ __u8 le_features[248];
__u16 pkt_type;
__u16 link_policy;
__u8 key_type;
@@ -718,6 +736,9 @@ struct hci_conn {
__s8 tx_power;
__s8 max_tx_power;
struct bt_iso_qos iso_qos;
+ __u8 num_bis;
+ __u8 bis[HCI_MAX_ISO_BIS];
+
unsigned long flags;
enum conn_reasons conn_reason;
@@ -730,13 +751,14 @@ struct hci_conn {
__u8 remote_cap;
__u8 remote_auth;
- __u8 remote_id;
unsigned int sent;
struct sk_buff_head data_q;
struct list_head chan_list;
+ struct tx_queue tx_q;
+
struct delayed_work disc_work;
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
@@ -818,29 +840,30 @@ extern struct mutex hci_cb_list_lock;
#define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)
-#define hci_dev_clear_volatile_flags(hdev) \
- do { \
- hci_dev_clear_flag(hdev, HCI_LE_SCAN); \
- hci_dev_clear_flag(hdev, HCI_LE_ADV); \
- hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
- hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \
- hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \
+#define hci_dev_clear_volatile_flags(hdev) \
+ do { \
+ hci_dev_clear_flag((hdev), HCI_LE_SCAN); \
+ hci_dev_clear_flag((hdev), HCI_LE_ADV); \
+ hci_dev_clear_flag((hdev), HCI_LL_RPA_RESOLUTION); \
+ hci_dev_clear_flag((hdev), HCI_PERIODIC_INQ); \
+ hci_dev_clear_flag((hdev), HCI_QUALITY_REPORT); \
} while (0)
#define hci_dev_le_state_simultaneous(hdev) \
- (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
- (hdev->le_states[4] & 0x08) && /* Central */ \
- (hdev->le_states[4] & 0x40) && /* Peripheral */ \
- (hdev->le_states[3] & 0x10)) /* Simultaneous */
+ (!hci_test_quirk((hdev), HCI_QUIRK_BROKEN_LE_STATES) && \
+ ((hdev)->le_states[4] & 0x08) && /* Central */ \
+ ((hdev)->le_states[4] & 0x40) && /* Peripheral */ \
+ ((hdev)->le_states[3] & 0x10)) /* Simultaneous */
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
-void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
+ u16 flags);
#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int sco_recv_scodata(struct hci_dev *hdev, u16 handle, struct sk_buff *skb);
#else
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
@@ -848,23 +871,30 @@ static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
return 0;
}
-static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+static inline int sco_recv_scodata(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb)
{
+ kfree_skb(skb);
+ return -ENOENT;
}
#endif
#if IS_ENABLED(CONFIG_BT_LE)
int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+int iso_recv(struct hci_dev *hdev, u16 handle, struct sk_buff *skb,
+ u16 flags);
#else
static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
__u8 *flags)
{
return 0;
}
-static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
- u16 flags)
+
+static inline int iso_recv(struct hci_dev *hdev, u16 handle,
+ struct sk_buff *skb, u16 flags)
{
+ kfree_skb(skb);
+ return -ENOENT;
}
#endif
@@ -874,6 +904,7 @@ static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb,
static inline void discovery_init(struct hci_dev *hdev)
{
+ spin_lock_init(&hdev->discovery.lock);
hdev->discovery.state = DISCOVERY_STOPPED;
INIT_LIST_HEAD(&hdev->discovery.all);
INIT_LIST_HEAD(&hdev->discovery.unknown);
@@ -888,10 +919,11 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.report_invalid_rssi = true;
hdev->discovery.rssi = HCI_RSSI_INVALID;
hdev->discovery.uuid_count = 0;
+
+ spin_lock(&hdev->discovery.lock);
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
- hdev->discovery.scan_start = 0;
- hdev->discovery.scan_duration = 0;
+ spin_unlock(&hdev->discovery.lock);
}
bool hci_discovery_active(struct hci_dev *hdev);
@@ -955,8 +987,10 @@ enum {
HCI_CONN_PER_ADV,
HCI_CONN_BIG_CREATED,
HCI_CONN_CREATE_CIS,
+ HCI_CONN_CREATE_BIG_SYNC,
HCI_CONN_BIG_SYNC,
HCI_CONN_BIG_SYNC_FAILED,
+ HCI_CONN_CREATE_PA_SYNC,
HCI_CONN_PA_SYNC,
HCI_CONN_PA_SYNC_FAILED,
};
@@ -992,8 +1026,14 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case ESCO_LINK:
h->sco_num++;
break;
- case ISO_LINK:
- h->iso_num++;
+ case CIS_LINK:
+ h->cis_num++;
+ break;
+ case BIS_LINK:
+ h->bis_num++;
+ break;
+ case PA_LINK:
+ h->pa_num++;
break;
}
}
@@ -1018,8 +1058,14 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case ESCO_LINK:
h->sco_num--;
break;
- case ISO_LINK:
- h->iso_num--;
+ case CIS_LINK:
+ h->cis_num--;
+ break;
+ case BIS_LINK:
+ h->bis_num--;
+ break;
+ case PA_LINK:
+ h->pa_num--;
break;
}
}
@@ -1035,8 +1081,12 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
case SCO_LINK:
case ESCO_LINK:
return h->sco_num;
- case ISO_LINK:
- return h->iso_num;
+ case CIS_LINK:
+ return h->cis_num;
+ case BIS_LINK:
+ return h->bis_num;
+ case PA_LINK:
+ return h->pa_num;
default:
return 0;
}
@@ -1046,7 +1096,15 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
struct hci_conn_hash *c = &hdev->conn_hash;
- return c->acl_num + c->sco_num + c->le_num + c->iso_num;
+ return c->acl_num + c->sco_num + c->le_num + c->cis_num + c->bis_num +
+ c->pa_num;
+}
+
+static inline unsigned int hci_iso_count(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+ return c->cis_num + c->bis_num;
}
static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
@@ -1096,7 +1154,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
+ if (bacmp(&c->dst, ba) || c->type != BIS_LINK)
continue;
if (c->iso_qos.bcast.bis == bis) {
@@ -1110,6 +1168,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
}
static inline struct hci_conn *
+hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != PA_LINK)
+ continue;
+
+ if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
+ continue;
+
+ rcu_read_unlock();
+ return c;
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
bdaddr_t *ba,
__u8 big, __u8 bis)
@@ -1120,8 +1202,8 @@ hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PER_ADV, &c->flags))
+ if (bacmp(&c->dst, ba) || c->type != BIS_LINK ||
+ !test_bit(HCI_CONN_PER_ADV, &c->flags))
continue;
if (c->iso_qos.bcast.big == big &&
@@ -1174,6 +1256,27 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *hci_conn_hash_lookup_role(struct hci_dev *hdev,
+ __u8 type, __u8 role,
+ bdaddr_t *ba)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && c->role == role && !bacmp(&c->dst, ba)) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
bdaddr_t *ba,
__u8 ba_type)
@@ -1210,7 +1313,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ if (c->type != CIS_LINK)
continue;
/* Match CIG ID if set */
@@ -1242,7 +1345,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
+ if (c->type != CIS_LINK)
continue;
if (handle == c->iso_qos.ucast.cig) {
@@ -1265,7 +1368,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK)
+ if (c->type != BIS_LINK)
continue;
if (handle == c->iso_qos.bcast.big) {
@@ -1280,7 +1383,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
}
static inline struct hci_conn *
-hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
+hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
+ __u8 handle, __u8 num_bis)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1288,11 +1392,10 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
- c->state != state)
+ if (c->type != PA_LINK)
continue;
- if (handle == c->iso_qos.bcast.big) {
+ if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
rcu_read_unlock();
return c;
}
@@ -1304,7 +1407,8 @@ hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
}
static inline struct hci_conn *
-hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
+hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state,
+ __u8 role)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1312,22 +1416,22 @@ hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ if (c->type != BIS_LINK || c->state != state || c->role != role)
continue;
- if (c->iso_qos.bcast.big == big) {
+ if (handle == c->iso_qos.bcast.big) {
rcu_read_unlock();
return c;
}
}
+
rcu_read_unlock();
return NULL;
}
static inline struct hci_conn *
-hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1335,10 +1439,11 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != BIS_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
continue;
- if (c->sync_handle == sync_handle) {
+ if (c->iso_qos.bcast.big == big) {
rcu_read_unlock();
return c;
}
@@ -1348,8 +1453,8 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
return NULL;
}
-static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
- __u8 type, __u16 state)
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1357,12 +1462,21 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type == type && c->state == state) {
+ if (c->type != PA_LINK)
+ continue;
+
+ /* Ignore the listen hcon, we are looking
+ * for the child hcon that was created as
+ * a result of the PA sync established event.
+ */
+ if (c->state == BT_LISTEN)
+ continue;
+
+ if (c->sync_handle == sync_handle) {
rcu_read_unlock();
return c;
}
}
-
rcu_read_unlock();
return NULL;
@@ -1458,9 +1572,9 @@ int hci_le_create_cis_pending(struct hci_dev *hdev);
int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role, u16 handle);
+ u8 dst_type, u8 role, u16 handle);
struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
- bdaddr_t *dst, u8 role);
+ bdaddr_t *dst, u8 dst_type, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
@@ -1484,20 +1598,24 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting, struct bt_codec *codec,
u16 timeout);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos);
-struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, struct bt_iso_qos *qos,
+ u16 timeout);
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base);
+ __u8 base_len, __u8 *base, u16 timeout);
+int hci_past_bis(struct hci_conn *conn, bdaddr_t *dst, __u8 dst_type);
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos);
-struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos,
- __u8 data_len, __u8 *data);
+ u16 timeout);
+struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ __u8 dst_type, __u8 sid,
+ struct bt_iso_qos *qos,
+ __u8 data_len, __u8 *data, u16 timeout);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
-int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
- struct bt_iso_qos *qos,
- __u16 sync_handle, __u8 num_bis, __u8 bis[]);
+int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos, __u16 sync_handle,
+ __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
@@ -1509,6 +1627,18 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_failed(struct hci_conn *conn, u8 status);
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
+void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb);
+void hci_conn_tx_dequeue(struct hci_conn *conn);
+void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
+ const struct sockcm_cookie *sockc);
+
+static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags),
+ };
+}
+
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
* "hci_conn" object. They do not guarantee that the hci_conn object is running,
@@ -1697,8 +1827,6 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
u8 type);
-int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
- u8 type);
void hci_bdaddr_list_clear(struct list_head *list);
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
@@ -1719,6 +1847,7 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
void hci_uuids_clear(struct hci_dev *hdev);
void hci_link_keys_clear(struct hci_dev *hdev);
+u8 *hci_conn_key_enc_size(struct hci_conn *conn);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *val, u8 type,
@@ -1755,6 +1884,7 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
void hci_adv_instances_clear(struct hci_dev *hdev);
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
+struct adv_info *hci_find_adv_sid(struct hci_dev *hdev, u8 sid);
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
u32 flags, u16 adv_data_len, u8 *adv_data,
@@ -1762,7 +1892,7 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
u16 timeout, u16 duration, s8 tx_power,
u32 min_interval, u32 max_interval,
u8 mesh_handle);
-struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
+struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u8 sid,
u32 flags, u8 data_len, u8 *data,
u32 min_interval, u32 max_interval);
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
@@ -1797,6 +1927,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK)
+#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO)
#define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR))
@@ -1839,6 +1970,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
!hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
#define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
!adv->rpa_expired)
+#define le_enabled(dev) (lmp_le_capable(dev) && \
+ hci_dev_test_flag(dev, HCI_LE_ENABLED))
#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
@@ -1849,40 +1982,41 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
- !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \
- &(dev)->quirks))
+ !hci_test_quirk((dev), \
+ HCI_QUIRK_BROKEN_LE_CODED))
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
+#define ll_privacy_enabled(dev) (le_enabled(dev) && ll_privacy_capable(dev))
-/* Use LL Privacy based address resolution if supported */
-#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
- hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
-
-#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
- (hdev->commands[39] & 0x04))
+#define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \
+ ((dev)->commands[39] & 0x04))
#define read_key_size_capable(dev) \
((dev)->commands[20] & 0x10 && \
- !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+ !hci_test_quirk((dev), HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE))
+
+#define read_voice_setting_capable(dev) \
+ ((dev)->commands[9] & 0x04 && \
+ !hci_test_quirk((dev), HCI_QUIRK_BROKEN_READ_VOICE_SETTING))
/* Use enhanced synchronous connection if command is supported and its quirk
* has not been set.
*/
#define enhanced_sync_conn_capable(dev) \
(((dev)->commands[29] & 0x08) && \
- !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))
+ !hci_test_quirk((dev), HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN))
/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
((dev)->commands[37] & 0x40) && \
- !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks))
+ !hci_test_quirk((dev), HCI_QUIRK_BROKEN_EXT_SCAN))
/* Use ext create connection if command is supported */
-#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
-
+#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \
+ !hci_test_quirk((dev), HCI_QUIRK_BROKEN_EXT_CREATE_CONN))
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
@@ -1895,25 +2029,50 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
* C24: Mandatory if the LE Controller supports Connection State and either
* LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
*/
-#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
- ext_adv_capable(dev))
+#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \
+ ext_adv_capable(dev)) && \
+ !hci_test_quirk((dev), \
+ HCI_QUIRK_BROKEN_EXT_CREATE_CONN))
/* Periodic advertising support */
#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV))
/* CIS Master/Slave and BIS support */
#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
+#define iso_enabled(dev) (le_enabled(dev) && iso_capable(dev))
#define cis_capable(dev) \
(cis_central_capable(dev) || cis_peripheral_capable(dev))
+#define cis_enabled(dev) (le_enabled(dev) && cis_capable(dev))
#define cis_central_capable(dev) \
((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
+#define cis_central_enabled(dev) \
+ (le_enabled(dev) && cis_central_capable(dev))
#define cis_peripheral_capable(dev) \
((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
+#define cis_peripheral_enabled(dev) \
+ (le_enabled(dev) && cis_peripheral_capable(dev))
#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
-#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
+#define bis_enabled(dev) (le_enabled(dev) && bis_capable(dev))
+#define sync_recv_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
+#define sync_recv_enabled(dev) (le_enabled(dev) && sync_recv_capable(dev))
+#define past_sender_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_PAST_SENDER)
+#define past_receiver_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_PAST_RECEIVER)
+#define past_capable(dev) \
+ (past_sender_capable(dev) || past_receiver_capable(dev))
+#define past_sender_enabled(dev) \
+ (le_enabled(dev) && past_sender_capable(dev))
+#define past_receiver_enabled(dev) \
+ (le_enabled(dev) && past_receiver_capable(dev))
+#define past_enabled(dev) \
+ (past_sender_enabled(dev) || past_receiver_enabled(dev))
+#define ll_ext_feature_capable(dev) \
+ ((dev)->le_features[7] & HCI_LE_LL_EXT_FEATURE)
#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
- (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
+ (!hci_test_quirk((dev), HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG)))
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1929,7 +2088,9 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
case ESCO_LINK:
return sco_connect_ind(hdev, bdaddr, flags);
- case ISO_LINK:
+ case CIS_LINK:
+ case BIS_LINK:
+ case PA_LINK:
return iso_connect_ind(hdev, bdaddr, flags);
default:
@@ -2113,18 +2274,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
{
u16 max_latency;
- if (min > max || min < 6 || max > 3200)
+ if (min > max) {
+ BT_WARN("min %d > max %d", min, max);
+ return -EINVAL;
+ }
+
+ if (min < 6) {
+ BT_WARN("min %d < 6", min);
+ return -EINVAL;
+ }
+
+ if (max > 3200) {
+ BT_WARN("max %d > 3200", max);
+ return -EINVAL;
+ }
+
+ if (to_multiplier < 10) {
+ BT_WARN("to_multiplier %d < 10", to_multiplier);
return -EINVAL;
+ }
- if (to_multiplier < 10 || to_multiplier > 3200)
+ if (to_multiplier > 3200) {
+ BT_WARN("to_multiplier %d > 3200", to_multiplier);
return -EINVAL;
+ }
- if (max >= to_multiplier * 8)
+ if (max >= to_multiplier * 8) {
+ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
return -EINVAL;
+ }
max_latency = (to_multiplier * 4 / max) - 1;
- if (latency > 499 || latency > max_latency)
+ if (latency > 499) {
+ BT_WARN("latency %d > 499", latency);
+ return -EINVAL;
+ }
+
+ if (latency > max_latency) {
+ BT_WARN("latency %d > max_latency %d", latency, max_latency);
return -EINVAL;
+ }
return 0;
}
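The expanded checks keep the original bounds but name the offending parameter in the warning. A worked example of the derived bound, with units per the Core specification (interval in 1.25 ms steps, timeout in 10 ms steps, stated here as an assumption):

/* max = 40 (40 * 1.25 ms = 50 ms interval),
 * to_multiplier = 500 (500 * 10 ms = 5 s supervision timeout):
 *
 *	max_latency = (500 * 4 / 40) - 1 = 49
 *
 * so latency = 50 is rejected by the latency > max_latency check even
 * though it passes the absolute latency > 499 check.
 */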
@@ -2239,8 +2428,8 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status);
-void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 status);
@@ -2267,8 +2456,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
-void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
@@ -2294,7 +2481,6 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
-void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
bdaddr_t *bdaddr, u8 addr_type);
diff --git a/include/net/bluetooth/hci_drv.h b/include/net/bluetooth/hci_drv.h
new file mode 100644
index 000000000000..3fd6fdbdb02e
--- /dev/null
+++ b/include/net/bluetooth/hci_drv.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google Corporation
+ */
+
+#ifndef __HCI_DRV_H
+#define __HCI_DRV_H
+
+#include <linux/types.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+
+struct hci_drv_cmd_hdr {
+ __le16 opcode;
+ __le16 len;
+} __packed;
+
+struct hci_drv_ev_hdr {
+ __le16 opcode;
+ __le16 len;
+} __packed;
+
+#define HCI_DRV_EV_CMD_STATUS 0x0000
+struct hci_drv_ev_cmd_status {
+ __le16 opcode;
+ __u8 status;
+} __packed;
+
+#define HCI_DRV_EV_CMD_COMPLETE 0x0001
+struct hci_drv_ev_cmd_complete {
+ __le16 opcode;
+ __u8 status;
+ __u8 data[];
+} __packed;
+
+#define HCI_DRV_STATUS_SUCCESS 0x00
+#define HCI_DRV_STATUS_UNSPECIFIED_ERROR 0x01
+#define HCI_DRV_STATUS_UNKNOWN_COMMAND 0x02
+#define HCI_DRV_STATUS_INVALID_PARAMETERS 0x03
+
+#define HCI_DRV_MAX_DRIVER_NAME_LENGTH 32
+
+/* Common commands that make sense on all drivers start from 0x0000 */
+#define HCI_DRV_OP_READ_INFO 0x0000
+#define HCI_DRV_READ_INFO_SIZE 0
+struct hci_drv_rp_read_info {
+ __u8 driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH];
+ __le16 num_supported_commands;
+ __le16 supported_commands[] __counted_by_le(num_supported_commands);
+} __packed;
+
+/* Driver specific OGF (Opcode Group Field)
+ * Commands in this group may have different meanings across different drivers.
+ */
+#define HCI_DRV_OGF_DRIVER_SPECIFIC 0x01
+
+int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status);
+int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp,
+ size_t rp_len);
+int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *cmd_skb);
+
+struct hci_drv_handler {
+ int (*func)(struct hci_dev *hdev, void *data, u16 data_len);
+ size_t data_len;
+};
+
+struct hci_drv {
+ size_t common_handler_count;
+ const struct hci_drv_handler *common_handlers;
+
+ size_t specific_handler_count;
+ const struct hci_drv_handler *specific_handlers;
+};
+
+#endif /* __HCI_DRV_H */
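A hedged sketch of how a transport driver might instantiate these hooks; the names and the handler body are illustrative, only the structure shapes and the hci_drv_cmd_complete() signature come from this header:

static int xyz_drv_read_info(struct hci_dev *hdev, void *data, u16 data_len)
{
	/* A real handler would fill a struct hci_drv_rp_read_info */
	return hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
				    HCI_DRV_STATUS_SUCCESS, NULL, 0);
}

static const struct hci_drv_handler xyz_common_handlers[] = {
	{ xyz_drv_read_info, HCI_DRV_READ_INFO_SIZE },
};

static struct hci_drv xyz_hci_drv = {
	.common_handler_count	= ARRAY_SIZE(xyz_common_handlers),
	.common_handlers	= xyz_common_handlers,
};

/* wired up at probe time via the new hdev->hci_drv pointer */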
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 082f89531b88..bbd752494ef9 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -51,6 +51,8 @@ struct hci_mon_hdr {
#define HCI_MON_CTRL_EVENT 17
#define HCI_MON_ISO_TX_PKT 18
#define HCI_MON_ISO_RX_PKT 19
+#define HCI_MON_DRV_TX_PKT 20
+#define HCI_MON_DRV_RX_PKT 21
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h
index 9949870f7d78..13e8cd4414a1 100644
--- a/include/net/bluetooth/hci_sock.h
+++ b/include/net/bluetooth/hci_sock.h
@@ -144,7 +144,7 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
- struct hci_dev_req dev_req[]; /* hci_dev_req structures */
+ struct hci_dev_req dev_req[] __counted_by(dev_num);
};
struct hci_conn_list_req {
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6a9d063e9f47..56076bbc981d 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -8,6 +8,23 @@
#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
+#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
+#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
+
+struct hci_request {
+ struct hci_dev *hdev;
+ struct sk_buff_head cmd_q;
+
+ /* If something goes wrong when building the HCI request, the error
+ * value is stored in this field.
+ */
+ int err;
+};
+
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
@@ -20,6 +37,10 @@ struct hci_cmd_sync_work_entry {
};
struct adv_info;
+
+struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, struct sock *sk);
+
/* Function with sync suffix shall not be called with hdev->lock held as they
* wait the command to complete and in the meantime an event could be received
* which could attempt to acquire hdev->lock causing a deadlock.
@@ -38,6 +59,8 @@ int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk);
+int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout);
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
@@ -50,6 +73,10 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
+int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy);
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
@@ -66,7 +93,7 @@ int hci_update_class_sync(struct hci_dev *hdev);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
-int hci_update_name_sync(struct hci_dev *hdev);
+int hci_update_name_sync(struct hci_dev *hdev, const u8 *name);
int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
@@ -88,8 +115,8 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
int hci_enable_advertising_sync(struct hci_dev *hdev);
int hci_enable_advertising(struct hci_dev *hdev);
-int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
- u8 *data, u32 flags, u16 min_interval,
+int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 sid,
+ u8 data_len, u8 *data, u32 flags, u16 min_interval,
u16 max_interval, u16 sync_interval);
int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance);
@@ -113,7 +140,6 @@ int hci_update_scan(struct hci_dev *hdev);
int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk);
-int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance);
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
struct sock *sk);
@@ -129,6 +155,8 @@ int hci_update_discoverable(struct hci_dev *hdev);
int hci_update_connectable_sync(struct hci_dev *hdev);
+int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp);
+
int hci_start_discovery_sync(struct hci_dev *hdev);
int hci_stop_discovery_sync(struct hci_dev *hdev);
@@ -136,6 +164,7 @@ int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev);
struct hci_conn;
+struct hci_conn_params;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
@@ -154,3 +183,11 @@ int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ struct hci_conn_params *params);
+
+int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_past_sync(struct hci_conn *conn, struct hci_conn *le);
+
+int hci_le_read_remote_features(struct hci_conn *conn);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 5cfdc813491a..00e182a22720 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -27,7 +27,7 @@
#ifndef __L2CAP_H
#define __L2CAP_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
/* L2CAP defaults */
@@ -668,7 +668,7 @@ struct l2cap_conn {
struct l2cap_chan *smp;
struct list_head chan_l;
- struct mutex chan_lock;
+ struct mutex lock;
struct kref ref;
struct list_head users;
};
@@ -955,7 +955,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason);
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
bdaddr_t *dst, u8 dst_type, u16 timeout);
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
+ const struct sockcm_cookie *sockc);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
@@ -968,12 +969,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data);
void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
-void l2cap_move_start(struct l2cap_chan *chan);
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status);
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result);
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
+struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index d382679efd2b..8234915854b6 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -53,10 +53,15 @@ struct mgmt_hdr {
} __packed;
struct mgmt_tlv {
- __le16 type;
- __u8 length;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(mgmt_tlv_hdr, __hdr, __packed,
+ __le16 type;
+ __u8 length;
+ );
__u8 value[];
} __packed;
+static_assert(offsetof(struct mgmt_tlv, value) == sizeof(struct mgmt_tlv_hdr),
+ "struct member likely outside of __struct_group()");
struct mgmt_addr_info {
bdaddr_t bdaddr;
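The __struct_group() gives the fixed header its own type, mgmt_tlv_hdr, so header-sized arithmetic no longer has to reach around the flexible value[] member; the static_assert guards against members being added outside the group. A sketch of walking a TLV buffer with the header view (bounds handling simplified):

static void mgmt_tlv_walk(const u8 *buf, size_t len)
{
	while (len >= sizeof(struct mgmt_tlv_hdr)) {
		const struct mgmt_tlv *tlv = (const struct mgmt_tlv *)buf;
		size_t step = sizeof(struct mgmt_tlv_hdr) + tlv->length;

		if (step > len)
			break;

		/* tlv->type and tlv->value[0..length) are usable here */
		buf += step;
		len -= step;
	}
}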
@@ -113,6 +118,9 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
#define MGMT_SETTING_ISO_BROADCASTER BIT(20)
#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21)
+#define MGMT_SETTING_LL_PRIVACY BIT(22)
+#define MGMT_SETTING_PAST_SENDER BIT(23)
+#define MGMT_SETTING_PAST_RECEIVER BIT(24)
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -774,7 +782,7 @@ struct mgmt_adv_pattern {
__u8 ad_type;
__u8 offset;
__u8 length;
- __u8 value[31];
+ __u8 value[HCI_MAX_AD_LENGTH];
} __packed;
#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052
@@ -847,7 +855,7 @@ struct mgmt_cp_set_mesh {
__le16 window;
__le16 period;
__u8 num_ad_types;
- __u8 ad_types[];
+ __u8 ad_types[] __counted_by(num_ad_types);
} __packed;
#define MGMT_SET_MESH_RECEIVER_SIZE 6
@@ -878,6 +886,16 @@ struct mgmt_cp_mesh_send_cancel {
} __packed;
#define MGMT_MESH_SEND_CANCEL_SIZE 1
+#define MGMT_OP_HCI_CMD_SYNC 0x005B
+struct mgmt_cp_hci_cmd_sync {
+ __le16 opcode;
+ __u8 event;
+ __u8 timeout;
+ __le16 params_len;
+ __u8 params[];
+} __packed;
+#define MGMT_HCI_CMD_SYNC_SIZE 6
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 99d26879b02a..c05882476900 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -355,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
- struct rfcomm_dev_info dev_info[];
+ struct rfcomm_dev_info dev_info[] __counted_by(dev_num);
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 9ce5ac2bfbad..c92d4a976246 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -26,6 +26,7 @@ enum {
BOND_AD_STABLE = 0,
BOND_AD_BANDWIDTH = 1,
BOND_AD_COUNT = 2,
+ BOND_AD_PRIO = 3,
};
/* rx machine states(43.4.11 in the 802.3ad standard) */
@@ -231,7 +232,10 @@ typedef struct port {
mux_states_t sm_mux_state; /* state machine mux state */
u16 sm_mux_timer_counter; /* state machine mux timer counter */
tx_states_t sm_tx_state; /* state machine tx state */
- u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter
+ * (always on - enter to transmit
+ * state 3 time per second)
+ */
u16 sm_churn_actor_timer_counter;
u16 sm_churn_partner_timer_counter;
u32 churn_actor_count;
@@ -271,6 +275,7 @@ struct ad_slave_info {
struct port port; /* 802.3ad port structure */
struct bond_3ad_stats stats;
u16 id;
+ u16 port_priority;
};
static inline const char *bond_3ad_churn_desc(churn_state_t state)
@@ -304,6 +309,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
+void bond_3ad_update_lacp_active(struct bonding *bond);
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
size_t bond_3ad_stats_size(void);
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index 9dc082b2d543..e5945427f38d 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -53,7 +53,7 @@ struct slave;
struct tlb_client_info {
- struct slave *tx_slave; /* A pointer to slave used for transmiting
+ struct slave *tx_slave; /* A pointer to slave used for transmitting
* packets to a Client that the Hash function
* gave this entry index.
*/
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 473a0147769e..e6eedf23aea1 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -77,6 +77,8 @@ enum {
BOND_OPT_NS_TARGETS,
BOND_OPT_PRIO,
BOND_OPT_COUPLED_CONTROL,
+ BOND_OPT_BROADCAST_NEIGH,
+ BOND_OPT_ACTOR_PORT_PRIO,
BOND_OPT_LAST
};
@@ -161,5 +163,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond);
#if IS_ENABLED(CONFIG_IPV6)
void bond_option_ns_ip6_targets_clear(struct bonding *bond);
#endif
+void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave);
+void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave);
#endif /* _NET_BOND_OPTIONS_H */
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b61fb1aa3a56..49edc7da0586 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -115,6 +115,8 @@ static inline int is_netpoll_tx_blocked(struct net_device *dev)
#define is_netpoll_tx_blocked(dev) (0)
#endif
+DECLARE_STATIC_KEY_FALSE(bond_bcast_neigh_enabled);
+
struct bond_params {
int mode;
int xmit_policy;
@@ -124,7 +126,6 @@ struct bond_params {
int arp_interval;
int arp_validate;
int arp_all_targets;
- int use_carrier;
int fail_over_mac;
int updelay;
int downdelay;
@@ -149,6 +150,7 @@ struct bond_params {
struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
#endif
int coupled_control;
+ int broadcast_neighbor;
/* 2 bytes of padding : see ether_addr_equal_64bits() */
u8 ad_actor_system[ETH_ALEN + 2];
@@ -260,7 +262,7 @@ struct bonding {
#ifdef CONFIG_XFRM_OFFLOAD
struct list_head ipsec_list;
/* protecting ipsec_list */
- spinlock_t ipsec_lock;
+ struct mutex ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
};
@@ -695,6 +697,7 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+bool bond_xdp_check(struct bonding *bond, int mode);
void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
int bond_netlink_init(void);
@@ -707,6 +710,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);
+void bond_work_cancel_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 9b09acac538e..6e172d0f6ef5 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -24,6 +24,11 @@
*/
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+static inline bool napi_id_valid(unsigned int napi_id)
+{
+ return napi_id >= MIN_NAPI_ID;
+}
+
#define BUSY_POLL_BUDGET 8
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -52,6 +57,9 @@ void napi_busy_loop_rcu(unsigned int napi_id,
bool (*loop_end)(void *, unsigned long),
void *loop_end_arg, bool prefer_busy_poll, u16 budget);
+void napi_suspend_irqs(unsigned int napi_id);
+void napi_resume_irqs(unsigned int napi_id);
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
@@ -68,7 +76,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
static inline unsigned long busy_loop_current_time(void)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
- return (unsigned long)(local_clock() >> 10);
+ return (unsigned long)(ktime_get_ns() >> 10);
#else
return 0;
#endif
@@ -111,7 +119,7 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
- if (napi_id >= MIN_NAPI_ID)
+ if (napi_id_valid(napi_id))
napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk,
READ_ONCE(sk->sk_prefer_busy_poll),
READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);
@@ -119,19 +127,25 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
}
/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_napi_id(struct sk_buff *skb,
- struct napi_struct *napi)
+static inline void __skb_mark_napi_id(struct sk_buff *skb,
+ const struct gro_node *gro)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
/* If the skb was already marked with a valid NAPI ID, avoid overwriting
* it.
*/
- if (skb->napi_id < MIN_NAPI_ID)
- skb->napi_id = napi->napi_id;
+ if (!napi_id_valid(skb->napi_id))
+ skb->napi_id = gro->cached_napi_id;
#endif
}
-/* used in the protocol hanlder to propagate the napi_id to the socket */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ const struct napi_struct *napi)
+{
+ __skb_mark_napi_id(skb, &napi->gro);
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -171,12 +185,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
#endif
}
-static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
- const struct xdp_buff *xdp)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
-#endif
-}
-
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
index 51f7bb42a936..053e7c6a6a66 100644
--- a/include/net/caif/caif_layer.h
+++ b/include/net/caif/caif_layer.h
@@ -11,9 +11,7 @@
struct cflayer;
struct cfpkt;
-struct cfpktq;
struct caif_payload_info;
-struct caif_packet_funcs;
#define CAIF_LAYER_NAME_SZ 16
@@ -22,7 +20,7 @@ struct caif_packet_funcs;
* @assert: expression to evaluate.
*
 * This function will print an error message and do a WARN_ON if the
- * assertion failes. Normally this will do a stack up at the current location.
+ * assertion fails. Normally this will do a stack up at the current location.
*/
#define caif_assert(assert) \
do { \
@@ -118,7 +116,7 @@ enum caif_direction {
* @dn: Pointer down to the layer below.
* @node: List node used when layer participate in a list.
* @receive: Packet receive function.
- * @transmit: Packet transmit funciton.
+ * @transmit: Packet transmit function.
* @ctrlcmd: Used for control signalling upwards in the stack.
* @modemcmd: Used for control signaling downwards in the stack.
* @id: The identity of this layer
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 44d914a50369..acf664227d96 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -18,7 +18,7 @@ struct cfpkt *cfpkt_create(u16 len);
/*
* Destroy a CAIF Packet.
- * pkt Packet to be destoyed.
+ * pkt Packet to be destroyed.
*/
void cfpkt_destroy(struct cfpkt *pkt);
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index 5ee7b322e18b..a000dc45f966 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -40,7 +40,6 @@ void cfsrvl_init(struct cfsrvl *service,
struct dev_info *dev_info,
bool supports_flowctrl);
bool cfsrvl_ready(struct cfsrvl *service, int *err);
-u8 cfsrvl_getphyid(struct cflayer *layer);
static inline void cfsrvl_get(struct cflayer *layr)
{
diff --git a/include/net/calipso.h b/include/net/calipso.h
index f8667a3fda9e..76b9e08c10c2 100644
--- a/include/net/calipso.h
+++ b/include/net/calipso.h
@@ -25,7 +25,7 @@
#include <net/netlabel.h>
#include <net/request_sock.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CALIPSO_DOI_UNKNOWN 0x00000000
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cbf1664dc569..899f267b7cf9 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -101,16 +101,6 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
- * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
- * on this channel.
* @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
* this flag indicates that a 320 MHz channel cannot use this
* channel as the control or any of the secondary channels.
@@ -125,33 +115,46 @@ struct wiphy;
* @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor
* mode even in the presence of other (regulatory) restrictions,
* even if it is otherwise disabled.
+ * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation
+ * with very low power (VLP), even if otherwise set to NO_IR.
+ * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
+ * even if otherwise set to NO_IR.
+ * @IEEE80211_CHAN_S1G_NO_PRIMARY: Prevents the channel from being used as
+ * an S1G primary channel. Does not prevent the wider operating channel
+ * described by the chandef from being used. For a 2MHz primary to be
+ * used, neither of its 1MHz subchannels may carry this flag.
+ * @IEEE80211_CHAN_NO_4MHZ: 4 MHz bandwidth is not permitted on this channel.
+ * @IEEE80211_CHAN_NO_8MHZ: 8 MHz bandwidth is not permitted on this channel.
+ * @IEEE80211_CHAN_NO_16MHZ: 16 MHz bandwidth is not permitted on this channel.
*/
enum ieee80211_channel_flags {
- IEEE80211_CHAN_DISABLED = 1<<0,
- IEEE80211_CHAN_NO_IR = 1<<1,
- IEEE80211_CHAN_PSD = 1<<2,
- IEEE80211_CHAN_RADAR = 1<<3,
- IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
- IEEE80211_CHAN_NO_OFDM = 1<<6,
- IEEE80211_CHAN_NO_80MHZ = 1<<7,
- IEEE80211_CHAN_NO_160MHZ = 1<<8,
- IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
- IEEE80211_CHAN_NO_20MHZ = 1<<11,
- IEEE80211_CHAN_NO_10MHZ = 1<<12,
- IEEE80211_CHAN_NO_HE = 1<<13,
- IEEE80211_CHAN_1MHZ = 1<<14,
- IEEE80211_CHAN_2MHZ = 1<<15,
- IEEE80211_CHAN_4MHZ = 1<<16,
- IEEE80211_CHAN_8MHZ = 1<<17,
- IEEE80211_CHAN_16MHZ = 1<<18,
- IEEE80211_CHAN_NO_320MHZ = 1<<19,
- IEEE80211_CHAN_NO_EHT = 1<<20,
- IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
- IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22,
- IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23,
- IEEE80211_CHAN_CAN_MONITOR = 1<<24,
+ IEEE80211_CHAN_DISABLED = BIT(0),
+ IEEE80211_CHAN_NO_IR = BIT(1),
+ IEEE80211_CHAN_PSD = BIT(2),
+ IEEE80211_CHAN_RADAR = BIT(3),
+ IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
+ IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
+ IEEE80211_CHAN_NO_OFDM = BIT(6),
+ IEEE80211_CHAN_NO_80MHZ = BIT(7),
+ IEEE80211_CHAN_NO_160MHZ = BIT(8),
+ IEEE80211_CHAN_INDOOR_ONLY = BIT(9),
+ IEEE80211_CHAN_IR_CONCURRENT = BIT(10),
+ IEEE80211_CHAN_NO_20MHZ = BIT(11),
+ IEEE80211_CHAN_NO_10MHZ = BIT(12),
+ IEEE80211_CHAN_NO_HE = BIT(13),
+ /* can use free bits here */
+ IEEE80211_CHAN_NO_320MHZ = BIT(19),
+ IEEE80211_CHAN_NO_EHT = BIT(20),
+ IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
+ IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22),
+ IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23),
+ IEEE80211_CHAN_CAN_MONITOR = BIT(24),
+ IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
+ IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
+ IEEE80211_CHAN_S1G_NO_PRIMARY = BIT(27),
+ IEEE80211_CHAN_NO_4MHZ = BIT(28),
+ IEEE80211_CHAN_NO_8MHZ = BIT(29),
+ IEEE80211_CHAN_NO_16MHZ = BIT(30),
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -229,13 +232,13 @@ struct ieee80211_channel {
* @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
*/
enum ieee80211_rate_flags {
- IEEE80211_RATE_SHORT_PREAMBLE = 1<<0,
- IEEE80211_RATE_MANDATORY_A = 1<<1,
- IEEE80211_RATE_MANDATORY_B = 1<<2,
- IEEE80211_RATE_MANDATORY_G = 1<<3,
- IEEE80211_RATE_ERP_G = 1<<4,
- IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5,
- IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6,
+ IEEE80211_RATE_SHORT_PREAMBLE = BIT(0),
+ IEEE80211_RATE_MANDATORY_A = BIT(1),
+ IEEE80211_RATE_MANDATORY_B = BIT(2),
+ IEEE80211_RATE_MANDATORY_G = BIT(3),
+ IEEE80211_RATE_ERP_G = BIT(4),
+ IEEE80211_RATE_SUPPORTS_5MHZ = BIT(5),
+ IEEE80211_RATE_SUPPORTS_10MHZ = BIT(6),
};
/**
@@ -554,7 +557,7 @@ struct ieee80211_sta_s1g_cap {
* @vht_cap: VHT capabilities in this band
* @s1g_cap: S1G capabilities in this band
* @edmg_cap: EDMG capabilities in this band
- * @s1g_cap: S1G capabilities in this band (S1B band only, of course)
+ * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -627,7 +630,7 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
const struct ieee80211_sband_iftype_data *data;
int i;
- if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return NULL;
if (iftype == NL80211_IFTYPE_AP_VLAN)
@@ -682,7 +685,7 @@ ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband,
}
/**
- * ieee80211_get_eht_iftype_cap - return ETH capabilities for an sband's iftype
+ * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype
* @sband: the sband to search for the iftype on
* @iftype: enum nl80211_iftype
*
@@ -783,8 +786,7 @@ struct vif_params {
* @key: key material
* @key_len: length of key material
* @cipher: cipher suite selector
- * @seq: sequence counter (IV/PN) for TKIP and CCMP keys, only used
- * with the get_key() callback, must be in little endian,
+ * @seq: sequence counter (IV/PN), must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
* @vlan_id: vlan_id for VLAN group key (if nonzero)
@@ -815,6 +817,9 @@ struct key_params {
* @punctured: mask of the punctured 20 MHz subchannels, with
* bits turned on being disabled (punctured); numbered
* from lower to higher frequency (like in the spec)
+ * @s1g_primary_2mhz: Indicates whether the control channel pointed to
+ * by @chan is a 1 MHz primary subchannel within an S1G 2 MHz
+ * primary channel.
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -824,6 +829,7 @@ struct cfg80211_chan_def {
struct ieee80211_edmg edmg;
u16 freq1_offset;
u16 punctured;
+ bool s1g_primary_2mhz;
};
/*
@@ -835,9 +841,12 @@ struct cfg80211_bitrate_mask {
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
u16 he_mcs[NL80211_HE_NSS_MAX];
+ u16 eht_mcs[NL80211_EHT_NSS_MAX];
enum nl80211_txrate_gi gi;
enum nl80211_he_gi he_gi;
+ enum nl80211_eht_gi eht_gi;
enum nl80211_he_ltf he_ltf;
+ enum nl80211_eht_ltf eht_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -965,7 +974,8 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
chandef1->center_freq1 == chandef2->center_freq1 &&
chandef1->freq1_offset == chandef2->freq1_offset &&
chandef1->center_freq2 == chandef2->center_freq2 &&
- chandef1->punctured == chandef2->punctured);
+ chandef1->punctured == chandef2->punctured &&
+ chandef1->s1g_primary_2mhz == chandef2->s1g_primary_2mhz);
}
/**
@@ -982,6 +992,18 @@ cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
}
/**
+ * cfg80211_chandef_is_s1g - check if chandef represents an S1G channel
+ * @chandef: the channel definition
+ *
+ * Return: %true if S1G.
+ */
+static inline bool
+cfg80211_chandef_is_s1g(const struct cfg80211_chan_def *chandef)
+{
+ return chandef->chan->band == NL80211_BAND_S1GHZ;
+}
+
+/**
* cfg80211_chandef_compatible - check if two channel definitions are compatible
* @chandef1: first channel definition
* @chandef2: second channel definition
@@ -993,6 +1015,7 @@ const struct cfg80211_chan_def *
cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2);
+
/**
* nl80211_chan_width_to_mhz - get the channel width in MHz
* @chan_width: the channel width from &enum nl80211_chan_width
@@ -1003,6 +1026,17 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
/**
+ * cfg80211_chandef_get_width - return chandef width in MHz
+ * @c: chandef to return bandwidth for
+ * Return: channel width in MHz for the given chandef; note that it returns
+ * 80 for 80+80 configurations
+ */
+static inline int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+ return nl80211_chan_width_to_mhz(c->width);
+}
+
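
A usage sketch combining the two helpers added above; the enclosing
function and its 20 MHz floor are illustrative assumptions:

static bool chandef_width_ok(const struct cfg80211_chan_def *c)
{
	/* S1G chandefs are 1-16 MHz by design, skip the 20 MHz check */
	if (cfg80211_chandef_is_s1g(c))
		return true;

	return cfg80211_chandef_get_width(c) >= 20;
}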
+/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
@@ -1080,43 +1114,6 @@ int cfg80211_chandef_primary(const struct cfg80211_chan_def *chandef,
int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef);
/**
- * ieee80211_chanwidth_rate_flags - return rate flags for channel width
- * @width: the channel width of the channel
- *
- * In some channel types, not all rates may be used - for example CCK
- * rates may not be used in 5/10 MHz channels.
- *
- * Returns: rate flags which apply for this channel width
- */
-static inline enum ieee80211_rate_flags
-ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_5:
- return IEEE80211_RATE_SUPPORTS_5MHZ;
- case NL80211_CHAN_WIDTH_10:
- return IEEE80211_RATE_SUPPORTS_10MHZ;
- default:
- break;
- }
- return 0;
-}
-
-/**
- * ieee80211_chandef_rate_flags - returns rate flags for a channel
- * @chandef: channel definition for the channel
- *
- * See ieee80211_chanwidth_rate_flags().
- *
- * Returns: rate flags which apply for this channel
- */
-static inline enum ieee80211_rate_flags
-ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
-{
- return ieee80211_chanwidth_rate_flags(chandef->width);
-}
-
-/**
* ieee80211_chandef_max_power - maximum transmission power for the chandef
*
* In some regulations, the transmit power may depend on the configured channel
@@ -1283,11 +1280,13 @@ struct cfg80211_crypto_settings {
* struct cfg80211_mbssid_config - AP settings for multi bssid
*
* @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @tx_link_id: link ID of the transmitted profile in an MLD.
* @index: index of this AP in the multi bssid group.
* @ema: set to true if the beacons should be sent out in EMA mode.
*/
struct cfg80211_mbssid_config {
struct wireless_dev *tx_wdev;
+ u8 tx_link_id;
u8 index;
bool ema;
};
@@ -1442,6 +1441,23 @@ struct cfg80211_unsol_bcast_probe_resp {
};
/**
+ * struct cfg80211_s1g_short_beacon - S1G short beacon data.
+ *
+ * @update: Set to true if the feature configuration should be updated.
+ * @short_head: Short beacon head.
+ * @short_tail: Short beacon tail.
+ * @short_head_len: Short beacon head length.
+ * @short_tail_len: Short beacon tail length.
+ */
+struct cfg80211_s1g_short_beacon {
+ bool update;
+ const u8 *short_head;
+ const u8 *short_tail;
+ size_t short_head_len;
+ size_t short_tail_len;
+};
+
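
An illustrative filler for the new structure (the function and buffer
parameters are assumptions, not part of the patch):

static void my_fill_short_beacon(struct cfg80211_s1g_short_beacon *sb,
				 const u8 *head, size_t head_len,
				 const u8 *tail, size_t tail_len)
{
	sb->update = true;		/* apply this configuration */
	sb->short_head = head;
	sb->short_head_len = head_len;
	sb->short_tail = tail;
	sb->short_tail_len = tail_len;
}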
+/**
* struct cfg80211_ap_settings - AP configuration
*
* Used to configure an AP interface.
@@ -1457,7 +1473,6 @@ struct cfg80211_unsol_bcast_probe_resp {
* @crypto: crypto settings
* @privacy: the BSS uses privacy
* @auth_type: Authentication type (algorithm)
- * @smps_mode: SMPS mode
* @inactivity_timeout: time in seconds to determine station's inactivity.
* @p2p_ctwindow: P2P CT Window
* @p2p_opp_ps: P2P opportunistic PS
@@ -1482,6 +1497,8 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
+ * @s1g_long_beacon_period: S1G long beacon period
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1495,7 +1512,6 @@ struct cfg80211_ap_settings {
struct cfg80211_crypto_settings crypto;
bool privacy;
enum nl80211_auth_type auth_type;
- enum nl80211_smps_mode smps_mode;
int inactivity_timeout;
u8 p2p_ctwindow;
bool p2p_opp_ps;
@@ -1516,6 +1532,8 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
+ u8 s1g_long_beacon_period;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
@@ -1527,11 +1545,13 @@ struct cfg80211_ap_settings {
* @beacon: beacon data
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @s1g_short_beacon: S1G short beacon data
*/
struct cfg80211_ap_update {
struct cfg80211_beacon_data beacon;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_s1g_short_beacon s1g_short_beacon;
};
/**
@@ -1546,6 +1566,7 @@ struct cfg80211_ap_update {
* @n_counter_offsets_beacon: number of csa counters the beacon (tail)
* @n_counter_offsets_presp: number of csa counters in the probe response
* @beacon_after: beacon data to be used on the new channel
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
@@ -1560,6 +1581,7 @@ struct cfg80211_csa_settings {
unsigned int n_counter_offsets_beacon;
unsigned int n_counter_offsets_presp;
struct cfg80211_beacon_data beacon_after;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
bool radar_required;
bool block_tx;
u8 count;
@@ -1575,6 +1597,7 @@ struct cfg80211_csa_settings {
* @counter_offset_beacon: offsets of the counters within the beacon (tail)
* @counter_offset_presp: offsets of the counters within the probe response
* @beacon_next: beacon data to be used after the color change
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @count: number of beacons until the color change
* @color: the color used after the change
* @link_id: defines the link on which color change is expected during MLO.
@@ -1585,6 +1608,7 @@ struct cfg80211_color_change_settings {
u16 counter_offset_beacon;
u16 counter_offset_presp;
struct cfg80211_beacon_data beacon_next;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
u8 count;
u8 color;
u8 link_id;
@@ -1595,6 +1619,7 @@ struct cfg80211_color_change_settings {
*
* Used to pass interface combination parameters
*
+ * @radio_idx: wiphy radio index or -1 for global
* @num_different_channels: the number of different channels we want
* to use for verification
* @radar_detect: a bitmap where each bit corresponds to a channel
@@ -1608,6 +1633,7 @@ struct cfg80211_color_change_settings {
* the verification
*/
struct iface_combination_params {
+ int radio_idx;
int num_different_channels;
u8 radar_detect;
int iftype_num[NUM_NL80211_IFTYPES];
@@ -1671,6 +1697,7 @@ struct sta_txpwr {
* @he_6ghz_capa: HE 6 GHz Band capabilities of station
* @eht_capa: EHT capabilities of station
* @eht_capa_len: the length of the EHT capabilities
+ * @s1g_capa: S1G capabilities of station
*/
struct link_station_parameters {
const u8 *mld_mac;
@@ -1689,6 +1716,7 @@ struct link_station_parameters {
const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_eht_cap_elem *eht_capa;
u8 eht_capa_len;
+ const struct ieee80211_s1g_cap *s1g_capa;
};
/**
@@ -1753,6 +1781,9 @@ struct cfg80211_ttlm_params {
* @supported_oper_classes_len: number of supported operating classes
* @support_p2p_ps: information if station supports P2P PS mechanism
* @airtime_weight: airtime scheduler weight for this station
+ * @eml_cap_present: Specifies if EML capabilities field (@eml_cap) is
+ * present/updated
+ * @eml_cap: EML capabilities of this station
* @link_sta_params: link related params.
*/
struct station_parameters {
@@ -1777,6 +1808,8 @@ struct station_parameters {
u8 supported_oper_classes_len;
int support_p2p_ps;
u16 airtime_weight;
+ bool eml_cap_present;
+ u16 eml_cap;
struct link_station_parameters link_sta_params;
};
@@ -1957,9 +1990,9 @@ struct rate_info {
* @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled
*/
enum bss_param_flags {
- BSS_PARAM_FLAGS_CTS_PROT = 1<<0,
- BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1,
- BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2,
+ BSS_PARAM_FLAGS_CTS_PROT = BIT(0),
+ BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1),
+ BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2),
};
/**
@@ -2031,6 +2064,99 @@ struct cfg80211_tid_stats {
#define IEEE80211_MAX_CHAINS 4
/**
+ * struct link_station_info - link station information
+ *
+ * Link station information filled by driver for get_station() and
+ * dump_station().
+ * @filled: bit flag of flags using the bits of &enum nl80211_sta_info to
+ * indicate the relevant values in this struct for them
+ * @connected_time: time (in secs) since the link of the station last
+ * connected
+ * @inactive_time: time since last activity (tx/rx) for the link of the
+ * station, in milliseconds
+ * @assoc_at: boottime (ns) of the last association of the link of the station
+ * @rx_bytes: bytes (size of MPDUs) received from this link of station
+ * @tx_bytes: bytes (size of MPDUs) transmitted to this link of station
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's
+ * signal_type. For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
+ * @txrate: current unicast bitrate from this link of station
+ * @rxrate: current unicast bitrate to this link of station
+ * @rx_packets: packets (MSDUs & MMPDUs) received from this link of station
+ * @tx_packets: packets (MSDUs & MMPDUs) transmitted to this link of station
+ * @tx_retries: cumulative retry counts (MPDUs) for this link of station
+ * @tx_failed: number of failed transmissions (MPDUs) (retries exceeded, no ACK)
+ * @rx_dropped_misc: Dropped for unspecified reasons.
+ * @bss_param: current BSS parameters
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
+ * @expected_throughput: expected throughput in kbps (including 802.11 headers)
+ * towards this station.
+ * @rx_beacon: number of beacons received from this peer
+ * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
+ * from this peer
+ * @rx_duration: aggregate PPDU duration (usecs) for all the frames from a peer
+ * @tx_duration: aggregate PPDU duration (usecs) for all the frames to a peer
+ * @airtime_weight: current airtime scheduling weight
+ * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
+ * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
+ * Note that this doesn't use the @filled bit, but is used if non-NULL.
+ * @ack_signal: signal strength (in dBm) of the last ACK frame.
+ * @avg_ack_signal: average RSSI value of the ACK frames for the MSDUs that
+ * have been sent.
+ * @rx_mpdu_count: number of MPDUs received from this station
+ * @fcs_err_count: number of packets (MPDUs) received from this station with
+ * an FCS error. This counter should be incremented only when TA of the
+ * received packet with an FCS error matches the peer MAC address.
+ * @addr: For MLO STA connection, filled with address of the link of station.
+ */
+struct link_station_info {
+ u64 filled;
+ u32 connected_time;
+ u32 inactive_time;
+ u64 assoc_at;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ s8 signal;
+ s8 signal_avg;
+
+ u8 chains;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
+ struct rate_info txrate;
+ struct rate_info rxrate;
+ u32 rx_packets;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 rx_dropped_misc;
+ struct sta_bss_parameters bss_param;
+
+ u32 beacon_loss_count;
+
+ u32 expected_throughput;
+
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
+
+ u16 airtime_weight;
+
+ s8 ack_signal;
+ s8 avg_ack_signal;
+ struct cfg80211_tid_stats *pertid;
+
+ u32 rx_mpdu_count;
+ u32 fcs_err_count;
+
+ u8 addr[ETH_ALEN] __aligned(2);
+};
+
+/**
* struct station_info - station information
*
* Station information filled by driver for get_station() and dump_station.
@@ -2042,9 +2168,6 @@ struct cfg80211_tid_stats {
* @assoc_at: boottime (ns) of the last association
* @rx_bytes: bytes (size of MPDUs) received from this station
* @tx_bytes: bytes (size of MPDUs) transmitted to this station
- * @llid: mesh local link id
- * @plid: mesh peer link id
- * @plink_state: mesh peer link state
* @signal: The signal strength, type depends on the wiphy's signal_type.
* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
@@ -2064,14 +2187,20 @@ struct cfg80211_tid_stats {
* This number should increase every time the list of stations
* changes, i.e. when a station is added or removed, so that
* userspace can tell whether it got a consistent snapshot.
+ * @beacon_loss_count: Number of times beacon loss event has triggered.
* @assoc_req_ies: IEs from (Re)Association Request.
* This is used only when in AP mode with drivers that do not use
* user space MLME/SME implementation. The information is provided for
* the cfg80211_new_sta() calls to notify user space of the IEs.
* @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
* @sta_flags: station flags mask & values
- * @beacon_loss_count: Number of times beacon loss event has triggered.
* @t_offset: Time offset of the station relative to this host.
+ * @llid: mesh local link id
+ * @plid: mesh peer link id
+ * @plink_state: mesh peer link state
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
+ * @connected_to_as: true if mesh STA has a path to authentication server
+ * @airtime_link_metric: mesh airtime link metric.
* @local_pm: local mesh STA power save mode
* @peer_pm: peer mesh STA power save mode
* @nonpeer_pm: non-peer mesh STA power save mode
@@ -2080,7 +2209,6 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
- * @connected_to_gate: true if mesh STA has a path to mesh gate
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer
* @airtime_weight: current airtime scheduling weight
@@ -2094,8 +2222,6 @@ struct cfg80211_tid_stats {
* @fcs_err_count: number of packets (MPDUs) received from this station with
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
- * @airtime_link_metric: mesh airtime link metric.
- * @connected_to_as: true if mesh STA has a path to authentication server
* @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled
* by driver. Drivers use this only in cfg80211_new_sta() calls when AP
* MLD's MLME/SME is offloaded to the driver. Drivers won't fill this
@@ -2114,6 +2240,11 @@ struct cfg80211_tid_stats {
* dump_station() callbacks. User space needs this information to determine
* the accepted and rejected affiliated links of the connected station.
* @assoc_resp_ies_len: Length of @assoc_resp_ies buffer in octets.
+ * @valid_links: bitmap of valid links, or 0 for non-MLO. Drivers fill this
+ * information in cfg80211_new_sta(), cfg80211_del_sta_sinfo(),
+ * get_station() and dump_station() callbacks.
+ * @links: references to the link sta entries for an MLO STA; all
+ * link-specific information is accessed through links[link_id].
*/
struct station_info {
u64 filled;
@@ -2122,9 +2253,6 @@ struct station_info {
u64 assoc_at;
u64 rx_bytes;
u64 tx_bytes;
- u16 llid;
- u16 plid;
- u8 plink_state;
s8 signal;
s8 signal_avg;
@@ -2144,41 +2272,46 @@ struct station_info {
int generation;
+ u32 beacon_loss_count;
+
const u8 *assoc_req_ies;
size_t assoc_req_ies_len;
- u32 beacon_loss_count;
s64 t_offset;
+ u16 llid;
+ u16 plid;
+ u8 plink_state;
+ u8 connected_to_gate;
+ u8 connected_to_as;
+ u32 airtime_link_metric;
enum nl80211_mesh_power_mode local_pm;
enum nl80211_mesh_power_mode peer_pm;
enum nl80211_mesh_power_mode nonpeer_pm;
u32 expected_throughput;
- u64 tx_duration;
- u64 rx_duration;
- u64 rx_beacon;
- u8 rx_beacon_signal_avg;
- u8 connected_to_gate;
+ u16 airtime_weight;
- struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
+ struct cfg80211_tid_stats *pertid;
- u16 airtime_weight;
+ u64 tx_duration;
+ u64 rx_duration;
+ u64 rx_beacon;
+ u8 rx_beacon_signal_avg;
u32 rx_mpdu_count;
u32 fcs_err_count;
- u32 airtime_link_metric;
-
- u8 connected_to_as;
-
bool mlo_params_valid;
u8 assoc_link_id;
u8 mld_addr[ETH_ALEN] __aligned(2);
const u8 *assoc_resp_ies;
size_t assoc_resp_ies_len;
+
+ u16 valid_links;
+ struct link_station_info *links[IEEE80211_MLD_MAX_NUM_LINKS];
};
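
With @valid_links and @links in place, per-link statistics can be consumed
as in this sketch (the function name is an assumption):

static void dump_link_signals(const struct station_info *sinfo)
{
	int link_id;

	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
		const struct link_station_info *ls = sinfo->links[link_id];

		if (!(sinfo->valid_links & BIT(link_id)) || !ls)
			continue;

		pr_info("link %d (%pM): signal %d dBm\n",
			link_id, ls->addr, ls->signal);
	}
}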
/**
@@ -2200,7 +2333,7 @@ struct cfg80211_sar_sub_specs {
struct cfg80211_sar_specs {
enum nl80211_sar_type type;
u32 num_sub_specs;
- struct cfg80211_sar_sub_specs sub_specs[];
+ struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs);
};
@@ -2262,17 +2395,19 @@ static inline int cfg80211_get_station(struct net_device *dev,
* @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @MONITOR_FLAG_CONTROL: pass control frames
* @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
- * @MONITOR_FLAG_COOK_FRAMES: report frames after processing
+ * @MONITOR_FLAG_COOK_FRAMES: deprecated, will unconditionally be refused
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
+ * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames
*/
enum monitor_flags {
- MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
- MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
- MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
- MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
- MONITOR_FLAG_OTHER_BSS = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
- MONITOR_FLAG_COOK_FRAMES = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
- MONITOR_FLAG_ACTIVE = 1<<NL80211_MNTR_FLAG_ACTIVE,
+ MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID),
+ MONITOR_FLAG_FCSFAIL = BIT(NL80211_MNTR_FLAG_FCSFAIL),
+ MONITOR_FLAG_PLCPFAIL = BIT(NL80211_MNTR_FLAG_PLCPFAIL),
+ MONITOR_FLAG_CONTROL = BIT(NL80211_MNTR_FLAG_CONTROL),
+ MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS),
+ MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES),
+ MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE),
+ MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX),
};
/**
@@ -2339,6 +2474,29 @@ struct mpath_info {
};
/**
+ * enum wiphy_bss_param_flags - bit positions for supported bss parameters.
+ *
+ * @WIPHY_BSS_PARAM_CTS_PROT: support changing CTS protection.
+ * @WIPHY_BSS_PARAM_SHORT_PREAMBLE: support changing short preamble usage.
+ * @WIPHY_BSS_PARAM_SHORT_SLOT_TIME: support changing short slot time usage.
+ * @WIPHY_BSS_PARAM_BASIC_RATES: support reconfiguring basic rates.
+ * @WIPHY_BSS_PARAM_AP_ISOLATE: support changing AP isolation.
+ * @WIPHY_BSS_PARAM_HT_OPMODE: support changing HT operating mode.
+ * @WIPHY_BSS_PARAM_P2P_CTWINDOW: support reconfiguring ctwindow.
+ * @WIPHY_BSS_PARAM_P2P_OPPPS: support changing P2P opportunistic power-save.
+ */
+enum wiphy_bss_param_flags {
+ WIPHY_BSS_PARAM_CTS_PROT = BIT(0),
+ WIPHY_BSS_PARAM_SHORT_PREAMBLE = BIT(1),
+ WIPHY_BSS_PARAM_SHORT_SLOT_TIME = BIT(2),
+ WIPHY_BSS_PARAM_BASIC_RATES = BIT(3),
+ WIPHY_BSS_PARAM_AP_ISOLATE = BIT(4),
+ WIPHY_BSS_PARAM_HT_OPMODE = BIT(5),
+ WIPHY_BSS_PARAM_P2P_CTWINDOW = BIT(6),
+ WIPHY_BSS_PARAM_P2P_OPPPS = BIT(7),
+};
+
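
A driver advertises the subset it handles in .change_bss() during wiphy
setup, roughly as in this sketch (the chosen flags are an example only):

static void my_wiphy_setup(struct wiphy *wiphy)
{
	wiphy->bss_param_support = WIPHY_BSS_PARAM_CTS_PROT |
				   WIPHY_BSS_PARAM_SHORT_PREAMBLE |
				   WIPHY_BSS_PARAM_SHORT_SLOT_TIME;
}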
+/**
* struct bss_parameters - BSS parameters
*
* Used to change BSS parameters (mainly for AP mode).
@@ -2657,15 +2815,16 @@ struct cfg80211_scan_6ghz_params {
* @wiphy: the wiphy this was for
* @scan_start: time (in jiffies) when the scan started
* @wdev: the wireless device to scan for
- * @info: (internal) information about completed scan
- * @notified: (internal) scan request was notified as done or aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
* @mac_addr: MAC address used with randomisation
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
* @scan_6ghz: relevant for split scan request only,
- * true if this is the second scan request
+ * true if this is a 6 GHz scan request
+ * @first_part: %true if this is the first part of a split scan request or a
+ * scan that was not split. May be %true for a @scan_6ghz scan if no other
+ * channels were requested
* @n_6ghz_params: number of 6 GHz params
* @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
@@ -2689,20 +2848,17 @@ struct cfg80211_scan_request {
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
u8 bssid[ETH_ALEN] __aligned(2);
-
- /* internal */
struct wiphy *wiphy;
unsigned long scan_start;
- struct cfg80211_scan_info info;
- bool notified;
bool no_cck;
bool scan_6ghz;
+ bool first_part;
u32 n_6ghz_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
s8 tsf_report_link_id;
/* keep last */
- struct ieee80211_channel *channels[] __counted_by(n_channels);
+ struct ieee80211_channel *channels[];
};
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
@@ -2838,7 +2994,7 @@ struct cfg80211_sched_scan_request {
struct list_head list;
/* keep last */
- struct ieee80211_channel *channels[];
+ struct ieee80211_channel *channels[] __counted_by(n_channels);
};
/**
@@ -2942,6 +3098,7 @@ struct cfg80211_bss_ies {
* @nontrans_list: list of non-transmitted BSS, if this is a transmitted one
* (multi-BSSID support)
* @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @ts_boottime: timestamp of the last BSS update in nanoseconds since boot
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
@@ -2966,6 +3123,8 @@ struct cfg80211_bss {
s32 signal;
+ u64 ts_boottime;
+
u16 beacon_interval;
u16 capability;
@@ -3018,6 +3177,10 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
*
* @bss: The BSS to authenticate with, the callee must obtain a reference
* to it if it needs to keep it.
+ * @supported_selectors: List of selectors that should be assumed to be
+ * supported by the station.
+ * SAE_H2E must be assumed supported if set to %NULL.
+ * @supported_selectors_len: Length of supported_selectors in octets.
* @auth_type: Authentication type (algorithm)
* @ie: Extra IEs to add to Authentication frame or %NULL
* @ie_len: Length of ie buffer in octets
@@ -3040,6 +3203,8 @@ struct cfg80211_auth_request {
struct cfg80211_bss *bss;
const u8 *ie;
size_t ie_len;
+ const u8 *supported_selectors;
+ u8 supported_selectors_len;
enum nl80211_auth_type auth_type;
const u8 *key;
u8 key_len;
@@ -3070,6 +3235,19 @@ struct cfg80211_assoc_link {
};
/**
+ * struct cfg80211_ml_reconf_req - MLO link reconfiguration request
+ * @add_links: data for links to add, see &struct cfg80211_assoc_link
+ * @rem_links: bitmap of links to remove
+ * @ext_mld_capa_ops: extended MLD capabilities and operations set by
+ * userspace for the ML reconfiguration action frame
+ */
+struct cfg80211_ml_reconf_req {
+ struct cfg80211_assoc_link add_links[IEEE80211_MLD_MAX_NUM_LINKS];
+ u16 rem_links;
+ u16 ext_mld_capa_ops;
+};
+
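
A minimal request sketch: remove link 1 from the association while adding
nothing (zeroed @add_links entries are simply unused; the function is
hypothetical):

static void my_build_reconf_req(struct cfg80211_ml_reconf_req *req)
{
	memset(req, 0, sizeof(*req));
	req->rem_links = BIT(1);	/* bitmap: drop link ID 1 */
}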
+/**
* enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
*
* @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
@@ -3119,6 +3297,10 @@ enum cfg80211_assoc_req_flags {
* included in the Current AP address field of the Reassociation Request
* frame.
* @flags: See &enum cfg80211_assoc_req_flags
+ * @supported_selectors: supported BSS selectors in IEEE 802.11 format
+ * (or %NULL for no change).
+ * If %NULL, then support for SAE_H2E should be assumed.
+ * @supported_selectors_len: number of supported BSS selectors
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
* will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
@@ -3137,6 +3319,8 @@ enum cfg80211_assoc_req_flags {
* the link on which the association request should be sent
* @ap_mld_addr: AP MLD address in case of MLO association request,
* valid iff @link_id >= 0
+ * @ext_mld_capa_ops: extended MLD capabilities and operations set by
+ * userspace for the association
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -3145,6 +3329,8 @@ struct cfg80211_assoc_request {
struct cfg80211_crypto_settings crypto;
bool use_mfp;
u32 flags;
+ const u8 *supported_selectors;
+ u8 supported_selectors_len;
struct ieee80211_ht_cap ht_capa;
struct ieee80211_ht_cap ht_capa_mask;
struct ieee80211_vht_cap vht_capa, vht_capa_mask;
@@ -3155,6 +3341,7 @@ struct cfg80211_assoc_request {
struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS];
const u8 *ap_mld_addr;
s8 link_id;
+ u16 ext_mld_capa_ops;
};
/**
@@ -3399,15 +3586,15 @@ enum cfg80211_connect_params_changed {
* @WIPHY_PARAM_TXQ_QUANTUM: TXQ scheduler quantum
*/
enum wiphy_params_flags {
- WIPHY_PARAM_RETRY_SHORT = 1 << 0,
- WIPHY_PARAM_RETRY_LONG = 1 << 1,
- WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
- WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
- WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
- WIPHY_PARAM_DYN_ACK = 1 << 5,
- WIPHY_PARAM_TXQ_LIMIT = 1 << 6,
- WIPHY_PARAM_TXQ_MEMORY_LIMIT = 1 << 7,
- WIPHY_PARAM_TXQ_QUANTUM = 1 << 8,
+ WIPHY_PARAM_RETRY_SHORT = BIT(0),
+ WIPHY_PARAM_RETRY_LONG = BIT(1),
+ WIPHY_PARAM_FRAG_THRESHOLD = BIT(2),
+ WIPHY_PARAM_RTS_THRESHOLD = BIT(3),
+ WIPHY_PARAM_COVERAGE_CLASS = BIT(4),
+ WIPHY_PARAM_DYN_ACK = BIT(5),
+ WIPHY_PARAM_TXQ_LIMIT = BIT(6),
+ WIPHY_PARAM_TXQ_MEMORY_LIMIT = BIT(7),
+ WIPHY_PARAM_TXQ_QUANTUM = BIT(8),
};
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
@@ -3566,8 +3753,8 @@ struct cfg80211_coalesce_rules {
* @n_rules: number of rules
*/
struct cfg80211_coalesce {
- struct cfg80211_coalesce_rules *rules;
int n_rules;
+ struct cfg80211_coalesce_rules rules[] __counted_by(n_rules);
};
/**
@@ -3582,7 +3769,7 @@ struct cfg80211_coalesce {
struct cfg80211_wowlan_nd_match {
struct cfg80211_ssid ssid;
int n_channels;
- u32 channels[];
+ u32 channels[] __counted_by(n_channels);
};
/**
@@ -3596,7 +3783,7 @@ struct cfg80211_wowlan_nd_match {
*/
struct cfg80211_wowlan_nd_info {
int n_matches;
- struct cfg80211_wowlan_nd_match *matches[];
+ struct cfg80211_wowlan_nd_match *matches[] __counted_by(n_matches);
};
/**
@@ -3740,6 +3927,38 @@ struct cfg80211_qos_map {
};
/**
+ * struct cfg80211_nan_band_config - NAN band specific configuration
+ *
+ * @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used
+ * for NAN operations on this band. For 2.4 GHz band, this is always
+ * channel 6. For 5 GHz band, the channel is either 44 or 149, according
+ * to the regulatory constraints. If the chan pointer is NULL, the entire
+ * band configuration entry is considered invalid and should not be used.
+ * @rssi_close: RSSI close threshold used for NAN state transition algorithm
+ * as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
+ * Transition" of Wi-Fi Aware Specification v4.0. If not
+ * specified (set to 0), default device value is used. The value should
+ * be greater than -60 dBm.
+ * @rssi_middle: RSSI middle threshold used for NAN state transition algorithm,
+ * as described in chapters 3.3.6 and 3.3.7 "NAN Device Role and State
+ * Transition" of Wi-Fi Aware Specification v4.0. If not
+ * specified (set to 0), default device value is used. The value should be
+ * greater than -75 dBm and less than rssi_close.
+ * @awake_dw_interval: Committed DW interval. Valid range: 0-5. 0 indicates
+ * no wakeup for DW and can't be used on the 2.4 GHz band; otherwise the
+ * interval is 2^(n-1).
+ * @disable_scan: If true, the device will not scan this band for cluster
+ * merge. Disabling scan on 2.4 GHz band is not allowed.
+ */
+struct cfg80211_nan_band_config {
+ struct ieee80211_channel *chan;
+ s8 rssi_close;
+ s8 rssi_middle;
+ u8 awake_dw_interval;
+ bool disable_scan;
+};
+
+/**
* struct cfg80211_nan_conf - NAN configuration
*
* This struct defines NAN configuration parameters
@@ -3748,10 +3967,34 @@ struct cfg80211_qos_map {
* @bands: operating bands, a bitmap of &enum nl80211_band values.
* For instance, for NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
+ * @cluster_id: cluster ID used for NAN synchronization. This is a MAC address
+ * that can take a value from 50-6F-9A-01-00-00 to 50-6F-9A-01-FF-FF.
+ * If NULL, the device will pick a random Cluster ID.
+ * @scan_period: period (in seconds) between NAN scans.
+ * @scan_dwell_time: dwell time (in milliseconds) for NAN scans.
+ * @discovery_beacon_interval: interval (in TUs) for discovery beacons.
+ * @enable_dw_notification: flag to enable/disable discovery window
+ * notifications.
+ * @band_cfgs: array of band specific configurations, indexed by
+ * &enum nl80211_band values.
+ * @extra_nan_attrs: pointer to additional NAN attributes.
+ * @extra_nan_attrs_len: length of the additional NAN attributes.
+ * @vendor_elems: pointer to vendor-specific elements.
+ * @vendor_elems_len: length of the vendor-specific elements.
*/
struct cfg80211_nan_conf {
u8 master_pref;
u8 bands;
+ const u8 *cluster_id;
+ u16 scan_period;
+ u16 scan_dwell_time;
+ u8 discovery_beacon_interval;
+ bool enable_dw_notification;
+ struct cfg80211_nan_band_config band_cfgs[NUM_NL80211_BANDS];
+ const u8 *extra_nan_attrs;
+ u16 extra_nan_attrs_len;
+ const u8 *vendor_elems;
+ u16 vendor_elems_len;
};
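
An illustrative configuration under the constraints documented above; the
channel pointers are assumptions, and note that scanning may only be
disabled outside the 2.4 GHz band:

static void my_fill_nan_conf(struct cfg80211_nan_conf *conf,
			     struct ieee80211_channel *chan_2g_6,
			     struct ieee80211_channel *chan_5g_149)
{
	memset(conf, 0, sizeof(*conf));
	conf->master_pref = 128;
	conf->bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ);
	conf->scan_period = 20;		/* seconds between NAN scans */

	conf->band_cfgs[NL80211_BAND_2GHZ].chan = chan_2g_6;
	conf->band_cfgs[NL80211_BAND_2GHZ].awake_dw_interval = 1;

	conf->band_cfgs[NL80211_BAND_5GHZ].chan = chan_5g_149;
	conf->band_cfgs[NL80211_BAND_5GHZ].disable_scan = true;
}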
/**
@@ -3760,10 +4003,17 @@ struct cfg80211_nan_conf {
*
* @CFG80211_NAN_CONF_CHANGED_PREF: master preference
* @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
+ * @CFG80211_NAN_CONF_CHANGED_CONFIG: changed additional configuration.
+ * When this flag is set, it indicates that some additional attribute(s)
+ * (other than master_pref and bands) have been changed. In this case,
+ * all the unchanged attributes will be properly configured to their
+ * previous values. The driver doesn't need to store any
+ * previous configuration besides master_pref and bands.
*/
enum cfg80211_nan_conf_changes {
CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1),
+ CFG80211_NAN_CONF_CHANGED_CONFIG = BIT(2),
};
/**
@@ -4577,6 +4827,18 @@ struct mgmt_frame_regs {
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
* @set_ttlm: set the TID to link mapping.
+ * @set_epcs: Enable/Disable EPCS for station mode.
+ * @get_radio_mask: get bitmask of radios in use.
+ * (invoked with the wiphy mutex held)
+ * @assoc_ml_reconf: Request a non-AP MLO connection to perform ML
+ * reconfiguration, i.e., add and/or remove links to/from the
+ * association using ML reconfiguration action frames. Successfully added
+ * links will be added to the set of valid links. Successfully removed
+ * links will be removed from the set of valid links. The driver must
+ * indicate removed links by calling cfg80211_links_removed() and added
+ * links by calling cfg80211_mlo_reconf_add_done(). When calling
+ * cfg80211_mlo_reconf_add_done() the bss pointer must be given for each
+ * link for which MLO reconfiguration 'add' operation was requested.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4689,6 +4951,7 @@ struct cfg80211_ops {
struct ieee80211_channel *chan);
int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
struct cfg80211_chan_def *chandef);
int (*scan)(struct wiphy *wiphy,
@@ -4720,12 +4983,14 @@ struct cfg80211_ops {
int (*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
int rate[NUM_NL80211_BANDS]);
- int (*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
+ int (*set_wiphy_params)(struct wiphy *wiphy, int radio_idx,
+ u32 changed);
int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int radio_idx,
enum nl80211_tx_power_setting type, int mbm);
int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
- int *dbm);
+ int radio_idx, unsigned int link_id, int *dbm);
void (*rfkill_poll)(struct wiphy *wiphy);
@@ -4787,8 +5052,10 @@ struct cfg80211_ops {
struct wireless_dev *wdev,
struct mgmt_frame_regs *upd);
- int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct wiphy *wiphy, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*sched_scan_start)(struct wiphy *wiphy,
struct net_device *dev,
@@ -4830,9 +5097,9 @@ struct cfg80211_ops {
int (*start_radar_detection)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_chan_def *chandef,
- u32 cac_time_ms);
+ u32 cac_time_ms, int link_id);
void (*end_cac)(struct wiphy *wiphy,
- struct net_device *dev);
+ struct net_device *dev, unsigned int link_id);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -4938,6 +5205,11 @@ struct cfg80211_ops {
struct cfg80211_set_hw_timestamp *hwts);
int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ttlm_params *params);
+ u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev);
+ int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ml_reconf_req *req);
+ int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev,
+ bool val);
};
/*
@@ -5043,7 +5315,9 @@ struct ieee80211_iface_limit {
* struct ieee80211_iface_combination - possible interface combination
*
* With this structure the driver can describe which interface
- * combinations it supports concurrently.
+ * combinations it supports concurrently. When set in a struct wiphy_radio,
+ * the combinations refer to combinations of interfaces currently active on
+ * that radio.
*
* Examples:
*
@@ -5403,6 +5677,94 @@ struct wiphy_iftype_akm_suites {
int n_akm_suites;
};
+/**
+ * struct wiphy_radio_cfg - physical radio config of a wiphy
+ * This structure describes the configurations of a physical radio in a
+ * wiphy. It is used to denote per-radio attributes belonging to a wiphy.
+ *
+ * @rts_threshold: RTS threshold (dot11RTSThreshold);
+ * -1 (default) = RTS/CTS disabled
+ * @radio_debugfsdir: Pointer to debugfs directory containing the radio-
+ * specific parameters.
+ * NULL (default) = Debugfs directory not created
+ */
+struct wiphy_radio_cfg {
+ u32 rts_threshold;
+ struct dentry *radio_debugfsdir;
+};
+
+/**
+ * struct wiphy_radio_freq_range - wiphy frequency range
+ * @start_freq: start range edge frequency (kHz)
+ * @end_freq: end range edge frequency (kHz)
+ */
+struct wiphy_radio_freq_range {
+ u32 start_freq;
+ u32 end_freq;
+};
+
+
+/**
+ * struct wiphy_radio - physical radio of a wiphy
+ * This structure describes a physical radio belonging to a wiphy.
+ * It is used to describe concurrent-channel capabilities. Only one channel
+ * can be active on the radio described by struct wiphy_radio.
+ *
+ * @freq_range: frequency range that the radio can operate on.
+ * @n_freq_range: number of elements in @freq_range
+ *
+ * @iface_combinations: Valid interface combinations array, should not
+ * list single interface types.
+ * @n_iface_combinations: number of entries in @iface_combinations array.
+ *
+ * @antenna_mask: bitmask of antennas connected to this radio.
+ */
+struct wiphy_radio {
+ const struct wiphy_radio_freq_range *freq_range;
+ int n_freq_range;
+
+ const struct ieee80211_iface_combination *iface_combinations;
+ int n_iface_combinations;
+
+ u32 antenna_mask;
+};
+
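
Sketch of describing a hypothetical dual-radio device; frequencies are in
kHz per struct wiphy_radio_freq_range, and the hookup mirrors the
@radio/@n_radio wiphy fields added further below:

static const struct wiphy_radio_freq_range radio_2g_range = {
	.start_freq = 2400000,	/* kHz */
	.end_freq = 2500000,
};

static const struct wiphy_radio_freq_range radio_5g_range = {
	.start_freq = 5150000,
	.end_freq = 5895000,
};

static const struct wiphy_radio my_radios[] = {
	{
		.freq_range = &radio_2g_range,
		.n_freq_range = 1,
		.antenna_mask = 0x3,	/* antennas 0-1 */
	},
	{
		.freq_range = &radio_5g_range,
		.n_freq_range = 1,
		.antenna_mask = 0xc,	/* antennas 2-3 */
	},
};

static void my_setup_radios(struct wiphy *wiphy)
{
	wiphy->radio = my_radios;
	wiphy->n_radio = ARRAY_SIZE(my_radios);
}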
+/**
+ * enum wiphy_nan_flags - NAN capabilities
+ *
+ * @WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC: Device supports NAN configurable
+ * synchronization.
+ * @WIPHY_NAN_FLAGS_USERSPACE_DE: Device doesn't support DE offload.
+ */
+enum wiphy_nan_flags {
+ WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC = BIT(0),
+ WIPHY_NAN_FLAGS_USERSPACE_DE = BIT(1),
+};
+
+/**
+ * struct wiphy_nan_capa - NAN capabilities
+ *
+ * This structure describes the NAN capabilities of a wiphy.
+ *
+ * @flags: NAN capabilities flags, see &enum wiphy_nan_flags
+ * @op_mode: NAN operation mode, as defined in Wi-Fi Aware (TM) specification
+ * Table 81.
+ * @n_antennas: number of antennas supported by the device for Tx/Rx. Lower
+ * nibble indicates the number of TX antennas and upper nibble indicates the
+ * number of RX antennas. Value 0 indicates the information is not
+ * available.
+ * @max_channel_switch_time: maximum channel switch time in milliseconds.
+ * @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM)
+ * specification Table 79 (Capabilities field).
+ */
+struct wiphy_nan_capa {
+ u32 flags;
+ u8 op_mode;
+ u8 n_antennas;
+ u16 max_channel_switch_time;
+ u8 dev_capabilities;
+};
+
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
/**
@@ -5563,6 +5925,11 @@ struct wiphy_iftype_akm_suites {
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
+ * @bss_param_support: bitmask indicating which bss_parameters as defined in
+ * &struct bss_parameters the driver can actually handle in the
+ * .change_bss() callback. The bit positions are defined in &enum
+ * wiphy_bss_param_flags.
+ *
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
@@ -5571,6 +5938,7 @@ struct wiphy_iftype_akm_suites {
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
+ * @nan_capa: NAN capabilities
*
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
@@ -5621,6 +5989,13 @@ struct wiphy_iftype_akm_suites {
* A value of %CFG80211_HW_TIMESTAMP_ALL_PEERS indicates the driver
* supports enabling HW timestamping for all peers (i.e. no need to
* specify a mac address).
+ *
+ * @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This
+ * struct contains a list of all radio specific attributes and should be
+ * used only for multi-radio wiphy.
+ *
+ * @radio: radios belonging to this wiphy
+ * @n_radio: number of radios
*/
struct wiphy {
struct mutex mtx;
@@ -5708,6 +6083,8 @@ struct wiphy {
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request);
+ struct wiphy_radio_cfg *radio_cfg;
+
/* fields below are read-only, assigned by cfg80211 */
const struct ieee80211_regdomain __rcu *regd;
@@ -5739,9 +6116,11 @@ struct wiphy {
u8 max_num_csa_counters;
+ u32 bss_param_support;
u32 bss_select_support;
u8 nan_supported_bands;
+ struct wiphy_nan_capa nan_capa;
u32 txq_limit;
u32 txq_memory_limit;
@@ -5771,6 +6150,9 @@ struct wiphy {
u16 hw_timestamp_max_peers;
+ int n_radio;
+ const struct wiphy_radio *radio;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -5904,7 +6286,7 @@ int wiphy_register(struct wiphy *wiphy);
* @wiphy: the wiphy to check the locking on
* @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit the
+ * Return: the value of the specified RCU-protected pointer, but omit the
* READ_ONCE(), because caller holds the wiphy mutex used for updates.
*/
#define wiphy_dereference(wiphy, p) \
@@ -5978,6 +6360,10 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
mutex_unlock(&wiphy->mtx);
}
+DEFINE_GUARD(wiphy, struct wiphy *,
+ mutex_lock(&_T->mtx),
+ mutex_unlock(&_T->mtx))
+
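
The guard defined above hooks into linux/cleanup.h scope-based locking; a
sketch of its use (the function is hypothetical):

static int frob_under_wiphy_lock(struct wiphy *wiphy)
{
	guard(wiphy)(wiphy);	/* mutex_unlock(&wiphy->mtx) on scope exit */

	/* ... any work requiring the wiphy mutex ... */
	return 0;
}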
struct wiphy_work;
typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
@@ -6054,6 +6440,11 @@ static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
* after wiphy_lock() was called. Therefore, wiphy_cancel_work() can
* use just cancel_work() instead of cancel_work_sync(), it requires
* being in a section protected by wiphy_lock().
+ *
+ * Note that these are scheduled with a timer where the accuracy
+ * becomes worse the further in the future the expiry is. Use
+ * wiphy_hrtimer_work_queue() if the timer must not be late by more
+ * than approximately 10 percent.
*/
void wiphy_delayed_work_queue(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork,
@@ -6082,6 +6473,138 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
struct wiphy_delayed_work *dwork);
/**
+ * wiphy_delayed_work_pending - Find out whether a wiphy delayable
+ * work item is currently pending.
+ *
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work in question
+ *
+ * Return: true if timer is pending, false otherwise
+ *
+ * How wiphy_delayed_work_queue() works is by setting a timer which
+ * when it expires calls wiphy_work_queue() to queue the wiphy work.
+ * Because wiphy_delayed_work_queue() uses mod_timer(), if it is
+ * called twice and the second call happens before the first call
+ * deadline, the work will be rescheduled for the second deadline and
+ * won't run before that.
+ *
+ * wiphy_delayed_work_pending() can be used to detect if calling
+ * wiphy_delayed_work_queue() would start a new work schedule
+ * or delay a previous one. As seen below, it cannot be used to
+ * detect precisely whether the work has finished executing or is
+ * currently executing.
+ *
+ * CPU0 CPU1
+ * wiphy_delayed_work_queue(wk)
+ * mod_timer(wk->timer)
+ * wiphy_delayed_work_pending(wk) -> true
+ *
+ * [...]
+ * expire_timers(wk->timer)
+ * detach_timer(wk->timer)
+ * wiphy_delayed_work_pending(wk) -> false
+ * wk->timer->function() |
+ * wiphy_work_queue(wk) | delayed work pending
+ * list_add_tail() | returns false but
+ * queue_work(cfg80211_wiphy_work) | wk->func() has not
+ * | been run yet
+ * [...] |
+ * cfg80211_wiphy_work() |
+ * wk->func() V
+ *
+ */
+bool wiphy_delayed_work_pending(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
+struct wiphy_hrtimer_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+ struct hrtimer timer;
+};
+
+enum hrtimer_restart wiphy_hrtimer_work_timer(struct hrtimer *t);
+
+static inline void wiphy_hrtimer_work_init(struct wiphy_hrtimer_work *hrwork,
+ wiphy_work_func_t func)
+{
+ hrtimer_setup(&hrwork->timer, wiphy_hrtimer_work_timer,
+ CLOCK_BOOTTIME, HRTIMER_MODE_REL);
+ wiphy_work_init(&hrwork->work, func);
+}
+
+/**
+ * wiphy_hrtimer_work_queue - queue hrtimer work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @hrwork: the high resolution timer worker
+ * @delay: the delay given as a ktime_t
+ *
+ * Please refer to wiphy_delayed_work_queue(). The difference is that
+ * the hrtimer work uses a high resolution timer for scheduling. This
+ * may be needed if timeouts might be scheduled further in the future
+ * and the accuracy of the normal timer is not sufficient.
+ *
+ * Expect a delay of a few milliseconds as the timer is scheduled
+ * with some slack and some more time may pass between queueing the
+ * work and its start.
+ */
+void wiphy_hrtimer_work_queue(struct wiphy *wiphy,
+ struct wiphy_hrtimer_work *hrwork,
+ ktime_t delay);
+
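
A usage sketch for the hrtimer-based work (all names are assumptions):
initialize once, then arm the timer to fire roughly 5 ms later, after
which the work function runs under the wiphy mutex:

static struct wiphy_hrtimer_work my_hrwork;

static void my_hrwork_fn(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* invoked from the wiphy work queue, wiphy mutex held */
}

static void my_setup(void)
{
	wiphy_hrtimer_work_init(&my_hrwork, my_hrwork_fn);	/* once */
}

static void my_arm_timeout(struct wiphy *wiphy)
{
	/* some scheduling slack applies, see the note above */
	wiphy_hrtimer_work_queue(wiphy, &my_hrwork, ms_to_ktime(5));
}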
+/**
+ * wiphy_hrtimer_work_cancel - cancel previously queued hrtimer work
+ * @wiphy: the wiphy, for debug purposes
+ * @hrtimer: the hrtimer work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_hrtimer_work_cancel(struct wiphy *wiphy,
+ struct wiphy_hrtimer_work *hrtimer);
+
+/**
+ * wiphy_hrtimer_work_flush - flush previously queued hrtimer work
+ * @wiphy: the wiphy, for debug purposes
+ * @hrwork: the hrtimer work to flush
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_hrtimer_work_flush(struct wiphy *wiphy,
+ struct wiphy_hrtimer_work *hrwork);
+
+/**
+ * wiphy_hrtimer_work_pending - Find out whether a wiphy hrtimer
+ * work item is currently pending.
+ *
+ * @wiphy: the wiphy, for debug purposes
+ * @hrwork: the hrtimer work in question
+ *
+ * Return: true if timer is pending, false otherwise
+ *
+ * Please refer to the wiphy_delayed_work_pending() documentation as
+ * this is the equivalent function for hrtimer based delayed work
+ * items.
+ */
+bool wiphy_hrtimer_work_pending(struct wiphy *wiphy,
+ struct wiphy_hrtimer_work *hrwork);
+
+/**
+ * enum ieee80211_ap_reg_power - regulatory power for an Access Point
+ *
+ * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
+ */
+enum ieee80211_ap_reg_power {
+ IEEE80211_REG_UNSET_AP,
+ IEEE80211_REG_LPI_AP,
+ IEEE80211_REG_SP_AP,
+ IEEE80211_REG_VLP_AP,
+};
+
+/**
* struct wireless_dev - wireless device state
*
* For netdevs, this structure must be allocated by the driver
@@ -6131,9 +6654,6 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
- * @cac_started: true if DFS channel availability check has been started
- * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
- * @cac_time_ms: CAC time in ms
* @ps: powersave mode is enabled
* @ps_timeout: dynamic powersave timeout
* @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -6157,7 +6677,13 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* unprotected beacon report
* @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr
* @ap and @client for each link
+ * @links.cac_started: true if DFS channel availability check has been
+ * started
+ * @links.cac_start_time: timestamp (jiffies) when the dfs state was
+ * entered.
+ * @links.cac_time_ms: CAC time in ms
* @valid_links: bitmap describing what elements of @links are valid
+ * @radio_mask: Bitmask of radios that this interface is allowed to operate on.
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -6198,11 +6724,6 @@ struct wireless_dev {
u32 owner_nlportid;
bool nl_owner_dead;
- /* FIXME: need to rework radar detection for MLO */
- bool cac_started;
- unsigned long cac_start_time;
- unsigned int cac_time_ms;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -6256,6 +6777,9 @@ struct wireless_dev {
struct {
struct cfg80211_chan_def chandef;
} ocb;
+ struct {
+ u8 cluster_id[ETH_ALEN] __aligned(2);
+ } nan;
} u;
struct {
@@ -6269,8 +6793,14 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss;
} client;
};
+
+ bool cac_started;
+ unsigned long cac_start_time;
+ unsigned int cac_time_ms;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
u16 valid_links;
+
+ u32 radio_mask;
};
static inline const u8 *wdev_address(struct wireless_dev *wdev)
@@ -6358,16 +6888,6 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
- * ieee80211_s1g_channel_width - get allowed channel width from @chan
- *
- * Only allowed for band NL80211_BAND_S1GHZ
- * @chan: channel
- * Return: The allowed channel width for this center_freq
- */
-enum nl80211_chan_width
-ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
-
-/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
@@ -6446,6 +6966,41 @@ static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan)
}
/**
+ * ieee80211_radio_freq_range_valid - Check if the radio supports the
+ * specified frequency range
+ *
+ * @radio: wiphy radio
+ * @freq: the frequency (in KHz) to be queried
+ * @width: the bandwidth (in KHz) to be queried
+ *
+ * Return: whether or not the given frequency range is valid for the given radio
+ */
+bool ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio,
+ u32 freq, u32 width);
+
+/**
+ * cfg80211_radio_chandef_valid - Check if the radio supports the chandef
+ *
+ * @radio: wiphy radio
+ * @chandef: chandef for current channel
+ *
+ * Return: whether or not the given chandef is valid for the given radio
+ */
+bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio,
+ const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_wdev_channel_allowed - Check if the wdev may use the channel
+ *
+ * @wdev: the wireless device
+ * @chan: channel to check
+ *
+ * Return: whether or not the wdev may use the channel
+ */
+bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan);
+
+/**
* ieee80211_get_response_rate - get basic rate for a given rate
*
* @sband: the band to look for rates in
@@ -8296,6 +8851,17 @@ void cfg80211_tx_mgmt_expired(struct wireless_dev *wdev, u64 cookie,
int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
/**
+ * cfg80211_link_sinfo_alloc_tid_stats - allocate per-tid statistics.
+ *
+ * @link_sinfo: the link station information
+ * @gfp: allocation flags
+ *
+ * Return: 0 on success. Non-zero on error.
+ */
+int cfg80211_link_sinfo_alloc_tid_stats(struct link_station_info *link_sinfo,
+ gfp_t gfp);
+
+/**
* cfg80211_sinfo_release_content - release contents of station info
* @sinfo: the station information
*
@@ -8306,6 +8872,13 @@ int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
static inline void cfg80211_sinfo_release_content(struct station_info *sinfo)
{
kfree(sinfo->pertid);
+
+ for (int link_id = 0; link_id < ARRAY_SIZE(sinfo->links); link_id++) {
+ if (sinfo->links[link_id]) {
+ kfree(sinfo->links[link_id]->pertid);
+ kfree(sinfo->links[link_id]);
+ }
+ }
}
/**
@@ -8666,6 +9239,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
* @chandef: chandef for the current channel
* @event: type of event
* @gfp: context flags
+ * @link_id: valid link_id for MLO operation or 0 otherwise.
*
* This function is called when a Channel availability check (CAC) is finished
* or aborted. This must be called to notify the completion of a CAC process,
@@ -8673,7 +9247,8 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
*/
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
- enum nl80211_radar_event event, gfp_t gfp);
+ enum nl80211_radar_event event, gfp_t gfp,
+ unsigned int link_id);
/**
* cfg80211_background_cac_abort - Channel Availability Check offchan abort event
@@ -8708,6 +9283,7 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
/**
* cfg80211_rx_spurious_frame - inform userspace about a spurious frame
* @dev: The device the frame matched to
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @addr: the transmitter address
* @gfp: context flags
*
@@ -8717,13 +9293,14 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_spurious_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_spurious_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_rx_unexpected_4addr_frame - inform about unexpected WDS frame
* @dev: The device the frame matched to
* @addr: the transmitter address
+ * @link_id: the link the frame was received on, -1 if not applicable or unknown
* @gfp: context flags
*
* This function is used in AP mode (only!) to inform userspace that
@@ -8733,8 +9310,8 @@ bool cfg80211_rx_spurious_frame(struct net_device *dev,
* Return: %true if the frame was passed to userspace (or this failed
* for a reason other than not having a subscription.)
*/
-bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
- const u8 *addr, gfp_t gfp);
+bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, const u8 *addr,
+ int link_id, gfp_t gfp);
/**
* cfg80211_probe_status - notify userspace about probe status
@@ -8786,6 +9363,34 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
}
/**
+ * struct cfg80211_beaconing_check_config - beacon check configuration
+ * @iftype: the interface type to check for
+ * @relax: allow IR-relaxation conditions to apply (e.g. another
+ * interface connected already on the same channel)
+ * NOTE: If this is set, wiphy mutex must be held.
+ * @reg_power: &enum ieee80211_ap_reg_power value indicating the
+ * advertised/used 6 GHz regulatory power setting
+ */
+struct cfg80211_beaconing_check_config {
+ enum nl80211_iftype iftype;
+ enum ieee80211_ap_reg_power reg_power;
+ bool relax;
+};
+
+/**
+ * cfg80211_reg_check_beaconing - check if beaconing is allowed
+ * @wiphy: the wiphy
+ * @chandef: the channel definition
+ * @cfg: additional parameters for the checking
+ *
+ * Return: %true if there is no secondary channel or the secondary channel(s)
+ * can be used for beaconing (i.e. is not a radar channel etc.)
+ */
+bool cfg80211_reg_check_beaconing(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ struct cfg80211_beaconing_check_config *cfg);
+
+/**
* cfg80211_reg_can_beacon - check if beaconing is allowed
* @wiphy: the wiphy
* @chandef: the channel definition
@@ -8794,9 +9399,17 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy,
* Return: %true if there is no secondary channel or the secondary channel(s)
* can be used for beaconing (i.e. is not a radar channel etc.)
*/
-bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ };
+
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
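Beyond the iftype-only wrappers, callers that already know the 6 GHz regulatory
power setting can drive the underlying check directly; a minimal sketch (the
chosen iftype and reg_power values are illustrative assumptions only):

	struct cfg80211_beaconing_check_config config = {
		.iftype = NL80211_IFTYPE_AP,
		.reg_power = IEEE80211_REG_LPI_AP,	/* assumed power type */
	};

	if (!cfg80211_reg_check_beaconing(wiphy, chandef, &config))
		return -EINVAL;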
/**
* cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
@@ -8811,9 +9424,18 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
*
* Context: Requires the wiphy mutex to be held.
*/
-bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- enum nl80211_iftype iftype);
+static inline bool
+cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ enum nl80211_iftype iftype)
+{
+ struct cfg80211_beaconing_check_config config = {
+ .iftype = iftype,
+ .relax = true,
+ };
+
+ return cfg80211_reg_check_beaconing(wiphy, chandef, &config);
+}
/**
* cfg80211_ch_switch_notify - update wdev channel and notify userspace
@@ -9155,6 +9777,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data);
+/**
+ * cfg80211_get_radio_idx_by_chan - get the radio index by the channel
+ *
+ * @wiphy: the wiphy
+ * @chan: channel for which the supported radio index is required
+ *
+ * Return: radio index on success or -EINVAL otherwise
+ */
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+ const struct ieee80211_channel *chan);
+
/**
* cfg80211_stop_iface - trigger interface disconnection
@@ -9503,6 +10136,36 @@ static inline int cfg80211_color_change_notify(struct net_device *dev,
}
/**
+ * cfg80211_6ghz_power_type - determine AP regulatory power type
+ * @control: control flags
+ * @client_flags: &enum ieee80211_channel_flags for station mode to enable
+ * SP to LPI fallback, zero otherwise.
+ *
+ * Return: regulatory power type from &enum ieee80211_ap_reg_power
+ */
+static inline enum ieee80211_ap_reg_power
+cfg80211_6ghz_power_type(u8 control, u32 client_flags)
+{
+ switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
+ case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:
+ case IEEE80211_6GHZ_CTRL_REG_AP_ROLE_NOT_RELEVANT:
+ return IEEE80211_REG_LPI_AP;
+ case IEEE80211_6GHZ_CTRL_REG_SP_AP:
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD:
+ return IEEE80211_REG_SP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
+ return IEEE80211_REG_VLP_AP;
+ case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:
+ if (client_flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT)
+ return IEEE80211_REG_LPI_AP;
+ return IEEE80211_REG_SP_AP;
+ default:
+ return IEEE80211_REG_UNSET_AP;
+ }
+}
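A hedged station-side usage sketch: the caller is assumed to have already
located the HE 6 GHz Operation element, and passes its control field plus the
channel flags when the SP-to-LPI fallback should apply:

	static enum ieee80211_ap_reg_power
	sta_ap_power(const struct ieee80211_he_6ghz_oper *oper, u32 chan_flags)
	{
		/* non-zero chan_flags enables the SP-to-LPI fallback for AFC clients */
		return cfg80211_6ghz_power_type(oper->control, chan_flags);
	}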
+
+/**
* cfg80211_links_removed - Notify about removed STA MLD setup links.
* @dev: network device.
* @link_mask: BIT mask of removed STA MLD setup link IDs.
@@ -9516,6 +10179,46 @@ static inline int cfg80211_color_change_notify(struct net_device *dev,
void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
/**
+ * struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
+ * @buf: MLO Reconfiguration Response frame (header + body)
+ * @len: length of the frame data
+ * @driver_initiated: Indicates whether the add links request was initiated by
+ *	the driver. This is set to true when the link reconfiguration request
+ *	was initiated by the driver because handling of AP link recommendation
+ *	requests (e.g. a BTM (BSS Transition Management) request) is offloaded
+ *	to the driver.
+ * @added_links: BIT mask of links successfully added to the association
+ * @links: per-link information indexed by link ID
+ * @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
+ * the pointer moves to cfg80211 in the call to
+ * cfg80211_mlo_reconf_add_done().
+ *
+ * The BSS pointer must be set for each link for which 'add' operation was
+ * requested in the assoc_ml_reconf callback.
+ */
+struct cfg80211_mlo_reconf_done_data {
+ const u8 *buf;
+ size_t len;
+ bool driver_initiated;
+ u16 added_links;
+ struct {
+ struct cfg80211_bss *bss;
+ u8 *addr;
+ } links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result
+ * @dev: network device.
+ * @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data
+ *
+ * Inform cfg80211 and userspace that processing of the ML reconfiguration
+ * request to add links to the association is done.
+ */
+void cfg80211_mlo_reconf_add_done(struct net_device *dev,
+ struct cfg80211_mlo_reconf_done_data *data);
+
+/**
* cfg80211_schedule_channels_check - schedule regulatory check if needed
* @wdev: the wireless device to check
*
@@ -9525,6 +10228,36 @@ void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
*/
void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
+/**
+ * cfg80211_epcs_changed - Notify about a change in EPCS state
+ * @netdev: the wireless device whose EPCS state changed
+ * @enabled: set to true if EPCS was enabled, otherwise set to false.
+ */
+void cfg80211_epcs_changed(struct net_device *netdev, bool enabled);
+
+/**
+ * cfg80211_next_nan_dw_notif - Notify about the next NAN Discovery Window (DW)
+ * @wdev: Pointer to the wireless device structure
+ * @chan: DW channel (6, 44 or 149)
+ * @gfp: Memory allocation flags
+ */
+void cfg80211_next_nan_dw_notif(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, gfp_t gfp);
+
+/**
+ * cfg80211_nan_cluster_joined - Notify about NAN cluster join
+ * @wdev: Pointer to the wireless device structure
+ * @cluster_id: Cluster ID of the NAN cluster that was joined or started
+ * @new_cluster: Indicates if this is a new cluster or an existing one
+ * @gfp: Memory allocation flags
+ *
+ * This function is used to notify user space when a NAN cluster has been
+ * joined, providing the cluster ID and a flag indicating whether it is a
+ * new cluster.
+ */
+void cfg80211_nan_cluster_joined(struct wireless_dev *wdev,
+ const u8 *cluster_id, bool new_cluster,
+ gfp_t gfp);
+
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
@@ -9575,4 +10308,72 @@ ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
void *data);
#endif
+/**
+ * cfg80211_s1g_get_start_freq_khz - get S1G chandef start frequency
+ * @chandef: the chandef to use
+ *
+ * Return: the chandef's starting frequency in kHz
+ */
+static inline u32
+cfg80211_s1g_get_start_freq_khz(const struct cfg80211_chan_def *chandef)
+{
+ u32 bw_mhz = cfg80211_chandef_get_width(chandef);
+ u32 center_khz =
+ MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+ return center_khz - bw_mhz * 500 + 500;
+}
+
+/**
+ * cfg80211_s1g_get_end_freq_khz - get S1G chandef end frequency
+ * @chandef: the chandef to use
+ *
+ * Return: the chandef's ending frequency in kHz
+ */
+static inline u32
+cfg80211_s1g_get_end_freq_khz(const struct cfg80211_chan_def *chandef)
+{
+ u32 bw_mhz = cfg80211_chandef_get_width(chandef);
+ u32 center_khz =
+ MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+ return center_khz + bw_mhz * 500 - 500;
+}
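As a worked example (numbers chosen purely for illustration): a 4 MHz S1G
chandef with center_freq1 = 904 MHz and freq1_offset = 0 gives center_khz =
904000, so the start is 904000 - 4 * 500 + 500 = 902500 kHz and the end is
904000 + 4 * 500 - 500 = 905500 kHz. Note that both helpers return the center
frequency of the lowest/highest 1 MHz subchannel, not the band edges.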
+
+/**
+ * cfg80211_s1g_get_primary_sibling - retrieve the sibling 1MHz subchannel
+ * for an S1G chandef using a 2MHz primary channel.
+ * @wiphy: wiphy the channel belongs to
+ * @chandef: the chandef to use
+ *
+ * When chandef::s1g_primary_2mhz is set to true, we are operating on a 2MHz
+ * primary channel. The 1MHz subchannel designated by the primary channel
+ * location exists within chandef::chan, whilst the 'sibling' is the other
+ * 1MHz subchannel that makes up the 2MHz primary channel.
+ *
+ * Return: the sibling 1MHz &struct ieee80211_channel, or %NULL on failure.
+ */
+static inline struct ieee80211_channel *
+cfg80211_s1g_get_primary_sibling(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
+{
+ int width_mhz = cfg80211_chandef_get_width(chandef);
+ u32 pri_1mhz_khz, sibling_1mhz_khz, op_low_1mhz_khz, pri_index;
+
+ if (!chandef->s1g_primary_2mhz || width_mhz < 2)
+ return NULL;
+
+ pri_1mhz_khz = ieee80211_channel_to_khz(chandef->chan);
+ op_low_1mhz_khz = cfg80211_s1g_get_start_freq_khz(chandef);
+
+ /*
+ * Compute the index of the primary 1 MHz subchannel within the
+ * operating channel, relative to the lowest 1 MHz center frequency.
+ * Flip the least significant bit to select the even/odd sibling,
+ * then translate that index back into a channel frequency.
+ */
+ pri_index = (pri_1mhz_khz - op_low_1mhz_khz) / 1000;
+ sibling_1mhz_khz = op_low_1mhz_khz + ((pri_index ^ 1) * 1000);
+
+ return ieee80211_get_channel_khz(wiphy, sibling_1mhz_khz);
+}
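Continuing the illustrative numbers above: if the designated 1 MHz primary
(chandef::chan) is centered at 903500 kHz, then pri_index = (903500 - 902500) /
1000 = 1, pri_index ^ 1 = 0, and the sibling returned is the channel centered
at 902500 kHz.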
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 1338cb92c8e7..3cbab35de5ab 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -99,12 +99,6 @@ csum_block_add(__wsum csum, __wsum csum2, int offset)
}
static __always_inline __wsum
-csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
-{
- return csum_block_add(csum, csum2, offset);
-}
-
-static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
@@ -115,12 +109,6 @@ static __always_inline __wsum csum_unfold(__sum16 n)
return (__force __wsum)n;
}
-static __always_inline
-__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
-{
- return csum_partial(buff, len, sum);
-}
-
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
@@ -151,6 +139,12 @@ static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
*csum = csum_add(csum_sub(*csum, old), new);
}
+static inline unsigned short csum_from32to16(unsigned int sum)
+{
+ sum += (sum >> 16) | (sum << 16);
+ return (unsigned short)(sum >> 16);
+}
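The rotate-and-add form folds both 16-bit halves and the end-around carry in a
single addition; e.g. (value assumed for illustration) sum = 0x0001fffe gives
sum + ((sum >> 16) | (sum << 16)) = 0xffffffff, whose high half 0xffff matches
the classic two-step fold 0x0001 + 0xfffe.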
+
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
@@ -158,7 +152,7 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
const __be32 *from, const __be32 *to,
bool pseudohdr);
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
- __wsum diff, bool pseudohdr);
+ __wsum diff, bool pseudohdr, bool ipv6);
static __always_inline
void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index c9111bb2f59b..d6780d7903f4 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -28,7 +28,7 @@
#include <net/request_sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/* known doi values */
#define CIPSO_V4_DOI_UNKNOWN 0x00000000
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 7e78e7d6f015..668aeee9b3f6 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -63,7 +63,7 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
* calls by looking at the number of nested bh disable calls because
 * softirqs always disable bh.
*/
- if (in_serving_softirq()) {
+ if (softirq_count()) {
struct sock *sk = skb_to_full_sk(skb);
/* If there is an sock_cgroup_classid we'll use that. */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 35eb0f884386..cb839e0435a1 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -35,7 +35,7 @@ struct devlink_port_phys_attrs {
/**
* struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
* @controller: Associated controller number
- * @pf: Associated PCI PF number for this port.
+ * @pf: associated PCI function number for the devlink port instance
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_pf_attrs {
@@ -47,8 +47,9 @@ struct devlink_port_pci_pf_attrs {
/**
* struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
* @controller: Associated controller number
- * @pf: Associated PCI PF number for this port.
- * @vf: Associated PCI VF for of the PCI PF for this port.
+ * @pf: associated PCI function number for the devlink port instance
+ * @vf: associated PCI VF number of a PF for the devlink port instance;
+ * VF number starts from 0 for the first PCI virtual function
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_vf_attrs {
@@ -61,8 +62,8 @@ struct devlink_port_pci_vf_attrs {
/**
* struct devlink_port_pci_sf_attrs - devlink port's PCI SF attributes
* @controller: Associated controller number
- * @sf: Associated PCI SF for of the PCI PF for this port.
- * @pf: Associated PCI PF number for this port.
+ * @sf: associated SF number of a PF for the devlink port instance
+ * @pf: associated PCI function number for the devlink port instance
* @external: when set, indicates if a port is for an external controller
*/
struct devlink_port_pci_sf_attrs {
@@ -77,6 +78,9 @@ struct devlink_port_pci_sf_attrs {
* @flavour: flavour of the port
* @split: indicates if this is split port
* @splittable: indicates if the port can be split.
+ * @no_phys_port_name: skip automatic phys_port_name generation; for
+ * compatibility only, newly added driver/port instance
+ * should never set this.
* @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
* @phys: physical port attributes
@@ -86,7 +90,8 @@ struct devlink_port_pci_sf_attrs {
*/
struct devlink_port_attrs {
u8 split:1,
- splittable:1;
+ splittable:1,
+ no_phys_port_name:1;
u32 lanes;
enum devlink_port_flavour flavour;
struct netdev_phys_item_id switch_id;
@@ -117,6 +122,8 @@ struct devlink_rate {
u32 tx_priority;
u32 tx_weight;
+
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
struct devlink_port {
@@ -352,7 +359,7 @@ struct devlink_dpipe_table {
bool resource_valid;
u64 resource_id;
u64 resource_units;
- struct devlink_dpipe_table_ops *table_ops;
+ const struct devlink_dpipe_table_ops *table_ops;
struct rcu_head rcu;
};
@@ -419,17 +426,19 @@ typedef u64 devlink_resource_occ_get_t(void *priv);
#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
- DEVLINK_PARAM_TYPE_U8,
- DEVLINK_PARAM_TYPE_U16,
- DEVLINK_PARAM_TYPE_U32,
- DEVLINK_PARAM_TYPE_STRING,
- DEVLINK_PARAM_TYPE_BOOL,
+ DEVLINK_PARAM_TYPE_U8 = DEVLINK_VAR_ATTR_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16 = DEVLINK_VAR_ATTR_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32 = DEVLINK_VAR_ATTR_TYPE_U32,
+ DEVLINK_PARAM_TYPE_U64 = DEVLINK_VAR_ATTR_TYPE_U64,
+ DEVLINK_PARAM_TYPE_STRING = DEVLINK_VAR_ATTR_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL = DEVLINK_VAR_ATTR_TYPE_FLAG,
};
union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
+ u64 vu64;
char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -470,6 +479,10 @@ struct devlink_flash_notify {
* @set: set parameter value, used for runtime and permanent
* configuration modes
* @validate: validate input value is applicable (within value range, etc.)
+ * @get_default: get parameter default value, used for runtime and permanent
+ * configuration modes
+ * @reset_default: reset parameter to default value, used for runtime and permanent
+ * configuration modes
*
* This struct should be used by the driver to fill the data for
* a parameter it registers.
@@ -481,13 +494,20 @@ struct devlink_param {
enum devlink_param_type type;
unsigned long supported_cmodes;
int (*get)(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int (*set)(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
int (*validate)(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
+ int (*get_default)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
+ int (*reset_default)(struct devlink *devlink, u32 id,
+ enum devlink_param_cmode cmode,
+ struct netlink_ext_ack *extack);
};
struct devlink_param_item {
@@ -499,6 +519,7 @@ struct devlink_param_item {
* until reload.
*/
bool driverinit_value_new_valid;
+ union devlink_param_value driverinit_default;
};
enum devlink_param_generic_id {
@@ -519,6 +540,11 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
+ DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ DEVLINK_PARAM_GENERIC_ID_TOTAL_VFS,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -577,6 +603,21 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME "event_eq_size"
#define DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE DEVLINK_PARAM_TYPE_U32
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_NAME "enable_phc"
+#define DEVLINK_PARAM_GENERIC_ENABLE_PHC_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME "clock_id"
+#define DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE DEVLINK_PARAM_TYPE_U64
+
+#define DEVLINK_PARAM_GENERIC_TOTAL_VFS_NAME "total_vfs"
+#define DEVLINK_PARAM_GENERIC_TOTAL_VFS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME "num_doorbells"
+#define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME "max_mac_per_vf"
+#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE DEVLINK_PARAM_TYPE_U32
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -600,6 +641,37 @@ enum devlink_param_generic_id {
.validate = _validate, \
}
+#define DEVLINK_PARAM_GENERIC_WITH_DEFAULTS(_id, _cmodes, _get, _set, \
+ _validate, _get_default, \
+ _reset_default) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+ .get_default = _get_default, \
+ .reset_default = _reset_default, \
+}
+
+#define DEVLINK_PARAM_DRIVER_WITH_DEFAULTS(_id, _name, _type, _cmodes, \
+ _get, _set, _validate, \
+ _get_default, _reset_default) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+ .get_default = _get_default, \
+ .reset_default = _reset_default, \
+}
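A minimal sketch of a driver instantiating the defaults-aware generic macro
(all foo_* callbacks are hypothetical driver functions):

	static const struct devlink_param foo_params[] = {
		DEVLINK_PARAM_GENERIC_WITH_DEFAULTS(ENABLE_PHC,
			BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			foo_phc_get, foo_phc_set, NULL,
			foo_phc_get_default, foo_phc_reset_default),
	};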
+
/* Identifier of board design */
#define DEVLINK_INFO_VERSION_GENERIC_BOARD_ID "board.id"
/* Revision of board design */
@@ -729,6 +801,10 @@ enum devlink_health_reporter_state {
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
* @test: callback to trigger a test event
+ * @default_graceful_period: default min time (in msec)
+ * between recovery attempts
+ * @default_burst_period: default time (in msec) for
+ * error recoveries before starting the grace period
*/
struct devlink_health_reporter_ops {
@@ -743,6 +819,8 @@ struct devlink_health_reporter_ops {
struct netlink_ext_ack *extack);
int (*test)(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack);
+ u64 default_graceful_period;
+ u64 default_burst_period;
};
/**
@@ -1261,6 +1339,18 @@ enum devlink_trap_group_generic_id {
.min_burst = _min_burst, \
}
+#define devlink_fmsg_put(fmsg, name, value) ( \
+ _Generic((value), \
+ bool : devlink_fmsg_bool_pair_put, \
+ u8 : devlink_fmsg_u8_pair_put, \
+ u16 : devlink_fmsg_u32_pair_put, \
+ u32 : devlink_fmsg_u32_pair_put, \
+ u64 : devlink_fmsg_u64_pair_put, \
+ int : devlink_fmsg_u32_pair_put, \
+ char * : devlink_fmsg_string_pair_put, \
+ const char * : devlink_fmsg_string_pair_put) \
+ (fmsg, name, (value)))
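Since _Generic dispatches on the value's type, a health reporter dump callback
can use one macro for mixed types; a minimal sketch (the priv field names are
assumptions):

	devlink_fmsg_put(fmsg, "tx_timeouts", (u32)priv->tx_timeouts);
	devlink_fmsg_put(fmsg, "fw_fatal", (bool)priv->fw_fatal);
	devlink_fmsg_put(fmsg, "last_event", "tx timeout");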
+
enum {
/* device supports reload operations */
DEVLINK_F_RELOAD = 1UL << 0,
@@ -1469,6 +1559,9 @@ struct devlink_ops {
u32 tx_priority, struct netlink_ext_ack *extack);
int (*rate_leaf_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_leaf_tc_bw_set)(struct devlink_rate *devlink_rate,
+ void *priv, u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int (*rate_node_tx_share_set)(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack);
int (*rate_node_tx_max_set)(struct devlink_rate *devlink_rate, void *priv,
@@ -1477,6 +1570,9 @@ struct devlink_ops {
u32 tx_priority, struct netlink_ext_ack *extack);
int (*rate_node_tx_weight_set)(struct devlink_rate *devlink_rate, void *priv,
u32 tx_weight, struct netlink_ext_ack *extack);
+ int (*rate_node_tc_bw_set)(struct devlink_rate *devlink_rate,
+ void *priv, u32 *tc_bw,
+ struct netlink_ext_ack *extack);
int (*rate_node_new)(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int (*rate_node_del)(struct devlink_rate *rate_node, void *priv,
@@ -1522,6 +1618,7 @@ int devl_trylock(struct devlink *devlink);
void devl_unlock(struct devlink *devlink);
void devl_assert_locked(struct devlink *devlink);
bool devl_lock_is_held(struct devlink *devlink);
+DEFINE_GUARD(devl, struct devlink *, devl_lock(_T), devl_unlock(_T));
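With the guard defined, devlink instance locking can be scope-based; a sketch:

	{
		guard(devl)(devlink);	/* devl_lock() now, devl_unlock() at scope exit */
		/* ... operate on objects protected by the instance lock ... */
	}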
struct ib_device;
@@ -1707,7 +1804,7 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
- struct devlink_port_attrs *devlink_port_attrs);
+ const struct devlink_port_attrs *attrs);
void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
u16 pf, bool external);
void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
@@ -1751,7 +1848,7 @@ void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
int devl_dpipe_table_register(struct devlink *devlink,
const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
+ const struct devlink_dpipe_table_ops *table_ops,
void *priv, bool counter_control_extern);
void devl_dpipe_table_unregister(struct devlink *devlink,
const char *table_name);
@@ -1779,12 +1876,6 @@ int devl_resource_register(struct devlink *devlink,
u64 resource_id,
u64 parent_resource_id,
const struct devlink_resource_size_params *size_params);
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params);
void devl_resources_unregister(struct devlink *devlink);
void devlink_resources_unregister(struct devlink *devlink);
int devl_resource_size_get(struct devlink *devlink,
@@ -1797,15 +1888,8 @@ void devl_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
void *occ_get_priv);
-void devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv);
void devl_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
-
-void devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id);
int devl_params_register(struct devlink *devlink,
const struct devlink_param *params,
size_t params_count);
@@ -1905,22 +1989,22 @@ void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
struct devlink_health_reporter *
devl_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devl_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
void
devl_health_reporter_destroy(struct devlink_health_reporter *reporter);
@@ -2007,6 +2091,7 @@ int devlink_compat_switch_id_get(struct net_device *dev,
int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port);
size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port);
+void devlink_fmsg_dump_skb(struct devlink_fmsg *fmsg, const struct sk_buff *skb);
#else
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index 9707ab54fdd5..58d91ccc56e0 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -6,9 +6,13 @@
#define DEFINE_DROP_REASON(FN, FNe) \
FN(NOT_SPECIFIED) \
FN(NO_SOCKET) \
+ FN(SOCKET_CLOSE) \
+ FN(SOCKET_FILTER) \
+ FN(SOCKET_RCVBUFF) \
+ FN(UNIX_DISCONNECT) \
+ FN(UNIX_SKIP_OOB) \
FN(PKT_TOO_SMALL) \
FN(TCP_CSUM) \
- FN(SOCKET_FILTER) \
FN(UDP_CSUM) \
FN(NETFILTER_DROP) \
FN(OTHERHOST) \
@@ -18,7 +22,6 @@
FN(UNICAST_IN_L2_MULTICAST) \
FN(XFRM_POLICY) \
FN(IP_NOPROTO) \
- FN(SOCKET_RCVBUFF) \
FN(PROTO_MEM) \
FN(TCP_AUTH_HDR) \
FN(TCP_MD5NOTFOUND) \
@@ -36,8 +39,13 @@
FN(TCP_OVERWINDOW) \
FN(TCP_OFOMERGE) \
FN(TCP_RFC7323_PAWS) \
+ FN(TCP_RFC7323_PAWS_ACK) \
+ FN(TCP_RFC7323_TW_PAWS) \
+ FN(TCP_RFC7323_TSECR) \
+ FN(TCP_LISTEN_OVERFLOW) \
FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_INVALID_END_SEQUENCE) \
FN(TCP_INVALID_ACK_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
@@ -55,9 +63,16 @@
FN(NEIGH_FAILED) \
FN(NEIGH_QUEUEFULL) \
FN(NEIGH_DEAD) \
+ FN(NEIGH_HH_FILLFAIL) \
FN(TC_EGRESS) \
FN(SECURITY_HOOK) \
FN(QDISC_DROP) \
+ FN(QDISC_OVERLIMIT) \
+ FN(QDISC_CONGESTED) \
+ FN(CAKE_FLOOD) \
+ FN(FQ_BAND_LIMIT) \
+ FN(FQ_HORIZON_LIMIT) \
+ FN(FQ_FLOW_LIMIT) \
FN(CPU_BACKLOG) \
FN(XDP) \
FN(TC_INGRESS) \
@@ -76,6 +91,10 @@
FN(INVALID_PROTO) \
FN(IP_INADDRERRORS) \
FN(IP_INNOROUTES) \
+ FN(IP_LOCAL_SOURCE) \
+ FN(IP_INVALID_SOURCE) \
+ FN(IP_LOCALNET) \
+ FN(IP_INVALID_DEST) \
FN(PKT_TOO_BIG) \
FN(DUP_FRAG) \
FN(FRAG_REASM_TIMEOUT) \
@@ -92,6 +111,24 @@
FN(PACKET_SOCK_ERROR) \
FN(TC_CHAIN_NOTFOUND) \
FN(TC_RECLASSIFY_LOOP) \
+ FN(VXLAN_INVALID_HDR) \
+ FN(VXLAN_VNI_NOT_FOUND) \
+ FN(MAC_INVALID_SOURCE) \
+ FN(VXLAN_ENTRY_EXISTS) \
+ FN(NO_TX_TARGET) \
+ FN(IP_TUNNEL_ECN) \
+ FN(TUNNEL_TXINFO) \
+ FN(LOCAL_MAC) \
+ FN(ARP_PVLAN_DISABLE) \
+ FN(MAC_IEEE_MAC_CONTROL) \
+ FN(BRIDGE_INGRESS_STP_STATE) \
+ FN(CAN_RX_INVALID_FRAME) \
+ FN(CANFD_RX_INVALID_FRAME) \
+ FN(CANXL_RX_INVALID_FRAME) \
+ FN(PFMEMALLOC) \
+ FN(DUALPI2_STEP_DROP) \
+ FN(PSP_INPUT) \
+ FN(PSP_OUTPUT) \
FNe(MAX)
/**
@@ -116,12 +153,27 @@ enum skb_drop_reason {
* 3) no valid child socket during 3WHS process
*/
SKB_DROP_REASON_NO_SOCKET,
+ /** @SKB_DROP_REASON_SOCKET_CLOSE: socket is close()d */
+ SKB_DROP_REASON_SOCKET_CLOSE,
+ /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
+ SKB_DROP_REASON_SOCKET_FILTER,
+ /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */
+ SKB_DROP_REASON_SOCKET_RCVBUFF,
+ /**
+ * @SKB_DROP_REASON_UNIX_DISCONNECT: recv queue is purged when SOCK_DGRAM
+ * or SOCK_SEQPACKET socket re-connect()s to another socket or notices
+ * during send() that the peer has been close()d.
+ */
+ SKB_DROP_REASON_UNIX_DISCONNECT,
+ /**
+ * @SKB_DROP_REASON_UNIX_SKIP_OOB: Out-Of-Band data is skipped by
+ * recv() without MSG_OOB and thus dropped.
+ */
+ SKB_DROP_REASON_UNIX_SKIP_OOB,
/** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */
SKB_DROP_REASON_PKT_TOO_SMALL,
/** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */
SKB_DROP_REASON_TCP_CSUM,
- /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */
- SKB_DROP_REASON_SOCKET_FILTER,
/** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */
SKB_DROP_REASON_UDP_CSUM,
/** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */
@@ -152,11 +204,9 @@ enum skb_drop_reason {
SKB_DROP_REASON_XFRM_POLICY,
/** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */
SKB_DROP_REASON_IP_NOPROTO,
- /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */
- SKB_DROP_REASON_SOCKET_RCVBUFF,
/**
- * @SKB_DROP_REASON_PROTO_MEM: proto memory limition, such as udp packet
- * drop out of udp_memory_allocated.
+ * @SKB_DROP_REASON_PROTO_MEM: proto memory limitation, such as
+ * udp packet drop out of udp_memory_allocated.
*/
SKB_DROP_REASON_PROTO_MEM,
/**
@@ -217,7 +267,7 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TCP_ZEROWINDOW,
/**
- * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data reveived is already
+ * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received is already
 * received before (spurious retrans may have happened), see
* LINUX_MIB_DELAYEDACKLOST
*/
@@ -238,11 +288,35 @@ enum skb_drop_reason {
* LINUX_MIB_PAWSESTABREJECTED, LINUX_MIB_PAWSACTIVEREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK: PAWS check, old ACK packet.
+ * Corresponds to LINUX_MIB_PAWS_OLD_ACK.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_TW_PAWS: PAWS check, socket is in
+ * TIME_WAIT state.
+ * Corresponds to LINUX_MIB_PAWS_TW_REJECTED.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_TW_PAWS,
+ /**
+ * @SKB_DROP_REASON_TCP_RFC7323_TSECR: PAWS check, invalid TSEcr.
+ * Corresponds to LINUX_MIB_TSECRREJECTED.
+ */
+ SKB_DROP_REASON_TCP_RFC7323_TSECR,
+ /** @SKB_DROP_REASON_TCP_LISTEN_OVERFLOW: listener queue full. */
+ SKB_DROP_REASON_TCP_LISTEN_OVERFLOW,
/** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
SKB_DROP_REASON_TCP_OLD_SEQUENCE,
- /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
+ /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field. */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
/**
+ * @SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE:
+ * Not acceptable END_SEQ field.
+ * Corresponds to LINUX_MIB_BEYOND_WINDOW.
+ */
+ SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE,
+ /**
* @SKB_DROP_REASON_TCP_INVALID_ACK_SEQUENCE: Not acceptable ACK SEQ
* field because ack sequence is not in the window between snd_una
* and snd_nxt
@@ -289,6 +363,8 @@ enum skb_drop_reason {
SKB_DROP_REASON_NEIGH_QUEUEFULL,
/** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */
SKB_DROP_REASON_NEIGH_DEAD,
+ /** @SKB_DROP_REASON_NEIGH_HH_FILLFAIL: failed to fill the device hard header */
+ SKB_DROP_REASON_NEIGH_HH_FILLFAIL,
/** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */
SKB_DROP_REASON_TC_EGRESS,
/** @SKB_DROP_REASON_SECURITY_HOOK: dropped due to security HOOK */
@@ -299,6 +375,36 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_QDISC_DROP,
/**
+ * @SKB_DROP_REASON_QDISC_OVERLIMIT: dropped by qdisc when a qdisc
+ * instance exceeds its total buffer size limit.
+ */
+ SKB_DROP_REASON_QDISC_OVERLIMIT,
+ /**
+ * @SKB_DROP_REASON_QDISC_CONGESTED: dropped by a qdisc AQM algorithm
+ * due to congestion.
+ */
+ SKB_DROP_REASON_QDISC_CONGESTED,
+ /**
+ * @SKB_DROP_REASON_CAKE_FLOOD: dropped by the flood protection part of
+ * CAKE qdisc AQM algorithm (BLUE).
+ */
+ SKB_DROP_REASON_CAKE_FLOOD,
+ /**
+ * @SKB_DROP_REASON_FQ_BAND_LIMIT: dropped by fq qdisc when per band
+ * limit is reached.
+ */
+ SKB_DROP_REASON_FQ_BAND_LIMIT,
+ /**
+ * @SKB_DROP_REASON_FQ_HORIZON_LIMIT: dropped by fq qdisc when packet
+ * timestamp is too far in the future.
+ */
+ SKB_DROP_REASON_FQ_HORIZON_LIMIT,
+ /**
+ * @SKB_DROP_REASON_FQ_FLOW_LIMIT: dropped by fq qdisc when a flow
+ * exceeds its limits.
+ */
+ SKB_DROP_REASON_FQ_FLOW_LIMIT,
+ /**
* @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU
* backlog queue. This can be caused by backlog queue full (see
* netdev_max_backlog in net.rst) or RPS flow limit
@@ -365,6 +471,21 @@ enum skb_drop_reason {
* IPSTATS_MIB_INADDRERRORS
*/
SKB_DROP_REASON_IP_INNOROUTES,
+ /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: the source ip is local */
+ SKB_DROP_REASON_IP_LOCAL_SOURCE,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid:
+ * 1) source ip is multicast or limited broadcast
+ * 2) source ip is zero and not IGMP
+ */
+ SKB_DROP_REASON_IP_INVALID_SOURCE,
+ /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */
+ SKB_DROP_REASON_IP_LOCALNET,
+ /**
+ * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid:
+ * 1) dest ip is 0
+ */
+ SKB_DROP_REASON_IP_INVALID_DEST,
/**
* @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the
* MTU)
@@ -419,6 +540,83 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
/**
+ * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.:
+ * 1) reserved fields are not zero
+ * 2) "I" flag is not set
+ */
+ SKB_DROP_REASON_VXLAN_INVALID_HDR,
+ /** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */
+ SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND,
+ /** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */
+ SKB_DROP_REASON_MAC_INVALID_SOURCE,
+ /**
+ * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static
+ * entry or an entry pointing to a nexthop.
+ */
+ SKB_DROP_REASON_VXLAN_ENTRY_EXISTS,
+ /** @SKB_DROP_REASON_NO_TX_TARGET: no target found for xmit */
+ SKB_DROP_REASON_NO_TX_TARGET,
+ /**
+ * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to
+ * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail.
+ */
+ SKB_DROP_REASON_IP_TUNNEL_ECN,
+ /**
+ * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata
+ * reached a device which is in "external" mode.
+ */
+ SKB_DROP_REASON_TUNNEL_TXINFO,
+ /**
+ * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to
+ * the MAC address of the local netdev.
+ */
+ SKB_DROP_REASON_LOCAL_MAC,
+ /**
+ * @SKB_DROP_REASON_ARP_PVLAN_DISABLE: packet which is not IP is
+ * forwarded to the in_dev, and the proxy_arp_pvlan is not
+ * enabled.
+ */
+ SKB_DROP_REASON_ARP_PVLAN_DISABLE,
+ /**
+ * @SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL: the destination MAC address
+ * is an IEEE MAC Control address.
+ */
+ SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL,
+ /**
+ * @SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE: the STP state of the
+ * ingress bridge port does not allow frames to be forwarded.
+ */
+ SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE,
+ /**
+ * @SKB_DROP_REASON_CAN_RX_INVALID_FRAME: received
+ * non-conformant CAN frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CAN_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANFD_RX_INVALID_FRAME: received
+ * non-conformant CAN-FD frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANFD_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_CANXL_RX_INVALID_FRAME: received
+ * non-conformant CAN-XL frame (or device is unable to receive CAN frames)
+ */
+ SKB_DROP_REASON_CANXL_RX_INVALID_FRAME,
+ /**
+ * @SKB_DROP_REASON_PFMEMALLOC: packet allocated from memory reserve
+ * reached a path or socket not eligible for use of memory reserves
+ */
+ SKB_DROP_REASON_PFMEMALLOC,
+ /**
+ * @SKB_DROP_REASON_DUALPI2_STEP_DROP: dropped by the step drop
+ * threshold of DualPI2 qdisc.
+ */
+ SKB_DROP_REASON_DUALPI2_STEP_DROP,
+ /** @SKB_DROP_REASON_PSP_INPUT: PSP input checks failed */
+ SKB_DROP_REASON_PSP_INPUT,
+ /** @SKB_DROP_REASON_PSP_OUTPUT: PSP output checks failed */
+ SKB_DROP_REASON_PSP_OUTPUT,
+ /**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
*/
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 56cb7be92244..7d3b1a2a6fec 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -18,12 +18,6 @@ enum skb_drop_reason_subsys {
SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
/**
- * @SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR: mac80211 drop reasons
- * for frames still going to monitor, see net/mac80211/drop.h
- */
- SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
-
- /**
* @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons,
* see net/openvswitch/drop.h
*/
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b60e7e410aba..cced1a866757 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -53,11 +53,16 @@ struct tc_action;
#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
+#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
+#define DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE 29
+#define DSA_TAG_PROTO_YT921X_VALUE 30
+#define DSA_TAG_PROTO_MXL_GSW1XX_VALUE 31
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
+ DSA_TAG_PROTO_BRCM_LEGACY_FCS = DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE,
DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
@@ -83,6 +88,9 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
+ DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
+ DSA_TAG_PROTO_YT921X = DSA_TAG_PROTO_YT921X_VALUE,
+ DSA_TAG_PROTO_MXL_GSW1XX = DSA_TAG_PROTO_MXL_GSW1XX_VALUE,
};
struct dsa_switch;
@@ -401,14 +409,18 @@ struct dsa_switch {
*/
u32 configure_vlan_while_not_filtering:1;
- /* If the switch driver always programs the CPU port as egress tagged
- * despite the VLAN configuration indicating otherwise, then setting
- * @untag_bridge_pvid will force the DSA receive path to pop the
- * bridge's default_pvid VLAN tagged frames to offer a consistent
- * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
- * device.
+ /* Pop the default_pvid of VLAN-unaware bridge ports from tagged frames.
+ * DEPRECATED: Do NOT set this field in new drivers. Instead look at
+ * the dsa_software_vlan_untag() comments.
*/
u32 untag_bridge_pvid:1;
+ /* Pop the default_pvid of VLAN-aware bridge ports from tagged frames.
+ * Useful if the switch cannot preserve the VLAN tag as seen on the
+ * wire for user port ingress, and chooses to send all frames as
+ * VLAN-tagged to the CPU, including those which were originally
+ * untagged.
+ */
+ u32 untag_vlan_aware_bridge_pvid:1;
/* Let DSA manage the FDB entries towards the
* CPU, based on the software bridge database.
@@ -879,27 +891,6 @@ struct dsa_switch_ops {
*/
void (*phylink_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
- int port,
- phy_interface_t iface);
- int (*phylink_mac_prepare)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- int (*phylink_mac_finish)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
- void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
struct phylink_link_state *state);
/*
@@ -921,6 +912,8 @@ struct dsa_switch_ops {
void (*get_rmon_stats)(struct dsa_switch *ds, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
+ void (*get_ts_stats)(struct dsa_switch *ds, int port,
+ struct ethtool_ts_stats *ts_stats);
void (*get_stats64)(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s);
void (*get_pause_stats)(struct dsa_switch *ds, int port,
@@ -940,7 +933,7 @@ struct dsa_switch_ops {
* ethtool timestamp info
*/
int (*get_ts_info)(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
/*
* ethtool MAC merge layer
@@ -1003,10 +996,9 @@ struct dsa_switch_ops {
/*
* Port's MAC EEE settings
*/
+ bool (*support_eee)(struct dsa_switch *ds, int port);
int (*set_mac_eee)(struct dsa_switch *ds, int port,
struct ethtool_keee *e);
- int (*get_mac_eee)(struct dsa_switch *ds, int port,
- struct ethtool_keee *e);
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
@@ -1145,9 +1137,10 @@ struct dsa_switch_ops {
* PTP functionality
*/
int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config);
int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
- struct ifreq *ifr);
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void (*port_txtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
@@ -1258,7 +1251,8 @@ struct dsa_switch_ops {
dsa_devlink_param_get, dsa_devlink_param_set, NULL)
int dsa_devlink_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx,
struct netlink_ext_ack *extack);
@@ -1321,11 +1315,6 @@ static inline int dsa_devlink_port_to_port(struct devlink_port *port)
return port->index;
}
-struct dsa_switch_driver {
- struct list_head list;
- const struct dsa_switch_ops *ops;
-};
-
bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db);
@@ -1333,6 +1322,15 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db);
+int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack);
+int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port,
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack);
+int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port,
+ struct net_device *hsr);
+
/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
{
@@ -1398,5 +1396,6 @@ static inline bool dsa_user_dev_check(const struct net_device *dev)
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
+bool dsa_supports_eee(struct dsa_switch *ds, int port);
#endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 0aa331bd2fdb..f8aa1239b4db 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -24,7 +24,10 @@
struct sk_buff;
struct dst_entry {
- struct net_device *dev;
+ union {
+ struct net_device *dev;
+ struct net_device __rcu *dev_rcu;
+ };
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
@@ -240,9 +243,9 @@ static inline void dst_hold(struct dst_entry *dst)
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
- if (unlikely(time != dst->lastuse)) {
+ if (unlikely(time != READ_ONCE(dst->lastuse))) {
dst->__use++;
- dst->lastuse = time;
+ WRITE_ONCE(dst->lastuse, time);
}
}
@@ -307,7 +310,7 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
- * Returns true if dst is refcounted.
+ * Returns: true if dst is refcounted.
*/
static inline bool skb_dst_force(struct sk_buff *skb)
{
@@ -341,7 +344,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
skb->dev = dev;
/*
- * Clear hash so that we can recalulate the hash for the
+ * Clear hash so that we can recalculate the hash for the
 * encapsulated packet, unless we have already determined the hash
* over the L4 4-tuple.
*/
@@ -431,13 +434,24 @@ static inline void dst_link_failure(struct sk_buff *skb)
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
- unsigned long expires = jiffies + timeout;
+ unsigned long old, expires = jiffies + timeout;
if (expires == 0)
expires = 1;
- if (dst->expires == 0 || time_before(expires, dst->expires))
- dst->expires = expires;
+ old = READ_ONCE(dst->expires);
+
+ if (!old || time_before(expires, old))
+ WRITE_ONCE(dst->expires, expires);
+}
+
+static inline unsigned int dst_dev_overhead(struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ if (likely(dst))
+ return LL_RESERVED_SPACE(dst->dev);
+
+ return skb->mac_len;
}
INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *,
@@ -447,7 +461,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *,
/* Output packet to network from transport. */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- return INDIRECT_CALL_INET(skb_dst(skb)->output,
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->output),
ip6_output, ip_output,
net, sk, skb);
}
@@ -457,7 +471,7 @@ INDIRECT_CALLABLE_DECLARE(int ip_local_deliver(struct sk_buff *));
/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
- return INDIRECT_CALL_INET(skb_dst(skb)->input,
+ return INDIRECT_CALL_INET(READ_ONCE(skb_dst(skb)->input),
ip6_input, ip_local_deliver, skb);
}
@@ -467,7 +481,7 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
u32));
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
- if (dst->obsolete)
+ if (READ_ONCE(dst->obsolete))
dst = INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check,
ipv4_dst_check, dst, cookie);
return dst;
@@ -552,6 +566,41 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
+static inline struct net_device *dst_dev(const struct dst_entry *dst)
+{
+ return READ_ONCE(dst->dev);
+}
+
+static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
+{
+ return rcu_dereference(dst->dev_rcu);
+}
+
+static inline struct net *dst_dev_net_rcu(const struct dst_entry *dst)
+{
+ return dev_net_rcu(dst_dev_rcu(dst));
+}
+
+static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
+{
+ return dst_dev(skb_dst(skb));
+}
+
+static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
+{
+ return dst_dev_rcu(skb_dst(skb));
+}
+
+static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
+{
+ return dev_net(skb_dst_dev(skb));
+}
+
+static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
+{
+ return dev_net_rcu(skb_dst_dev_rcu(skb));
+}
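A hedged usage sketch of the RCU accessors (the _rcu variants are only valid
under the RCU read lock):

	struct net_device *dev;
	struct net *net;

	rcu_read_lock();
	dev = skb_dst_dev_rcu(skb);
	net = skb_dst_dev_net_rcu(skb);
	/* ... use dev/net without letting them escape this section ... */
	rcu_read_unlock();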
+
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu, bool confirm_neigh);
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index b4a55d2d5e71..1961699598e2 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -102,7 +102,7 @@ int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
* @dst_cache: the cache
*
* No synchronization is enforced: it must be called only when the cache
- * is unsed.
+ * is unused.
*/
void dst_cache_destroy(struct dst_cache *dst_cache);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 4160731dcb6e..1fc2fb03ce3f 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -3,6 +3,7 @@
#define __NET_DST_METADATA_H 1
#include <linux/skbuff.h>
+#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/macsec.h>
#include <net/dst.h>
@@ -220,9 +221,15 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
int md_size)
{
const struct iphdr *iph = ip_hdr(skb);
+ struct metadata_dst *tun_dst;
+
+ tun_dst = __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ 0, flags, tunnel_id, md_size);
- return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
- 0, flags, tunnel_id, md_size);
+ if (tun_dst && (iph->frag_off & htons(IP_DF)))
+ __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
+ tun_dst->u.tun_info.key.tun_flags);
+ return tun_dst;
}
static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 6d1c8541183d..3a9001a042a5 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -24,7 +24,7 @@ struct dst_ops {
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev);
- struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*negative_advice)(struct sock *sk, struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
diff --git a/include/net/eee.h b/include/net/eee.h
index 84837aba3cd9..cfab1b8bc46a 100644
--- a/include/net/eee.h
+++ b/include/net/eee.h
@@ -13,10 +13,7 @@ struct eee_config {
static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
{
/* eee_enabled is the master on/off */
- if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
- return false;
-
- return true;
+ return eeecfg->eee_enabled && eeecfg->tx_lpi_enabled;
}
static inline void eeecfg_to_eee(struct ethtool_keee *eee,
diff --git a/include/net/erspan.h b/include/net/erspan.h
index 6cb4cbd6a48f..c6209e7b6c96 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -89,7 +89,7 @@ enum erspan_encap_type {
ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */
ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */
ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */
- ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */
+ ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag preserved in frame */
};
#define ERSPAN_V1_MDSIZE 4
@@ -192,7 +192,7 @@ static inline void erspan_build_header(struct sk_buff *skb,
enc_type = ERSPAN_ENCAP_NOVLAN;
/* If mirrored packet has vlan tag, extract tci and
- * perserve vlan header in the mirrored frame.
+ * preserve vlan header in the mirrored frame.
*/
if (eth->h_proto == htons(ETH_P_8021Q)) {
qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index 6d59221ff05a..48aad6128fea 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -28,7 +28,7 @@ enum fib_event_type {
struct fib_notifier_ops {
int family;
struct list_head list;
- unsigned int (*fib_seq_read)(struct net *net);
+ unsigned int (*fib_seq_read)(const struct net *net);
int (*fib_dump)(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
struct module *owner;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index d17855c52ef9..6e68e359ad18 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -43,6 +43,10 @@ struct fib_rule {
struct fib_kuid_range uid_range;
struct fib_rule_port_range sport_range;
struct fib_rule_port_range dport_range;
+ u16 sport_mask;
+ u16 dport_mask;
+ u8 iif_is_l3_master;
+ u8 oif_is_l3_master;
struct rcu_head rcu;
};
@@ -146,6 +150,17 @@ static inline bool fib_rule_port_inrange(const struct fib_rule_port_range *a,
ntohs(port) <= a->end;
}
+static inline bool fib_rule_port_match(const struct fib_rule_port_range *range,
+ u16 port_mask, __be16 port)
+{
+ if ((range->start ^ ntohs(port)) & port_mask)
+ return false;
+ if (!port_mask && fib_rule_port_range_set(range) &&
+ !fib_rule_port_inrange(range, port))
+ return false;
+ return true;
+}
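The mask and the range are alternative encodings: a non-zero port_mask performs
a masked compare against range->start, while a zero mask falls back to the
inclusive range check. For example (values assumed), start = 0x1000 with
port_mask = 0xf000 matches every port in 0x1000-0x1fff.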
+
static inline bool fib_rule_port_range_valid(const struct fib_rule_port_range *a)
{
return a->start != 0 && a->end != 0 && a->end < 0xffff &&
@@ -159,6 +174,12 @@ static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a,
a->end == b->end;
}
+static inline bool
+fib_rule_port_is_range(const struct fib_rule_port_range *range)
+{
+ return range->start != range->end;
+}
+
static inline bool fib_rule_requires_fldissect(struct fib_rule *rule)
{
return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto ||
@@ -176,12 +197,12 @@ int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
-unsigned int fib_rules_seq_read(struct net *net, int family);
+unsigned int fib_rules_seq_read(const struct net *net, int family);
-int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack);
-int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack);
+int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, bool rtnl_held);
+int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack, bool rtnl_held);
INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule,
struct flowi *fl, int flags));
diff --git a/include/net/flow.h b/include/net/flow.h
index 335bbc52171c..ae9481c40063 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -12,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/uidgid.h>
+#include <net/inet_dscp.h>
struct flow_keys;
@@ -32,12 +33,14 @@ struct flowi_common {
int flowic_iif;
int flowic_l3mdev;
__u32 flowic_mark;
- __u8 flowic_tos;
+ dscp_t flowic_dscp;
__u8 flowic_scope;
__u8 flowic_proto;
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
+#define FLOWI_FLAG_L3MDEV_OIF 0x04
+#define FLOWI_FLAG_ANY_SPORT 0x08
__u32 flowic_secid;
kuid_t flowic_uid;
__u32 flowic_multipath_hash;
@@ -68,7 +71,7 @@ struct flowi4 {
#define flowi4_iif __fl_common.flowic_iif
#define flowi4_l3mdev __fl_common.flowic_l3mdev
#define flowi4_mark __fl_common.flowic_mark
-#define flowi4_tos __fl_common.flowic_tos
+#define flowi4_dscp __fl_common.flowic_dscp
#define flowi4_scope __fl_common.flowic_scope
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
@@ -101,7 +104,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_l3mdev = 0;
fl4->flowi4_mark = mark;
- fl4->flowi4_tos = tos;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
fl4->flowi4_scope = scope;
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
@@ -139,7 +142,7 @@ struct flowi6 {
#define flowi6_uid __fl_common.flowic_uid
struct in6_addr daddr;
struct in6_addr saddr;
- /* Note: flowi6_tos is encoded in flowlabel, too. */
+ /* Note: flowi6_dscp is encoded in flowlabel, too. */
__be32 flowlabel;
union flowi_uli uli;
#define fl6_sport uli.ports.sport
@@ -161,7 +164,7 @@ struct flowi {
#define flowi_iif u.__fl_common.flowic_iif
#define flowi_l3mdev u.__fl_common.flowic_l3mdev
#define flowi_mark u.__fl_common.flowic_mark
-#define flowi_tos u.__fl_common.flowic_tos
+#define flowi_dscp u.__fl_common.flowic_dscp
#define flowi_scope u.__fl_common.flowic_scope
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 9ab376d1a677..ced79dc8e856 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -7,6 +7,7 @@
#include <linux/siphash.h>
#include <linux/string.h>
#include <uapi/linux/if_ether.h>
+#include <uapi/linux/pkt_cls.h>
struct bpf_prog;
struct net;
@@ -16,7 +17,8 @@ struct sk_buff;
* struct flow_dissector_key_control:
* @thoff: Transport header offset
* @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_*
- * @flags: Key flags. Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAGENCAPSULATION)
+ * @flags: Key flags.
+ * Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION|F_*)
*/
struct flow_dissector_key_control {
u16 thoff;
@@ -24,9 +26,20 @@ struct flow_dissector_key_control {
u32 flags;
};
-#define FLOW_DIS_IS_FRAGMENT BIT(0)
-#define FLOW_DIS_FIRST_FRAG BIT(1)
-#define FLOW_DIS_ENCAPSULATION BIT(2)
+/* The control flags are kept in sync with TCA_FLOWER_KEY_FLAGS_*, as those
+ * flags are exposed to userspace in some error paths, i.e. unsupported flags.
+ */
+enum flow_dissector_ctrl_flags {
+ FLOW_DIS_IS_FRAGMENT = TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT,
+ FLOW_DIS_FIRST_FRAG = TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
+ FLOW_DIS_F_TUNNEL_CSUM = TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM,
+ FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT,
+ FLOW_DIS_F_TUNNEL_OAM = TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM,
+ FLOW_DIS_F_TUNNEL_CRIT_OPT = TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT,
+
+ /* These flags are internal to the kernel */
+ FLOW_DIS_ENCAPSULATION = (TCA_FLOWER_KEY_FLAGS_MAX << 1),
+};
enum flow_dissect_ret {
FLOW_DISSECT_RET_OUT_GOOD,
@@ -433,6 +446,8 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+u32 flow_hash_from_keys_seed(struct flow_keys *keys,
+ const siphash_key_t *keyval);
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
struct flow_dissector_key_icmp *key_icmp,
const void *data, int thoff, int hlen);
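A minimal sketch of the new seeded hash entry point declared above; the per-caller seed and its initialisation policy are illustrative, not part of this patch:

#include <linux/skbuff.h>
#include <linux/siphash.h>
#include <linux/string.h>
#include <net/flow_dissector.h>

/* Seed would be initialised once, e.g. via net_get_random_once(). */
static siphash_key_t example_hash_seed;

static u32 example_skb_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	/* Same as flow_hash_from_keys(), but with a caller-owned seed. */
	return flow_hash_from_keys_seed(&keys, &example_hash_seed);
}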
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index ec9f80509f60..596ab9791e4d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -472,6 +472,28 @@ static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags,
}
/**
+ * flow_rule_is_supp_enc_control_flags() - check for supported encapsulation control flags
+ * @supp_enc_flags: encapsulation control flags supported by driver
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if only supported control flags are set, false otherwise.
+ */
+static inline bool flow_rule_is_supp_enc_control_flags(const u32 supp_enc_flags,
+ const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ if (likely((enc_ctrl_flags & ~supp_enc_flags) == 0))
+ return true;
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported match on enc_control.flags %#x",
+ enc_ctrl_flags);
+
+ return false;
+}
+
+/**
* flow_rule_has_control_flags() - check for presence of any control flags
* @ctrl_flags: control flags present in rule
* @extack: The netlink extended ACK for reporting errors.
@@ -485,6 +507,19 @@ static inline bool flow_rule_has_control_flags(const u32 ctrl_flags,
}
/**
+ * flow_rule_has_enc_control_flags() - check for presence of any encapsulation control flags
+ * @enc_ctrl_flags: encapsulation control flags present in rule
+ * @extack: The netlink extended ACK for reporting errors.
+ *
+ * Return: true if any encapsulation control flags are set, false otherwise.
+ */
+static inline bool flow_rule_has_enc_control_flags(const u32 enc_ctrl_flags,
+ struct netlink_ext_ack *extack)
+{
+ return !flow_rule_is_supp_enc_control_flags(0, enc_ctrl_flags, extack);
+}
+
+/**
* flow_rule_match_has_control_flags() - match and check for any control flags
* @rule: The flow_rule under evaluation.
* @extack: The netlink extended ACK for reporting errors.
@@ -650,6 +685,7 @@ struct flow_cls_common_offload {
u32 chain_index;
__be16 protocol;
u32 prio;
+ bool skip_sw;
struct netlink_ext_ack *extack;
};
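A minimal sketch of the intended driver-side usage of the two new helpers; the driver here supports no encapsulation control flags at all, and the function itself is illustrative:

static int example_parse_enc_control(const struct flow_rule *rule,
				     struct netlink_ext_ack *extack)
{
	struct flow_match_control match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return 0;

	flow_rule_match_enc_control(rule, &match);

	/* Reject any rule matching on enc_control flags; the helper
	 * fills in the extack message with the offending bits.
	 */
	if (flow_rule_has_enc_control_flags(match.mask->flags, extack))
		return -EOPNOTSUPP;

	return 0;
}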
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9ab49bfeae78..7b84f2cef8b1 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -62,7 +62,7 @@ struct genl_info;
* @small_ops: the small-struct operations supported by this family
* @n_small_ops: number of small-struct operations supported by this family
* @split_ops: the split do/dump form of operation definition
- * @n_split_ops: number of entries in @split_ops, not that with split do/dump
+ * @n_split_ops: number of entries in @split_ops, note that with split do/dump
* ops the number of entries is not the same as number of commands
* @sock_priv_size: the size of per-socket private memory
* @sock_priv_init: the per-socket private memory initializer
@@ -124,7 +124,8 @@ struct genl_family {
* @genlhdr: generic netlink message header
* @attrs: netlink attributes
* @_net: network namespace
- * @user_ptr: user pointers
+ * @ctx: storage space for use by the family
+ * @user_ptr: user pointers (deprecated, use ctx instead)
* @extack: extended ACK report struct
*/
struct genl_info {
@@ -135,7 +136,10 @@ struct genl_info {
struct genlmsghdr * genlhdr;
struct nlattr ** attrs;
possible_net_t _net;
- void * user_ptr[2];
+ union {
+ u8 ctx[NETLINK_CTX_SIZE];
+ void * user_ptr[2];
+ };
struct netlink_ext_ack *extack;
};
@@ -350,7 +354,7 @@ __genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
* such requests) or a struct initialized by genl_info_init_ntf()
* when constructing notifications.
*
- * Returns pointer to new genetlink header.
+ * Returns: pointer to new genetlink header.
*/
static inline void *
genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
@@ -362,7 +366,7 @@ genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
*
- * Returns pointer to netlink header.
+ * Returns: pointer to netlink header.
*/
static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
{
@@ -431,7 +435,7 @@ static inline void genl_dump_check_consistent(struct netlink_callback *cb,
* @flags: netlink message flags
* @cmd: generic netlink command
*
- * Returns pointer to user specific header
+ * Returns: pointer to user specific header
*/
static inline void *genlmsg_put_reply(struct sk_buff *skb,
struct genl_info *info,
@@ -531,13 +535,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: offset of multicast group in groups array
- * @flags: allocation flags
*
* This function must hold the RTNL or rcu_read_lock().
*/
int genlmsg_multicast_allns(const struct genl_family *family,
struct sk_buff *skb, u32 portid,
- unsigned int group, gfp_t flags);
+ unsigned int group);
/**
* genlmsg_unicast - unicast a netlink message
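A minimal sketch of a family stashing per-request state in the new ctx area from its pre_doit hook; the context struct and attribute id are hypothetical:

#define EXAMPLE_ATTR_IFINDEX 1		/* made-up attribute id */

struct example_req_ctx {		/* hypothetical per-request state */
	u32 ifindex;
};

static int example_pre_doit(const struct genl_split_ops *ops,
			    struct sk_buff *skb, struct genl_info *info)
{
	struct example_req_ctx *ctx = (struct example_req_ctx *)info->ctx;

	/* The context struct must fit in NETLINK_CTX_SIZE bytes. */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(info->ctx));

	ctx->ifindex = nla_get_u32(info->attrs[EXAMPLE_ATTR_IFINDEX]);
	return 0;
}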
diff --git a/include/net/gro.h b/include/net/gro.h
index b9b58c1f8d19..b65f631c521d 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -11,6 +11,9 @@
#include <net/udp.h>
#include <net/hotdata.h>
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
struct napi_gro_cb {
union {
struct {
@@ -68,14 +71,11 @@ struct napi_gro_cb {
/* Free the skb? */
u8 free:2;
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
/* Used in GRE, set in fou/gue_gro_receive */
u8 is_fou:1;
/* Used to determine if ipid_offset can be ignored */
- u8 ip_fixedid:1;
+ u8 ip_fixedid:2;
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
@@ -442,29 +442,25 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
}
static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
- struct sk_buff *p, bool outer)
+ struct sk_buff *p, bool inner)
{
const u32 id = ntohl(*(__be32 *)&iph->id);
const u32 id2 = ntohl(*(__be32 *)&iph2->id);
const u16 ipid_offset = (id >> 16) - (id2 >> 16);
const u16 count = NAPI_GRO_CB(p)->count;
- const u32 df = id & IP_DF;
- int flush;
/* All fields must match except length and checksum. */
- flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));
-
- if (flush | (outer && df))
- return flush;
+ if ((iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | ((id ^ id2) & IP_DF))
+ return true;
/* When we receive our second frame we can make a decision on if we
* continue this flow as an atomic flow with a fixed ID or if we use
* an incrementing ID.
*/
- if (count == 1 && df && !ipid_offset)
- NAPI_GRO_CB(p)->ip_fixedid = true;
+ if (count == 1 && !ipid_offset)
+ NAPI_GRO_CB(p)->ip_fixedid |= 1 << inner;
- return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid);
+ return ipid_offset ^ (count * !(NAPI_GRO_CB(p)->ip_fixedid & (1 << inner)));
}
static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2)
@@ -479,7 +475,7 @@ static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr
static inline int __gro_receive_network_flush(const void *th, const void *th2,
struct sk_buff *p, const u16 diff,
- bool outer)
+ bool inner)
{
const void *nh = th - diff;
const void *nh2 = th2 - diff;
@@ -487,47 +483,70 @@ static inline int __gro_receive_network_flush(const void *th, const void *th2,
if (((struct iphdr *)nh)->version == 6)
return ipv6_gro_flush(nh, nh2);
else
- return inet_gro_flush(nh, nh2, p, outer);
+ return inet_gro_flush(nh, nh2, p, inner);
}
static inline int gro_receive_network_flush(const void *th, const void *th2,
struct sk_buff *p)
{
- const bool encap_mark = NAPI_GRO_CB(p)->encap_mark;
int off = skb_transport_offset(p);
int flush;
- flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark);
- if (encap_mark)
- flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false);
+ flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, false);
+ if (NAPI_GRO_CB(p)->encap_mark)
+ flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, true);
return flush;
}
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
+void __gro_flush(struct gro_node *gro, bool flush_old);
+
+static inline void gro_flush(struct gro_node *gro, bool flush_old)
+{
+ if (!gro->bitmask)
+ return;
+
+ __gro_flush(gro, flush_old);
+}
+
+static inline void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+ gro_flush(&napi->gro, flush_old);
+}
/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static inline void gro_normal_list(struct napi_struct *napi)
+static inline void gro_normal_list(struct gro_node *gro)
{
- if (!napi->rx_count)
+ if (!gro->rx_count)
return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
+ netif_receive_skb_list_internal(&gro->rx_list);
+ INIT_LIST_HEAD(&gro->rx_list);
+ gro->rx_count = 0;
+}
+
+static inline void gro_flush_normal(struct gro_node *gro, bool flush_old)
+{
+ gro_flush(gro, flush_old);
+ gro_normal_list(gro);
}
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
-static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
+static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
+ int segs)
{
- list_add_tail(&skb->list, &napi->rx_list);
- napi->rx_count += segs;
- if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
- gro_normal_list(napi);
+ list_add_tail(&skb->list, &gro->rx_list);
+ gro->rx_count += segs;
+ if (gro->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
+ gro_normal_list(gro);
}
+void gro_init(struct gro_node *gro);
+void gro_cleanup(struct gro_node *gro);
+
/* This function is an alternative to the 'inet_iif' and 'inet_sdif'
 * functions for cases where we cannot rely on the fields of IPCB.
*
@@ -574,4 +593,31 @@ static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *
struct packet_offload *gro_find_receive_by_type(__be16 type);
struct packet_offload *gro_find_complete_by_type(__be16 type);
+static inline struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
+{
+ unsigned int thlen, hlen, off;
+ struct tcphdr *th;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*th);
+ th = skb_gro_header(skb, hlen, off);
+ if (unlikely(!th))
+ return NULL;
+
+ thlen = th->doff * 4;
+ if (unlikely(thlen < sizeof(*th)))
+ return NULL;
+
+ hlen = off + thlen;
+ if (!skb_gro_may_pull(skb, hlen)) {
+ th = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!th))
+ return NULL;
+ }
+
+ skb_gro_pull(skb, thlen);
+
+ return th;
+}
+
#endif /* _NET_GRO_H */
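A minimal sketch of the call pattern after the GRO state moves from napi_struct into struct gro_node; both functions are illustrative consumers, not part of the patch:

/* Queue one segment; the batch is pushed up the stack automatically
 * once net_hotdata.gro_normal_batch is reached.
 */
static void example_rx_one(struct gro_node *gro, struct sk_buff *skb)
{
	gro_normal_one(gro, skb, 1);
}

/* End of a poll cycle: flush aged GRO flows, then any batched
 * GRO_NORMAL skbs, in a single call.
 */
static void example_poll_done(struct gro_node *gro)
{
	gro_flush_normal(gro, false);
}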
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index 30e9570beb2a..4acec191c54a 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -2,10 +2,16 @@
#ifndef _NET_HOTDATA_H
#define _NET_HOTDATA_H
+#include <linux/llist.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
+struct skb_defer_node {
+ struct llist_head defer_list;
+ atomic_long_t defer_count;
+} ____cacheline_aligned_in_smp;
+
/* Read mostly data used in network fast paths. */
struct net_hotdata {
#if IS_ENABLED(CONFIG_INET)
@@ -23,7 +29,6 @@ struct net_hotdata {
struct net_offload udpv6_offload;
#endif
struct list_head offload_base;
- struct list_head ptype_all;
struct kmem_cache *skbuff_cache;
struct kmem_cache *skbuff_fclone_cache;
struct kmem_cache *skb_small_head_cache;
@@ -31,6 +36,7 @@ struct net_hotdata {
struct rps_sock_flow_table __rcu *rps_sock_flow_table;
u32 rps_cpu_mask;
#endif
+ struct skb_defer_node __percpu *skb_defer_nodes;
int gro_normal_batch;
int netdev_budget;
int netdev_budget_usecs;
diff --git a/include/net/hwbm.h b/include/net/hwbm.h
index aa495decec35..bdbe91c609ff 100644
--- a/include/net/hwbm.h
+++ b/include/net/hwbm.h
@@ -11,9 +11,9 @@ struct hwbm_pool {
int frag_size;
/* Number of buffers currently used by this pool */
int buf_num;
- /* constructor called during alocation */
+ /* constructor called during allocation */
int (*construct)(struct hwbm_pool *bm_pool, void *buf);
- /* protect acces to the buffer counter*/
+ /* protect access to the buffer counter */
struct mutex buf_lock;
/* private data */
void *priv;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index caddf4a59ad1..935ee13d9ae9 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -37,10 +37,10 @@ struct sk_buff;
struct net;
void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
- const struct ip_options *opt);
+ const struct inet_skb_parm *parm);
static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
- __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+ __icmp_send(skb_in, type, code, info, IPCB(skb_in));
}
#if IS_ENABLED(CONFIG_NF_NAT)
@@ -48,8 +48,10 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
#else
static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
- struct ip_options opts = { 0 };
- __icmp_send(skb_in, type, code, info, &opts);
+ struct inet_skb_parm parm;
+
+ memset(&parm, 0, sizeof(parm));
+ __icmp_send(skb_in, type, code, info, &parm);
}
#endif
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 925bac726a92..c60867e7e43c 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
- * Copyright (c) 2018-2019, 2021-2022 Intel Corporation
+ * Copyright (c) 2018-2019, 2021-2022, 2025 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,31 +18,33 @@
#define __RADIOTAP_H
#include <linux/kernel.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* struct ieee80211_radiotap_header - base radiotap header
*/
struct ieee80211_radiotap_header {
- /**
- * @it_version: radiotap version, always 0
- */
- uint8_t it_version;
-
- /**
- * @it_pad: padding (or alignment)
- */
- uint8_t it_pad;
-
- /**
- * @it_len: overall radiotap header length
- */
- __le16 it_len;
-
- /**
- * @it_present: (first) present word
- */
- __le32 it_present;
+ __struct_group(ieee80211_radiotap_header_fixed, hdr, __packed,
+ /**
+ * @it_version: radiotap version, always 0
+ */
+ uint8_t it_version;
+
+ /**
+ * @it_pad: padding (or alignment)
+ */
+ uint8_t it_pad;
+
+ /**
+ * @it_len: overall radiotap header length
+ */
+ __le16 it_len;
+
+ /**
+ * @it_present: (first) present word
+ */
+ __le32 it_present;
+ );
/**
* @it_optional: all remaining presence bitmaps
@@ -50,6 +52,9 @@ struct ieee80211_radiotap_header {
__le32 it_optional[];
} __packed;
+static_assert(offsetof(struct ieee80211_radiotap_header, it_optional) == sizeof(struct ieee80211_radiotap_header_fixed),
+ "struct member likely outside of __struct_group()");
+
/* version is always 0 */
#define PKTHDR_RADIOTAP_VERSION 0
@@ -197,6 +202,24 @@ enum ieee80211_radiotap_vht_coding {
IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08,
};
+enum ieee80211_radiotap_vht_bandwidth {
+ /* Note: more values are defined but can't really be used */
+ IEEE80211_RADIOTAP_VHT_BW_20 = 0,
+ IEEE80211_RADIOTAP_VHT_BW_40 = 1,
+ IEEE80211_RADIOTAP_VHT_BW_80 = 4,
+ IEEE80211_RADIOTAP_VHT_BW_160 = 11,
+};
+
+struct ieee80211_radiotap_vht {
+ __le16 known;
+ u8 flags;
+ u8 bandwidth;
+ u8 mcs_nss[4];
+ u8 coding;
+ u8 group_id;
+ __le16 partial_aid;
+} __packed;
+
/* for IEEE80211_RADIOTAP_TIMESTAMP */
enum ieee80211_radiotap_timestamp_unit_spos {
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F,
@@ -582,6 +605,7 @@ enum ieee80211_radiotap_eht_usig_tb {
/**
* ieee80211_get_radiotap_len - get radiotap header length
* @data: pointer to the header
+ * Return: the radiotap header length
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
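The __struct_group() split lets code address just the fixed eight bytes of the header; a minimal, illustrative sketch of copying a template header without involving the flexible it_optional[] member:

static void example_write_radiotap_hdr(void *frame,
		const struct ieee80211_radiotap_header_fixed *tmpl)
{
	/* Copies it_version/it_pad/it_len and the first present word
	 * only; FORTIFY_SOURCE no longer warns about a trailing
	 * flexible array, and the static_assert above keeps the two
	 * layouts in sync.
	 */
	memcpy(frame, tmpl, sizeof(*tmpl));
}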
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 025bd8d3c769..745891d2e113 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -21,8 +21,6 @@ struct sockaddr;
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
-void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 533a7337865a..282e29237d93 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -40,8 +40,7 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
*
* The sockhash lock must be held as a reader here.
*/
-struct sock *__inet6_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
+struct sock *__inet6_lookup_established(const struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
@@ -56,7 +55,7 @@ inet6_ehashfn_t inet6_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
-struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
__be16 sport,
@@ -64,8 +63,7 @@ struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
unsigned short hnum,
inet6_ehashfn_t *ehashfn);
-struct sock *inet6_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
+struct sock *inet6_lookup_listener(const struct net *net,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
@@ -73,7 +71,7 @@ struct sock *inet6_lookup_listener(struct net *net,
const unsigned short hnum,
const int dif, const int sdif);
-struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
@@ -82,8 +80,7 @@ struct sock *inet6_lookup_run_sk_lookup(struct net *net,
const u16 hnum, const int dif,
inet6_ehashfn_t *ehashfn);
-static inline struct sock *__inet6_lookup(struct net *net,
- struct inet_hashinfo *hashinfo,
+static inline struct sock *__inet6_lookup(const struct net *net,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
@@ -92,14 +89,14 @@ static inline struct sock *__inet6_lookup(struct net *net,
const int dif, const int sdif,
bool *refcounted)
{
- struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
- sport, daddr, hnum,
+ struct sock *sk = __inet6_lookup_established(net, saddr, sport,
+ daddr, hnum,
dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
- return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ return inet6_lookup_listener(net, skb, doff, saddr, sport,
daddr, hnum, dif, sdif);
}
@@ -143,14 +140,13 @@ struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff,
return reuse_sk;
}
-static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
+static inline struct sock *__inet6_lookup_skb(struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
int iif, int sdif,
bool *refcounted)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = skb_dst_dev_net_rcu(skb);
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct sock *sk;
@@ -161,21 +157,17 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
- return __inet6_lookup(net, hashinfo, skb,
- doff, &ip6h->saddr, sport,
+ return __inet6_lookup(net, skb, doff, &ip6h->saddr, sport,
&ip6h->daddr, ntohs(dport),
iif, sdif, refcounted);
}
-struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
+struct sock *inet6_lookup(const struct net *net, struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
-int inet6_hash(struct sock *sk);
-
-static inline bool inet6_match(struct net *net, const struct sock *sk,
+static inline bool inet6_match(const struct net *net, const struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
const __portpair ports,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index c17a6585d0b0..5dd2bf24449e 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -19,15 +19,14 @@ struct msghdr;
struct net;
struct page;
struct sock;
-struct sockaddr;
struct socket;
int inet_release(struct socket *sock);
-int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
int addr_len, int flags);
-int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
int addr_len, int flags, int is_sendmsg);
-int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock,
struct proto_accept_arg *arg);
@@ -42,8 +41,8 @@ int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
int __inet_listen_sk(struct sock *sk, int backlog);
void inet_sock_destruct(struct sock *sk);
-int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len);
+int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
/* Don't allocate port at this moment, defer to connect. */
#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
/* Grab and release socket lock. */
@@ -52,7 +51,7 @@ int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
#define BIND_FROM_BPF (1 << 2)
/* Skip CAP_NET_BIND_SERVICE check. */
#define BIND_NO_CAP_NET_BIND_SERVICE (1 << 3)
-int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
u32 flags);
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7d6b1254c92d..ecb362025c4e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -44,12 +44,10 @@ struct inet_connection_sock_af_ops {
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
- u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
- void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
void (*mtu_reduced)(struct sock *sk);
};
@@ -58,15 +56,15 @@ struct inet_connection_sock_af_ops {
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_bind2_hash: Bind node in the bhash2 table
- * @icsk_timeout: Timeout
- * @icsk_retransmit_timer: Resend (no ack)
+ * @icsk_delack_timer: Delayed ACK timer
+ * @icsk_keepalive_timer: Keepalive timer
+ * @mptcp_tout_timer: MPTCP timeout timer (overlays @icsk_keepalive_timer)
* @icsk_rto: Retransmit timeout
* @icsk_pmtu_cookie Last pmtu seen by socket
* @icsk_ca_ops Pluggable congestion control hook
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ulp_ops Pluggable ULP control hook
* @icsk_ulp_data ULP private data
- * @icsk_clean_acked Clean acked data hook
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -85,18 +83,20 @@ struct inet_connection_sock {
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
struct inet_bind2_bucket *icsk_bind2_hash;
- unsigned long icsk_timeout;
- struct timer_list icsk_retransmit_timer;
- struct timer_list icsk_delack_timer;
+ struct timer_list icsk_delack_timer;
+ union {
+ struct timer_list icsk_keepalive_timer;
+ struct timer_list mptcp_tout_timer;
+ };
__u32 icsk_rto;
__u32 icsk_rto_min;
+ u32 icsk_rto_max;
__u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
const struct tcp_ulp_ops *icsk_ulp_ops;
void __rcu *icsk_ulp_data;
- void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state:5,
icsk_ca_initialized:1,
@@ -116,8 +116,8 @@ struct inet_connection_sock {
#define ATO_BITS 8
__u32 ato:ATO_BITS, /* Predicted tick of soft clock */
lrcv_flowlabel:20, /* last received ipv6 flowlabel */
- unused:4;
- unsigned long timeout; /* Currently scheduled timeout */
+ dst_quick_ack:1, /* cache dst RTAX_QUICKACK */
+ unused:3;
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
@@ -189,20 +189,28 @@ static inline void inet_csk_delack_init(struct sock *sk)
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
-void inet_csk_delete_keepalive_timer(struct sock *sk);
-void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+static inline unsigned long tcp_timeout_expires(const struct sock *sk)
+{
+ return READ_ONCE(sk->tcp_retransmit_timer.expires);
+}
+
+static inline unsigned long
+icsk_delack_timeout(const struct inet_connection_sock *icsk)
+{
+ return READ_ONCE(icsk->icsk_delack_timer.expires);
+}
static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) {
- icsk->icsk_pending = 0;
+ smp_store_release(&icsk->icsk_pending, 0);
#ifdef INET_CSK_CLEAR_TIMERS
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->tcp_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending = 0;
+ smp_store_release(&icsk->icsk_ack.pending, 0);
icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
@@ -227,15 +235,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
when = max_when;
}
+ when += jiffies;
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
- icsk->icsk_pending = what;
- icsk->icsk_timeout = jiffies + when;
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
+ smp_store_release(&icsk->icsk_pending, what);
+ sk_reset_timer(sk, &sk->tcp_retransmit_timer, when);
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
- icsk->icsk_ack.timeout = jiffies + when;
- sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+ smp_store_release(&icsk->icsk_ack.pending,
+ icsk->icsk_ack.pending | ICSK_ACK_TIMER);
+ sk_reset_timer(sk, &icsk->icsk_delack_timer, when);
} else {
pr_debug("inet_csk BUG: unknown timer value\n");
}
@@ -263,8 +271,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child);
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
- unsigned long timeout);
+bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
struct request_sock *req,
bool own_req);
@@ -281,28 +288,14 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
- return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog);
+ return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);
}
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
-static inline unsigned long
-reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
-{
- u64 timeout = (u64)req->timeout << req->num_timeout;
-
- return (unsigned long)min_t(u64, timeout, max_timeout);
-}
-
-static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
-{
- /* The below has to be done to allow calling inet_csk_destroy_sock */
- sock_set_flag(sk, SOCK_DEAD);
- this_cpu_inc(*sk->sk_prot->orphan_count);
-}
-
void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_for_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
/*
@@ -317,11 +310,10 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
-void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
-
/* update the fast reuse flag when adding a socket */
-void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
- struct sock *sk);
+void inet_csk_update_fastreuse(const struct sock *sk,
+ struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
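The smp_store_release() writes above pair with acquire loads on lockless readers; a minimal sketch of such a reader, illustrative and assuming icsk_pending is inspected without the socket lock:

static bool example_retransmit_pending(const struct sock *sk)
{
	/* Pairs with smp_store_release() in inet_csk_clear_xmit_timer()
	 * and inet_csk_reset_xmit_timer().
	 */
	return smp_load_acquire(&inet_csk(sk)->icsk_pending) ==
	       ICSK_TIME_RETRANS;
}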
diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h
index 72f250dffada..1aa9f04ed1ab 100644
--- a/include/net/inet_dscp.h
+++ b/include/net/inet_dscp.h
@@ -39,6 +39,12 @@ typedef u8 __bitwise dscp_t;
#define INET_DSCP_MASK 0xfc
+/* A few places in the IPv4 code need to ignore the three high order bits of
+ * DSCP because of backward compatibility (as these bits used to represent the
+ * IPv4 Precedence in RFC 791's TOS field and were ignored).
+ */
+#define INET_DSCP_LEGACY_TOS_MASK ((__force dscp_t)0x1c)
+
static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield)
{
return (__force dscp_t)(dsfield & INET_DSCP_MASK);
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 153960663ce4..0eccd9c3a883 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -76,7 +76,7 @@ struct frag_v6_compare_key {
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
- * @mono_delivery_time: stamp has a mono delivery time (EDT)
+ * @tstamp_type: stamp has a mono delivery time (EDT)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @fqdir: pointer to struct fqdir
@@ -97,7 +97,7 @@ struct inet_frag_queue {
ktime_t stamp;
int len;
int meat;
- u8 mono_delivery_time;
+ u8 tstamp_type;
__u8 flags;
u16 max_size;
struct fqdir *fqdir;
@@ -137,7 +137,7 @@ static inline void fqdir_pre_exit(struct fqdir *fqdir)
}
void fqdir_exit(struct fqdir *fqdir);
-void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_kill(struct inet_frag_queue *q, int *refs);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
@@ -145,9 +145,9 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
unsigned int inet_frag_rbtree_purge(struct rb_root *root,
enum skb_drop_reason reason);
-static inline void inet_frag_put(struct inet_frag_queue *q)
+static inline void inet_frag_putn(struct inet_frag_queue *q, int refs)
{
- if (refcount_dec_and_test(&q->refcnt))
+ if (refs && refcount_sub_and_test(refs, &q->refcnt))
inet_frag_destroy(q);
}
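A minimal sketch of the new batched-release pattern behind inet_frag_kill()/inet_frag_putn(); the locking shape is illustrative of callers such as the reassembly timer:

static void example_expire(struct inet_frag_queue *q)
{
	int refs = 1;	/* the reference owned by this caller */

	spin_lock(&q->lock);
	/* Unhashing may drop more references; they are only counted
	 * into 'refs' while the lock is held ...
	 */
	inet_frag_kill(q, &refs);
	spin_unlock(&q->lock);

	/* ... and released afterwards in one atomic operation. */
	inet_frag_putn(q, refs);
}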
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 7f1b38458743..ac05a52d9e13 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -89,6 +89,7 @@ struct inet_bind_bucket {
bool fast_ipv6_only;
struct hlist_node node;
struct hlist_head bhash2;
+ struct rcu_head rcu;
};
struct inet_bind2_bucket {
@@ -107,6 +108,8 @@ struct inet_bind2_bucket {
struct hlist_node bhash_node;
/* List of sockets hashed to this bucket */
struct hlist_head owners;
+ signed char fastreuse;
+ signed char fastreuseport;
};
static inline struct net *ib_net(const struct inet_bind_bucket *ib)
@@ -174,14 +177,9 @@ struct inet_hashinfo {
bool pernet;
} ____cacheline_aligned_in_smp;
-static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
+static inline struct inet_hashinfo *tcp_get_hashinfo(const struct sock *sk)
{
-#if IS_ENABLED(CONFIG_IP_DCCP)
- return sk->sk_prot->h.hashinfo ? :
- sock_net(sk)->ipv4.tcp_death_row.hashinfo;
-#else
return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
-#endif
}
static inline struct inet_listen_hashbucket *
@@ -206,12 +204,6 @@ static inline spinlock_t *inet_ehash_lockp(
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
-static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
-{
- kfree(h->lhash2);
- h->lhash2 = NULL;
-}
-
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
kvfree(hashinfo->ehash_locks);
@@ -226,8 +218,7 @@ struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
const unsigned short snum, int l3mdev);
-void inet_bind_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind_bucket *tb);
+void inet_bind_bucket_destroy(struct inet_bind_bucket *tb);
bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
const struct net *net, unsigned short port,
@@ -300,12 +291,10 @@ int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
bool *found_dup_sk);
-int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
-struct sock *__inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
+struct sock *__inet_lookup_listener(const struct net *net,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr,
@@ -313,12 +302,12 @@ struct sock *__inet_lookup_listener(struct net *net,
const int dif, const int sdif);
static inline struct sock *inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
- __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport, int dif, int sdif)
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport,
+ int dif, int sdif)
{
- return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ return __inet_lookup_listener(net, skb, doff, saddr, sport,
daddr, ntohs(dport), dif, sdif);
}
@@ -351,7 +340,7 @@ static inline struct sock *inet_lookup_listener(struct net *net,
((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
-static inline bool inet_match(struct net *net, const struct sock *sk,
+static inline bool inet_match(const struct net *net, const struct sock *sk,
const __addrpair cookie, const __portpair ports,
int dif, int sdif)
{
@@ -368,8 +357,7 @@ static inline bool inet_match(struct net *net, const struct sock *sk,
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM
*/
-struct sock *__inet_lookup_established(struct net *net,
- struct inet_hashinfo *hashinfo,
+struct sock *__inet_lookup_established(const struct net *net,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif);
@@ -382,31 +370,29 @@ inet_ehashfn_t inet_ehashfn;
INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
-struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum,
inet_ehashfn_t *ehashfn);
-struct sock *inet_lookup_run_sk_lookup(struct net *net,
+struct sock *inet_lookup_run_sk_lookup(const struct net *net,
int protocol,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, u16 hnum, const int dif,
inet_ehashfn_t *ehashfn);
-static inline struct sock *
- inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const __be16 dport,
- const int dif)
+static inline struct sock *inet_lookup_established(struct net *net,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif)
{
- return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
+ return __inet_lookup_established(net, saddr, sport, daddr,
ntohs(dport), dif, 0);
}
static inline struct sock *__inet_lookup(struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
@@ -416,18 +402,17 @@ static inline struct sock *__inet_lookup(struct net *net,
u16 hnum = ntohs(dport);
struct sock *sk;
- sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+ sk = __inet_lookup_established(net, saddr, sport,
daddr, hnum, dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
- return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ return __inet_lookup_listener(net, skb, doff, saddr,
sport, daddr, hnum, dif, sdif);
}
static inline struct sock *inet_lookup(struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
@@ -436,7 +421,7 @@ static inline struct sock *inet_lookup(struct net *net,
struct sock *sk;
bool refcounted;
- sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
+ sk = __inet_lookup(net, skb, doff, saddr, sport, daddr,
dport, dif, 0, &refcounted);
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
@@ -484,15 +469,14 @@ struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
return reuse_sk;
}
-static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
- struct sk_buff *skb,
+static inline struct sock *__inet_lookup_skb(struct sk_buff *skb,
int doff,
const __be16 sport,
const __be16 dport,
const int sdif,
bool *refcounted)
{
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = skb_dst_dev_net_rcu(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *sk;
@@ -503,8 +487,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
- return __inet_lookup(net, hashinfo, skb,
- doff, iph->saddr, sport,
+ return __inet_lookup(net, skb, doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}
@@ -527,9 +510,12 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk, u64 port_offset,
+ u32 hash_port0,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16,
- struct inet_timewait_sock **));
+ struct inet_timewait_sock **,
+ bool rcu_lookup,
+ u32 hash));
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
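A minimal sketch of a TCP lookup call site after the hashinfo argument removal; the data-offset and interface arguments are illustrative:

static struct sock *example_lookup(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);

	/* No hashinfo argument any more: the hash table is derived
	 * from 'net' internally.
	 */
	return inet_lookup(net, skb, __tcp_hdrlen(th),
			   iph->saddr, th->source,
			   iph->daddr, th->dest,
			   inet_iif(skb));
}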
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index f9ddd47dc4f8..ac1c75975908 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/sock.h>
#include <net/request_sock.h>
#include <net/netns/hash.h>
@@ -150,7 +151,8 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
return bound_dev_if == dif || bound_dev_if == sdif;
}
-static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool inet_sk_bound_dev_eq(const struct net *net,
+ int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
@@ -171,8 +173,9 @@ struct inet_cork {
u8 tx_flags;
__u8 ttl;
__s16 tos;
- char priority;
+ u32 priority;
__u16 gso_size;
+ u32 ts_opt_id;
u64 transmit_time;
u32 mark;
};
@@ -211,6 +214,7 @@ struct inet_sock {
struct sock sk;
#if IS_ENABLED(CONFIG_IPV6)
struct ipv6_pinfo *pinet6;
+ struct ipv6_fl_socklist __rcu *ipv6_fl_list;
#endif
/* Socket demultiplex comparisons on incoming packets. */
#define inet_daddr sk.__sk_common.skc_daddr
@@ -240,7 +244,8 @@ struct inet_sock {
struct inet_cork_full cork;
};
-#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
+#define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */
enum {
INET_FLAGS_PKTINFO = 0,
@@ -299,6 +304,11 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;
}
+static inline dscp_t inet_sk_dscp(const struct inet_sock *inet)
+{
+ return inet_dsfield_to_dscp(READ_ONCE(inet->tos));
+}
+
#define inet_test_bit(nr, sk) \
test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
#define inet_set_bit(nr, sk) \
@@ -318,8 +328,10 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
static inline struct sock *sk_to_full_sk(struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = inet_reqsk(sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
@@ -328,8 +340,10 @@ static inline struct sock *sk_to_full_sk(struct sock *sk)
static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
{
#ifdef CONFIG_INET
- if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)
sk = ((const struct request_sock *)sk)->rsk_listener;
+ if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)
+ sk = NULL;
#endif
return sk;
}
@@ -341,14 +355,6 @@ static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
#define inet_sk(ptr) container_of_const(ptr, struct inet_sock, sk)
-static inline void __inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from,
- const int ancestor_size)
-{
- memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
- sk_from->sk_prot->obj_size - ancestor_size);
-}
-
int inet_sk_rebuild_header(struct sock *sk);
/**
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 2a536eea9424..63a644ff30de 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -45,6 +45,8 @@ struct inet_timewait_sock {
#define tw_node __tw_common.skc_nulls_node
#define tw_bind_node __tw_common.skc_bind_node
#define tw_refcnt __tw_common.skc_refcnt
+#define tw_tx_queue_mapping __tw_common.skc_tx_queue_mapping
+#define tw_rx_queue_mapping __tw_common.skc_rx_queue_mapping
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
@@ -58,7 +60,7 @@ struct inet_timewait_sock {
#define tw_dr __tw_common.skc_tw_dr
__u32 tw_mark;
- volatile unsigned char tw_substate;
+ unsigned char tw_substate;
unsigned char tw_rcv_wscale;
/* Socket demultiplex comparisons on incoming packets. */
@@ -68,13 +70,26 @@ struct inet_timewait_sock {
unsigned int tw_transparent : 1,
tw_flowlabel : 20,
tw_usec_ts : 1,
- tw_pad : 2, /* 2 bits hole */
+ tw_connect_bind : 1,
+ tw_pad : 1, /* 1 bit hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
+ /**
+ * @tw_entry_stamp: Time of entry into %TCP_TIME_WAIT state in msec.
+ */
+ u32 tw_entry_stamp;
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
struct inet_bind2_bucket *tw_tb2;
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_assoc __rcu *psp_assoc;
+#endif
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*tw_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
};
#define tw_tclass tw_tos
@@ -93,17 +108,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
-static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
-{
- __inet_twsk_schedule(tw, timeo, false);
-}
-
static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
__inet_twsk_schedule(tw, timeo, true);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 74ff688568a0..f475757daafb 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
- const struct inetpeer_addr *daddr,
- int create);
+ const struct inetpeer_addr *daddr);
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
- int vif, int create)
+ int vif)
{
struct inetpeer_addr daddr;
daddr.a4.addr = v4daddr;
daddr.a4.vif = vif;
daddr.family = AF_INET;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
- const struct in6_addr *v6daddr,
- int create)
+ const struct in6_addr *v6daddr)
{
struct inetpeer_addr daddr;
daddr.a6 = *v6daddr;
daddr.family = AF_INET6;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
diff --git a/include/net/ip.h b/include/net/ip.h
index 6d735e00d3f3..69d5cef46004 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,7 @@
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>
+#include <net/inet_dscp.h>
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
@@ -58,6 +59,7 @@ struct inet_skb_parm {
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
#define IPSKB_MULTIPATH BIT(9)
+#define IPSKB_MCROUTE BIT(10)
u16 frag_max_size;
};
@@ -80,7 +82,6 @@ struct ipcm_cookie {
__u8 protocol;
__u8 ttl;
__s16 tos;
- char priority;
__u16 gso_size;
};
@@ -92,10 +93,12 @@ static inline void ipcm_init(struct ipcm_cookie *ipcm)
static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
const struct inet_sock *inet)
{
- ipcm_init(ipcm);
+ *ipcm = (struct ipcm_cookie) {
+ .tos = READ_ONCE(inet->tos),
+ };
+
+ sockcm_init(&ipcm->sockc, &inet->sk);
- ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
- ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
@@ -165,6 +168,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
+int ip_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -256,14 +260,9 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
return RT_SCOPE_UNIVERSE;
}
-static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
-{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
-}
-
/* datagram.c */
-int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
+int ip4_datagram_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
void ip4_datagram_release_cb(struct sock *sk);
@@ -285,7 +284,8 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
+ struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -326,11 +326,12 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
-#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+#define snmp_get_cpu_field64_batch_cnt(buff64, stats_list, cnt, \
+ mib_statistic, offset) \
{ \
int i, c; \
for_each_possible_cpu(c) { \
- for (i = 0; stats_list[i].name; i++) \
+ for (i = 0; i < cnt; i++) \
buff64[i] += snmp_get_cpu_field64( \
mib_statistic, \
c, stats_list[i].entry, \
@@ -338,11 +339,11 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+#define snmp_get_cpu_field_batch_cnt(buff, stats_list, cnt, mib_statistic) \
{ \
int i, c; \
for_each_possible_cpu(c) { \
- for (i = 0; stats_list[i].name; i++) \
+ for (i = 0; i < cnt; i++) \
buff[i] += snmp_get_cpu_field( \
mib_statistic, \
c, stats_list[i].entry); \
@@ -359,7 +360,7 @@ static inline void inet_get_local_port_range(const struct net *net, int *low, in
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
-static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
+static inline bool inet_is_local_reserved_port(const struct net *net, unsigned short port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
return false;
@@ -421,6 +422,11 @@ int ip_decrease_ttl(struct iphdr *iph)
return --iph->ttl;
}
+static inline dscp_t ip4h_dscp(const struct iphdr *ip4h)
+{
+ return inet_dsfield_to_dscp(ip4h->tos);
+}
+
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
const struct rtable *rt = dst_rtable(dst);
@@ -462,14 +468,19 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
const struct rtable *rt = dst_rtable(dst);
- struct net *net = dev_net(dst->dev);
- unsigned int mtu;
+ const struct net_device *dev;
+ unsigned int mtu, res;
+ struct net *net;
+
+ rcu_read_lock();
+ dev = dst_dev_rcu(dst);
+ net = dev_net_rcu(dev);
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding) {
mtu = rt->rt_pmtu;
- if (mtu && time_before(jiffies, rt->dst.expires))
+ if (mtu && time_before(jiffies, READ_ONCE(rt->dst.expires)))
goto out;
}
@@ -478,7 +489,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
if (mtu)
goto out;
- mtu = READ_ONCE(dst->dev->mtu);
+ mtu = READ_ONCE(dev->mtu);
if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
@@ -488,26 +499,30 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
out:
mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
- return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ res = mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+
+ rcu_read_unlock();
+
+ return res;
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
const struct sk_buff *skb)
{
+ const struct dst_entry *dst = skb_dst(skb);
unsigned int mtu;
if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
- return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+ return ip_dst_mtu_maybe_forward(dst, forwarding);
}
- mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
- return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+ mtu = min(READ_ONCE(dst_dev(dst)->mtu), IP_MAX_MTU);
+ return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
-struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len,
+struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len,
struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
@@ -677,11 +692,24 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+#if IS_MODULE(CONFIG_IPV6)
+#define EXPORT_IPV6_MOD(X) EXPORT_SYMBOL(X)
+#define EXPORT_IPV6_MOD_GPL(X) EXPORT_SYMBOL_GPL(X)
+#else
+#define EXPORT_IPV6_MOD(X)
+#define EXPORT_IPV6_MOD_GPL(X)
+#endif
+
static inline unsigned int ipv4_addr_hash(__be32 ip)
{
return (__force unsigned int) ip;
}
+static inline u32 __ipv4_addr_hash(const __be32 ip, const u32 initval)
+{
+ return jhash_1word((__force u32)ip, initval);
+}
+
static inline u32 ipv4_portaddr_hash(const struct net *net,
__be32 saddr,
unsigned int port)
@@ -795,9 +823,8 @@ static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}
-bool icmp_global_allow(void);
-extern int sysctl_icmp_msgs_per_sec;
-extern int sysctl_icmp_msgs_burst;
+bool icmp_global_allow(struct net *net);
+void icmp_global_consume(struct net *net);
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
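A minimal, illustrative sketch of the new ip4h_dscp() accessor replacing open-coded masking of iph->tos on receive paths:

static dscp_t example_rx_dscp(const struct sk_buff *skb)
{
	/* was: inet_dsfield_to_dscp(ip_hdr(skb)->tos) */
	return ip4h_dscp(ip_hdr(skb));
}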
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 6cb867ce4878..88b0dd4d8e09 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -198,6 +198,7 @@ struct fib6_info {
fib6_destroying:1,
unused:4;
+ struct list_head purge_link;
struct rcu_head rcu;
struct nexthop *nh;
struct fib6_nh fib6_nh[];
@@ -394,7 +395,7 @@ struct fib6_table {
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
unsigned int flags;
- unsigned int fib_seq;
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct hlist_head tb6_gc_hlist; /* GC candidates */
#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
@@ -563,7 +564,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
int __net_init fib6_notifier_init(struct net *net);
void __net_exit fib6_notifier_exit(struct net *net);
-unsigned int fib6_tables_seq_read(struct net *net);
+unsigned int fib6_tables_seq_read(const struct net *net);
int fib6_tables_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
@@ -632,7 +633,7 @@ void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
int fib6_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib6_rules_seq_read(struct net *net);
+unsigned int fib6_rules_seq_read(const struct net *net);
static inline bool fib6_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -676,7 +677,7 @@ static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
{
return 0;
}
-static inline unsigned int fib6_rules_seq_read(struct net *net)
+static inline unsigned int fib6_rules_seq_read(const struct net *net)
{
return 0;
}
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index a18ed24fed94..7c5512baa4b2 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
const struct in6_addr *daddr,
- unsigned int prefs,
+ unsigned int prefs, int l3mdev_index,
struct in6_addr *saddr)
{
+ struct net_device *l3mdev;
+ struct net_device *dev;
+ bool same_vrf;
int err = 0;
- if (f6i && f6i->fib6_prefsrc.plen) {
+ rcu_read_lock();
+
+ l3mdev = dev_get_by_index_rcu(net, l3mdev_index);
+ if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev)
+ dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev;
+ if (f6i && f6i->fib6_prefsrc.plen && same_vrf)
*saddr = f6i->fib6_prefsrc.addr;
- } else {
- struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;
+ else
+ err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr);
- err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
- }
+ rcu_read_unlock();
return err;
}
@@ -221,16 +229,16 @@ static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
* Store a destination cache entry in a socket
*/
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
+ bool daddr_set,
+ bool saddr_set)
{
struct ipv6_pinfo *np = inet6_sk(sk);
np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
sk_setup_caps(sk, dst);
- np->daddr_cache = daddr;
+ np->daddr_cache = daddr_set;
#ifdef CONFIG_IPV6_SUBTREES
- np->saddr_cache = saddr;
+ np->saddr_cache = saddr_set;
#endif
}
@@ -266,7 +274,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
unsigned int mtu;
if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
- mtu = READ_ONCE(dst->dev->mtu);
+ mtu = READ_ONCE(dst_dev(dst)->mtu);
mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
} else {
mtu = dst_mtu(dst);
@@ -329,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
mtu = IPV6_MIN_MTU;
rcu_read_lock();
- idev = __in6_dev_get(dst->dev);
+ idev = __in6_dev_get(dst_dev_rcu(dst));
if (idev)
mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
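With the bool conversion above, ip6_dst_store() callers stop caching pointers into the socket for daddr_cache/saddr_cache and instead record whether the route was resolved for the socket's own addresses. A hedged caller-side sketch (the fl6/np naming is assumed from typical connect paths):

	/* Before: ip6_dst_store(sk, dst, &sk->sk_v6_daddr, &np->saddr); */
	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr),
		      ipv6_addr_equal(&fl6->saddr, &np->saddr));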
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 399592405c72..120db2865811 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -152,13 +152,14 @@ int ip6_tnl_get_iflink(const struct net_device *dev);
int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev, u16 ip6cb_flags)
{
int pkt_len, err;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->flags = ip6cb_flags;
pkt_len = skb->len - skb_inner_network_offset(skb);
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
+ err = ip6_local_out(skb_dst_dev_net(skb), sk, skb);
if (dev) {
if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9b2f69ba5e49..b4495c38e0a0 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,8 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
+#include <linux/ip.h>
+#include <linux/in_route.h>
struct fib_config {
u8 fc_dst_len;
@@ -160,6 +162,8 @@ struct fib_info {
struct fib_nh fib_nh[] __counted_by(fib_nhs);
};
+int __net_init fib4_semantics_init(struct net *net);
+void __net_exit fib4_semantics_exit(struct net *net);
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule;
@@ -173,6 +177,7 @@ struct fib_result {
unsigned char type;
unsigned char scope;
u32 tclassid;
+ dscp_t dscp;
struct fib_nh_common *nhc;
struct fib_info *fi;
struct fib_table *table;
@@ -344,7 +349,7 @@ static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
return 0;
}
-static inline unsigned int fib4_rules_seq_read(struct net *net)
+static inline unsigned int fib4_rules_seq_read(const struct net *net)
{
return 0;
}
@@ -408,7 +413,7 @@ static inline bool fib4_has_custom_rules(const struct net *net)
bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack);
-unsigned int fib4_rules_seq_read(struct net *net);
+unsigned int fib4_rules_seq_read(const struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
struct sk_buff *skb,
@@ -433,6 +438,11 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
#endif /* CONFIG_IP_MULTIPLE_TABLES */
+static inline bool fib_dscp_masked_match(dscp_t dscp, const struct flowi4 *fl4)
+{
+ return dscp == (fl4->flowi4_dscp & INET_DSCP_LEGACY_TOS_MASK);
+}
+
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
@@ -441,8 +451,21 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
- u8 tos, int oif, struct net_device *dev,
+ dscp_t dscp, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag);
+
+static inline enum skb_drop_reason
+fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst,
+ dscp_t dscp, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag)
+{
+ int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev,
+ itag);
+ if (err < 0)
+ return -err;
+ return SKB_NOT_DROPPED_YET;
+}
+
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
@@ -520,10 +543,39 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
const struct sk_buff *skb, struct flow_keys *flkeys);
+
+static inline void
+fib_multipath_hash_construct_key(siphash_key_t *key, u32 mp_seed)
+{
+ u64 mp_seed_64 = mp_seed;
+
+ key->key[0] = (mp_seed_64 << 32) | mp_seed_64;
+ key->key[1] = key->key[0];
+}
+
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+}
+#else
+static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ struct flow_keys *keys)
+{
+ return flow_hash_from_keys(keys);
+}
#endif
+
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
struct netlink_ext_ack *extack);
-void fib_select_multipath(struct fib_result *res, int hash);
+void fib_select_multipath(struct fib_result *res, int hash,
+ const struct flowi4 *fl4);
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, const struct sk_buff *skb);
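fib_multipath_hash_construct_key() widens the 32-bit sysctl seed into a full 128-bit siphash key by replicating it into all four 32-bit lanes, so a seed of 0x12345678 yields key[0] == key[1] == 0x1234567812345678. A short usage sketch:

	siphash_key_t key;

	fib_multipath_hash_construct_key(&key, 0x12345678);
	/* key.key[0] == key.key[1] == 0x1234567812345678ULL; the same
	 * seed always derives the same key, so a given flow keeps
	 * hashing to the same nexthop when fib_multipath_hash_from_keys()
	 * feeds it to flow_hash_from_keys_seed().
	 */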
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a08ec7713..ecae35512b9b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -11,7 +11,9 @@
#include <linux/bitops.h>
#include <net/dsfield.h>
+#include <net/flow.h>
#include <net/gro_cells.h>
+#include <net/inet_dscp.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -95,8 +97,8 @@ struct ip_tunnel_encap {
#define ip_tunnel_info_opts(info) \
_Generic(info, \
- const struct ip_tunnel_info * : ((const void *)((info) + 1)),\
- struct ip_tunnel_info * : ((void *)((info) + 1))\
+ const struct ip_tunnel_info * : ((const void *)(info)->options),\
+ struct ip_tunnel_info * : ((void *)(info)->options)\
)
struct ip_tunnel_info {
@@ -107,6 +109,7 @@ struct ip_tunnel_info {
#endif
u8 options_len;
u8 mode;
+ u8 options[] __aligned_largest __counted_by(options_len);
};
/* 6rd prefix/relay information */
@@ -354,14 +357,14 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
if (oif) {
- fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index(net, oif);
/* Legacy VRF/l3mdev use case */
fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
}
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = tos;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
fl4->flowi4_proto = proto;
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
@@ -376,10 +379,9 @@ struct net *ip_tunnel_get_link_net(const struct net_device *dev);
int ip_tunnel_get_iflink(const struct net_device *dev);
int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
-
-void ip_tunnel_delete_nets(struct list_head *list_net, unsigned int id,
- struct rtnl_link_ops *ops,
- struct list_head *dev_to_kill);
+void ip_tunnel_delete_net(struct net *net, unsigned int id,
+ struct rtnl_link_ops *ops,
+ struct list_head *dev_to_kill);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
@@ -406,8 +408,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm_kern *p, __u32 fwmark);
-int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm_kern *p, __u32 fwmark);
+int ip_tunnel_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct ip_tunnel_parm_kern *p,
+ __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
bool ip_tunnel_netlink_encap_parms(struct nlattr *data[],
@@ -439,7 +442,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
-static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+static inline enum skb_drop_reason
+pskb_inet_may_pull_reason(struct sk_buff *skb)
{
int nhlen;
@@ -456,15 +460,22 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
nhlen = 0;
}
- return pskb_network_may_pull(skb, nhlen);
+ return pskb_network_may_pull_reason(skb, nhlen);
+}
+
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET;
}
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline enum skb_drop_reason
+skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit)
{
- int nhlen = 0, maclen = ETH_HLEN;
+ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
+ enum skb_drop_reason reason;
/* Essentially this is skb_protocol(skb, true)
* And we get MAC len.
@@ -485,11 +496,13 @@ static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
* a base network header in skb->head.
*/
- if (!pskb_may_pull(skb, maclen + nhlen))
- return false;
+ reason = pskb_may_pull_reason(skb, maclen + nhlen);
+ if (reason)
+ return reason;
skb_set_network_header(skb, maclen);
- return true;
+
+ return SKB_NOT_DROPPED_YET;
}
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
@@ -572,7 +585,7 @@ static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
return 0;
}
-/* Propogate ECN bits out */
+/* Propagate ECN bits out */
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
const struct sk_buff *skb)
{
@@ -592,12 +605,27 @@ static inline int iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, u8 proto,
- u8 tos, u8 ttl, __be16 df, bool xnet);
+ u8 tos, u8 ttl, __be16 df, bool xnet, u16 ipcb_flags);
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
gfp_t flags);
int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst,
int headroom, bool reply);
+static inline void ip_tunnel_adj_headroom(struct net_device *dev,
+ unsigned int headroom)
+{
+ /* we must cap headroom to some upper limit, else pskb_expand_head
+ * will overflow header offsets in skb_headers_offset_update().
+ */
+ const unsigned int max_allowed = 512;
+
+ if (headroom > max_allowed)
+ headroom = max_allowed;
+
+ if (headroom > READ_ONCE(dev->needed_headroom))
+ WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
@@ -640,7 +668,7 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
static inline void ip_tunnel_info_opts_get(void *to,
const struct ip_tunnel_info *info)
{
- memcpy(to, info + 1, info->options_len);
+ memcpy(to, ip_tunnel_info_opts(info), info->options_len);
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
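Replacing the old `(info + 1)` pointer arithmetic with a real `options[]` flexible array lets `__counted_by(options_len)` bound-check option accesses, and the `_Generic` selector preserves const-correctness: a const info yields a const pointer. ip_tunnel_info_opts_get() above is exactly this pattern; a hedged read-only consumer sketch:

	/* Sketch: constness flows through the _Generic selector. */
	static void my_dump_opts(const struct ip_tunnel_info *info)
	{
		const u8 *opts = ip_tunnel_info_opts(info);
		u8 i;

		for (i = 0; i < info->options_len; i++)
			pr_debug("opt[%u] = %#x\n", i, opts[i]);
	}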
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff406ef4fd4a..29a36709e7f3 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1163,6 +1163,14 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
return housekeeping_cpumask(HK_TYPE_KTHREAD);
}
+static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
+{
+ if (ipvs->est_cpulist_valid)
+ return ipvs->sysctl_est_cpulist;
+ else
+ return NULL;
+}
+
static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
{
return ipvs->sysctl_est_nice;
@@ -1270,6 +1278,11 @@ static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
return housekeeping_cpumask(HK_TYPE_KTHREAD);
}
+static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
+{
+ return NULL;
+}
+
static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
{
return IPVS_EST_NICE;
diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h
index 8660a2a6d1fc..51401f01e2a5 100644
--- a/include/net/ipcomp.h
+++ b/include/net/ipcomp.h
@@ -3,20 +3,9 @@
#define _NET_IPCOMP_H
#include <linux/skbuff.h>
-#include <linux/types.h>
-
-#define IPCOMP_SCRATCH_SIZE 65400
-
-struct crypto_comp;
-struct ip_comp_hdr;
-
-struct ipcomp_data {
- u16 threshold;
- struct crypto_comp * __percpu *tfms;
-};
struct ip_comp_hdr;
-struct sk_buff;
+struct netlink_ext_ack;
struct xfrm_state;
int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 88a8e554f7a1..74fbf1ad8065 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -246,17 +246,20 @@ extern int sysctl_mld_qrv;
#define _DEVADD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
+ unsigned long _field = (field); \
+ unsigned long _val = (val); \
if (likely(_idev != NULL)) \
- mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
- mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
+ mod##SNMP_ADD_STATS((_idev)->stats.statname, _field, _val); \
+ mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, _field, _val);\
})
#define _DEVUPD(net, statname, mod, idev, field, val) \
({ \
struct inet6_dev *_idev = (idev); \
+ unsigned long _val = (val); \
if (likely(_idev != NULL)) \
- mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
- mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
+ mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, _val); \
+ mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, _val);\
})
/* MIBs */
@@ -363,15 +366,6 @@ struct ipcm6_cookie {
struct ipv6_txoptions *opt;
};
-static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
-{
- *ipc6 = (struct ipcm6_cookie) {
- .hlimit = -1,
- .tclass = -1,
- .dontfrag = -1,
- };
-}
-
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
const struct sock *sk)
{
@@ -380,6 +374,8 @@ static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
.tclass = inet6_sk(sk)->tclass,
.dontfrag = inet6_test_bit(DONTFRAG, sk),
};
+
+ sockcm_init(&ipc6->sockc, sk);
}
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
@@ -471,7 +467,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
/* This helper is specialized for BIG TCP needs.
* It assumes the hop_jumbo_hdr will immediately follow the IPV6 header.
* It assumes headers are already in skb->head.
- * Returns 0, or IPPROTO_TCP if a BIG TCP packet is there.
+ * Returns: 0, or IPPROTO_TCP if a BIG TCP packet is there.
*/
static inline int ipv6_has_hopopt_jumbo(const struct sk_buff *skb)
{
@@ -851,7 +847,7 @@ static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int
* we should *never* get to this point since that
* would mean the addrs are equal
*
- * However, we do get to it 8) And exacly, when
+ * However, we do get to it 8) And exactly, when
* addresses are equal 8)
*
* ip route add 1111::/128 via ...
@@ -973,7 +969,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
hash = skb_get_hash_flowi6(skb, fl6);
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
hash = rol32(hash, 16);
@@ -1192,10 +1188,10 @@ int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
int ipv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
-int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *addr,
int addr_len);
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
-int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
+int ip6_datagram_connect(struct sock *sk, struct sockaddr_unsized *addr, int addr_len);
+int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr_unsized *addr,
int addr_len);
int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
void ip6_datagram_release_cb(struct sock *sk);
@@ -1212,8 +1208,8 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
void inet6_cleanup_sock(struct sock *sk);
void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
-int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int inet6_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len);
+int inet6_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
@@ -1365,4 +1361,16 @@ static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
release_sock(sk);
}
+#define IPV6_ADDR_WORDS 4
+
+static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
+{
+ cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
+}
+
+static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
+{
+ be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
+}
+
#endif /* _NET_IPV6_H */
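The new IPV6_ADDR_WORDS helpers wrap the array conversions needed when an IPv6 address must be processed as native-endian u32 words (hashing, masking) and then written back in network order. A self-contained sketch (the masking operation is an arbitrary example):

	/* Sketch: clear the low byte of an address via host-endian words. */
	static void my_mask_low_byte(struct in6_addr *addr)
	{
		u32 words[IPV6_ADDR_WORDS];

		ipv6_addr_be32_to_cpu(words, addr->s6_addr32);	/* wire -> cpu */
		words[3] &= ~0xffU;
		ipv6_addr_cpu_to_be32(addr->s6_addr32, words);	/* cpu -> wire */
	}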
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 7321ffe3a108..38ef66826939 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -66,6 +66,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
struct sk_buff *head;
+ int refs = 1;
rcu_read_lock();
/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
@@ -77,7 +78,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
goto out;
fq->q.flags |= INET_FRAG_DROP;
- inet_frag_kill(&fq->q);
+ inet_frag_kill(&fq->q, &refs);
dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
@@ -109,7 +110,7 @@ out:
spin_unlock(&fq->q.lock);
out_rcu_unlock:
rcu_read_unlock();
- inet_frag_put(&fq->q);
+ inet_frag_putn(&fq->q, refs);
}
/* Check if the upper layer header is truncated in the first fragment. */
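The `refs` counter threaded through ip6frag_expire_frag_queue() above is a batching scheme: each sub-operation that previously dropped a reference immediately now just bumps the local counter, and inet_frag_putn() releases the whole batch in one atomic operation after the lock and RCU section are gone. Pattern sketch, under that reading:

	/* Sketch: batched refcount release on the frag-queue path. */
	int refs = 1;				/* the caller's own reference */

	spin_lock(&fq->q.lock);
	inet_frag_kill(&fq->q, &refs);		/* unhashing adds its drops to refs */
	spin_unlock(&fq->q.lock);

	inet_frag_putn(&fq->q, refs);		/* one atomic release for the batch */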
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 485c39a89866..d3013e721b14 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -9,6 +9,7 @@
#include <net/flow.h>
#include <net/neighbour.h>
#include <net/sock.h>
+#include <net/ipv6.h>
/* structs from net/ip6_fib.h */
struct fib6_info;
@@ -72,14 +73,16 @@ struct ipv6_stub {
int (*output)(struct net *, struct sock *, struct sk_buff *));
struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr,
struct net_device *dev);
+ int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+ __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
struct ipv6_bpf_stub {
- int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ int (*inet6_bind)(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
u32 flags);
- struct sock *(*udp6_lib_lookup)(struct net *net,
+ struct sock *(*udp6_lib_lookup)(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index 4d114e6d6d23..9804fa5d9c67 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -15,7 +15,7 @@
* To explore any of the IUCV functions, one must first register their
* program using iucv_register(). Once your program has successfully
* completed a register, it can exploit the other functions.
- * For furthur reference on all IUCV functionality, refer to the
+ * For further reference on all IUCV functionality, refer to the
* CP Programming Services book, also available on the web thru
* www.vm.ibm.com/pubs, manual # SC24-6084
*
@@ -202,7 +202,7 @@ struct iucv_handler {
*
* Registers a driver with IUCV.
*
- * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
+ * Returns: 0 on success, -ENOMEM if the memory allocation for the pathid
* table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
*/
int iucv_register(struct iucv_handler *handler, int smp);
@@ -224,7 +224,7 @@ void iucv_unregister(struct iucv_handler *handle, int smp);
*
* Allocate a new path structure for use with iucv_connect.
*
- * Returns NULL if the memory allocation failed or a pointer to the
+ * Returns: NULL if the memory allocation failed or a pointer to the
* path structure.
*/
static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
@@ -260,7 +260,7 @@ static inline void iucv_path_free(struct iucv_path *path)
* This function is issued after the user received a connection pending
* external interrupt and now wishes to complete the IUCV communication path.
*
- * Returns the result of the CP IUCV call.
+ * Returns: the result of the CP IUCV call.
*/
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
u8 *userdata, void *private);
@@ -278,7 +278,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
* successfully, you are not able to use the path until you receive an IUCV
* Connection Complete external interrupt.
*
- * Returns the result of the CP IUCV call.
+ * Returns: the result of the CP IUCV call.
*/
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
u8 *userid, u8 *system, u8 *userdata,
@@ -292,7 +292,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
* This function temporarily suspends incoming messages on an IUCV path.
* You can later reactivate the path by invoking the iucv_resume function.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
@@ -304,7 +304,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 *userdata);
* This function resumes incoming messages on an IUCV path that has
* been stopped with iucv_path_quiesce.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_resume(struct iucv_path *path, u8 *userdata);
@@ -315,7 +315,7 @@ int iucv_path_resume(struct iucv_path *path, u8 *userdata);
*
* This function terminates an IUCV path.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_path_sever(struct iucv_path *path, u8 *userdata);
@@ -327,7 +327,7 @@ int iucv_path_sever(struct iucv_path *path, u8 *userdata);
*
* Cancels a message you have sent.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
u32 srccls);
@@ -347,7 +347,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: local_bh_enable/local_bh_disable
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size, size_t *residual);
@@ -367,7 +367,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: no locking.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *buffer, size_t size,
@@ -382,7 +382,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
* are notified of a message and the time that you complete the message,
* the message may be rejected.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
@@ -399,7 +399,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
* pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
* the parameter list.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
u8 flags, void *reply, size_t size);
@@ -419,7 +419,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: local_bh_enable/local_bh_disable
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
@@ -439,7 +439,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
*
* Locking: no locking.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size);
@@ -461,7 +461,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
* reply to the message and a buffer is provided into which IUCV moves
* the reply to this message.
*
- * Returns the result from the CP IUCV call.
+ * Returns: the result from the CP IUCV call.
*/
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
u8 flags, u32 srccls, void *buffer, size_t size,
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index b2cf243ebe44..b80e474cb0aa 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -23,7 +23,7 @@
* to handle wireless statistics.
*
* The initial APIs served us well and has proven a reasonably good design.
- * However, there is a few shortcommings :
+ * However, there are a few shortcomings :
* o No events, everything is a request to the driver.
* o Large ioctl function in driver with gigantic switch statement
* (i.e. spaghetti code).
@@ -38,13 +38,13 @@
* -------------------------------
* The new driver API is just a bunch of standard functions (handlers),
* each handling a specific Wireless Extension. The driver just exports
- * the list of handler it supports, and those will be called apropriately.
+ * the list of handlers it supports, and those will be called appropriately.
*
* I tried to keep the main advantage of the previous API (simplicity,
* efficiency and light weight), and also I provide a good dose of backward
* compatibility (most structures are the same, driver can use both API
* simultaneously, ...).
- * Hopefully, I've also addressed the shortcomming of the initial API.
+ * Hopefully, I've also addressed the shortcoming of the initial API.
*
* The advantages of the new API are :
* o Handling of Extensions in driver broken in small contained functions
@@ -84,7 +84,7 @@
/* ---------------------- THE IMPLEMENTATION ---------------------- */
/*
- * Some of the choice I've made are pretty controversials. Defining an
+ * Some of the choices I've made are pretty controversial. Defining an
* API is very much weighting compromises. This goes into some of the
* details and the thinking behind the implementation.
*
@@ -140,7 +140,7 @@
* example to distinguish setting max rate and basic rate), I would
* break the prototype. Using iwreq_data is more flexible.
* 3) Also, the above form is not generic (see above).
- * 4) I don't expect driver developper using the wrong field of the
+ * 4) I don't expect driver developers to use the wrong field of the
* union (Doh !), so static typechecking doesn't add much value.
* 5) Lastly, you can skip the union by doing :
* static int mydriver_ioctl_setrate(struct net_device *dev,
@@ -279,8 +279,6 @@
#define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */
/* SET : Omit payload from generated iwevent */
#define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */
-/* Driver level flags */
-#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */
/****************************** TYPES ******************************/
@@ -373,11 +371,10 @@ struct iw_handler_def {
*/
struct iw_ioctl_description {
__u8 header_type; /* NULL, iw_point or other */
- __u8 token_type; /* Future */
+ __u8 flags; /* Special handling of the request */
__u16 token_size; /* Granularity of payload */
__u16 min_tokens; /* Min acceptable token number */
__u16 max_tokens; /* Max acceptable token number */
- __u32 flags; /* Special handling of the request */
};
/* Need to think of short header translation table. Later. */
@@ -404,26 +401,6 @@ struct iw_spy_data {
u_char spy_thr_under[IW_MAX_SPY];
};
-/* --------------------- DEVICE WIRELESS DATA --------------------- */
-/*
- * This is all the wireless data specific to a device instance that
- * is managed by the core of Wireless Extensions or the 802.11 layer.
- * We only keep pointer to those structures, so that a driver is free
- * to share them between instances.
- * This structure should be initialised before registering the device.
- * Access to this data follow the same rules as any other struct net_device
- * data (i.e. valid as long as struct net_device exist, same locking rules).
- */
-/* Forward declaration */
-struct libipw_device;
-/* The struct */
-struct iw_public_data {
- /* Driver enhanced spy support */
- struct iw_spy_data * spy_data;
- /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */
- struct libipw_device * libipw;
-};
-
/**************************** PROTOTYPES ****************************/
/*
* Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c).
@@ -443,23 +420,7 @@ static inline void wireless_nlevent_flush(void) {}
/* We may need a function to send a stream of events to user space.
* More on that later... */
-/* Standard handler for SIOCSIWSPY */
-int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWSPY */
-int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCSIWTHRSPY */
-int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Standard handler for SIOCGIWTHRSPY */
-int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-/* Driver call to update spy records */
-void wireless_spy_update(struct net_device *dev, unsigned char *address,
- struct iw_quality *wstats);
-
-/************************* INLINE FUNTIONS *************************/
+/************************* INLINE FUNCTIONS *************************/
/*
* Function that are so simple that it's more efficient inlining them
*/
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 90279e5e09a5..d9c35e71ecea 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -70,7 +70,7 @@ struct kcm_sock {
struct work_struct tx_work;
struct list_head wait_psock_list;
struct sk_buff *seq_skb;
- u32 tx_stopped : 1;
+ struct mutex tx_mutex;
/* Don't use bit fields here, these are set under different locks */
bool tx_wait;
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 031c661aa14d..1eb8dad18f7e 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -59,6 +59,20 @@ int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return !(fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF) &&
+ fl->flowi_l3mdev == iifindex;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return fl->flowi_flags & FLOWI_FLAG_L3MDEV_OIF &&
+ fl->flowi_l3mdev == oifindex;
+}
+
void l3mdev_update_flow(struct net *net, struct flowi *fl);
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
@@ -78,7 +92,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
struct net_device *dev;
int rc = 0;
- if (likely(ifindex)) {
+ if (ifindex) {
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
@@ -198,10 +212,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
if (netif_is_l3_slave(dev)) {
struct net_device *master;
+ rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
if (master && master->l3mdev_ops->l3mdev_l3_out)
skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
skb, proto);
+ rcu_read_unlock();
}
return skb;
@@ -325,6 +341,19 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+
+static inline
+bool l3mdev_fib_rule_iif_match(const struct flowi *fl, int iifindex)
+{
+ return false;
+}
+
+static inline
+bool l3mdev_fib_rule_oif_match(const struct flowi *fl, int oifindex)
+{
+ return false;
+}
+
static inline
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
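The iif/oif helpers added above read the same flowi_l3mdev field but disambiguate its direction via FLOWI_FLAG_L3MDEV_OIF: flag set means the index was recorded for output, clear means input. A hedged sketch of a rule-match user (the iifindex/oifindex fields follow struct fib_rule; the wrapper itself is hypothetical):

	/* Sketch: direction-aware VRF matching in a FIB rule. */
	static bool my_l3mdev_rule_match(const struct fib_rule *rule,
					 const struct flowi *fl)
	{
		if (rule->iifindex && !l3mdev_fib_rule_iif_match(fl, rule->iifindex))
			return false;

		if (rule->oifindex && !l3mdev_fib_rule_oif_match(fl, rule->oifindex))
			return false;

		return true;
	}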
diff --git a/include/net/lapb.h b/include/net/lapb.h
index 124ee122f2c8..6c07420644e4 100644
--- a/include/net/lapb.h
+++ b/include/net/lapb.h
@@ -4,7 +4,7 @@
#include <linux/lapb.h>
#include <linux/refcount.h>
-#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
+#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
#define LAPB_ACK_PENDING_CONDITION 0x01
#define LAPB_REJECT_CONDITION 0x02
diff --git a/include/net/lib80211.h b/include/net/lib80211.h
deleted file mode 100644
index 8b47d3a51cf8..000000000000
--- a/include/net/lib80211.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * lib80211.h -- common bits for IEEE802.11 wireless drivers
- *
- * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com>
- *
- * Some bits copied from old ieee80211 component, w/ original copyright
- * notices below:
- *
- * Original code based on Host AP (software wireless LAN access point) driver
- * for Intersil Prism2/2.5/3.
- *
- * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
- * <j@w1.fi>
- * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
- *
- * Adaption to a generic IEEE 802.11 stack by James Ketrenos
- * <jketreno@linux.intel.com>
- *
- * Copyright (c) 2004, Intel Corporation
- *
- */
-
-#ifndef LIB80211_H
-#define LIB80211_H
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/atomic.h>
-#include <linux/if.h>
-#include <linux/skbuff.h>
-#include <linux/ieee80211.h>
-#include <linux/timer.h>
-#include <linux/seq_file.h>
-
-#define NUM_WEP_KEYS 4
-
-enum {
- IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0),
-};
-
-struct module;
-
-struct lib80211_crypto_ops {
- const char *name;
- struct list_head list;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void *(*init) (int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit) (void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv);
- int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len,
- void *priv);
-
- int (*set_key) (void *key, int len, u8 * seq, void *priv);
- int (*get_key) (void *key, int len, u8 * seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- void (*print_stats) (struct seq_file *m, void *priv);
-
- /* Crypto specific flag get/set for configuration settings */
- unsigned long (*get_flags) (void *priv);
- unsigned long (*set_flags) (unsigned long flags, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the buffer and correct
- * length must be returned */
- int extra_mpdu_prefix_len, extra_mpdu_postfix_len;
- int extra_msdu_prefix_len, extra_msdu_postfix_len;
-
- struct module *owner;
-};
-
-struct lib80211_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct lib80211_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-struct lib80211_crypt_info {
- char *name;
- /* Most clients will already have a lock,
- so just point to that. */
- spinlock_t *lock;
-
- struct lib80211_crypt_data *crypt[NUM_WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct list_head crypt_deinit_list;
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
-};
-
-int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
- spinlock_t *lock);
-void lib80211_crypt_info_free(struct lib80211_crypt_info *info);
-int lib80211_register_crypto_ops(struct lib80211_crypto_ops *ops);
-int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
-struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name);
-void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
- struct lib80211_crypt_data **crypt);
-
-#endif /* LIB80211_H */
diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h
new file mode 100644
index 000000000000..bdb0c043ce61
--- /dev/null
+++ b/include/net/libeth/cache.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBETH_CACHE_H
+#define __LIBETH_CACHE_H
+
+#include <linux/cache.h>
+
+/**
+ * libeth_cacheline_group_assert - make sure cacheline group size is expected
+ * @type: type of the structure containing the group
+ * @grp: group name inside the struct
+ * @sz: expected group size
+ */
+#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) == \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) == (sz))
+#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+#define libeth_cacheline_group_assert(type, grp, sz) \
+ static_assert(offsetof(type, __cacheline_group_end__##grp) - \
+ offsetofend(type, __cacheline_group_begin__##grp) <= \
+ (sz))
+#define __libeth_cacheline_struct_assert(type, sz) \
+ static_assert(sizeof(type) <= (sz))
+#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */
+
+#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1)
+#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2))
+#define __libeth_cls3(sz1, sz2, sz3) \
+ (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3))
+#define __libeth_cls(...) \
+ CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+/**
+ * libeth_cacheline_struct_assert - make sure CL-based struct size is expected
+ * @type: type of the struct
+ * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold)
+ *
+ * When a struct contains several CL groups, it's difficult to predict its size
+ * on different architectures. The macro instead takes sizes of all of the
+ * groups the structure contains and generates the final struct size.
+ */
+#define libeth_cacheline_struct_assert(type, ...) \
+ __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \
+ static_assert(__alignof(type) >= SMP_CACHE_BYTES)
+
+/**
+ * libeth_cacheline_set_assert - make sure CL-based struct layout is expected
+ * @type: type of the struct
+ * @ro: expected size of the read-mostly group
+ * @rw: expected size of the read-write group
+ * @c: expected size of the cold group
+ *
+ * Check that each group size is expected and then do final struct size check.
+ */
+#define libeth_cacheline_set_assert(type, ro, rw, c) \
+ libeth_cacheline_group_assert(type, read_mostly, ro); \
+ libeth_cacheline_group_assert(type, read_write, rw); \
+ libeth_cacheline_group_assert(type, cold, c); \
+ libeth_cacheline_struct_assert(type, ro, rw, c)
+
+#endif /* __LIBETH_CACHE_H */
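A usage sketch for the assertions above, assuming the __cacheline_group_*_aligned() markers from <linux/cache.h>, a 64-bit build and 64-byte cachelines (on other configurations the checks relax to `<=`); the struct and its members are hypothetical:

	struct my_rxq {
		__cacheline_group_begin_aligned(read_mostly);
		void	*desc_ring;		/* 8 bytes */
		u32	size;			/* 4 bytes */
		__cacheline_group_end_aligned(read_mostly);

		__cacheline_group_begin_aligned(read_write);
		u32	next_to_clean;
		u32	next_to_use;		/* 8 bytes total */
		__cacheline_group_end_aligned(read_write);

		__cacheline_group_begin_aligned(cold);
		u32	rx_dropped;		/* 4 bytes */
		__cacheline_group_end_aligned(cold);
	};
	libeth_cacheline_set_assert(struct my_rxq, 12, 8, 4);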
diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h
index f29ea3e34c6c..5d991404845e 100644
--- a/include/net/libeth/rx.h
+++ b/include/net/libeth/rx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2024 Intel Corporation */
+/* Copyright (C) 2024-2025 Intel Corporation */
#ifndef __LIBETH_RX_H
#define __LIBETH_RX_H
@@ -13,10 +13,14 @@
/* Space reserved in front of each frame */
#define LIBETH_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+#define LIBETH_XDP_HEADROOM (ALIGN(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
+ NET_IP_ALIGN)
/* Maximum headroom for worst-case calculations */
-#define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM
+#define LIBETH_MAX_HEADROOM LIBETH_XDP_HEADROOM
/* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */
#define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN)
+/* Maximum supported L2-L4 header length */
+#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256))
/* Always use order-0 pages */
#define LIBETH_RX_PAGE_ORDER 0
@@ -29,7 +33,7 @@
/**
* struct libeth_fqe - structure representing an Rx buffer (fill queue element)
- * @page: page holding the buffer
+ * @netmem: network memory reference holding the buffer
* @offset: offset from the page start (to the headroom)
* @truesize: total space occupied by the buffer (w/ headroom and tailroom)
*
@@ -38,18 +42,33 @@
* former, @offset is always 0 and @truesize is always ``PAGE_SIZE``.
*/
struct libeth_fqe {
- struct page *page;
+ netmem_ref netmem;
u32 offset;
u32 truesize;
} __aligned_largest;
/**
+ * enum libeth_fqe_type - enum representing types of Rx buffers
+ * @LIBETH_FQE_MTU: buffer size is determined by MTU
+ * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames
+ * @LIBETH_FQE_HDR: buffer size is ``LIBETH_MAX_HEAD``-sized, for headers
+ */
+enum libeth_fqe_type {
+ LIBETH_FQE_MTU = 0U,
+ LIBETH_FQE_SHORT,
+ LIBETH_FQE_HDR,
+};
+
+/**
* struct libeth_fq - structure representing a buffer (fill) queue
* @fp: hotpath part of the structure
* @pp: &page_pool for buffer management
* @fqes: array of Rx buffers
* @truesize: size to allocate per buffer, w/overhead
* @count: number of descriptors/buffers the queue has
+ * @type: type of the buffers this queue has
+ * @hsplit: flag whether header split is enabled
+ * @xdp: flag indicating whether XDP is enabled
* @buf_len: HW-writeable length per each buffer
* @nid: ID of the closest NUMA node with memory
*/
@@ -63,6 +82,10 @@ struct libeth_fq {
);
/* Cold fields */
+ enum libeth_fqe_type type:2;
+ bool hsplit:1;
+ bool xdp:1;
+
u32 buf_len;
int nid;
};
@@ -83,15 +106,16 @@ static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)
struct libeth_fqe *buf = &fq->fqes[i];
buf->truesize = fq->truesize;
- buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);
- if (unlikely(!buf->page))
+ buf->netmem = page_pool_dev_alloc_netmem(fq->pp, &buf->offset,
+ &buf->truesize);
+ if (unlikely(!buf->netmem))
return DMA_MAPPING_ERROR;
- return page_pool_get_dma_addr(buf->page) + buf->offset +
+ return page_pool_get_dma_addr_netmem(buf->netmem) + buf->offset +
fq->pp->p.offset;
}
-void libeth_rx_recycle_slow(struct page *page);
+void libeth_rx_recycle_slow(netmem_ref netmem);
/**
* libeth_rx_sync_for_cpu - synchronize or recycle buffer post DMA
@@ -107,18 +131,19 @@ void libeth_rx_recycle_slow(struct page *page);
static inline bool libeth_rx_sync_for_cpu(const struct libeth_fqe *fqe,
u32 len)
{
- struct page *page = fqe->page;
+ netmem_ref netmem = fqe->netmem;
/* Very rare, but possible case. The most common reason:
* the last fragment contained FCS only, which was then
* stripped by the HW.
*/
if (unlikely(!len)) {
- libeth_rx_recycle_slow(page);
+ libeth_rx_recycle_slow(netmem);
return false;
}
- page_pool_dma_sync_for_cpu(page->pp, page, fqe->offset, len);
+ page_pool_dma_sync_netmem_for_cpu(netmem_get_pp(netmem), netmem,
+ fqe->offset, len);
return true;
}
@@ -179,6 +204,53 @@ struct libeth_rx_pt {
enum xdp_rss_hash_type hash_type:16;
};
+/**
+ * struct libeth_rx_csum - checksum offload bits decoded from the Rx descriptor
+ * @l3l4p: detectable L3 and L4 integrity check is processed by the hardware
+ * @ipe: IP checksum error
+ * @eipe: external (outermost) IP header checksum error (only for tunnels)
+ * @eudpe: external (outermost) UDP checksum error (only for tunnels)
+ * @ipv6exadd: IPv6 header with extension headers
+ * @l4e: L4 integrity error
+ * @pprs: set for packets that skip checksum calculation in the HW pre parser
+ * @nat: the packet is a UDP tunneled packet
+ * @raw_csum_valid: set if raw checksum is valid
+ * @pad: padding to naturally align raw_csum field
+ * @raw_csum: raw checksum
+ */
+struct libeth_rx_csum {
+ u32 l3l4p:1;
+ u32 ipe:1;
+ u32 eipe:1;
+ u32 eudpe:1;
+ u32 ipv6exadd:1;
+ u32 l4e:1;
+ u32 pprs:1;
+ u32 nat:1;
+
+ u32 raw_csum_valid:1;
+ u32 pad:7;
+ u32 raw_csum:16;
+};
+
+/**
+ * struct libeth_rqe_info - receive queue element info
+ * @len: packet length
+ * @ptype: packet type based on types programmed into the device
+ * @eop: whether it's the last fragment of the packet
+ * @rxe: MAC errors: CRC, Alignment, Oversize, Undersizes, Length error
+ * @vlan: C-VLAN or S-VLAN tag depending on the VLAN offload configuration
+ */
+struct libeth_rqe_info {
+ u32 len;
+
+ u32 ptype:14;
+ u32 eop:1;
+ u32 rxe:1;
+
+ u32 vlan:16;
+};
+
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt);
/**
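A refill-loop sketch for libeth_rx_alloc() with the netmem-backed FQEs; the descriptor write and any ring-index masking stand in for whatever the driver actually programs:

	/* Sketch: fill "count" ring slots starting at "i" (masking omitted). */
	static u32 my_rx_refill(const struct libeth_fq_fp *fq, u32 i, u32 count)
	{
		u32 done;

		for (done = 0; done < count; done++) {
			dma_addr_t addr = libeth_rx_alloc(fq, i + done);

			if (addr == DMA_MAPPING_ERROR)
				break;			/* pool exhausted, retry later */

			my_write_rx_desc(fq, i + done, addr);	/* hypothetical */
		}

		return done;
	}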
diff --git a/include/net/libeth/tx.h b/include/net/libeth/tx.h
new file mode 100644
index 000000000000..c3db5c6f1641
--- /dev/null
+++ b/include/net/libeth/tx.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#ifndef __LIBETH_TX_H
+#define __LIBETH_TX_H
+
+#include <linux/skbuff.h>
+
+#include <net/libeth/types.h>
+
+/* Tx buffer completion */
+
+/**
+ * enum libeth_sqe_type - type of &libeth_sqe to act on Tx completion
+ * @LIBETH_SQE_EMPTY: unused/empty OR XDP_TX/XSk frame, no action required
+ * @LIBETH_SQE_CTX: context descriptor with empty SQE, no action required
+ * @LIBETH_SQE_SLAB: kmalloc-allocated buffer, unmap and kfree()
+ * @LIBETH_SQE_FRAG: mapped skb frag, only unmap DMA
+ * @LIBETH_SQE_SKB: &sk_buff, unmap and napi_consume_skb(), update stats
+ * @__LIBETH_SQE_XDP_START: separator between skb and XDP types
+ * @LIBETH_SQE_XDP_TX: &skb_shared_info, libeth_xdp_return_buff_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame_bulk(), stats
+ * @LIBETH_SQE_XDP_XMIT_FRAG: &xdp_frame frag, only unmap DMA
+ * @LIBETH_SQE_XSK_TX: &libeth_xdp_buff on XSk queue, xsk_buff_free(), stats
+ * @LIBETH_SQE_XSK_TX_FRAG: &libeth_xdp_buff frag on XSk queue, xsk_buff_free()
+ */
+enum libeth_sqe_type {
+ LIBETH_SQE_EMPTY = 0U,
+ LIBETH_SQE_CTX,
+ LIBETH_SQE_SLAB,
+ LIBETH_SQE_FRAG,
+ LIBETH_SQE_SKB,
+
+ __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_TX = __LIBETH_SQE_XDP_START,
+ LIBETH_SQE_XDP_XMIT,
+ LIBETH_SQE_XDP_XMIT_FRAG,
+ LIBETH_SQE_XSK_TX,
+ LIBETH_SQE_XSK_TX_FRAG,
+};
+
+/**
+ * struct libeth_sqe - represents a Send Queue Element / Tx buffer
+ * @type: type of the buffer, see the enum above
+ * @rs_idx: index of the last buffer from the batch this one was sent in
+ * @raw: slab buffer to free via kfree()
+ * @skb: &sk_buff to consume
+ * @sinfo: skb shared info of an XDP_TX frame
+ * @xdpf: XDP frame from ::ndo_xdp_xmit()
+ * @xsk: XSk Rx frame from XDP_TX action
+ * @dma: DMA address to unmap
+ * @len: length of the mapped region to unmap
+ * @nr_frags: number of frags in the frame this buffer belongs to
+ * @packets: number of physical packets sent for this frame
+ * @bytes: number of physical bytes sent for this frame
+ * @priv: driver-private scratchpad
+ */
+struct libeth_sqe {
+ enum libeth_sqe_type type:32;
+ u32 rs_idx;
+
+ union {
+ void *raw;
+ struct sk_buff *skb;
+ struct skb_shared_info *sinfo;
+ struct xdp_frame *xdpf;
+ struct libeth_xdp_buff *xsk;
+ };
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+
+ u32 nr_frags;
+ u32 packets;
+ u32 bytes;
+
+ unsigned long priv;
+} __aligned_largest;
+
+/**
+ * LIBETH_SQE_CHECK_PRIV - check the driver's private SQE data
+ * @p: type or name of the object the driver wants to fit into &libeth_sqe
+ *
+ * Make sure the driver's private data fits into libeth_sqe::priv. To be used
+ * right after its declaration.
+ */
+#define LIBETH_SQE_CHECK_PRIV(p) \
+ static_assert(sizeof(p) <= sizeof_field(struct libeth_sqe, priv))
+
+/**
+ * struct libeth_cq_pp - completion queue poll params
+ * @dev: &device to perform DMA unmapping
+ * @bq: XDP frame bulk to combine return operations
+ * @ss: onstack NAPI stats to fill
+ * @xss: onstack XDPSQ NAPI stats to fill
+ * @xdp_tx: number of XDP-not-XSk frames processed
+ * @napi: whether it's called from the NAPI context
+ *
+ * libeth uses this structure to access objects needed for performing full
+ * Tx complete operation without passing lots of arguments and change the
+ * prototypes each time a new one is added.
+ */
+struct libeth_cq_pp {
+ struct device *dev;
+ struct xdp_frame_bulk *bq;
+
+ union {
+ struct libeth_sq_napi_stats *ss;
+ struct libeth_xdpsq_napi_stats *xss;
+ };
+ u32 xdp_tx;
+
+ bool napi;
+};
+
+/**
+ * libeth_tx_complete - perform Tx completion for one SQE
+ * @sqe: SQE to complete
+ * @cp: poll params
+ *
+ * Do Tx complete for all the types of buffers, incl. freeing, unmapping,
+ * updating the stats etc.
+ */
+static inline void libeth_tx_complete(struct libeth_sqe *sqe,
+ const struct libeth_cq_pp *cp)
+{
+ switch (sqe->type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_SKB:
+ case LIBETH_SQE_FRAG:
+ case LIBETH_SQE_SLAB:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (sqe->type) {
+ case LIBETH_SQE_SKB:
+ cp->ss->packets += sqe->packets;
+ cp->ss->bytes += sqe->bytes;
+
+ napi_consume_skb(sqe->skb, cp->napi);
+ break;
+ case LIBETH_SQE_SLAB:
+ kfree(sqe->raw);
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+void libeth_tx_complete_any(struct libeth_sqe *sqe, struct libeth_cq_pp *cp);
+
+#endif /* __LIBETH_TX_H */
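A hedged completion-pass sketch showing how the on-stack stats and &libeth_cq_pp plug into libeth_tx_complete(); the queue fields (q->ntc, q->count, q->sqes) are the driver's own bookkeeping:

	/* Sketch: complete SQEs in [ntc, done) after an RS descriptor. */
	struct libeth_sq_napi_stats ss = { };
	struct libeth_cq_pp cp = {
		.dev	= q->dev,	/* device the buffers were mapped for */
		.ss	= &ss,
		.napi	= true,		/* running in NAPI context */
	};

	while (q->ntc != done) {
		libeth_tx_complete(&q->sqes[q->ntc], &cp);
		q->ntc = (q->ntc + 1) & (q->count - 1);
	}
	/* ss.packets / ss.bytes now feed the queue's persistent counters */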
diff --git a/include/net/libeth/types.h b/include/net/libeth/types.h
new file mode 100644
index 000000000000..cf1d78a9dc38
--- /dev/null
+++ b/include/net/libeth/types.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024-2025 Intel Corporation */
+
+#ifndef __LIBETH_TYPES_H
+#define __LIBETH_TYPES_H
+
+#include <linux/workqueue.h>
+
+/* Stats */
+
+/**
+ * struct libeth_rq_napi_stats - "hot" counters to update in Rx polling loop
+ * @packets: received frames counter
+ * @bytes: sum of bytes of received frames above
+ * @fragments: sum of fragments of received S/G frames
+ * @hsplit: number of frames the device performed the header split for
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_rq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ u32 hsplit;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+/**
+ * struct libeth_sq_napi_stats - "hot" counters to update in Tx completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_sq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+/**
+ * struct libeth_xdpsq_napi_stats - "hot" counters to update in XDP Tx
+ * completion loop
+ * @packets: completed frames counter
+ * @bytes: sum of bytes of completed frames above
+ * @fragments: sum of fragments of completed S/G frames
+ * @raw: alias to access all the fields as an array
+ */
+struct libeth_xdpsq_napi_stats {
+ union {
+ struct {
+ u32 packets;
+ u32 bytes;
+ u32 fragments;
+ };
+ DECLARE_FLEX_ARRAY(u32, raw);
+ };
+};
+
+/* XDP */
+
+/*
+ * The following structures should be embedded into driver's queue structure
+ * and passed to the libeth_xdp helpers, never used directly.
+ */
+
+/* XDPSQ sharing */
+
+/**
+ * struct libeth_xdpsq_lock - locking primitive for sharing XDPSQs
+ * @lock: spinlock for locking the queue
+ * @share: whether this particular queue is shared
+ */
+struct libeth_xdpsq_lock {
+ spinlock_t lock;
+ bool share;
+};
+
+/* XDPSQ clean-up timers */
+
+/**
+ * struct libeth_xdpsq_timer - timer for cleaning up XDPSQs w/o interrupts
+ * @xdpsq: queue this timer belongs to
+ * @lock: lock for the queue
+ * @dwork: work performing cleanups
+ *
+ * XDPSQs not using interrupts but lazy cleaning, i.e. only when there's no
+ * space for sending the current queued frame/bulk, must fire up timers to
+ * make sure there are no stale buffers to free.
+ */
+struct libeth_xdpsq_timer {
+ void *xdpsq;
+ struct libeth_xdpsq_lock *lock;
+
+ struct delayed_work dwork;
+};
+
+/* Rx polling path */
+
+/**
+ * struct libeth_xdp_buff_stash - struct for stashing &xdp_buff onto a queue
+ * @data: pointer to the start of the frame, xdp_buff.data
+ * @headroom: frame headroom, xdp_buff.data - xdp_buff.data_hard_start
+ * @len: frame linear space length, xdp_buff.data_end - xdp_buff.data
+ * @frame_sz: truesize occupied by the frame, xdp_buff.frame_sz
+ * @flags: xdp_buff.flags
+ *
+ * &xdp_buff is 56 bytes long on x64, &libeth_xdp_buff is 64 bytes. This
+ * structure carries only necessary fields to save/restore a partially built
+ * frame on the queue structure to finish it during the next NAPI poll.
+ */
+struct libeth_xdp_buff_stash {
+ void *data;
+ u16 headroom;
+ u16 len;
+
+ u32 frame_sz:24;
+ u32 flags:8;
+} __aligned_largest;
+
+#endif /* __LIBETH_TYPES_H */
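The DECLARE_FLEX_ARRAY() alias in the stats structs above exists so hot-loop counters can be folded into long-lived storage generically, without naming each field. A sketch (the persistent u64 array and any u64_stats/seqcount protection are left to the driver):

	/* Sketch: fold per-poll stats into long-lived counters via "raw". */
	static void my_rq_stats_add(u64 *base,
				    const struct libeth_rq_napi_stats *ss)
	{
		for (u32 i = 0; i < sizeof(*ss) / sizeof(ss->raw[0]); i++)
			base[i] += ss->raw[i];
	}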
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
new file mode 100644
index 000000000000..898723ab62e8
--- /dev/null
+++ b/include/net/libeth/xdp.h
@@ -0,0 +1,1870 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XDP_H
+#define __LIBETH_XDP_H
+
+#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+
+#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
+#include <net/xsk_buff_pool.h>
+
+/*
+ * Defined as bits to be able to use them as a mask on Rx.
+ * Also used as internal return values on Tx.
+ */
+enum {
+ LIBETH_XDP_PASS = 0U,
+ LIBETH_XDP_DROP = BIT(0),
+ LIBETH_XDP_ABORTED = BIT(1),
+ LIBETH_XDP_TX = BIT(2),
+ LIBETH_XDP_REDIRECT = BIT(3),
+};
+
+/*
+ * &xdp_buff_xsk is the largest structure &libeth_xdp_buff gets casted to,
+ * pick maximum pointer-compatible alignment.
+ */
+#define __LIBETH_XDP_BUFF_ALIGN \
+ (IS_ALIGNED(sizeof(struct xdp_buff_xsk), 16) ? 16 : \
+ IS_ALIGNED(sizeof(struct xdp_buff_xsk), 8) ? 8 : \
+ sizeof(long))
+
+/**
+ * struct libeth_xdp_buff - libeth extension over &xdp_buff
+ * @base: main &xdp_buff
+ * @data: shortcut for @base.data
+ * @desc: RQ descriptor containing metadata for this buffer
+ * @priv: driver-private scratchspace
+ *
+ * The main reason for this wrapper is to carry a pointer to the descriptor,
+ * so that frame metadata can be fetched quickly in xdpmo and driver
+ * buff-to-xdp callbacks (bigger alignment is a bonus).
+ * Pointer/layout-compatible with &xdp_buff and &xdp_buff_xsk.
+ */
+struct libeth_xdp_buff {
+ union {
+ struct xdp_buff base;
+ void *data;
+ };
+
+ const void *desc;
+ unsigned long priv[]
+ __aligned(__LIBETH_XDP_BUFF_ALIGN);
+} __aligned(__LIBETH_XDP_BUFF_ALIGN);
+static_assert(offsetof(struct libeth_xdp_buff, data) ==
+ offsetof(struct xdp_buff_xsk, xdp.data));
+static_assert(offsetof(struct libeth_xdp_buff, desc) ==
+ offsetof(struct xdp_buff_xsk, cb));
+static_assert(IS_ALIGNED(sizeof(struct xdp_buff_xsk),
+ __alignof(struct libeth_xdp_buff)));
+
+/**
+ * __LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: sizeof() of the driver-private data
+ */
+#define __LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ ___LIBETH_XDP_ONSTACK_BUFF(name, ##__VA_ARGS__)
+/**
+ * LIBETH_XDP_ONSTACK_BUFF - declare a &libeth_xdp_buff on the stack
+ * @name: name of the variable to declare
+ * @...: type or variable name of the driver-private data
+ */
+#define LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __LIBETH_XDP_ONSTACK_BUFF(name, __libeth_xdp_priv_sz(__VA_ARGS__))
+
+#define ___LIBETH_XDP_ONSTACK_BUFF(name, ...) \
+ __DEFINE_FLEX(struct libeth_xdp_buff, name, priv, \
+ LIBETH_XDP_PRIV_SZ(__VA_ARGS__ + 0), \
+ __uninitialized); \
+ LIBETH_XDP_ASSERT_PRIV_SZ(__VA_ARGS__ + 0)
+
+#define __libeth_xdp_priv_sz(...) \
+ CONCATENATE(__libeth_xdp_psz, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define __libeth_xdp_psz0(...)
+#define __libeth_xdp_psz1(...) sizeof(__VA_ARGS__)
+
+#define LIBETH_XDP_PRIV_SZ(sz) \
+ (ALIGN(sz, __alignof(struct libeth_xdp_buff)) / sizeof(long))
+
+/* Performs XSK_CHECK_PRIV_TYPE() */
+#define LIBETH_XDP_ASSERT_PRIV_SZ(sz) \
+ static_assert(offsetofend(struct xdp_buff_xsk, cb) >= \
+ struct_size_t(struct libeth_xdp_buff, priv, \
+ LIBETH_XDP_PRIV_SZ(sz)))
+
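+/*
+ * Example (editor's sketch, not from a real driver): declaring an onstack
+ * buffer with room for a hypothetical driver-private scratch struct. Without
+ * the optional argument, no private space is reserved.
+ *
+ * struct my_xdp_scratch {
+ *	dma_addr_t dma;
+ *	u32 flags;
+ * };
+ *
+ * LIBETH_XDP_ONSTACK_BUFF(xdp, struct my_xdp_scratch);
+ */
+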
+/* XDPSQ sharing */
+
+DECLARE_STATIC_KEY_FALSE(libeth_xdpsq_share);
+
+/**
+ * libeth_xdpsq_num - calculate the optimal number of XDPSQs for this system
+ * @rxq: current number of active Rx queues
+ * @txq: current number of active Tx queues
+ * @max: maximum number of Tx queues
+ *
+ * Each RQ must have its own XDPSQ for XSk pairs and each CPU must have its
+ * own XDPSQ for lockless sending (``XDP_TX``, .ndo_xdp_xmit()). Cap the
+ * maximum of these two at the number of SQs the device can have (minus the
+ * ones already in use).
+ *
+ * Return: number of XDP Tx queues the device needs to use.
+ */
+static inline u32 libeth_xdpsq_num(u32 rxq, u32 txq, u32 max)
+{
+ return min(max(nr_cpu_ids, rxq), max - txq);
+}
+
+/**
+ * libeth_xdpsq_shared - whether XDPSQs can be shared between several CPUs
+ * @num: number of active XDPSQs
+ *
+ * Return: true if there's no 1:1 XDPSQ/CPU association, false otherwise.
+ */
+static inline bool libeth_xdpsq_shared(u32 num)
+{
+ return num < nr_cpu_ids;
+}
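+
+/*
+ * Example (sketch; the priv->* fields are made up): sizing the XDPSQ array
+ * when enabling XDP.
+ *
+ * u32 num = libeth_xdpsq_num(priv->num_rxq, priv->num_txq, priv->max_txq);
+ * bool share = libeth_xdpsq_shared(num);
+ *
+ * @share is then passed to libeth_xdpsq_get() for each allocated XDPSQ.
+ */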
+
+/**
+ * libeth_xdpsq_id - get XDPSQ index corresponding to this CPU
+ * @num: number of active XDPSQs
+ *
+ * Helper for libeth_xdp routines; do not use it in drivers directly.
+ *
+ * Return: XDPSQ index to use on this CPU.
+ */
+static inline u32 libeth_xdpsq_id(u32 num)
+{
+ u32 ret = raw_smp_processor_id();
+
+ if (static_branch_unlikely(&libeth_xdpsq_share) &&
+ libeth_xdpsq_shared(num))
+ ret %= num;
+
+ return ret;
+}
+
+void __libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+void __libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev);
+
+/**
+ * libeth_xdpsq_get - initialize &libeth_xdpsq_lock
+ * @lock: lock to initialize
+ * @dev: netdev which this lock belongs to
+ * @share: whether XDPSQs can be shared
+ *
+ * Tracks the current XDPSQ association and enables the static key
+ * if needed.
+ */
+static inline void libeth_xdpsq_get(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev,
+ bool share)
+{
+ if (unlikely(share))
+ __libeth_xdpsq_get(lock, dev);
+}
+
+/**
+ * libeth_xdpsq_put - deinitialize &libeth_xdpsq_lock
+ * @lock: lock to deinitialize
+ * @dev: netdev which this lock belongs to
+ *
+ * Tracks the current XDPSQ association and disables the static key
+ * if needed.
+ */
+static inline void libeth_xdpsq_put(struct libeth_xdpsq_lock *lock,
+ const struct net_device *dev)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_put(lock, dev);
+}
+
+void __libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock);
+void __libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock);
+
+/**
+ * libeth_xdpsq_lock - grab &libeth_xdpsq_lock if needed
+ * @lock: lock to take
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_lock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_lock(lock);
+}
+
+/**
+ * libeth_xdpsq_unlock - free &libeth_xdpsq_lock if needed
+ * @lock: lock to free
+ *
+ * Touches the underlying spinlock only if the static key is enabled
+ * and the queue itself is marked as shareable.
+ */
+static inline void libeth_xdpsq_unlock(struct libeth_xdpsq_lock *lock)
+{
+ if (static_branch_unlikely(&libeth_xdpsq_share) && lock->share)
+ __libeth_xdpsq_unlock(lock);
+}
+
+/* XDPSQ clean-up timers */
+
+void libeth_xdpsq_init_timer(struct libeth_xdpsq_timer *timer, void *xdpsq,
+ struct libeth_xdpsq_lock *lock,
+ void (*poll)(struct work_struct *work));
+
+/**
+ * libeth_xdpsq_deinit_timer - deinitialize &libeth_xdpsq_timer
+ * @timer: timer to deinitialize
+ *
+ * Synchronously cancel the pending clean-up work.
+ */
+static inline void libeth_xdpsq_deinit_timer(struct libeth_xdpsq_timer *timer)
+{
+ cancel_delayed_work_sync(&timer->dwork);
+}
+
+/**
+ * libeth_xdpsq_queue_timer - run &libeth_xdpsq_timer
+ * @timer: timer to queue
+ *
+ * Should be called after the queue was filled and the transmission was
+ * started, so that the pending buffers get completed even if no further
+ * sending happens within a second (in which case lazy cleaning would never
+ * trigger).
+ * If the timer was already queued, it is rescheduled with a one-second
+ * timeout again.
+ */
+static inline void libeth_xdpsq_queue_timer(struct libeth_xdpsq_timer *timer)
+{
+ mod_delayed_work_on(raw_smp_processor_id(), system_bh_highpri_wq,
+ &timer->dwork, HZ);
+}
+
+/**
+ * libeth_xdpsq_run_timer - wrapper to run a queue clean-up on a timer event
+ * @work: workqueue belonging to the corresponding timer
+ * @poll: driver-specific completion queue poll function
+ *
+ * Run the polling function on the locked queue and requeue the timer if
+ * there's more work to do.
+ * Designed to be used via LIBETH_XDP_DEFINE_TIMER() below.
+ */
+static __always_inline void
+libeth_xdpsq_run_timer(struct work_struct *work,
+ u32 (*poll)(void *xdpsq, u32 budget))
+{
+ struct libeth_xdpsq_timer *timer = container_of(work, typeof(*timer),
+ dwork.work);
+
+ libeth_xdpsq_lock(timer->lock);
+
+ if (poll(timer->xdpsq, U32_MAX))
+ libeth_xdpsq_queue_timer(timer);
+
+ libeth_xdpsq_unlock(timer->lock);
+}
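+
+/*
+ * Example (editor's sketch of a driver @finalize callback, see
+ * libeth_xdp_finalize_rx() below; my_* names are hypothetical): kick the HW
+ * if anything was sent and rearm the clean-up timer.
+ *
+ * static void my_xdpsq_finalize(void *_sq, bool sent, bool flush)
+ * {
+ *	struct my_xdpsq *sq = _sq;
+ *
+ *	if (!flush && !sent)
+ *		return;
+ *
+ *	libeth_xdpsq_lock(&sq->lock);
+ *
+ *	if (sent)
+ *		my_xdpsq_update_tail(sq);
+ *	libeth_xdpsq_queue_timer(sq->timer);
+ *
+ *	libeth_xdpsq_unlock(&sq->lock);
+ * }
+ */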
+
+/* Common Tx bits */
+
+/**
+ * enum - libeth_xdp internal Tx flags
+ * @LIBETH_XDP_TX_BULK: size of one Tx bulk, filling it triggers a flush
+ * @LIBETH_XDP_TX_BATCH: batch size for which the queue fill loop is unrolled
+ * @LIBETH_XDP_TX_DROP: indicates the send function must drop frames not sent
+ * @LIBETH_XDP_TX_NDO: whether the send function is called from .ndo_xdp_xmit()
+ * @LIBETH_XDP_TX_XSK: whether the function is called for ``XDP_TX`` for XSk
+ */
+enum {
+ LIBETH_XDP_TX_BULK = DEV_MAP_BULK_SIZE,
+ LIBETH_XDP_TX_BATCH = 8,
+
+ LIBETH_XDP_TX_DROP = BIT(0),
+ LIBETH_XDP_TX_NDO = BIT(1),
+ LIBETH_XDP_TX_XSK = BIT(2),
+};
+
+/**
+ * enum - &libeth_xdp_tx_frame and &libeth_xdp_tx_desc flags
+ * @LIBETH_XDP_TX_LEN: only for ``XDP_TX``, [15:0] of ::len_fl is actual length
+ * @LIBETH_XDP_TX_CSUM: for XSk xmit, enable checksum offload
+ * @LIBETH_XDP_TX_XSKMD: for XSk xmit, mask of the metadata bits
+ * @LIBETH_XDP_TX_FIRST: indicates the frag is the first one of the frame
+ * @LIBETH_XDP_TX_LAST: whether the frag is the last one of the frame
+ * @LIBETH_XDP_TX_MULTI: whether the frame contains several frags
+ * @LIBETH_XDP_TX_FLAGS: only for ``XDP_TX``, [31:16] of ::len_fl is flags
+ */
+enum {
+ LIBETH_XDP_TX_LEN = GENMASK(15, 0),
+
+ LIBETH_XDP_TX_CSUM = XDP_TXMD_FLAGS_CHECKSUM,
+ LIBETH_XDP_TX_XSKMD = LIBETH_XDP_TX_LEN,
+
+ LIBETH_XDP_TX_FIRST = BIT(16),
+ LIBETH_XDP_TX_LAST = BIT(17),
+ LIBETH_XDP_TX_MULTI = BIT(18),
+
+ LIBETH_XDP_TX_FLAGS = GENMASK(31, 16),
+};
+
+/**
+ * struct libeth_xdp_tx_frame - represents one XDP Tx element
+ * @data: frame start pointer for ``XDP_TX``
+ * @len_fl: ``XDP_TX``, combined flags [31:16] and len [15:0] field for speed
+ * @soff: ``XDP_TX``, offset from @data to the start of &skb_shared_info
+ * @frag: one (non-head) frag for ``XDP_TX``
+ * @xdpf: &xdp_frame for the head frag for .ndo_xdp_xmit()
+ * @dma: DMA address of the non-head frag for .ndo_xdp_xmit()
+ * @xsk: ``XDP_TX`` for XSk, XDP buffer for any frag
+ * @len: frag length for XSk ``XDP_TX`` and .ndo_xdp_xmit()
+ * @flags: Tx flags for the above
+ * @opts: combined @len + @flags for the above for speed
+ * @desc: XSk xmit descriptor for direct casting
+ */
+struct libeth_xdp_tx_frame {
+ union {
+ /* ``XDP_TX`` */
+ struct {
+ void *data;
+ u32 len_fl;
+ u32 soff;
+ };
+
+ /* ``XDP_TX`` frag */
+ skb_frag_t frag;
+
+ /* .ndo_xdp_xmit(), XSk ``XDP_TX`` */
+ struct {
+ union {
+ struct xdp_frame *xdpf;
+ dma_addr_t dma;
+
+ struct libeth_xdp_buff *xsk;
+ };
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+ };
+
+ /* XSk xmit */
+ struct xdp_desc desc;
+ };
+} __aligned(sizeof(struct xdp_desc));
+static_assert(offsetof(struct libeth_xdp_tx_frame, frag.len) ==
+ offsetof(struct libeth_xdp_tx_frame, len_fl));
+static_assert(sizeof(struct libeth_xdp_tx_frame) == sizeof(struct xdp_desc));
+
+/**
+ * struct libeth_xdp_tx_bulk - XDP Tx frame bulk for bulk sending
+ * @prog: corresponding active XDP program, %NULL for .ndo_xdp_xmit()
+ * @dev: &net_device which the frames are transmitted on
+ * @xdpsq: shortcut to the corresponding driver-specific XDPSQ structure
+ * @act_mask: Rx only, mask of all the XDP prog verdicts for that NAPI session
+ * @count: current number of frames in @bulk
+ * @bulk: array of queued frames for bulk Tx
+ *
+ * All XDP Tx operations except XSk xmit queue each frame to the bulk first
+ * and flush it when @count reaches the array end. The bulk is always placed
+ * on the stack for performance. One bulk element contains all the data
+ * necessary for sending a frame and then freeing it on completion.
+ * For XSk xmit, the Tx descriptor array from &xsk_buff_pool is cast directly
+ * to &libeth_xdp_tx_frame, as they are compatible, and the bulk structure is
+ * not used.
+ */
+struct libeth_xdp_tx_bulk {
+ const struct bpf_prog *prog;
+ struct net_device *dev;
+ void *xdpsq;
+
+ u32 act_mask;
+ u32 count;
+ struct libeth_xdp_tx_frame bulk[LIBETH_XDP_TX_BULK];
+} __aligned(sizeof(struct libeth_xdp_tx_frame));
+
+/**
+ * LIBETH_XDP_ONSTACK_BULK - declare &libeth_xdp_tx_bulk on the stack
+ * @bq: name of the variable to declare
+ *
+ * Helper to declare a bulk on the stack with a compiler hint that it should
+ * not be initialized automatically (with `CONFIG_INIT_STACK_ALL_*`) for
+ * performance reasons.
+ */
+#define LIBETH_XDP_ONSTACK_BULK(bq) \
+ struct libeth_xdp_tx_bulk bq __uninitialized
+
+/**
+ * struct libeth_xdpsq - abstraction for an XDPSQ
+ * @pool: XSk buffer pool for XSk ``XDP_TX`` and xmit
+ * @sqes: array of Tx buffers from the actual queue struct
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: pointer to the next free descriptor index
+ * @count: number of descriptors on that queue
+ * @pending: pointer to the number of sent-not-completed descs on that queue
+ * @xdp_tx: pointer to the above, but only for non-XSk-xmit frames
+ * @lock: corresponding XDPSQ lock
+ *
+ * Abstraction for driver-independent implementation of Tx. Placed on the stack
+ * and filled by the driver before the transmission, so that the generic
+ * functions can access and modify driver-specific resources.
+ */
+struct libeth_xdpsq {
+ struct xsk_buff_pool *pool;
+ struct libeth_sqe *sqes;
+ void *descs;
+
+ u32 *ntu;
+ u32 count;
+
+ u32 *pending;
+ u32 *xdp_tx;
+ struct libeth_xdpsq_lock *lock;
+};
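+
+/*
+ * Example (sketch of a driver @prep callback for the sending functions
+ * below; my_* names are hypothetical): lock the queue, clean it, and export
+ * it via the abstraction. Note that @prep must lock the queue, as
+ * libeth_xdp_tx_xmit_bulk() unlocks it at the end.
+ *
+ * static u32 my_xdp_tx_prep(void *_sq, struct libeth_xdpsq *sq)
+ * {
+ *	struct my_xdpsq *xdpsq = _sq;
+ *	u32 free;
+ *
+ *	libeth_xdpsq_lock(&xdpsq->lock);
+ *	free = my_xdpsq_clean(xdpsq);
+ *
+ *	*sq = (struct libeth_xdpsq){
+ *		.sqes		= xdpsq->sqes,
+ *		.descs		= xdpsq->descs,
+ *		.ntu		= &xdpsq->ntu,
+ *		.count		= xdpsq->count,
+ *		.pending	= &xdpsq->pending,
+ *		.xdp_tx		= &xdpsq->xdp_tx,
+ *		.lock		= &xdpsq->lock,
+ *	};
+ *
+ *	return free;
+ * }
+ */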
+
+/**
+ * struct libeth_xdp_tx_desc - abstraction for an XDP Tx descriptor
+ * @addr: DMA address of the frame
+ * @len: length of the frame
+ * @flags: XDP Tx flags
+ * @opts: combined @len + @flags for speed
+ *
+ * Filled by the generic functions and then passed to driver-specific functions
+ * to fill a HW Tx descriptor, always placed on the [function] stack.
+ */
+struct libeth_xdp_tx_desc {
+ dma_addr_t addr;
+ union {
+ struct {
+ u32 len;
+ u32 flags;
+ };
+ aligned_u64 opts;
+ };
+} __aligned_largest;
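+
+/*
+ * Example (sketch of a driver @xmit callback; the HW descriptor layout and
+ * my_* names are made up): write one HW Tx descriptor from the abstract one.
+ *
+ * static void my_xdp_xmit_desc(struct libeth_xdp_tx_desc desc, u32 i,
+ *			        const struct libeth_xdpsq *sq, u64 priv)
+ * {
+ *	struct my_tx_desc *d = &((struct my_tx_desc *)sq->descs)[i];
+ *
+ *	d->addr = cpu_to_le64(desc.addr);
+ *	d->qword = cpu_to_le64(desc.len | my_xdp_tx_cmd(desc.flags));
+ * }
+ */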
+
+/**
+ * libeth_xdp_ptr_to_priv - convert pointer to a libeth_xdp u64 priv
+ * @ptr: pointer to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when you want to pass a pointer there.
+ */
+#define libeth_xdp_ptr_to_priv(ptr) ({ \
+ typecheck_pointer(ptr); \
+ ((u64)(uintptr_t)(ptr)); \
+})
+/**
+ * libeth_xdp_priv_to_ptr - convert libeth_xdp u64 priv to a pointer
+ * @priv: private data to convert
+ *
+ * The main sending function passes private data as the largest scalar, u64.
+ * Use this helper when your callback takes this u64 and you want to convert
+ * it back to a pointer.
+ */
+#define libeth_xdp_priv_to_ptr(priv) ({ \
+ static_assert(__same_type(priv, u64)); \
+ ((const void *)(uintptr_t)(priv)); \
+})
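+
+/*
+ * Example (sketch; my_queue is hypothetical): passing a driver structure
+ * through the u64 @priv argument of the sending functions and restoring it
+ * in a callback.
+ *
+ * u64 priv = libeth_xdp_ptr_to_priv(my_queue);
+ * ...
+ * const struct my_queue *q = libeth_xdp_priv_to_ptr(priv);
+ */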
+
+/*
+ * On 64-bit systems, assigning one u64 is faster than two u32s. When ::len
+ * occupies the lowest 32 bits (LE), the whole ::opts can be assigned directly
+ * instead.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __LIBETH_WORD_ACCESS 1
+#endif
+#ifdef __LIBETH_WORD_ACCESS
+#define __libeth_xdp_tx_len(flen, ...) \
+ .opts = ((flen) | FIELD_PREP(GENMASK_ULL(63, 32), (__VA_ARGS__ + 0)))
+#else
+#define __libeth_xdp_tx_len(flen, ...) \
+ .len = (flen), .flags = (__VA_ARGS__ + 0)
+#endif
+
+/**
+ * libeth_xdp_tx_xmit_bulk - main XDP Tx function
+ * @bulk: array of frames to send
+ * @xdpsq: pointer to the driver-specific XDPSQ struct
+ * @n: number of frames to send
+ * @unroll: whether to unroll the queue filling loop for speed
+ * @priv: driver-specific private data
+ * @prep: callback for cleaning the queue and filling abstract &libeth_xdpsq
+ * @fill: internal callback for filling &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: callback for filling a HW descriptor with the frame info
+ *
+ * Internal abstraction for placing @n XDP Tx frames on the HW XDPSQ. Used for
+ * all types of frames: ``XDP_TX``, .ndo_xdp_xmit(), XSk ``XDP_TX``, and XSk
+ * xmit.
+ * @prep must lock the queue as this function releases it at the end. @unroll
+ * greatly increases the object code size, but also greatly increases XSk xmit
+ * performance; for other types of frames, it's not enabled.
+ * The compiler inlines all these onstack abstractions into direct data
+ * accesses.
+ *
+ * Return: number of frames actually placed on the queue, <= @n. The function
+ * can't fail, but can send fewer frames if there aren't enough free
+ * descriptors available. The actual free space is returned by @prep from the
+ * driver.
+ */
+static __always_inline __nocfi_generic u32
+libeth_xdp_tx_xmit_bulk(const struct libeth_xdp_tx_frame *bulk, void *xdpsq,
+ u32 n, bool unroll, u64 priv,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv))
+{
+ struct libeth_xdpsq sq __uninitialized;
+ u32 this, batched, off = 0;
+ u32 ntu, i = 0;
+
+ n = min(n, prep(xdpsq, &sq));
+ if (unlikely(!n))
+ goto unlock;
+
+ ntu = *sq.ntu;
+
+ this = sq.count - ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ if (!unroll)
+ goto linear;
+
+ batched = ALIGN_DOWN(this, LIBETH_XDP_TX_BATCH);
+
+ for ( ; i < off + batched; i += LIBETH_XDP_TX_BATCH) {
+ u32 base = ntu + i - off;
+
+ unrolled_count(LIBETH_XDP_TX_BATCH)
+ for (u32 j = 0; j < LIBETH_XDP_TX_BATCH; j++)
+ xmit(fill(bulk[i + j], base + j, &sq, priv),
+ base + j, &sq, priv);
+ }
+
+ if (batched < this) {
+linear:
+ for ( ; i < off + this; i++)
+ xmit(fill(bulk[i], ntu + i - off, &sq, priv),
+ ntu + i - off, &sq, priv);
+ }
+
+ ntu += this;
+ if (likely(ntu < sq.count))
+ goto out;
+
+ ntu = 0;
+
+ if (i < n) {
+ this = n - i;
+ off = i;
+
+ goto again;
+ }
+
+out:
+ *sq.ntu = ntu;
+ *sq.pending += n;
+ if (sq.xdp_tx)
+ *sq.xdp_tx += n;
+
+unlock:
+ libeth_xdpsq_unlock(sq.lock);
+
+ return n;
+}
+
+/* ``XDP_TX`` bulking */
+
+void libeth_xdp_return_buff_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xdp_tx_queue_head - internal helper for queueing one ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XDP buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xdp_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct xdp_buff *base = &xdp->base;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .data = xdp->data,
+ .len_fl = (base->data_end - xdp->data) | LIBETH_XDP_TX_FIRST,
+ .soff = xdp_data_hard_end(base) - xdp->data,
+ };
+
+ if (!xdp_buff_has_frags(base))
+ return false;
+
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xdp_tx_queue_frag - internal helper for queueing one ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ */
+static inline void libeth_xdp_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag)
+{
+ bq->bulk[bq->count++].frag = *frag;
+}
+
+/**
+ * libeth_xdp_tx_queue_bulk - internal helper for queueing one ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XDP buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xdp_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ const struct skb_shared_info *sinfo;
+ bool ret = true;
+ u32 nr_frags;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ libeth_xdp_return_buff_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xdp_tx_queue_head(bq, xdp))
+ goto out;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ nr_frags = sinfo->nr_frags;
+
+ for (u32 i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, 0))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xdp_tx_queue_frag(bq, &sinfo->frags[i]);
+ }
+
+out:
+ bq->bulk[bq->count - 1].len_fl |= LIBETH_XDP_TX_LAST;
+ xdp->data = NULL;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_tx_fill_stats - fill &libeth_sqe with ``XDP_TX`` frame stats
+ * @sqe: SQ element to fill
+ * @desc: libeth_xdp Tx descriptor
+ * @sinfo: &skb_shared_info for this frame
+ *
+ * Internal helper for filling an SQE with the frame stats; do not use it in
+ * drivers. Fills the number of frags and bytes for this frame.
+ */
+#define libeth_xdp_tx_fill_stats(sqe, desc, sinfo) \
+ __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, __UNIQUE_ID(sqe_), \
+ __UNIQUE_ID(desc_), __UNIQUE_ID(sinfo_))
+
+#define __libeth_xdp_tx_fill_stats(sqe, desc, sinfo, ue, ud, us) do { \
+ const struct libeth_xdp_tx_desc *ud = (desc); \
+ const struct skb_shared_info *us; \
+ struct libeth_sqe *ue = (sqe); \
+ \
+ ue->nr_frags = 1; \
+ ue->bytes = ud->len; \
+ \
+ if (ud->flags & LIBETH_XDP_TX_MULTI) { \
+ us = (sinfo); \
+ ue->nr_frags += us->nr_frags; \
+ ue->bytes += us->xdp_frags_size; \
+ } \
+} while (0)
+
+/**
+ * libeth_xdp_tx_fill_buf - internal helper to fill one ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct skb_shared_info *sinfo;
+ skb_frag_t *frag = &frm.frag;
+ struct libeth_sqe *sqe;
+ netmem_ref netmem;
+
+ if (frm.len_fl & LIBETH_XDP_TX_FIRST) {
+ sinfo = frm.data + frm.soff;
+ skb_frag_fill_netmem_desc(frag, virt_to_netmem(frm.data),
+ offset_in_page(frm.data),
+ frm.len_fl);
+ } else {
+ sinfo = NULL;
+ }
+
+ netmem = skb_frag_netmem(frag);
+ desc = (typeof(desc)){
+ .addr = page_pool_get_dma_addr_netmem(netmem) +
+ skb_frag_off(frag),
+ .len = skb_frag_size(frag) & LIBETH_XDP_TX_LEN,
+ .flags = skb_frag_size(frag) & LIBETH_XDP_TX_FLAGS,
+ };
+
+ dma_sync_single_for_device(__netmem_get_pp(netmem)->p.dev, desc.addr,
+ desc.len, DMA_BIDIRECTIONAL);
+
+ if (!sinfo)
+ return desc;
+
+ sqe = &sq->sqes[i];
+ sqe->type = LIBETH_SQE_XDP_TX;
+ sqe->sinfo = sinfo;
+ libeth_xdp_tx_fill_stats(sqe, &desc, sinfo);
+
+ return desc;
+}
+
+void libeth_xdp_tx_exception(struct libeth_xdp_tx_bulk *bq, u32 sent,
+ u32 flags);
+
+/**
+ * __libeth_xdp_tx_flush_bulk - internal helper to flush one XDP Tx bulk
+ * @bq: bulk to flush
+ * @flags: XDP TX flags (.ndo_xdp_xmit(), XSk etc.)
+ * @prep: driver-specific callback to prepare the queue for sending
+ * @fill: libeth_xdp callback to fill &libeth_sqe and &libeth_xdp_tx_desc
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Internal abstraction to create bulk flush functions for drivers. Used for
+ * everything except XSk xmit.
+ *
+ * Return: true if anything was sent, false otherwise.
+ */
+static __always_inline bool
+__libeth_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ struct libeth_xdp_tx_desc
+ (*fill)(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq,
+ u64 priv))
+{
+ u32 sent, drops;
+ int err = 0;
+
+ sent = libeth_xdp_tx_xmit_bulk(bq->bulk, bq->xdpsq,
+ min(bq->count, LIBETH_XDP_TX_BULK),
+ false, 0, prep, fill, xmit);
+ drops = bq->count - sent;
+
+ if (unlikely(drops)) {
+ libeth_xdp_tx_exception(bq, sent, flags);
+ err = -ENXIO;
+ } else {
+ bq->count = 0;
+ }
+
+ trace_xdp_bulk_tx(bq->dev, sent, drops, err);
+
+ return likely(sent);
+}
+
+/**
+ * libeth_xdp_tx_flush_bulk - wrapper to define flush of one ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see above
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_TX() to define an ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xdp_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, flags, prep, libeth_xdp_tx_fill_buf, \
+ xmit)
+
+/* .ndo_xdp_xmit() implementation */
+
+/**
+ * libeth_xdp_xmit_init_bulk - internal helper to initialize bulk for XDP xmit
+ * @bq: bulk to initialize
+ * @dev: target &net_device
+ * @xdpsqs: array of driver-specific XDPSQ structs
+ * @num: number of active XDPSQs (the above array length)
+ */
+#define libeth_xdp_xmit_init_bulk(bq, dev, xdpsqs, num) \
+ __libeth_xdp_xmit_init_bulk(bq, dev, (xdpsqs)[libeth_xdpsq_id(num)])
+
+static inline void __libeth_xdp_xmit_init_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct net_device *dev,
+ void *xdpsq)
+{
+ bq->dev = dev;
+ bq->xdpsq = xdpsq;
+ bq->count = 0;
+}
+
+/**
+ * libeth_xdp_xmit_frame_dma - internal helper to access DMA of an &xdp_frame
+ * @xf: pointer to the XDP frame
+ *
+ * There's no place in &libeth_xdp_tx_frame to store the DMA address of an
+ * &xdp_frame head. The headroom is used instead: the address is placed right
+ * after the frame struct, naturally aligned.
+ *
+ * Return: pointer to the DMA address to use.
+ */
+#define libeth_xdp_xmit_frame_dma(xf) \
+ _Generic((xf), \
+ const struct xdp_frame *: \
+ (const dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf), \
+ struct xdp_frame *: \
+ (dma_addr_t *)__libeth_xdp_xmit_frame_dma(xf) \
+ )
+
+static inline void *__libeth_xdp_xmit_frame_dma(const struct xdp_frame *xdpf)
+{
+ void *addr = (void *)(xdpf + 1);
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __alignof(*xdpf) < sizeof(dma_addr_t))
+ addr = PTR_ALIGN(addr, sizeof(dma_addr_t));
+
+ return addr;
+}
+
+/**
+ * libeth_xdp_xmit_queue_head - internal helper for queueing one XDP xmit head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdpf: XDP frame with the head to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: ``LIBETH_XDP_DROP`` on DMA mapping error,
+ * ``LIBETH_XDP_PASS`` if it's the only frag in the frame,
+ * ``LIBETH_XDP_TX`` if it's an S/G frame.
+ */
+static inline u32 libeth_xdp_xmit_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ return LIBETH_XDP_DROP;
+
+ *libeth_xdp_xmit_frame_dma(xdpf) = dma;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xdpf = xdpf,
+ __libeth_xdp_tx_len(xdpf->len, LIBETH_XDP_TX_FIRST),
+ };
+
+ if (!xdp_frame_has_frags(xdpf))
+ return LIBETH_XDP_PASS;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return LIBETH_XDP_TX;
+}
+
+/**
+ * libeth_xdp_xmit_queue_frag - internal helper for queueing one XDP xmit frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: frag to queue
+ * @dev: device to perform DMA mapping
+ *
+ * Return: true on success, false on DMA mapping error.
+ */
+static inline bool libeth_xdp_xmit_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ const skb_frag_t *frag,
+ struct device *dev)
+{
+ dma_addr_t dma;
+
+ dma = skb_frag_dma_map(dev, frag);
+ if (dma_mapping_error(dev, dma))
+ return false;
+
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .dma = dma,
+ __libeth_xdp_tx_len(skb_frag_size(frag)),
+ };
+
+ return true;
+}
+
+/**
+ * libeth_xdp_xmit_queue_bulk - internal helper for queueing one XDP xmit frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdpf: XDP frame to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: ``LIBETH_XDP_TX`` on success,
+ * ``LIBETH_XDP_DROP`` if the frame should be dropped by the stack,
+ * ``LIBETH_XDP_ABORTED`` if the frame will be dropped by libeth_xdp.
+ */
+static __always_inline u32
+libeth_xdp_xmit_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame *xdpf,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 head, nr_frags, i, ret = LIBETH_XDP_TX;
+ struct device *dev = bq->dev->dev.parent;
+ const struct skb_shared_info *sinfo;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ return LIBETH_XDP_DROP;
+
+ head = libeth_xdp_xmit_queue_head(bq, xdpf, dev);
+ if (head == LIBETH_XDP_PASS)
+ goto out;
+ else if (head == LIBETH_XDP_DROP)
+ return LIBETH_XDP_DROP;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ nr_frags = sinfo->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_NDO)))
+ break;
+
+ if (!libeth_xdp_xmit_queue_frag(bq, &sinfo->frags[i], dev))
+ break;
+ }
+
+ if (unlikely(i < nr_frags))
+ ret = LIBETH_XDP_ABORTED;
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xdp_xmit_fill_buf - internal helper to fill one XDP xmit &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the mapped DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xdp_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+ struct libeth_sqe *sqe;
+ struct xdp_frame *xdpf;
+
+ if (frm.flags & LIBETH_XDP_TX_FIRST) {
+ xdpf = frm.xdpf;
+ desc.addr = *libeth_xdp_xmit_frame_dma(xdpf);
+ } else {
+ xdpf = NULL;
+ desc.addr = frm.dma;
+ }
+ desc.opts = frm.opts;
+
+ sqe = &sq->sqes[i];
+ dma_unmap_addr_set(sqe, dma, desc.addr);
+ dma_unmap_len_set(sqe, len, desc.len);
+
+ if (!xdpf) {
+ sqe->type = LIBETH_SQE_XDP_XMIT_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XDP_XMIT;
+ sqe->xdpf = xdpf;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_frame(xdpf));
+
+ return desc;
+}
+
+/**
+ * libeth_xdp_xmit_flush_bulk - wrapper to define flush of one XDP xmit bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XDP_DEFINE_FLUSH_XMIT() to define an XDP xmit driver
+ * callback.
+ */
+#define libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_NDO, prep, \
+ libeth_xdp_xmit_fill_buf, xmit)
+
+u32 libeth_xdp_xmit_return_bulk(const struct libeth_xdp_tx_frame *bq,
+ u32 count, const struct net_device *dev);
+
+/**
+ * __libeth_xdp_xmit_do_bulk - internal function to implement .ndo_xdp_xmit()
+ * @bq: XDP Tx bulk to queue frames to
+ * @frames: XDP frames passed by the stack
+ * @n: number of frames
+ * @flags: flags passed by the stack
+ * @flush_bulk: driver callback to flush an XDP xmit bulk
+ * @finalize: driver callback to finalize sending XDP Tx frames on the queue
+ *
+ * Perform common checks, map the frags and queue them to the bulk, then flush
+ * the bulk to the XDPSQ. If requested by the stack, finalize the queue.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+static __always_inline int
+__libeth_xdp_xmit_do_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct xdp_frame **frames, u32 n, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ u32 nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (u32 i = 0; likely(i < n); i++) {
+ u32 ret;
+
+ ret = libeth_xdp_xmit_queue_bulk(bq, frames[i], flush_bulk);
+ if (unlikely(ret != LIBETH_XDP_TX)) {
+ nxmit += ret == LIBETH_XDP_ABORTED;
+ break;
+ }
+
+ nxmit++;
+ }
+
+ if (bq->count) {
+ flush_bulk(bq, LIBETH_XDP_TX_NDO);
+ if (unlikely(bq->count))
+ nxmit -= libeth_xdp_xmit_return_bulk(bq->bulk,
+ bq->count,
+ bq->dev);
+ }
+
+ finalize(bq->xdpsq, nxmit, flags & XDP_XMIT_FLUSH);
+
+ return nxmit;
+}
+
+/**
+ * libeth_xdp_xmit_do_bulk - implement full .ndo_xdp_xmit() in driver
+ * @dev: target &net_device
+ * @n: number of frames to send
+ * @fr: XDP frames to send
+ * @f: flags passed by the stack
+ * @xqs: array of XDPSQs driver structs
+ * @nqs: number of active XDPSQs, the above array length
+ * @fl: driver callback to flush an XDP xmit bulk
+ * @fin: driver callback to finalize the queue
+ *
+ * If the driver has active XDPSQs, perform common checks and send the frames.
+ * Finalize the queue, if requested.
+ *
+ * Return: number of frames sent or -errno on error.
+ */
+#define libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin) \
+ _libeth_xdp_xmit_do_bulk(dev, n, fr, f, xqs, nqs, fl, fin, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(ret_), \
+ __UNIQUE_ID(nqs_))
+
+#define _libeth_xdp_xmit_do_bulk(d, n, fr, f, xqs, nqs, fl, fin, ub, ur, un) \
+({ \
+ u32 un = (nqs); \
+ int ur; \
+ \
+ if (likely(un)) { \
+ LIBETH_XDP_ONSTACK_BULK(ub); \
+ \
+ libeth_xdp_xmit_init_bulk(&ub, d, xqs, un); \
+ ur = __libeth_xdp_xmit_do_bulk(&ub, fr, n, f, fl, fin); \
+ } else { \
+ ur = -ENXIO; \
+ } \
+ \
+ ur; \
+})
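+
+/*
+ * Example (editor's sketch; my_* names are hypothetical): a complete
+ * .ndo_xdp_xmit() implementation built on top of the macro above.
+ *
+ * static int my_xdp_xmit(struct net_device *dev, int n,
+ *			  struct xdp_frame **frames, u32 flags)
+ * {
+ *	struct my_priv *priv = netdev_priv(dev);
+ *
+ *	return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+ *				       priv->xdpsqs, priv->num_xdpsqs,
+ *				       my_xmit_flush_bulk,
+ *				       my_xdpsq_finalize);
+ * }
+ */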
+
+/* Rx polling path */
+
+/**
+ * libeth_xdp_tx_init_bulk - initialize an XDP Tx bulk for Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (can be %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions. If @num == 0,
+ * assumes XDP is not enabled.
+ * Do not use it for XSk; XSk has its own optimized helper.
+ */
+#define libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, false, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+#define __libeth_xdp_tx_init_bulk(bq, pr, d, xdpsqs, num, xsk, ub, un) do { \
+ typeof(bq) ub = (bq); \
+ u32 un = (num); \
+ \
+ rcu_read_lock(); \
+ \
+ if (un || (xsk)) { \
+ ub->prog = rcu_dereference(pr); \
+ ub->dev = (d); \
+ ub->xdpsq = (xdpsqs)[libeth_xdpsq_id(un)]; \
+ } else { \
+ ub->prog = NULL; \
+ } \
+ \
+ ub->act_mask = 0; \
+ ub->count = 0; \
+} while (0)
+
+void libeth_xdp_load_stash(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src);
+void libeth_xdp_save_stash(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src);
+void __libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash);
+
+/**
+ * libeth_xdp_init_buff - initialize a &libeth_xdp_buff for Rx NAPI poll
+ * @dst: onstack buffer to initialize
+ * @src: XDP buffer stash placed on the queue
+ * @rxq: registered &xdp_rxq_info corresponding to this queue
+ *
+ * Should be called before the main NAPI polling loop. Loads the content of
+ * the previously saved stash or initializes the buffer from scratch.
+ * Do not use for XSk.
+ */
+static inline void
+libeth_xdp_init_buff(struct libeth_xdp_buff *dst,
+ const struct libeth_xdp_buff_stash *src,
+ struct xdp_rxq_info *rxq)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_load_stash(dst, src);
+
+ dst->base.rxq = rxq;
+}
+
+/**
+ * libeth_xdp_save_buff - save a partially built buffer on a queue
+ * @dst: XDP buffer stash placed on the queue
+ * @src: onstack buffer to save
+ *
+ * Should be called after the main NAPI polling loop. If the loop exited before
+ * the buffer was finished, saves its content on the queue, so that it can be
+ * completed during the next poll. Otherwise, clears the stash.
+ */
+static inline void libeth_xdp_save_buff(struct libeth_xdp_buff_stash *dst,
+ const struct libeth_xdp_buff *src)
+{
+ if (likely(!src->data))
+ dst->data = NULL;
+ else
+ libeth_xdp_save_stash(dst, src);
+}
+
+/**
+ * libeth_xdp_return_stash - free an XDP buffer stash from a queue
+ * @stash: stash to free
+ *
+ * If the queue is about to be destroyed, but it still has an incomplete
+ * buffer stash, this helper should be called to free it.
+ */
+static inline void libeth_xdp_return_stash(struct libeth_xdp_buff_stash *stash)
+{
+ if (stash->data)
+ __libeth_xdp_return_stash(stash);
+}
+
+static inline void libeth_xdp_return_va(const void *data, bool napi)
+{
+ netmem_ref netmem = virt_to_netmem(data);
+
+ page_pool_put_full_netmem(__netmem_get_pp(netmem), netmem, napi);
+}
+
+static inline void libeth_xdp_return_frags(const struct skb_shared_info *sinfo,
+ bool napi)
+{
+ for (u32 i = 0; i < sinfo->nr_frags; i++) {
+ netmem_ref netmem = skb_frag_netmem(&sinfo->frags[i]);
+
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, napi);
+ }
+}
+
+/**
+ * libeth_xdp_return_buff - free/recycle &libeth_xdp_buff
+ * @xdp: buffer to free
+ *
+ * Hotpath helper to free &libeth_xdp_buff. Compared to xdp_return_buff(),
+ * it's faster as it gets inlined and always assumes order-0 pages and safe
+ * direct recycling. Zeroes @xdp->data to avoid UAFs.
+ */
+#define libeth_xdp_return_buff(xdp) __libeth_xdp_return_buff(xdp, true)
+
+static inline void __libeth_xdp_return_buff(struct libeth_xdp_buff *xdp,
+ bool napi)
+{
+ if (!xdp_buff_has_frags(&xdp->base))
+ goto out;
+
+ libeth_xdp_return_frags(xdp_get_shared_info_from_buff(&xdp->base),
+ napi);
+
+out:
+ libeth_xdp_return_va(xdp->data, napi);
+ xdp->data = NULL;
+}
+
+bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len);
+
+/**
+ * libeth_xdp_prepare_buff - fill &libeth_xdp_buff with head FQE data
+ * @xdp: XDP buffer to attach the head to
+ * @fqe: FQE containing the head buffer
+ * @len: buffer len passed from HW
+ *
+ * Internal, use libeth_xdp_process_buff() instead. Initializes XDP buffer
+ * head with the Rx buffer data: data pointer, length, headroom, and
+ * truesize/tailroom. Zeroes the flags.
+ */
+static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ const struct page *page = __netmem_to_page(fqe->netmem);
+
+ xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
+ pp_page_to_nmdesc(page)->pp->p.offset, len, true);
+ xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
+}
+
+/**
+ * libeth_xdp_process_buff - attach Rx buffer to &libeth_xdp_buff
+ * @xdp: XDP buffer to attach the Rx buffer to
+ * @fqe: Rx buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If the XDP buffer is empty, attaches the Rx buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: true on success, false if the descriptor must be skipped (empty or
+ * no space for a new frag).
+ */
+static inline bool libeth_xdp_process_buff(struct libeth_xdp_buff *xdp,
+ const struct libeth_fqe *fqe,
+ u32 len)
+{
+ if (!libeth_rx_sync_for_cpu(fqe, len))
+ return false;
+
+ if (xdp->data)
+ return libeth_xdp_buff_add_frag(xdp, fqe, len);
+
+ libeth_xdp_prepare_buff(xdp, fqe, len);
+
+ prefetch(xdp->data);
+
+ return true;
+}
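+
+/*
+ * Example (sketch; my_* names and the rq->* fields are hypothetical): the
+ * pieces above combined in a NAPI poll, complementing the "typical driver
+ * Rx flow" described further below.
+ *
+ * LIBETH_XDP_ONSTACK_BULK(bq);
+ * LIBETH_XDP_ONSTACK_BUFF(xdp);
+ * struct libeth_rq_napi_stats rs = { };
+ *
+ * libeth_xdp_tx_init_bulk(&bq, rq->xdp_prog, rq->dev, priv->xdpsqs,
+ *			    priv->num_xdpsqs);
+ * libeth_xdp_init_buff(xdp, &rq->stash, &rq->xdp_rxq);
+ *
+ * while (rs.packets < budget) {
+ *	const struct my_rx_desc *desc = my_rq_next_desc(rq);
+ *
+ *	if (!desc)
+ *		break;
+ *	if (!libeth_xdp_process_buff(xdp, &rq->fqes[rq->ntc], desc->len))
+ *		continue;
+ *
+ *	my_xdp_run(xdp, &bq, napi, &rs, desc);
+ * }
+ *
+ * libeth_xdp_save_buff(&rq->stash, xdp);
+ * my_xdp_finalize_rx(&bq);
+ */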
+
+/**
+ * libeth_xdp_buff_stats_frags - update onstack RQ stats with XDP frags info
+ * @ss: onstack stats to update
+ * @xdp: buffer to account
+ *
+ * Internal helper used by __libeth_xdp_run_pass(), do not call directly.
+ * Adds buffer's frags count and total len to the onstack stats.
+ */
+static inline void
+libeth_xdp_buff_stats_frags(struct libeth_rq_napi_stats *ss,
+ const struct libeth_xdp_buff *xdp)
+{
+ const struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(&xdp->base);
+ ss->bytes += sinfo->xdp_frags_size;
+ ss->fragments += sinfo->nr_frags + 1;
+}
+
+u32 libeth_xdp_prog_exception(const struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xdp_run_prog - run XDP program on an XDP buffer
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run XDP program. Handles ``XDP_DROP``
+ * and ``XDP_REDIRECT`` only, the rest is processed levels up.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act < XDP_DROP || act > XDP_REDIRECT))
+ goto out;
+
+ switch (act) {
+ case XDP_PASS:
+ return LIBETH_XDP_PASS;
+ case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
+
+ return LIBETH_XDP_DROP;
+ case XDP_TX:
+ return LIBETH_XDP_TX;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(bq->dev, &xdp->base, bq->prog)))
+ break;
+
+ xdp->data = NULL;
+
+ return LIBETH_XDP_REDIRECT;
+ default:
+ break;
+ }
+
+out:
+ return libeth_xdp_prog_exception(bq, xdp, act, 0);
+}
+
+/**
+ * __libeth_xdp_run_flush - run XDP program and handle ``XDP_TX`` verdict
+ * @xdp: XDP buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ * @run: internal callback for running XDP program
+ * @queue: internal callback for queuing ``XDP_TX`` frame
+ * @flush_bulk: driver callback for flushing a bulk
+ *
+ * Internal inline abstraction to run XDP program and additionally handle
+ * ``XDP_TX`` verdict. Used by both XDP and XSk, hence @run and @queue.
+ * Do not use directly.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xdp_run_flush(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq,
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq),
+ bool (*queue)(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)
+ (struct libeth_xdp_tx_bulk *bq,
+ u32 flags)),
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ u32 act;
+
+ act = run(xdp, bq);
+ if (act == LIBETH_XDP_TX && unlikely(!queue(bq, xdp, flush_bulk)))
+ act = LIBETH_XDP_DROP;
+
+ bq->act_mask |= act;
+
+ return act;
+}
+
+/**
+ * libeth_xdp_run_prog - run XDP program (non-XSk path) and handle all verdicts
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts. XSk has its
+ * own version.
+ * Prefer using it via LIBETH_XDP_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: true if the buffer should be passed up the stack, false if the poll
+ * should go to the next buffer.
+ */
+#define libeth_xdp_run_prog(xdp, bq, fl) \
+ (__libeth_xdp_run_flush(xdp, bq, __libeth_xdp_run_prog, \
+ libeth_xdp_tx_queue_bulk, \
+ fl) == LIBETH_XDP_PASS)
+
+/**
+ * __libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XDP buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction that does the following (non-XSk path):
+ * 1) adds frame size and frag number (if needed) to the onstack stats;
+ * 2) fills the descriptor metadata into the onstack &libeth_xdp_buff;
+ * 3) runs the XDP program if present;
+ * 4) handles all possible verdicts;
+ * 5) on ``XDP_PASS``, builds an skb from the buffer;
+ * 6) populates it with the descriptor metadata;
+ * 7) passes it up the stack.
+ *
+ * In most cases, step 2) means just writing the pointer to the HW descriptor
+ * to the XDP buffer. If so, please use LIBETH_XDP_DEFINE_RUN{,_PASS}()
+ * wrappers to build a driver function.
+ */
+static __always_inline void
+__libeth_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ bool (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (xdp_buff_has_frags(&xdp->base))
+ libeth_xdp_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ if (!bq || !run || !bq->prog)
+ goto build;
+
+ if (!run(xdp, bq))
+ return;
+
+build:
+ skb = xdp_build_skb_from_buff(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xdp_return_buff_slow(xdp);
+ return;
+ }
+
+ xdp->data = NULL;
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return;
+ }
+
+ napi_gro_receive(napi, skb);
+}
+
+static inline void libeth_xdp_prep_desc(struct libeth_xdp_buff *xdp,
+ const void *desc)
+{
+ xdp->desc = desc;
+}
+
+/**
+ * libeth_xdp_run_pass - helper to run XDP program and handle the result
+ * @xdp: XDP buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @ss: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xdp_run_pass(xdp, bq, napi, ss, desc, run, populate) \
+ __libeth_xdp_run_pass(xdp, bq, napi, ss, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xdp_finalize_rx - finalize XDPSQ after a NAPI polling loop (non-XSk)
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xdp_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, 0, flush, finalize)
+
+static __always_inline void
+__libeth_xdp_finalize_rx(struct libeth_xdp_tx_bulk *bq, u32 flags,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ if (bq->act_mask & LIBETH_XDP_TX) {
+ if (bq->count)
+ flush_bulk(bq, flags | LIBETH_XDP_TX_DROP);
+ finalize(bq->xdpsq, true, true);
+ }
+ if (bq->act_mask & LIBETH_XDP_REDIRECT)
+ xdp_do_flush();
+
+ rcu_read_unlock();
+}
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XDP_DEFINE_FLUSH_TX(static driver_xdp_flush_tx, driver_xdp_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XDP_DEFINE_RUN(static driver_xdp_run, driver_xdp_run_prog,
+ * driver_xdp_flush_tx, driver_populate_skb);
+ * LIBETH_XDP_DEFINE_FINALIZE(static driver_xdp_finalize_rx,
+ * driver_xdp_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * driver_xdp_run(xdp, &bq, napi, &rs, desc);
+ * }
+ * driver_xdp_finalize_rx(&bq);
+ */
+
+#define LIBETH_XDP_DEFINE_START() \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wold-style-declaration", \
+ "Allow specifying \'static\' after the return type")
+
+/**
+ * LIBETH_XDP_DEFINE_TIMER - define a driver XDPSQ cleanup timer callback
+ * @name: name of the function to define
+ * @poll: Tx polling/completion function
+ */
+#define LIBETH_XDP_DEFINE_TIMER(name, poll) \
+void name(struct work_struct *work) \
+{ \
+ libeth_xdpsq_run_timer(work, poll); \
+}
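+
+/*
+ * Example (sketch; my_* names are hypothetical): defining the callback
+ * (within LIBETH_XDP_DEFINE_START()/LIBETH_XDP_DEFINE_END()) and attaching
+ * it to a queue at init time. my_xdpsq_complete() is the driver's completion
+ * poll function of type u32 (*)(void *xdpsq, u32 budget).
+ *
+ * LIBETH_XDP_DEFINE_TIMER(static my_xdp_tx_timer, my_xdpsq_complete);
+ * ...
+ * libeth_xdpsq_init_timer(sq->timer, sq, &sq->lock, my_xdp_tx_timer);
+ */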
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_TX - define a driver ``XDP_TX`` bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xdp)
+
+#define __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, pfx) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_##pfx##_tx_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_FLUSH_XMIT - define a driver XDP xmit bulk flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XDP_DEFINE_FLUSH_XMIT(name, prep, xmit) \
+bool name(struct libeth_xdp_tx_bulk *bq, u32 flags) \
+{ \
+ return libeth_xdp_xmit_flush_bulk(bq, flags, prep, xmit); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ */
+#define LIBETH_XDP_DEFINE_RUN_PROG(name, flush) \
+ bool __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq) \
+{ \
+ return libeth_##pfx##_run_prog(xdp, bq, flush); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate) \
+ void __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xdp)
+
+#define __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, pfx) \
+name(struct libeth_xdp_buff *xdp, struct libeth_xdp_tx_bulk *bq, \
+ struct napi_struct *napi, struct libeth_rq_napi_stats *ss, \
+ const void *desc) \
+{ \
+ return libeth_##pfx##_run_pass(xdp, bq, napi, ss, desc, run, \
+ populate); \
+}
+
+/**
+ * LIBETH_XDP_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XDP_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XDP)
+
+#define __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, pfx) \
+ LIBETH_##pfx##_DEFINE_RUN_PROG(static run, flush); \
+ LIBETH_##pfx##_DEFINE_RUN_PASS(name, run, populate)
+
+/**
+ * LIBETH_XDP_DEFINE_FINALIZE - define a driver Rx NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xdp)
+
+#define __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, pfx) \
+void name(struct libeth_xdp_tx_bulk *bq) \
+{ \
+ libeth_##pfx##_finalize_rx(bq, flush, finalize); \
+}
+
+#define LIBETH_XDP_DEFINE_END() __diag_pop()
+
+/* XMO */
+
+/**
+ * libeth_xdp_buff_to_rq - get RQ pointer from an XDP buffer pointer
+ * @xdp: &libeth_xdp_buff corresponding to the queue
+ * @type: typeof() of the driver Rx queue structure
+ * @member: name of &xdp_rxq_info inside @type
+ *
+ * Oftentimes, a pointer to the RQ is needed when reading/filling metadata from
+ * HW descriptors. The helper can be used to quickly jump from an XDP buffer
+ * to the queue corresponding to its &xdp_rxq_info without introducing
+ * additional fields (&libeth_xdp_buff is precisely 1 cacheline long on x64).
+ */
+#define libeth_xdp_buff_to_rq(xdp, type, member) \
+ container_of_const((xdp)->base.rxq, type, member)
+
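+/*
+ * Example (sketch; my_rq and its member names are hypothetical):
+ *
+ * const struct my_rq *rq = libeth_xdp_buff_to_rq(xdp, struct my_rq,
+ *						   xdp_rxq);
+ */
+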
+/**
+ * libeth_xdpmo_rx_hash - convert &libeth_rx_pt to an XDP RSS hash metadata
+ * @hash: pointer to the variable to write the hash to
+ * @rss_type: pointer to the variable to write the hash type to
+ * @val: hash value from the HW descriptor
+ * @pt: libeth parsed packet type
+ *
+ * Handle zeroed/non-available hash and convert libeth parsed packet type to
+ * the corresponding XDP RSS hash type. To be called at the end of the
+ * driver's xdp_metadata_ops::xmo_rx_hash() implementation.
+ * Note that if the driver doesn't use a constant packet type lookup table but
+ * generates it at runtime, it must call libeth_rx_pt_gen_hash_type(pt) to
+ * generate the XDP RSS hash type for each packet type.
+ *
+ * Return: 0 on success, -ENODATA when the hash is not available.
+ */
+static inline int libeth_xdpmo_rx_hash(u32 *hash,
+ enum xdp_rss_hash_type *rss_type,
+ u32 val, struct libeth_rx_pt pt)
+{
+ if (unlikely(!val))
+ return -ENODATA;
+
+ *hash = val;
+ *rss_type = pt.hash_type;
+
+ return 0;
+}
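+
+/*
+ * Example (sketch; the descriptor layout and my_* names are made up): a
+ * driver ::xmo_rx_hash() implementation on top of the helper.
+ *
+ * static int my_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ *			       enum xdp_rss_hash_type *rss_type)
+ * {
+ *	const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ *	const struct my_rx_desc *desc = xdp->desc;
+ *
+ *	return libeth_xdpmo_rx_hash(hash, rss_type,
+ *				    le32_to_cpu(desc->rss_hash),
+ *				    my_rx_pt_lut[desc->ptype]);
+ * }
+ */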
+
+/* Tx buffer completion */
+
+void libeth_xdp_return_buff_bulk(const struct skb_shared_info *sinfo,
+ struct xdp_frame_bulk *bq, bool frags);
+void libeth_xsk_buff_free_slow(struct libeth_xdp_buff *xdp);
+
+/**
+ * __libeth_xdp_complete_tx - complete sent XDPSQE
+ * @sqe: SQ element / Tx buffer to complete
+ * @cp: Tx polling/completion params
+ * @bulk: internal callback to bulk-free ``XDP_TX`` buffers
+ * @xsk: internal callback to free XSk ``XDP_TX`` buffers
+ *
+ * Use the non-underscored version in drivers instead. This one is shared
+ * internally with libeth_tx_complete_any().
+ * Complete an XDPSQE of any type of XDP frame. This includes DMA unmapping
+ * when needed, buffer freeing, stats update, and SQE invalidation.
+ */
+static __always_inline void
+__libeth_xdp_complete_tx(struct libeth_sqe *sqe, struct libeth_cq_pp *cp,
+ typeof(libeth_xdp_return_buff_bulk) bulk,
+ typeof(libeth_xsk_buff_free_slow) xsk)
+{
+ enum libeth_sqe_type type = sqe->type;
+
+ switch (type) {
+ case LIBETH_SQE_EMPTY:
+ return;
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XDP_XMIT_FRAG:
+ dma_unmap_page(cp->dev, dma_unmap_addr(sqe, dma),
+ dma_unmap_len(sqe, len), DMA_TO_DEVICE);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ bulk(sqe->sinfo, cp->bq, sqe->nr_frags != 1);
+ break;
+ case LIBETH_SQE_XDP_XMIT:
+ xdp_return_frame_bulk(sqe->xdpf, cp->bq);
+ break;
+ case LIBETH_SQE_XSK_TX:
+ case LIBETH_SQE_XSK_TX_FRAG:
+ xsk(sqe->xsk);
+ break;
+ default:
+ break;
+ }
+
+ switch (type) {
+ case LIBETH_SQE_XDP_TX:
+ case LIBETH_SQE_XDP_XMIT:
+ case LIBETH_SQE_XSK_TX:
+ cp->xdp_tx -= sqe->nr_frags;
+
+ cp->xss->packets++;
+ cp->xss->bytes += sqe->bytes;
+ break;
+ default:
+ break;
+ }
+
+ sqe->type = LIBETH_SQE_EMPTY;
+}
+
+static inline void libeth_xdp_complete_tx(struct libeth_sqe *sqe,
+ struct libeth_cq_pp *cp)
+{
+ __libeth_xdp_complete_tx(sqe, cp, libeth_xdp_return_buff_bulk,
+ libeth_xsk_buff_free_slow);
+}
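+
+/*
+ * Example (editor's sketch; my_* names are hypothetical and only the
+ * &libeth_cq_pp fields used above are filled in): a driver Tx completion
+ * loop feeding each processed SQE to the helper.
+ *
+ * struct xdp_frame_bulk bq;
+ * struct libeth_cq_pp cp = {
+ *	.dev	= sq->dev,
+ *	.bq	= &bq,
+ *	.xss	= &xss,
+ * };
+ *
+ * xdp_frame_bulk_init(&bq);
+ *
+ * while (done--)
+ *	libeth_xdp_complete_tx(&sq->sqes[ntc++], &cp);
+ *
+ * xdp_flush_frame_bulk(&bq);
+ */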
+
+/* Misc */
+
+u32 libeth_xdp_queue_threshold(u32 count);
+
+void __libeth_xdp_set_features(struct net_device *dev,
+ const struct xdp_metadata_ops *xmo,
+ u32 zc_segs,
+ const struct xsk_tx_metadata_ops *tmo);
+void libeth_xdp_set_redirect(struct net_device *dev, bool enable);
+
+/**
+ * libeth_xdp_set_features - set XDP features for netdev
+ * @dev: &net_device to configure
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Set all the features libeth_xdp supports, including .ndo_xdp_xmit(). Note
+ * that it should be used only when XDPSQs are always available, regardless
+ * of whether an XDP prog is attached to @dev.
+ */
+#define libeth_xdp_set_features(dev, ...) \
+ CONCATENATE(__libeth_xdp_feat, \
+ COUNT_ARGS(__VA_ARGS__))(dev, ##__VA_ARGS__)
+
+#define __libeth_xdp_feat0(dev) \
+ __libeth_xdp_set_features(dev, NULL, 0, NULL)
+#define __libeth_xdp_feat1(dev, xmo) \
+ __libeth_xdp_set_features(dev, xmo, 0, NULL)
+#define __libeth_xdp_feat2(dev, xmo, zc_segs) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, NULL)
+#define __libeth_xdp_feat3(dev, xmo, zc_segs, tmo) \
+ __libeth_xdp_set_features(dev, xmo, zc_segs, tmo)
+
+/**
+ * libeth_xdp_set_features_noredir - enable all libeth_xdp features w/o redir
+ * @dev: target &net_device
+ * @...: optional params, see __libeth_xdp_set_features()
+ *
+ * Enable everything except the .ndo_xdp_xmit() feature; use it when XDPSQs
+ * are not available right after netdev registration.
+ */
+#define libeth_xdp_set_features_noredir(dev, ...) \
+ __libeth_xdp_set_features_noredir(dev, __UNIQUE_ID(dev_), \
+ ##__VA_ARGS__)
+
+#define __libeth_xdp_set_features_noredir(dev, ud, ...) do { \
+ struct net_device *ud = (dev); \
+ \
+ libeth_xdp_set_features(ud, ##__VA_ARGS__); \
+ libeth_xdp_set_redirect(ud, false); \
+} while (0)
+
+#define libeth_xsktmo ((const void *)GOLDEN_RATIO_PRIME)
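+
+/*
+ * Example (sketch; my_xdpmo and the value of zc_segs are made up): enabling
+ * all the features at probe time with metadata ops, 2-frag XSk support, and
+ * the XSk Tx metadata sentinel defined above.
+ *
+ * libeth_xdp_set_features(netdev, &my_xdpmo, 2, libeth_xsktmo);
+ */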
+
+#endif /* __LIBETH_XDP_H */
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
new file mode 100644
index 000000000000..481a7b28e6f2
--- /dev/null
+++ b/include/net/libeth/xsk.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBETH_XSK_H
+#define __LIBETH_XSK_H
+
+#include <net/libeth/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+/* ``XDP_TXMD_FLAGS_VALID`` is defined only under ``CONFIG_XDP_SOCKETS`` */
+#ifdef XDP_TXMD_FLAGS_VALID
+static_assert(XDP_TXMD_FLAGS_VALID <= LIBETH_XDP_TX_XSKMD);
+#endif
+
+/* ``XDP_TX`` bulking */
+
+/**
+ * libeth_xsk_tx_queue_head - internal helper for queueing XSk ``XDP_TX`` head
+ * @bq: XDP Tx bulk to queue the head frag to
+ * @xdp: XSk buffer with the head to queue
+ *
+ * Return: false if it's the only frag of the frame, true if it's an S/G frame.
+ */
+static inline bool libeth_xsk_tx_queue_head(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = xdp,
+ __libeth_xdp_tx_len(xdp->base.data_end - xdp->data,
+ LIBETH_XDP_TX_FIRST),
+ };
+
+ if (likely(!xdp_buff_has_frags(&xdp->base)))
+ return false;
+
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_MULTI;
+
+ return true;
+}
+
+/**
+ * libeth_xsk_tx_queue_frag - internal helper for queueing XSk ``XDP_TX`` frag
+ * @bq: XDP Tx bulk to queue the frag to
+ * @frag: XSk frag to queue
+ */
+static inline void libeth_xsk_tx_queue_frag(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *frag)
+{
+ bq->bulk[bq->count++] = (typeof(*bq->bulk)){
+ .xsk = frag,
+ __libeth_xdp_tx_len(frag->base.data_end - frag->data),
+ };
+}
+
+/**
+ * libeth_xsk_tx_queue_bulk - internal helper for queueing XSk ``XDP_TX`` frame
+ * @bq: XDP Tx bulk to queue the frame to
+ * @xdp: XSk buffer to queue
+ * @flush_bulk: driver callback to flush the bulk to the HW queue
+ *
+ * Return: true on success, false on flush error.
+ */
+static __always_inline bool
+libeth_xsk_tx_queue_bulk(struct libeth_xdp_tx_bulk *bq,
+ struct libeth_xdp_buff *xdp,
+ bool (*flush_bulk)(struct libeth_xdp_tx_bulk *bq,
+ u32 flags))
+{
+ bool ret = true;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ libeth_xsk_buff_free_slow(xdp);
+ return false;
+ }
+
+ if (!libeth_xsk_tx_queue_head(bq, xdp))
+ goto out;
+
+ for (const struct libeth_xdp_buff *head = xdp; ; ) {
+ xdp = container_of(xsk_buff_get_frag(&head->base),
+ typeof(*xdp), base);
+ if (!xdp)
+ break;
+
+ if (unlikely(bq->count == LIBETH_XDP_TX_BULK) &&
+ unlikely(!flush_bulk(bq, LIBETH_XDP_TX_XSK))) {
+ ret = false;
+ break;
+ }
+
+ libeth_xsk_tx_queue_frag(bq, xdp);
+ }
+
+out:
+ bq->bulk[bq->count - 1].flags |= LIBETH_XDP_TX_LAST;
+
+ return ret;
+}
+
+/**
+ * libeth_xsk_tx_fill_buf - internal helper to fill XSk ``XDP_TX`` &libeth_sqe
+ * @frm: XDP Tx frame from the bulk
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: private data
+ *
+ * Return: XDP Tx descriptor with the synced DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+libeth_xsk_tx_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_buff *xdp = frm.xsk;
+ struct libeth_xdp_tx_desc desc = {
+ .addr = xsk_buff_xdp_get_dma(&xdp->base),
+ .opts = frm.opts,
+ };
+ struct libeth_sqe *sqe;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ sqe = &sq->sqes[i];
+ sqe->xsk = xdp;
+
+ if (!(desc.flags & LIBETH_XDP_TX_FIRST)) {
+ sqe->type = LIBETH_SQE_XSK_TX_FRAG;
+ return desc;
+ }
+
+ sqe->type = LIBETH_SQE_XSK_TX;
+ libeth_xdp_tx_fill_stats(sqe, &desc,
+ xdp_get_shared_info_from_buff(&xdp->base));
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_tx_flush_bulk - wrapper to define flush of XSk ``XDP_TX`` bulk
+ * @bq: bulk to flush
+ * @flags: Tx flags, see __libeth_xdp_tx_flush_bulk()
+ * @prep: driver callback to prepare the queue
+ * @xmit: driver callback to fill a HW descriptor
+ *
+ * Use via LIBETH_XSK_DEFINE_FLUSH_TX() to define an XSk ``XDP_TX`` driver
+ * callback.
+ */
+#define libeth_xsk_tx_flush_bulk(bq, flags, prep, xmit) \
+ __libeth_xdp_tx_flush_bulk(bq, (flags) | LIBETH_XDP_TX_XSK, prep, \
+ libeth_xsk_tx_fill_buf, xmit)
+
+/* XSk TMO */
+
+/**
+ * libeth_xsktmo_req_csum - XSk Tx metadata op to request checksum offload
+ * @csum_start: unused
+ * @csum_offset: unused
+ * @priv: &libeth_xdp_tx_desc from the filling helper
+ *
+ * Generic implementation of ::tmo_request_checksum. Works only when the HW
+ * doesn't require filling checksum offsets or other parameters besides the
+ * checksum request bit.
+ * Consider using within @libeth_xsktmo unless the driver requires HW-specific
+ * callbacks.
+ */
+static inline void libeth_xsktmo_req_csum(u16 csum_start, u16 csum_offset,
+ void *priv)
+{
+ ((struct libeth_xdp_tx_desc *)priv)->flags |= LIBETH_XDP_TX_CSUM;
+}
+
+/* Only to inline the callbacks below, use @libeth_xsktmo in drivers instead */
+static const struct xsk_tx_metadata_ops __libeth_xsktmo = {
+ .tmo_request_checksum = libeth_xsktmo_req_csum,
+};
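+
+/*
+ * A sketch of how a driver opts in (the xmo pointer and zc_segs value are
+ * illustrative):
+ *
+ *	libeth_xdp_set_features(dev, &mydrv_xmo, 3, libeth_xsktmo);
+ *
+ * __libeth_xsk_xmit_fill_buf_md() below then replaces @libeth_xsktmo with
+ * &__libeth_xsktmo (the comparison is a compile-time constant, which the
+ * BUILD_BUG_ON there enforces), so the checksum callback can be inlined.
+ */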
+
+/**
+ * __libeth_xsk_xmit_fill_buf_md - internal helper to prepare XSk xmit w/meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Same as __libeth_xsk_xmit_fill_buf(), but requests metadata pointer and
+ * fills additional fields in &libeth_xdp_tx_desc to ask for metadata offload.
+ *
+ * Return: XDP Tx descriptor with the DMA, metadata request bits, and other
+ * info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf_md(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq,
+ u64 priv)
+{
+ const struct xsk_tx_metadata_ops *tmo = libeth_xdp_priv_to_ptr(priv);
+ struct libeth_xdp_tx_desc desc;
+ struct xdp_desc_ctx ctx;
+
+ ctx = xsk_buff_raw_get_ctx(sq->pool, xdesc->addr);
+ desc = (typeof(desc)){
+ .addr = ctx.dma,
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+
+ BUILD_BUG_ON(!__builtin_constant_p(tmo == libeth_xsktmo));
+ tmo = tmo == libeth_xsktmo ? &__libeth_xsktmo : tmo;
+
+ xsk_tx_metadata_request(ctx.meta, tmo, &desc);
+
+ return desc;
+}
+
+/* XSk xmit implementation */
+
+/**
+ * __libeth_xsk_xmit_fill_buf - internal helper to prepare XSk xmit w/o meta
+ * @xdesc: &xdp_desc from the XSk buffer pool
+ * @sq: XDPSQ abstraction for the queue
+ *
+ * Return: XDP Tx descriptor with the DMA and other info to pass to
+ * the driver callback.
+ */
+static inline struct libeth_xdp_tx_desc
+__libeth_xsk_xmit_fill_buf(const struct xdp_desc *xdesc,
+ const struct libeth_xdpsq *sq)
+{
+ return (struct libeth_xdp_tx_desc){
+ .addr = xsk_buff_raw_get_dma(sq->pool, xdesc->addr),
+ __libeth_xdp_tx_len(xdesc->len),
+ };
+}
+
+/**
+ * libeth_xsk_xmit_fill_buf - internal helper to prepare an XSk xmit
+ * @frm: &xdp_desc from the XSk buffer pool
+ * @i: index on the HW queue
+ * @sq: XDPSQ abstraction for the queue
+ * @priv: XSk Tx metadata ops
+ *
+ * Depending on whether the metadata ops are present (determined at compile
+ * time), calls the quickest helper to build a libeth XDP Tx descriptor.
+ *
+ * Return: XDP Tx descriptor with the synced DMA, metadata request bits,
+ * and other info to pass to the driver callback.
+ */
+static __always_inline struct libeth_xdp_tx_desc
+libeth_xsk_xmit_fill_buf(struct libeth_xdp_tx_frame frm, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct libeth_xdp_tx_desc desc;
+
+ if (priv)
+ desc = __libeth_xsk_xmit_fill_buf_md(&frm.desc, sq, priv);
+ else
+ desc = __libeth_xsk_xmit_fill_buf(&frm.desc, sq);
+
+ desc.flags |= xsk_is_eop_desc(&frm.desc) ? LIBETH_XDP_TX_LAST : 0;
+
+ xsk_buff_raw_dma_sync_for_device(sq->pool, desc.addr, desc.len);
+
+ return desc;
+}
+
+/**
+ * libeth_xsk_xmit_do_bulk - send XSk xmit frames
+ * @pool: XSk buffer pool containing the frames to send
+ * @xdpsq: opaque pointer to driver's XDPSQ struct
+ * @budget: maximum number of frames that can be sent
+ * @tmo: optional XSk Tx metadata ops
+ * @prep: driver callback to build a &libeth_xdpsq
+ * @xmit: driver callback to put frames to a HW queue
+ * @finalize: driver callback to start a transmission
+ *
+ * Implements generic XSk xmit. Always turns on XSk Tx wakeup as it's assumed
+ * lazy cleaning is used and interrupts are disabled for the queue.
+ * HW descriptor filling is unrolled by ``LIBETH_XDP_TX_BATCH`` to optimize
+ * writes.
+ * Note that unlike other XDP Tx ops, the queue must be locked and cleaned
+ * prior to calling this function, so that the available @budget is already
+ * known. @prep must only build a &libeth_xdpsq and return ``U32_MAX``.
+ *
+ * Return: false if @budget was exhausted, true otherwise.
+ */
+static __always_inline bool
+libeth_xsk_xmit_do_bulk(struct xsk_buff_pool *pool, void *xdpsq, u32 budget,
+ const struct xsk_tx_metadata_ops *tmo,
+ u32 (*prep)(void *xdpsq, struct libeth_xdpsq *sq),
+ void (*xmit)(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv),
+ void (*finalize)(void *xdpsq, bool sent, bool flush))
+{
+ const struct libeth_xdp_tx_frame *bulk;
+ bool wake;
+ u32 n;
+
+ wake = xsk_uses_need_wakeup(pool);
+ if (wake)
+ xsk_clear_tx_need_wakeup(pool);
+
+ n = xsk_tx_peek_release_desc_batch(pool, budget);
+ bulk = container_of(&pool->tx_descs[0], typeof(*bulk), desc);
+
+ libeth_xdp_tx_xmit_bulk(bulk, xdpsq, n, true,
+ libeth_xdp_ptr_to_priv(tmo), prep,
+ libeth_xsk_xmit_fill_buf, xmit);
+ finalize(xdpsq, n, true);
+
+ if (wake)
+ xsk_set_tx_need_wakeup(pool);
+
+ return n < budget;
+}
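+
+/*
+ * A hypothetical driver XSk xmit entry (mydrv-style names are illustrative;
+ * locking and cleaning follow the rules documented above):
+ *
+ *	static bool mydrv_xsk_xmit(struct mydrv_xdpsq *xdpsq)
+ *	{
+ *		u32 free;
+ *
+ *		// the queue is locked by the caller and cleaned here, so
+ *		// that the available budget is known in advance
+ *		free = mydrv_xdpsq_clean(xdpsq);
+ *
+ *		return libeth_xsk_xmit_do_bulk(xdpsq->pool, xdpsq, free,
+ *					       libeth_xsktmo,
+ *					       mydrv_xsk_xmit_prep,
+ *					       mydrv_xmit_xdp_desc,
+ *					       mydrv_xdpsq_finalize);
+ *	}
+ *
+ * where mydrv_xsk_xmit_prep() only fills a &libeth_xdpsq and returns
+ * ``U32_MAX``.
+ */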
+
+/* Rx polling path */
+
+/**
+ * libeth_xsk_tx_init_bulk - initialize XDP Tx bulk for an XSk Rx NAPI poll
+ * @bq: bulk to initialize
+ * @prog: RCU pointer to the XDP program (never %NULL)
+ * @dev: target &net_device
+ * @xdpsqs: array of driver XDPSQ structs
+ * @num: number of active XDPSQs, the above array length
+ *
+ * Should be called on an onstack XDP Tx bulk before the XSk NAPI polling loop.
+ * Initializes all the needed fields to run libeth_xdp functions.
+ * Never checks if @prog is %NULL or @num == 0 as XDP must always be enabled
+ * when hitting this path.
+ */
+#define libeth_xsk_tx_init_bulk(bq, prog, dev, xdpsqs, num) \
+ __libeth_xdp_tx_init_bulk(bq, prog, dev, xdpsqs, num, true, \
+ __UNIQUE_ID(bq_), __UNIQUE_ID(nqs_))
+
+struct libeth_xdp_buff *libeth_xsk_buff_add_frag(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp);
+
+/**
+ * libeth_xsk_process_buff - attach XSk Rx buffer to &libeth_xdp_buff
+ * @head: head XSk buffer to attach the XSk buffer to (or %NULL)
+ * @xdp: XSk buffer to process
+ * @len: received data length from the descriptor
+ *
+ * If @head == %NULL, treats the XSk buffer as head and initializes
+ * the required fields. Otherwise, attaches the buffer as a frag.
+ * Already performs DMA sync-for-CPU and frame start prefetch
+ * (for head buffers only).
+ *
+ * Return: head XSk buffer on success or if the descriptor must be skipped
+ * (empty), %NULL if there is no space for a new frag.
+ */
+static inline struct libeth_xdp_buff *
+libeth_xsk_process_buff(struct libeth_xdp_buff *head,
+ struct libeth_xdp_buff *xdp, u32 len)
+{
+ if (unlikely(!len)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return head;
+ }
+
+ xsk_buff_set_size(&xdp->base, len);
+ xsk_buff_dma_sync_for_cpu(&xdp->base);
+
+ if (head)
+ return libeth_xsk_buff_add_frag(head, xdp);
+
+ prefetch(xdp->data);
+
+ return xdp;
+}
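+
+/*
+ * A hypothetical Rx polling loop around this helper (mydrv-style names and
+ * descriptor parsing are illustrative; ntc/rx_desc advancing is elided):
+ *
+ *	struct libeth_xdp_buff *xdp = NULL;
+ *
+ *	while (rs.packets < budget) {
+ *		u32 len = mydrv_parse_len(rx_desc);
+ *
+ *		xdp = libeth_xsk_process_buff(xdp, fq->fqes[ntc], len);
+ *		if (!xdp)
+ *			break;		// no space for a new frag
+ *
+ *		if (!mydrv_desc_is_eop(rx_desc))
+ *			continue;	// frame continues in the next buff
+ *
+ *		if (!mydrv_xsk_run(xdp, &bq, napi, &rs, rx_desc))
+ *			break;
+ *
+ *		xdp = NULL;
+ *	}
+ */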
+
+void libeth_xsk_buff_stats_frags(struct libeth_rq_napi_stats *rs,
+ const struct libeth_xdp_buff *xdp);
+
+u32 __libeth_xsk_run_prog_slow(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq,
+ enum xdp_action act, int ret);
+
+/**
+ * __libeth_xsk_run_prog - run XDP program on XSk buffer
+ * @xdp: XSk buffer to run the prog on
+ * @bq: buffer bulk for ``XDP_TX`` queueing
+ *
+ * Internal inline abstraction to run the XDP program on the XSk Rx path.
+ * Handles only the most common verdict, ``XDP_REDIRECT``, inline; the rest
+ * is processed externally.
+ * Reports an XDP prog exception on errors.
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+static __always_inline u32
+__libeth_xsk_run_prog(struct libeth_xdp_buff *xdp,
+ const struct libeth_xdp_tx_bulk *bq)
+{
+ enum xdp_action act;
+ int ret = 0;
+
+ act = bpf_prog_run_xdp(bq->prog, &xdp->base);
+ if (unlikely(act != XDP_REDIRECT))
+rest:
+ return __libeth_xsk_run_prog_slow(xdp, bq, act, ret);
+
+ ret = xdp_do_redirect(bq->dev, &xdp->base, bq->prog);
+ if (unlikely(ret))
+ goto rest;
+
+ return LIBETH_XDP_REDIRECT;
+}
+
+/**
+ * libeth_xsk_run_prog - run XDP program on XSk path and handle all verdicts
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` buffers
+ * @fl: driver ``XDP_TX`` bulk flush callback
+ *
+ * Run the attached XDP program and handle all possible verdicts.
+ * Prefer using it via LIBETH_XSK_DEFINE_RUN{,_PASS,_PROG}().
+ *
+ * Return: libeth_xdp prog verdict depending on the prog's verdict.
+ */
+#define libeth_xsk_run_prog(xdp, bq, fl) \
+ __libeth_xdp_run_flush(xdp, bq, __libeth_xsk_run_prog, \
+ libeth_xsk_tx_queue_bulk, fl)
+
+/**
+ * __libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @md: metadata that should be filled to the XSk buffer
+ * @prep: callback for filling the metadata
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Inline abstraction, XSk's counterpart of __libeth_xdp_run_pass(), see its
+ * doc for details.
+ *
+ * Return: false if the polling loop must be exited due to lack of free
+ * buffers, true otherwise.
+ */
+static __always_inline bool
+__libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq, struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs, const void *md,
+ void (*prep)(struct libeth_xdp_buff *xdp,
+ const void *md),
+ u32 (*run)(struct libeth_xdp_buff *xdp,
+ struct libeth_xdp_tx_bulk *bq),
+ bool (*populate)(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs))
+{
+ struct sk_buff *skb;
+ u32 act;
+
+ rs->bytes += xdp->base.data_end - xdp->data;
+ rs->packets++;
+
+ if (unlikely(xdp_buff_has_frags(&xdp->base)))
+ libeth_xsk_buff_stats_frags(rs, xdp);
+
+ if (prep && (!__builtin_constant_p(!!md) || md))
+ prep(xdp, md);
+
+ act = run(xdp, bq);
+ if (likely(act == LIBETH_XDP_REDIRECT))
+ return true;
+
+ if (act != LIBETH_XDP_PASS)
+ return act != LIBETH_XDP_ABORTED;
+
+ skb = xdp_build_skb_from_zc(&xdp->base);
+ if (unlikely(!skb)) {
+ libeth_xsk_buff_free_slow(xdp);
+ return true;
+ }
+
+ if (unlikely(!populate(skb, xdp, rs))) {
+ napi_consume_skb(skb, true);
+ return true;
+ }
+
+ napi_gro_receive(napi, skb);
+
+ return true;
+}
+
+/**
+ * libeth_xsk_run_pass - helper to run XDP program and handle the result
+ * @xdp: XSk buffer to process
+ * @bq: XDP Tx bulk to queue ``XDP_TX`` frames
+ * @napi: NAPI to build an skb and pass it up the stack
+ * @rs: onstack libeth RQ stats
+ * @desc: pointer to the HW descriptor for that frame
+ * @run: driver wrapper to run XDP program
+ * @populate: driver callback to populate an skb with the HW descriptor data
+ *
+ * Wrapper around the underscored version when "fill the descriptor metadata"
+ * means just writing the pointer to the HW descriptor as @xdp->desc.
+ */
+#define libeth_xsk_run_pass(xdp, bq, napi, rs, desc, run, populate) \
+ __libeth_xsk_run_pass(xdp, bq, napi, rs, desc, libeth_xdp_prep_desc, \
+ run, populate)
+
+/**
+ * libeth_xsk_finalize_rx - finalize XDPSQ after an XSk NAPI polling loop
+ * @bq: ``XDP_TX`` frame bulk
+ * @flush: driver callback to flush the bulk
+ * @finalize: driver callback to start sending the frames and run the timer
+ *
+ * Flush the bulk if there are frames left to send, kick the queue and flush
+ * the XDP maps.
+ */
+#define libeth_xsk_finalize_rx(bq, flush, finalize) \
+ __libeth_xdp_finalize_rx(bq, LIBETH_XDP_TX_XSK, flush, finalize)
+
+/*
+ * Helpers to reduce boilerplate code in drivers.
+ *
+ * Typical driver XSk Rx flow would be (excl. bulk and buff init, frag attach):
+ *
+ * LIBETH_XDP_DEFINE_START();
+ * LIBETH_XSK_DEFINE_FLUSH_TX(static driver_xsk_flush_tx, driver_xsk_tx_prep,
+ * driver_xdp_xmit);
+ * LIBETH_XSK_DEFINE_RUN(static driver_xsk_run, driver_xsk_run_prog,
+ * driver_xsk_flush_tx, driver_populate_skb);
+ * LIBETH_XSK_DEFINE_FINALIZE(static driver_xsk_finalize_rx,
+ * driver_xsk_flush_tx, driver_xdp_finalize_sq);
+ * LIBETH_XDP_DEFINE_END();
+ *
+ * This will build a set of 4 static functions. The compiler is free to decide
+ * whether to inline them.
+ * Then, in the NAPI polling function:
+ *
+ * while (packets < budget) {
+ * // ...
+ * if (!driver_xsk_run(xdp, &bq, napi, &rs, desc))
+ * break;
+ * }
+ * driver_xsk_finalize_rx(&bq);
+ */
+
+/**
+ * LIBETH_XSK_DEFINE_FLUSH_TX - define a driver XSk ``XDP_TX`` flush function
+ * @name: name of the function to define
+ * @prep: driver callback to clean an XDPSQ
+ * @xmit: driver callback to write a HW Tx descriptor
+ */
+#define LIBETH_XSK_DEFINE_FLUSH_TX(name, prep, xmit) \
+ __LIBETH_XDP_DEFINE_FLUSH_TX(name, prep, xmit, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PROG - define a driver XDP program run function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ */
+#define LIBETH_XSK_DEFINE_RUN_PROG(name, flush) \
+ u32 __LIBETH_XDP_DEFINE_RUN_PROG(name, flush, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN_PASS - define a driver buffer process + pass function
+ * @name: name of the function to define
+ * @run: driver callback to run XDP program (above)
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN_PASS(name, run, populate) \
+ bool __LIBETH_XDP_DEFINE_RUN_PASS(name, run, populate, xsk)
+
+/**
+ * LIBETH_XSK_DEFINE_RUN - define a driver buffer process, run + pass function
+ * @name: name of the function to define
+ * @run: name of the XDP prog run function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @populate: driver callback to fill an skb with HW descriptor info
+ */
+#define LIBETH_XSK_DEFINE_RUN(name, run, flush, populate) \
+ __LIBETH_XDP_DEFINE_RUN(name, run, flush, populate, XSK)
+
+/**
+ * LIBETH_XSK_DEFINE_FINALIZE - define a driver XSk NAPI poll finalize function
+ * @name: name of the function to define
+ * @flush: driver callback to flush an XSk ``XDP_TX`` bulk
+ * @finalize: driver callback to finalize an XDPSQ and run the timer
+ */
+#define LIBETH_XSK_DEFINE_FINALIZE(name, flush, finalize) \
+ __LIBETH_XDP_DEFINE_FINALIZE(name, flush, finalize, xsk)
+
+/* Refilling */
+
+/**
+ * struct libeth_xskfq - structure representing an XSk buffer (fill) queue
+ * @fp: hotpath part of the structure
+ * @pool: &xsk_buff_pool for buffer management
+ * @fqes: array of XSk buffer pointers
+ * @descs: opaque pointer to the HW descriptor array
+ * @ntu: index of the next buffer to poll
+ * @count: number of descriptors/buffers the queue has
+ * @pending: current number of XSkFQEs to refill
+ * @thresh: threshold below which the queue is refilled
+ * @buf_len: HW-writeable length per each buffer
+ * @nid: ID of the closest NUMA node with memory
+ */
+struct libeth_xskfq {
+ struct_group_tagged(libeth_xskfq_fp, fp,
+ struct xsk_buff_pool *pool;
+ struct libeth_xdp_buff **fqes;
+ void *descs;
+
+ u32 ntu;
+ u32 count;
+ );
+
+ /* Cold fields */
+ u32 pending;
+ u32 thresh;
+
+ u32 buf_len;
+ int nid;
+};
+
+int libeth_xskfq_create(struct libeth_xskfq *fq);
+void libeth_xskfq_destroy(struct libeth_xskfq *fq);
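+
+/*
+ * A minimal lifecycle sketch (which fields the driver must pre-fill is an
+ * assumption here; error handling elided):
+ *
+ *	fq->pool  = xsk_get_pool_from_qid(dev, qid);
+ *	fq->count = rxq->desc_count;
+ *
+ *	err = libeth_xskfq_create(fq);
+ *	...
+ *	libeth_xskfq_destroy(fq);
+ */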
+
+/**
+ * libeth_xsk_buff_xdp_get_dma - get DMA address of XSk &libeth_xdp_buff
+ * @xdp: buffer to get the DMA addr for
+ */
+#define libeth_xsk_buff_xdp_get_dma(xdp) \
+ xsk_buff_xdp_get_dma(&(xdp)->base)
+
+/**
+ * libeth_xskfqe_alloc - allocate @n XSk Rx buffers
+ * @fq: hotpath part of the XSkFQ, usually onstack
+ * @n: number of buffers to allocate
+ * @fill: driver callback to write DMA addresses to HW descriptors
+ *
+ * Note that @fq->ntu gets updated, but ::pending must be recalculated
+ * by the caller.
+ *
+ * Return: number of buffers refilled.
+ */
+static __always_inline u32
+libeth_xskfqe_alloc(struct libeth_xskfq_fp *fq, u32 n,
+ void (*fill)(const struct libeth_xskfq_fp *fq, u32 i))
+{
+ u32 this, ret, done = 0;
+ struct xdp_buff **xskb;
+
+ this = fq->count - fq->ntu;
+ if (likely(this > n))
+ this = n;
+
+again:
+ xskb = (typeof(xskb))&fq->fqes[fq->ntu];
+ ret = xsk_buff_alloc_batch(fq->pool, xskb, this);
+
+ for (u32 i = 0, ntu = fq->ntu; likely(i < ret); i++)
+ fill(fq, ntu + i);
+
+ done += ret;
+ fq->ntu += ret;
+
+ if (likely(fq->ntu < fq->count) || unlikely(ret < this))
+ goto out;
+
+ fq->ntu = 0;
+
+ if (this < n) {
+ this = n - this;
+ goto again;
+ }
+
+out:
+ return done;
+}
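+
+/*
+ * A hypothetical refill built on the allocator above (the descriptor layout
+ * and mydrv-style names are illustrative; ::pending is recalculated by the
+ * caller, as required):
+ *
+ *	static void mydrv_xsk_fill(const struct libeth_xskfq_fp *fq, u32 i)
+ *	{
+ *		struct mydrv_rx_desc *desc = fq->descs;
+ *
+ *		desc[i].addr =
+ *			cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+ *	}
+ *
+ *	...
+ *
+ *	fq.pending -= libeth_xskfqe_alloc(&fq.fp, fq.pending, mydrv_xsk_fill);
+ */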
+
+/* .ndo_xsk_wakeup */
+
+void libeth_xsk_init_wakeup(call_single_data_t *csd, struct napi_struct *napi);
+void libeth_xsk_wakeup(call_single_data_t *csd, u32 qid);
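+
+/*
+ * A sketch of wiring these into .ndo_xsk_wakeup (illustrative; assumes the
+ * CSD was initialized with libeth_xsk_init_wakeup() when the queue vector
+ * was set up):
+ *
+ *	static int mydrv_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ *	{
+ *		struct mydrv_q_vector *qv = mydrv_qid_to_qv(dev, qid);
+ *
+ *		libeth_xsk_wakeup(&qv->csd, qid);
+ *
+ *		return 0;
+ *	}
+ */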
+
+/* Pool setup */
+
+int libeth_xsk_setup_pool(struct net_device *dev, u32 qid, bool enable);
+
+#endif /* __LIBETH_XSK_H */
diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
index 53823d61d8b6..a4bea0f33188 100644
--- a/include/net/llc_c_st.h
+++ b/include/net/llc_c_st.h
@@ -44,8 +44,8 @@ struct llc_conn_state_trans {
};
struct llc_conn_state {
- u8 current_state;
- struct llc_conn_state_trans **transitions;
+ u8 current_state;
+ const struct llc_conn_state_trans **transitions;
};
extern struct llc_conn_state llc_conn_state_table[];
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 1d55ba7c45be..86681f29bda7 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -254,7 +254,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
}
/**
- * llc_pdu_decode_sa - extracs source address (MAC) of input frame
+ * llc_pdu_decode_sa - extracts source address (MAC) of input frame
* @skb: input skb that source address must be extracted from it.
* @sa: pointer to source address (6 byte array).
*
diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
index ed5b2fa40d32..fca49d483d20 100644
--- a/include/net/llc_s_st.h
+++ b/include/net/llc_s_st.h
@@ -29,8 +29,8 @@ struct llc_sap_state_trans {
};
struct llc_sap_state {
- u8 curr_state;
- struct llc_sap_state_trans **transitions;
+ u8 curr_state;
+ const struct llc_sap_state_trans **transitions;
};
/* only access to SAP state table */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 53bd2d02a4f0..26232f603e33 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -138,12 +138,12 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
static inline void lwtunnel_set_redirect(struct dst_entry *dst)
{
if (lwtunnel_output_redirect(dst->lwtstate)) {
- dst->lwtstate->orig_output = dst->output;
- dst->output = lwtunnel_output;
+ dst->lwtstate->orig_output = READ_ONCE(dst->output);
+ WRITE_ONCE(dst->output, lwtunnel_output);
}
if (lwtunnel_input_redirect(dst->lwtstate)) {
- dst->lwtstate->orig_input = dst->input;
- dst->input = lwtunnel_input;
+ dst->lwtstate->orig_input = READ_ONCE(dst->input);
+ WRITE_ONCE(dst->input, lwtunnel_input);
}
}
#else
@@ -206,6 +206,7 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type,
NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel");
return -EOPNOTSUPP;
}
+
static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len,
struct netlink_ext_ack *extack)
{
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cafc664ee531..c2e49542626c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
#ifndef MAC80211_H
@@ -22,7 +22,7 @@
#include <net/cfg80211.h>
#include <net/codel.h>
#include <net/ieee80211_radiotap.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/**
* DOC: Introduction
@@ -213,7 +213,7 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
* @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
* this is used only with channel switching with CSA
- * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed
+ * @IEEE80211_CHANCTX_CHANGE_MIN_DEF: The min chandef changed
* @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider
* bandwidth) OFDMA settings need to be changed
* @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap
@@ -224,7 +224,7 @@ enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
- IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4),
+ IEEE80211_CHANCTX_CHANGE_MIN_DEF = BIT(4),
IEEE80211_CHANCTX_CHANGE_AP = BIT(5),
IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6),
};
@@ -250,6 +250,7 @@ struct ieee80211_chan_req {
* @min_def: the minimum channel definition currently required.
* @ap: the channel definition the AP actually is operating as,
* for use with (wider bandwidth) OFDMA
+ * @radio_idx: index of the wiphy radio used for this channel
* @rx_chains_static: The number of RX chains that must always be
* active on the channel to receive MIMO transmissions
* @rx_chains_dynamic: The number of RX chains that must be enabled
@@ -264,6 +265,7 @@ struct ieee80211_chanctx_conf {
struct cfg80211_chan_def min_def;
struct cfg80211_chan_def ap;
+ int radio_idx;
u8 rx_chains_static, rx_chains_dynamic;
bool radar_enabled;
@@ -362,6 +364,7 @@ struct ieee80211_vif_chanctx_switch {
* status changed.
* @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
* @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed
+ * @BSS_CHANGED_TPE: transmit power envelope changed
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -395,9 +398,10 @@ enum ieee80211_bss_change {
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
- BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31),
BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
BSS_CHANGED_MLD_TTLM = BIT_ULL(34),
+ BSS_CHANGED_TPE = BIT_ULL(35),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -550,6 +554,39 @@ struct ieee80211_fils_discovery {
u32 max_interval;
};
+#define IEEE80211_TPE_EIRP_ENTRIES_320MHZ 5
+struct ieee80211_parsed_tpe_eirp {
+ bool valid;
+ s8 power[IEEE80211_TPE_EIRP_ENTRIES_320MHZ];
+ u8 count;
+};
+
+#define IEEE80211_TPE_PSD_ENTRIES_320MHZ 16
+struct ieee80211_parsed_tpe_psd {
+ bool valid;
+ s8 power[IEEE80211_TPE_PSD_ENTRIES_320MHZ];
+ u8 count, n;
+};
+
+/**
+ * struct ieee80211_parsed_tpe - parsed transmit power envelope information
+ * @max_local: maximum local EIRP, one value for 20, 40, 80, 160, 320 MHz each
+ * (indexed by TX power category)
+ * @max_reg_client: maximum regulatory client EIRP, one value for 20, 40, 80,
+ * 160, 320 MHz each
+ * (indexed by TX power category)
+ * @psd_local: maximum local power spectral density, one value for each 20 MHz
+ * subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ * @psd_reg_client: maximum regulatory power spectral density, one value for
+ * each 20 MHz subchannel per bss_conf's chanreq.oper
+ * (indexed by TX power category)
+ */
+struct ieee80211_parsed_tpe {
+ struct ieee80211_parsed_tpe_eirp max_local[2], max_reg_client[2];
+ struct ieee80211_parsed_tpe_psd psd_local[2], psd_reg_client[2];
+};
+
/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
@@ -645,6 +682,9 @@ struct ieee80211_fils_discovery {
* responder functionality.
* @ftmr_params: configurable lci/civic parameter when enabling FTM responder.
* @nontransmitted: this BSS is a nontransmitted BSS profile
+ * @tx_bss_conf: Pointer to the BSS configuration of transmitting interface
+ * if MBSSID is enabled. This pointer is RCU-protected due to CSA finish
+ * and BSS color change flows accessing it.
* @transmitter_bssid: the address of transmitter AP
* @bssid_index: index inside the multiple BSSID set
* @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set
@@ -662,10 +702,10 @@ struct ieee80211_fils_discovery {
* @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
* to driver when rate control is offloaded to firmware.
* @power_type: power type of BSS for 6 GHz
- * @tx_pwr_env: transmit power envelope array of BSS.
- * @tx_pwr_env_num: number of @tx_pwr_env.
+ * @tpe: transmit power envelope information
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
+ * @epcs_support: does this BSS support EPCS
* @csa_active: marks whether a channel switch is going on.
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -701,6 +741,25 @@ struct ieee80211_fils_discovery {
* beamformee
* @eht_mu_beamformer: in AP-mode, does this BSS enable operation as an EHT MU
* beamformer
+ * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the
+ * reception of an EHT TB PPDU on an RU that spans the entire PPDU
+ * bandwidth
+ * @eht_disable_mcs15: disable EHT-MCS 15 reception capability.
+ * @bss_param_ch_cnt: in BSS-mode, the BSS params change count. This
+ * information is the latest known value. It can come from this link's
+ * beacon or from a beacon sent by another link.
+ * @bss_param_ch_cnt_link_id: in BSS-mode, the link_id to which the beacon
+ * that updated &bss_param_ch_cnt belongs. E.g. if link 1 doesn't hear
+ * its beacons, and link 2 sent a beacon with an RNR element that updated
+ * link 1's BSS params change count, then, link 1's
+ * bss_param_ch_cnt_link_id will be 2. That means that link 1 knows that
+ * link 2 was the link that updated its bss_param_ch_cnt value.
+ * In case link 1 hears its beacon again, bss_param_ch_cnt_link_id will
+ * be updated to 1, even if bss_param_ch_cnt didn't change. This allows
+ * the link to know that it heard the latest value from its own beacon
+ * (as opposed to hearing its value from another link's beacon).
+ * @s1g_long_beacon_period: number of beacon intervals between each long
+ * beacon transmission.
*/
struct ieee80211_bss_conf {
struct ieee80211_vif *vif;
@@ -751,6 +810,7 @@ struct ieee80211_bss_conf {
struct ieee80211_ftm_responder_params *ftmr_params;
/* Multiple BSSID data */
bool nontransmitted;
+ struct ieee80211_bss_conf __rcu *tx_bss_conf;
u8 transmitter_bssid[ETH_ALEN];
u8 bssid_index;
u8 bssid_indicator;
@@ -766,11 +826,12 @@ struct ieee80211_bss_conf {
u32 unsol_bcast_probe_resp_interval;
struct cfg80211_bitrate_mask beacon_tx_rate;
enum ieee80211_ap_reg_power power_type;
- struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT];
- u8 tx_pwr_env_num;
+
+ struct ieee80211_parsed_tpe tpe;
+
u8 pwr_reduction;
bool eht_support;
-
+ bool epcs_support;
bool csa_active;
bool mu_mimo_owner;
@@ -793,6 +854,13 @@ struct ieee80211_bss_conf {
bool eht_su_beamformer;
bool eht_su_beamformee;
bool eht_mu_beamformer;
+ bool eht_80mhz_full_bw_ul_mumimo;
+ bool eht_disable_mcs15;
+
+ u8 bss_param_ch_cnt;
+ u8 bss_param_ch_cnt_link_id;
+
+ u8 s1g_long_beacon_period;
};
/**
@@ -953,8 +1021,9 @@ enum mac80211_tx_info_flags {
* of their QoS TID or other priority field values.
* @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
* for sequence number assignment
- * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
- * due to scanning, not in normal operation on the interface.
+ * @IEEE80211_TX_CTRL_DONT_USE_RATE_MASK: Don't use rate mask for this frame
+ * which is transmitted due to scanning or offchannel TX, not in normal
+ * operation on the interface.
* @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
* frame should be transmitted on the specific link. This really is
* only relevant for frames that do not have data present, and is
@@ -975,7 +1044,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9),
- IEEE80211_TX_CTRL_SCAN_TX = BIT(10),
+ IEEE80211_TX_CTRL_DONT_USE_RATE_MASK = BIT(10),
IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000,
};
@@ -1406,8 +1475,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
* @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected
* on this subframe
- * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
- * is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
* done by the hardware
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
@@ -1462,6 +1529,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* known the frame shouldn't be reported.
* @RX_FLAG_8023: the frame has an 802.3 header (decap offload performed by
* hardware or driver)
+ * @RX_FLAG_RADIOTAP_VHT: VHT radiotap data is present
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1479,7 +1547,7 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_LAST_KNOWN = BIT(12),
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
- RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
+ /* one free bit at 15 */
RX_FLAG_MACTIME = BIT(16) | BIT(17),
RX_FLAG_MACTIME_PLCP_START = 1 << 16,
RX_FLAG_MACTIME_START = 2 << 16,
@@ -1497,6 +1565,7 @@ enum mac80211_rx_flags {
RX_FLAG_RADIOTAP_LSIG = BIT(28),
RX_FLAG_NO_PSDU = BIT(29),
RX_FLAG_8023 = BIT(30),
+ RX_FLAG_RADIOTAP_VHT = BIT(31),
};
/**
@@ -1576,7 +1645,6 @@ enum mac80211_rx_encoding {
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
- * @ampdu_delimiter_crc: A-MPDU delimiter CRC
* @zero_length_psdu_type: radiotap type of the 0-length PSDU
* @link_valid: if the link which is identified by @link_id is valid. This flag
* is set only when connection is MLO.
@@ -1614,7 +1682,6 @@ struct ieee80211_rx_status {
s8 signal;
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
- u8 ampdu_delimiter_crc;
u8 zero_length_psdu_type;
u8 link_valid:1, link_id:4;
};
@@ -1802,6 +1869,9 @@ struct ieee80211_channel_switch {
* operation on this interface and request a channel context without
* the AP definition. Use this e.g. because the device is able to
* handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
+ * @IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC: indicates that the AP sta should
+ * be removed only after setting the vif as unassociated, and not the
+ * opposite. Only relevant for STA vifs.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
@@ -1810,6 +1880,7 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
IEEE80211_VIF_EML_ACTIVE = BIT(4),
IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW = BIT(5),
+ IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC = BIT(6),
};
@@ -1934,6 +2005,8 @@ enum ieee80211_neg_ttlm_res {
* @neg_ttlm: negotiated TID to link mapping info.
* see &struct ieee80211_neg_ttlm.
* @addr: address of this interface
+ * @addr_valid: indicates if the address is actively used. Set to false for
+ * passive monitor interfaces, true in all other cases.
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
* @netdev_features: tx netdev features supported by the hardware for this
@@ -1963,7 +2036,6 @@ enum ieee80211_neg_ttlm_res {
* @txq: the multicast data TX queue
* @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
* &enum ieee80211_offload_flags.
- * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1973,6 +2045,7 @@ struct ieee80211_vif {
u16 valid_links, active_links, dormant_links, suspended_links;
struct ieee80211_neg_ttlm neg_ttlm;
u8 addr[ETH_ALEN] __aligned(2);
+ bool addr_valid;
bool p2p;
u8 cab_queue;
@@ -1991,8 +2064,6 @@ struct ieee80211_vif {
bool probe_req_reg;
bool rx_mcast_action_reg;
- struct ieee80211_vif *mbssid_tx_vif;
-
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
@@ -2160,7 +2231,7 @@ enum ieee80211_key_flags {
* @tx_pn: PN used for TX keys, may be used by the driver as well if it
* needs to do software PN assignment by itself (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
- * @keyidx: the key index (0-3)
+ * @keyidx: the key index (0-7)
* @keylen: key material length
* @key: key material. For ALG_TKIP the key is encoded as a 256-bit (32 byte)
* data block:
@@ -2169,7 +2240,7 @@ enum ieee80211_key_flags {
* - Temporal Authenticator Rx MIC Key (64 bits)
* @icv_len: The ICV length for this key type
* @iv_len: The IV length for this key type
- * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
+ * @link_id: the link ID, 0 for non-MLO, or -1 for pairwise keys
*/
struct ieee80211_key_conf {
atomic64_t tx_pn;
@@ -2280,6 +2351,8 @@ enum ieee80211_sta_rx_bandwidth {
IEEE80211_STA_RX_BW_320,
};
+#define IEEE80211_STA_RX_BW_MAX IEEE80211_STA_RX_BW_320
+
/**
* struct ieee80211_sta_rates - station rate selection table
*
@@ -2361,6 +2434,7 @@ struct ieee80211_sta_aggregates {
* @he_cap: HE capabilities of this STA
* @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @eht_cap: EHT capabilities of this STA
+ * @s1g_cap: S1G capabilities of this STA
* @agg: per-link data for multi-link aggregation
* @bandwidth: current bandwidth the station can receive with
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
@@ -2383,6 +2457,7 @@ struct ieee80211_link_sta {
struct ieee80211_sta_he_cap he_cap;
struct ieee80211_he_6ghz_capa he_6ghz_capa;
struct ieee80211_sta_eht_cap eht_cap;
+ struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_sta_aggregates agg;
@@ -2425,6 +2500,7 @@ struct ieee80211_link_sta {
* @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
* A-MSDU. Taken from the Extended Capabilities element. 0 means
* unlimited.
+ * @eml_cap: EML capabilities of this MLO station
* @cur: currently valid data as aggregated from the active links
* For non MLO STA it will point to the deflink data. For MLO STA
* ieee80211_sta_recalc_aggregates() must be called to update it.
@@ -2446,7 +2522,7 @@ struct ieee80211_link_sta {
* @spp_amsdu: indicates whether the STA uses SPP A-MSDU or not.
*/
struct ieee80211_sta {
- u8 addr[ETH_ALEN];
+ u8 addr[ETH_ALEN] __aligned(2);
u16 aid;
u16 max_rx_aggregation_subframes;
bool wme;
@@ -2459,6 +2535,7 @@ struct ieee80211_sta {
bool mlo;
bool spp_amsdu;
u8 max_amsdu_subframes;
+ u16 eml_cap;
struct ieee80211_sta_aggregates *cur;
@@ -2641,6 +2718,11 @@ struct ieee80211_txq {
* a virtual monitor interface when monitor interfaces are the only
* active interfaces.
*
+ * @IEEE80211_HW_NO_VIRTUAL_MONITOR: The driver would like to be informed
+ * of every monitor interface, as well as its configured channel.
+ * This is useful for supporting multiple monitor interfaces on different
+ * channels.
+ *
* @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to
* be created. It is expected user-space will create vifs as
* desired (and thus have them named as desired).
@@ -2728,14 +2810,6 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
* TDLS links.
*
- * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the
- * mgd_prepare_tx() callback to be called before transmission of a
- * deauthentication frame in case the association was completed but no
- * beacon was heard. This is required in multi-channel scenarios, where the
- * virtual interface might not be given air time for the transmission of
- * the frame, as it is not synced with the AP/P2P GO yet, and thus the
- * deauthentication frame might not be transmitted.
- *
* @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
* support QoS NDP for AP probing - that's most likely a driver bug.
*
@@ -2784,14 +2858,17 @@ struct ieee80211_txq {
*
* @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
* and connecting with a lower bandwidth instead
- * @IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ: HW requires disabling puncturing in
- * EHT in 5 GHz and connecting with a lower bandwidth instead
*
* @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so
* no need to stop queues. This really should be set by a driver that
* implements MLO, so operation can continue on other links when one
* link is switching.
*
+ * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec
+ * but otherwise ignored/worked around for interoperability. This is a
+ * HW flag so drivers can opt in according to their own control, e.g. in
+ * testing.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2808,6 +2885,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
IEEE80211_HW_MFP_CAPABLE,
IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_VIRTUAL_MONITOR,
IEEE80211_HW_NO_AUTO_VIF,
IEEE80211_HW_SW_CRYPTO_CONTROL,
IEEE80211_HW_SUPPORT_FAST_XMIT,
@@ -2835,7 +2913,6 @@ enum ieee80211_hw_flags {
IEEE80211_HW_REPORTS_LOW_ACK,
IEEE80211_HW_SUPPORTS_TX_FRAG,
IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
- IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
IEEE80211_HW_BUFF_MMPDU_TXQ,
IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW,
@@ -2850,8 +2927,8 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
IEEE80211_HW_DISALLOW_PUNCTURING,
- IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
IEEE80211_HW_HANDLES_QUIET_CSA,
+ IEEE80211_HW_STRICT,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3117,6 +3194,10 @@ ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
{
if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
return NULL;
+
+ if (c->band >= NUM_NL80211_BANDS)
+ return NULL;
+
return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
}
@@ -3149,6 +3230,19 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
/**
+ * ieee80211_purge_tx_queue - purge TX skb queue
+ * @hw: the hardware
+ * @skbs: the skbs
+ *
+ * Free a set of transmit skbs. Use this function when the device is about to
+ * stop, but some transmit skbs without TX status are still queued.
+ * This function does not take the list lock and the caller must hold the
+ * relevant locks to use it.
+ */
+void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ struct sk_buff_head *skbs);
+
+/**
* DOC: Hardware crypto acceleration
*
* mac80211 is capable of taking advantage of many hardware
@@ -3748,13 +3842,15 @@ enum ieee80211_reconfig_type {
* @success: whether the frame exchange was successful, only
* used with the mgd_complete_tx() method, and then only
* valid for auth and (re)assoc.
+ * @was_assoc: set if this call is due to deauth/disassoc
+ * while just having been associated
* @link_id: the link id on which the frame will be TX'ed.
- * Only used with the mgd_prepare_tx() method.
+ * 0 for a non-MLO connection.
*/
struct ieee80211_prep_tx_info {
u16 duration;
u16 subtype;
- u8 success:1;
+ u8 success:1, was_assoc:1;
int link_id;
};
@@ -4027,8 +4123,8 @@ struct ieee80211_prep_tx_info {
* in @sta_state.
* The callback can sleep.
*
- * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
- * used to transmit to the station. The changes are advertised with bits
+ * @link_sta_rc_update: Notifies the driver of changes to the bitrates that can
+ * be used to transmit to the station. The changes are advertised with bits
* from &enum ieee80211_rate_control_changed and the values are reflected
* in the station data. This callback should only be used when the driver
* uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
@@ -4046,6 +4142,15 @@ struct ieee80211_prep_tx_info {
* Statistics that the driver doesn't fill will be filled by mac80211.
* The callback can sleep.
*
+ * @link_sta_statistics: Get link statistics for this station. For example with
+ * beacon filtering, the statistics kept by mac80211 might not be
+ * accurate, so let the driver pre-fill the statistics. The driver can
+ * fill most of the values (indicating which by setting the filled
+ * bitmap), but not all of them make sense - see the source for which
+ * ones are possible.
+ * Statistics that the driver doesn't fill will be filled by mac80211.
+ * The callback can sleep.
+ *
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
@@ -4203,12 +4308,9 @@ struct ieee80211_prep_tx_info {
* yet it need not necessarily be given airtime, in particular since any
* transmission to a P2P GO needs to be synchronized against the GO's
* powersave state. mac80211 will call this function before transmitting a
- * management frame prior to having successfully associated to allow the
- * driver to give it channel time for the transmission, to get a response
- * and to be able to synchronize with the GO.
- * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211
- * would also call this function before transmitting a deauthentication
- * frame in case that no beacon was heard from the AP/P2P GO.
+ * management frame prior to transmitting that frame to allow the driver
+ * to give it channel time for the transmission, to get a response and be
+ * able to synchronize with the GO.
* The callback will be called before each transmission and upon return
* mac80211 will transmit the frame right away.
* Additional information is passed in the &struct ieee80211_prep_tx_info
@@ -4218,6 +4320,8 @@ struct ieee80211_prep_tx_info {
* @mgd_complete_tx: Notify the driver that the response frame for a previously
* transmitted frame announced with @mgd_prepare_tx was received, the data
* is filled similarly to @mgd_prepare_tx though the duration is not used.
+ * Note that this isn't always called for each mgd_prepare_tx() call, for
+ * example for SAE the 'confirm' messages can be on the air in any order.
*
* @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
* a TDLS discovery-request, we expect a reply to arrive on the AP's
@@ -4382,6 +4486,8 @@ struct ieee80211_prep_tx_info {
* new links bitmaps may be 0 if going from/to a non-MLO situation.
* The @old array contains pointers to the old bss_conf structures
* that were already removed, in case they're needed.
+ * Note that removal of a link should always succeed, so the return value
+ * is ignored in a removal-only case.
* This callback can sleep.
* @change_sta_links: Change the valid links of a station, similar to
* @change_vif_links. This callback can sleep.
@@ -4399,13 +4505,19 @@ struct ieee80211_prep_tx_info {
* if the requested TID-To-Link mapping can be accepted or not.
* If it's not accepted the driver may suggest a preferred mapping and
* modify @ttlm parameter with the suggested TID-to-Link mapping.
+ * @prep_add_interface: prepare for interface addition. This can be used by
+ * drivers to prepare for the addition of a new interface, e.g., allocate
+ * the needed resources etc. This callback doesn't guarantee that an
+ * interface with the specified type would be added, and thus drivers that
+ * implement this callback need to handle such cases. The type is the full
+ * &enum nl80211_iftype.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
- void (*stop)(struct ieee80211_hw *hw);
+ void (*stop)(struct ieee80211_hw *hw, bool suspend);
#ifdef CONFIG_PM
int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
int (*resume)(struct ieee80211_hw *hw);
@@ -4418,7 +4530,7 @@ struct ieee80211_ops {
enum nl80211_iftype new_type, bool p2p);
void (*remove_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
- int (*config)(struct ieee80211_hw *hw, u32 changed);
+ int (*config)(struct ieee80211_hw *hw, int radio_idx, u32 changed);
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -4481,8 +4593,10 @@ struct ieee80211_ops {
void (*get_key_seq)(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
struct ieee80211_key_seq *seq);
- int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
- int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*set_frag_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
+ int (*set_rts_threshold)(struct ieee80211_hw *hw, int radio_idx,
+ u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -4515,10 +4629,10 @@ struct ieee80211_ops {
void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
- void (*sta_rc_update)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- u32 changed);
+ void (*link_sta_rc_update)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed);
void (*sta_rate_tbl_update)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -4537,6 +4651,10 @@ struct ieee80211_ops {
s64 offset);
void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int (*tx_last_beacon)(struct ieee80211_hw *hw);
+ void (*link_sta_statistics)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct link_station_info *link_sinfo);
/**
* @ampdu_action:
@@ -4575,7 +4693,8 @@ struct ieee80211_ops {
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
- void (*set_coverage_class)(struct ieee80211_hw *hw, s16 coverage_class);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, int radio_idx,
+ s16 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
@@ -4590,8 +4709,10 @@ struct ieee80211_ops {
void (*channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch);
- int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
- int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+ int (*set_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant);
+ int (*get_antenna)(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant);
int (*remain_on_channel)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4691,7 +4812,7 @@ struct ieee80211_ops {
u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
struct ieee80211_sta *sta);
int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- int *dbm);
+ unsigned int link_id, int *dbm);
int (*tdls_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4783,6 +4904,8 @@ struct ieee80211_ops {
enum ieee80211_neg_ttlm_res
(*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_neg_ttlm *ttlm);
+ void (*prep_add_interface)(struct ieee80211_hw *hw,
+ enum nl80211_iftype type);
};
/**
@@ -5261,22 +5384,6 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
int max_rates);
/**
- * ieee80211_sta_set_expected_throughput - set the expected tpt for a station
- *
- * Call this function to notify mac80211 about a change in expected throughput
- * to a station. A driver for a device that does rate control in firmware can
- * call this function when the expected throughput estimate towards a station
- * changes. The information is used to tune the CoDel AQM applied to traffic
- * going towards that station (which can otherwise be too aggressive and cause
- * slow stations to starve).
- *
- * @pubsta: the station to set throughput for.
- * @thr: the current expected throughput in kbps.
- */
-void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
- u32 thr);
-
-/**
* ieee80211_tx_rate_update - transmit rate update callback
*
* Drivers should call this functions with a non-NULL pub sta
@@ -5932,21 +6039,12 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
- * ieee80211_remove_key - remove the given key
- * @keyconf: the parameter passed with the set key
- *
- * Context: Must be called with the wiphy mutex held.
- *
- * Remove the given key. If the key was uploaded to the hardware at the
- * time this function is called, it is not deleted in the hardware but
- * instead assumed to have been removed already.
- */
-void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
-
-/**
* ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
* @vif: the virtual interface to add the key on
- * @keyconf: new key data
+ * @idx: the keyidx of the key
+ * @key_data: the key data
+ * @key_len: the length of @key_data. Might be bigger than the actual key
+ * length, but not smaller (for the driver's convenience)
* @link_id: the link id of the key or -1 for non-MLO
*
* When GTK rekeying was done while the system was suspended, (a) new
@@ -5969,13 +6067,11 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
* for the new key for each TID to set up sequence counters properly.
*
* IMPORTANT: If this replaces a key that is present in the hardware,
- * then it will attempt to remove it during this call. In many cases
- * this isn't what you want, so call ieee80211_remove_key() first for
- * the key that's being replaced.
+ * then it will attempt to remove it during this call.
*/
struct ieee80211_key_conf *
ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
+ u8 idx, u8 *key_data, u8 key_len,
int link_id);
/**
@@ -6211,6 +6307,24 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
void (*iterator)(void *data,
struct ieee80211_sta *sta),
void *data);
+
+/**
+ * ieee80211_iterate_stations_mtx - iterate stations
+ *
+ * This function iterates over all stations associated with a given
+ * hardware that are currently uploaded to the driver and calls the callback
+ * function for them. This version can only be used while holding the wiphy
+ * mutex.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
+
/**
* ieee80211_queue_work - add work onto the mac80211 workqueue
*
@@ -6568,6 +6682,31 @@ void ieee80211_iter_chan_contexts_atomic(
void *iter_data);
/**
+ * ieee80211_iter_chan_contexts_mtx - iterate channel contexts
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @iter: iterator function
+ * @iter_data: data passed to iterator function
+ *
+ * Iterate all active channel contexts. This function can only be used while
+ * holding the wiphy mutex.
+ *
+ * The iterator will not find a context that's being added (during
+ * the driver callback to add it) but will find it while it's being
+ * removed.
+ *
+ * Note that during hardware restart, all contexts that existed
+ * before the restart are considered already present so will be
+ * found while iterating, whether they've been re-added already
+ * or not.
+ */
+void ieee80211_iter_chan_contexts_mtx(
+ struct ieee80211_hw *hw,
+ void (*iter)(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf,
+ void *data),
+ void *iter_data);
+
+/**
* ieee80211_ap_probereq_get - retrieve a Probe Request template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -6685,8 +6824,11 @@ void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
* ieee80211_radar_detected - inform that a radar was detected
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @chanctx_conf: channel context on which radar is detected. A valid pointer
+ * is mandatory during MLO; for non-MLO, %NULL can be passed
*/
-void ieee80211_radar_detected(struct ieee80211_hw *hw);
+void ieee80211_radar_detected(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *chanctx_conf);
/**
* ieee80211_chswitch_done - Complete channel switch process
@@ -6704,14 +6846,12 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
/**
* ieee80211_channel_switch_disconnect - disconnect due to channel switch error
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
- * @block_tx: if %true, do not send deauth frame.
*
* Instruct mac80211 to disconnect due to a channel switch error. The channel
* switch can request to block the tx and so, we need to make sure we do not send
* a deauth frame in this case.
*/
-void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif,
- bool block_tx);
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif);
/**
* ieee80211_request_smps - request SM PS transition
@@ -7083,7 +7223,7 @@ ieee80211_get_he_6ghz_capa_vif(const struct ieee80211_supported_band *sband,
}
/**
- * ieee80211_get_eht_iftype_cap_vif - return ETH capabilities for sband/vif
+ * ieee80211_get_eht_iftype_cap_vif - return EHT capabilities for sband/vif
* @sband: the sband to search for the iftype on
* @vif: the vif to get the iftype from
*
@@ -7122,13 +7262,14 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
* ieee80211_ave_rssi - report the average RSSI for the specified interface
*
* @vif: the specified virtual interface
+ * @link_id: the link ID for MLO, or -1 for non-MLO
*
* Note: This function assumes that the given vif is valid.
*
* Return: The average RSSI value for the requested interface, or 0 if not
* applicable.
*/
-int ieee80211_ave_rssi(struct ieee80211_vif *vif);
+int ieee80211_ave_rssi(struct ieee80211_vif *vif, int link_id);
/**
* ieee80211_report_wowlan_wakeup - report WoWLAN wakeup
@@ -7581,12 +7722,12 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
*
* - change_vif_links(0x11)
* - unassign_vif_chanctx(link_id=0)
+ * - assign_vif_chanctx(link_id=4)
* - change_sta_links(0x11) for each affected STA (the AP)
* (TDLS connections on now inactive links should be torn down)
* - remove group keys on the old link (link_id 0)
* - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4)
* - change_sta_links(0x10) for each affected STA (the AP)
- * - assign_vif_chanctx(link_id=4)
* - change_vif_links(0x10)
*
* Return: 0 on success. An error code otherwise.
@@ -7615,6 +7756,77 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif,
*/
void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif);
+/**
+ * ieee80211_chan_width_to_rx_bw - convert channel width to STA RX bandwidth
+ * @width: the channel width value to convert
+ * Return: the STA RX bandwidth value for the channel width
+ */
+static inline enum ieee80211_sta_rx_bandwidth
+ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width)
+{
+ switch (width) {
+ default:
+ WARN_ON_ONCE(1);
+ fallthrough;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IEEE80211_STA_RX_BW_20;
+ case NL80211_CHAN_WIDTH_40:
+ return IEEE80211_STA_RX_BW_40;
+ case NL80211_CHAN_WIDTH_80:
+ return IEEE80211_STA_RX_BW_80;
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ return IEEE80211_STA_RX_BW_160;
+ case NL80211_CHAN_WIDTH_320:
+ return IEEE80211_STA_RX_BW_320;
+ }
+}
+
+/**
+ * ieee80211_prepare_rx_omi_bw - prepare for sending BW RX OMI
+ * @link_sta: the link STA the OMI is going to be sent to
+ * @bw: the bandwidth requested
+ *
+ * When the driver decides to do RX OMI to change bandwidth with a STA
+ * it calls this function to prepare, then sends the OMI, and finally
+ * calls ieee80211_finalize_rx_omi_bw().
+ *
+ * Note that the (link) STA rate control is updated accordingly as well,
+ * but the chanctx might not be updated if there are other users.
+ * If the intention is to reduce the listen bandwidth, the driver must
+ * ensure there are no TDLS stations nor other uses of the chanctx.
+ *
+ * Also note that in order to sequence correctly, narrowing bandwidth
+ * will only happen in ieee80211_finalize_rx_omi_bw(), whereas widening
+ * again (e.g. going back to normal) will happen here.
+ *
+ * Note that we treat this symmetrically, so if the driver calls this
+ * and tells the peer to only send with a lower bandwidth, we assume
+ * that the driver also wants to only send at that lower bandwidth, to
+ * allow narrowing of the chanctx request for this station/interface.
+ *
+ * Finally, the driver must ensure that if the function returned %true,
+ * ieee80211_finalize_rx_omi_bw() is also called, even for example in
+ * case of HW restart.
+ *
+ * Context: Must be called with wiphy mutex held, and will call back
+ * into the driver, so ensure no driver locks are held.
+ *
+ * Return: %true if changes are going to be made, %false otherwise
+ */
+bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *link_sta,
+ enum ieee80211_sta_rx_bandwidth bw);
+
+/**
+ * ieee80211_finalize_rx_omi_bw - finalize BW RX OMI update
+ * @link_sta: the link STA the OMI was sent to
+ *
+ * See ieee80211_prepare_rx_omi_bw(). Context is the same here
+ * as well.
+ */
+void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *link_sta);
+
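/*
 * Illustrative sketch (not part of the patch) of the documented
 * prepare -> send OMI -> finalize sequence; drv_send_omi_frame() is a
 * hypothetical driver helper and error handling is elided.
 */
static void drv_send_omi_frame(struct ieee80211_link_sta *link_sta);

static void drv_reduce_peer_rx_bw(struct ieee80211_link_sta *link_sta)
{
	/* wiphy mutex held, no driver locks held */
	if (!ieee80211_prepare_rx_omi_bw(link_sta, IEEE80211_STA_RX_BW_80))
		return;	/* no change is going to be made */

	drv_send_omi_frame(link_sta);

	/* must follow a successful prepare, even across HW restart */
	ieee80211_finalize_rx_omi_bw(link_sta);
}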
/* for older drivers - let's not document these ... */
int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx);
@@ -7628,4 +7840,10 @@ int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode);
+/**
+ * ieee80211_vif_nan_started - Return whether a NAN vif is started
+ * @vif: the vif
+ * Return: %true iff the vif is a NAN interface and NAN is started
+ */
+bool ieee80211_vif_nan_started(struct ieee80211_vif *vif);
#endif /* MAC80211_H */
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 4a3a9de9da73..d72006a85f02 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -7,7 +7,7 @@
#ifndef NET_MAC802154_H
#define NET_MAC802154_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
@@ -140,7 +140,7 @@ enum ieee802154_hw_flags {
*
* xmit_sync:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration. This is called by a workqueue and useful for
* synchronous 802.15.4 drivers.
@@ -152,7 +152,7 @@ enum ieee802154_hw_flags {
*
* xmit_async:
* Handler that 802.15.4 module calls for each transmitted frame.
- * skb cntains the buffer starting from the IEEE 802.15.4 header.
+ * skb contains the buffer starting from the IEEE 802.15.4 header.
* The low-level driver should send the frame based on available
* configuration.
* This function should return zero or negative errno.
diff --git a/include/net/macsec.h b/include/net/macsec.h
index de216cbc6b05..bc7de5b53e54 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -38,8 +38,8 @@ struct metadata_dst;
typedef union salt {
struct {
- u32 ssci;
- u64 pn;
+ ssci_t ssci;
+ __be64 pn;
} __packed;
u8 bytes[MACSEC_SALT_LEN];
} __packed salt_t;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 27684135bb4d..eaa27483f99b 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -10,6 +10,7 @@
#include "shm_channel.h"
#define GDMA_STATUS_MORE_ENTRIES 0x00000105
+#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
@@ -58,8 +59,11 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
- GDMA_EQE_HWC_SOC_RECONFIG = 132,
+ GDMA_EQE_HWC_FPGA_RECONFIG = 132,
GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
+ GDMA_EQE_HWC_SOC_SERVICE = 134,
+ GDMA_EQE_HWC_RESET_REQUEST = 135,
+ GDMA_EQE_RNIC_QP_FATAL = 176,
};
enum {
@@ -69,6 +73,18 @@ enum {
GDMA_DEVICE_MANA_IB = 3,
};
+enum gdma_service_type {
+ GDMA_SERVICE_TYPE_NONE = 0,
+ GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
+ GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
+};
+
+struct mana_service_work {
+ struct work_struct work;
+ struct gdma_dev *gdma_dev;
+ enum gdma_service_type event;
+};
+
struct gdma_resource {
/* Protect the bitmap */
spinlock_t lock;
@@ -151,6 +167,7 @@ struct gdma_general_req {
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
+#define GDMA_MESSAGE_V4 4
struct gdma_general_resp {
struct gdma_resp_hdr hdr;
@@ -222,9 +239,19 @@ struct gdma_dev {
void *driver_data;
struct auxiliary_device *adev;
+ bool is_suspended;
+ bool rdma_teardown;
};
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
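/*
 * Example sketch (not part of the patch): queue sizes handed to the
 * device are expressed in MANA pages (4K DMA units), which can be
 * smaller than the CPU's PAGE_SIZE; example_queue_size() is
 * hypothetical.
 */
static u32 example_queue_size(u32 requested)
{
	u32 size = max(requested, (u32)MANA_MIN_QSIZE);

	size = MANA_PAGE_ALIGN(size);
	WARN_ON_ONCE(!MANA_PAGE_ALIGNED(size));
	return size;
}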
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
@@ -258,7 +285,8 @@ struct gdma_event {
struct gdma_queue;
struct mana_eq {
- struct gdma_queue *eq;
+ struct gdma_queue *eq;
+ struct dentry *mana_eq_debugfs;
};
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
@@ -354,14 +382,19 @@ struct gdma_irq_context {
char name[MANA_IRQ_NAME_SZ];
};
+enum gdma_context_flags {
+ GC_PROBE_SUCCEEDED = 0,
+};
+
struct gdma_context {
struct device *dev;
+ struct dentry *mana_pci_debugfs;
/* Per-vPort max number of queues */
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_irq_context *irq_contexts;
+ struct xarray irq_contexts;
/* L2 MTU */
u16 adapter_mtu;
@@ -376,6 +409,8 @@ struct gdma_context {
u32 test_event_eq_id;
bool is_pf;
+ bool in_service;
+
phys_addr_t bar0_pa;
void __iomem *bar0_va;
void __iomem *shm_base;
@@ -395,9 +430,13 @@ struct gdma_context {
/* Azure RDMA adapter */
struct gdma_dev mana_ib;
-};
-#define MAX_NUM_GDMA_DEVICES 4
+ u64 pf_cap_flags1;
+
+ struct workqueue_struct *service_wq;
+
+ unsigned long flags;
+};
static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
@@ -453,6 +492,8 @@ struct gdma_wqe {
#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24
+#define MANA_MAX_TX_WQE_SGL_ENTRIES 30
+
#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256
@@ -543,11 +584,44 @@ enum {
*/
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
+#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
+#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+
+/* Driver can handle holes (zeros) in the device list */
+#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
+
+/* Driver supports dynamic MSI-X vector allocation */
+#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
+
+/* Driver can self reset on EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)
+
+/* Driver can self reset on FPGA Reconfig EQE notification */
+#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
+#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
+
+/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
+#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
+
+/* Driver can send HWC periodically to query stats */
+#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
+
+/* Driver can handle hardware recovery events during probe */
+#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
- GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
+ GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
+ GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
+ GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
+ GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
+ GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
+ GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
+ GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
+ GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY)
#define GDMA_DRV_CAP_FLAGS2 0
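/*
 * Example sketch (not part of the patch): the driver advertises
 * GDMA_DRV_CAP_FLAGS1 during registration and tests the PF's flags in
 * gdma_context::pf_cap_flags1 before relying on a feature;
 * drv_handle_reset_eqe() is a hypothetical helper.
 */
static void drv_handle_reset_eqe(struct gdma_context *gc);

static void example_eqe_reset(struct gdma_context *gc)
{
	if (gc->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE)
		drv_handle_reset_eqe(gc);
}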
@@ -608,11 +682,12 @@ struct gdma_query_max_resources_resp {
}; /* HW DATA */
/* GDMA_LIST_DEVICES */
+#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
struct gdma_resp_hdr hdr;
u32 num_of_devs;
u32 reserved;
- struct gdma_dev_id devs[64];
+ struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */
/* GDMA_REGISTER_DEVICE */
@@ -690,20 +765,6 @@ struct gdma_query_hwc_timeout_resp {
u32 reserved;
};
-enum atb_page_size {
- ATB_PAGE_SIZE_4K,
- ATB_PAGE_SIZE_8K,
- ATB_PAGE_SIZE_16K,
- ATB_PAGE_SIZE_32K,
- ATB_PAGE_SIZE_64K,
- ATB_PAGE_SIZE_128K,
- ATB_PAGE_SIZE_256K,
- ATB_PAGE_SIZE_512K,
- ATB_PAGE_SIZE_1M,
- ATB_PAGE_SIZE_2M,
- ATB_PAGE_SIZE_MAX,
-};
-
enum gdma_mr_access_flags {
GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
@@ -762,6 +823,7 @@ struct gdma_destroy_dma_region_req {
enum gdma_pd_flags {
GDMA_PD_FLAG_INVALID = 0,
+ GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};
struct gdma_create_pd_req {
@@ -787,11 +849,18 @@ struct gdma_destory_pd_resp {
};/* HW DATA */
enum gdma_mr_type {
+ /*
+ * Guest Physical Address - MRs of this type allow access
+ * to any DMA-mapped memory using a bus-logical address
+ */
+ GDMA_MR_TYPE_GPA = 1,
/* Guest Virtual Address - MRs of this type allow access
* to memory mapped by PTEs associated with this MR using a virtual
* address that is set up in the MST
*/
GDMA_MR_TYPE_GVA = 2,
+ /* Guest zero-based address MRs */
+ GDMA_MR_TYPE_ZBVA = 4,
};
struct gdma_create_mr_params {
@@ -803,6 +872,10 @@ struct gdma_create_mr_params {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
};
@@ -818,7 +891,10 @@ struct gdma_create_mr_request {
u64 virtual_address;
enum gdma_mr_access_flags access_flags;
} gva;
-
+ struct {
+ u64 dma_region_handle;
+ enum gdma_mr_access_flags access_flags;
+ } zbva;
};
u32 reserved_2;
};/* HW DATA */
@@ -867,5 +943,14 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
+void mana_register_debugfs(void);
+void mana_unregister_debugfs(void);
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);
+
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
+int mana_gd_resume(struct pci_dev *pdev);
+
+bool mana_need_log(struct gdma_context *gc, int err);
#endif /* _GDMA_H */
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
index 158b125692c2..16feb39616c1 100644
--- a/include/net/mana/hw_channel.h
+++ b/include/net/mana/hw_channel.h
@@ -24,6 +24,8 @@
#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
#define HWC_DATA_CFG_HWC_TIMEOUT 1
+#define HWC_DATA_HW_LINK_CONNECT 2
+#define HWC_DATA_HW_LINK_DISCONNECT 3
#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
@@ -49,6 +51,15 @@ union hwc_init_type_data {
};
}; /* HW DATA */
+union hwc_init_soc_service_type {
+ u32 as_uint32;
+
+ struct {
+ u32 value : 28;
+ u32 type : 4;
+ };
+}; /* HW DATA */
+
struct hwc_rx_oob {
u32 type : 6;
u32 eom : 1;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 561f6719fb4e..d7e089c6b694 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -5,6 +5,7 @@
#define _MANA_H
#include <net/xdp.h>
+#include <net/net_shaper.h>
#include "gdma.h"
#include "hw_channel.h"
@@ -30,19 +31,32 @@ enum TRI_STATE {
};
/* Number of entries for hardware indirection table must be in power of 2 */
-#define MANA_INDIRECT_TABLE_SIZE 64
-#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
+#define MANA_INDIRECT_TABLE_MAX_SIZE 512
+#define MANA_INDIRECT_TABLE_DEF_SIZE 64
/* The Toeplitz hash key's length in bytes: should be multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
#define COMP_ENTRY_SIZE 64
-#define RX_BUFFERS_PER_QUEUE 512
+/* This max value for RX buffers is derived from __alloc_pages()'s max page
+ * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages. RX
+ * buffer sizes beyond this value get rejected by the __alloc_pages() call.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
+#define MIN_RX_BUFFERS_PER_QUEUE 128
+
+/* This max value for TX buffers is derived, through testing, as the maximum
+ * number of allocatable pages supported on the host per guest. TX buffer
+ * sizes beyond this value are rejected by the hardware.
+ */
+#define MAX_TX_BUFFERS_PER_QUEUE 16384
+#define DEF_TX_BUFFERS_PER_QUEUE 256
+#define MIN_TX_BUFFERS_PER_QUEUE 128
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
-#define EQ_SIZE (8 * PAGE_SIZE)
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3
#define MAX_PORTS_IN_MANA_DEV 256
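/*
 * Example sketch (not part of the patch): an ethtool set_ringparam
 * handler clamping user-requested ring sizes to the new per-queue
 * bounds; example_set_ring_sizes() is hypothetical.
 */
static void example_set_ring_sizes(struct mana_port_context *apc,
				   u32 rx_pending, u32 tx_pending)
{
	apc->rx_queue_size = clamp(rx_pending, (u32)MIN_RX_BUFFERS_PER_QUEUE,
				   (u32)MAX_RX_BUFFERS_PER_QUEUE);
	apc->tx_queue_size = clamp(tx_pending, (u32)MIN_TX_BUFFERS_PER_QUEUE,
				   (u32)MAX_TX_BUFFERS_PER_QUEUE);
}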
@@ -51,6 +65,8 @@ enum TRI_STATE {
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11
+#define MANA_RX_FRAG_ALIGNMENT 64
+
struct mana_stats_rx {
u64 packets;
u64 bytes;
@@ -97,6 +113,8 @@ struct mana_txq {
atomic_t pending_sends;
+ bool napi_initialized;
+
struct mana_stats_tx stats;
};
@@ -274,6 +292,7 @@ struct mana_cq {
/* NAPI data */
struct napi_struct napi;
int work_done;
+ int work_done_since_doorbell;
int budget;
};
@@ -284,7 +303,7 @@ struct mana_recv_buf_oob {
void *buf_va;
bool from_pool; /* allocated from a page pool */
- /* SGL of the buffer going to be sent has part of the work request. */
+ /* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];
@@ -311,6 +330,7 @@ struct mana_rxq {
u32 datasize;
u32 alloc_size;
u32 headroom;
+ u32 frag_count;
mana_handle_t rxobj;
@@ -334,6 +354,7 @@ struct mana_rxq {
int xdp_rc; /* XDP redirect return code */
struct page_pool *page_pool;
+ struct dentry *mana_rx_debugfs;
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
@@ -347,11 +368,21 @@ struct mana_tx_qp {
struct mana_cq tx_cq;
mana_handle_t tx_object;
+
+ struct dentry *mana_tx_debugfs;
};
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 tx_cqe_err;
+ u64 tx_cqe_unknown_type;
+ u64 tx_linear_pkt_cnt;
+ u64 rx_coalesced_err;
+ u64 rx_cqe_unknown_type;
+};
+
+struct mana_ethtool_hc_stats {
u64 hc_rx_discards_no_wqe;
u64 hc_rx_err_vport_disabled;
u64 hc_rx_bytes;
@@ -379,20 +410,86 @@ struct mana_ethtool_stats {
u64 hc_tx_mcast_pkts;
u64 hc_tx_mcast_bytes;
u64 hc_tx_err_gdma;
- u64 tx_cqe_err;
- u64 tx_cqe_unknown_type;
- u64 rx_coalesced_err;
- u64 rx_cqe_unknown_type;
+};
+
+struct mana_ethtool_phy_stats {
+ /* Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC traffic counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC pause counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
};
struct mana_context {
struct gdma_dev *gdma_dev;
u16 num_ports;
+ u8 bm_hostmode;
+ struct mana_ethtool_hc_stats hc_stats;
struct mana_eq *eqs;
+ struct dentry *mana_eqs_debugfs;
+
+ /* Workqueue for querying hardware stats */
+ struct delayed_work gf_stats_work;
+ bool hwc_timeout_occurred;
struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
+
+ /* Link state change work */
+ struct work_struct link_change_work;
+ u32 link_event;
};
struct mana_port_context {
@@ -410,10 +507,11 @@ struct mana_port_context {
struct mana_tx_qp *tx_qp;
/* Indirection Table for RX & TX. The values are queue indexes */
- u32 indir_table[MANA_INDIRECT_TABLE_SIZE];
+ u32 *indir_table;
+ u32 indir_table_sz;
/* Indirection table containing RxObject Handles */
- mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];
+ mana_handle_t *rxobj_table;
/* Hash key used by the NIC */
u8 hashkey[MANA_HASH_KEY_SIZE];
@@ -428,6 +526,7 @@ struct mana_port_context {
u32 rxbpre_datasize;
u32 rxbpre_alloc_size;
u32 rxbpre_headroom;
+ u32 rxbpre_frag_count;
struct bpf_prog *bpf_prog;
@@ -435,6 +534,9 @@ struct mana_port_context {
unsigned int max_queues;
unsigned int num_queues;
+ unsigned int rx_queue_size;
+ unsigned int tx_queue_size;
+
mana_handle_t port_handle;
mana_handle_t pf_filter_handle;
@@ -442,12 +544,24 @@ struct mana_port_context {
struct mutex vport_mutex;
int vport_use_count;
+ /* Net shaper handle */
+ struct net_shaper_handle handle;
+
u16 port_idx;
+ /* Currently configured speed (mbps) */
+ u32 speed;
+ /* Maximum speed supported by the SKU (mbps) */
+ u32 max_speed;
bool port_is_up;
bool port_st_save; /* Saved port state */
struct mana_ethtool_stats eth_stats;
+
+ struct mana_ethtool_phy_stats phy_stats;
+
+ /* Debugfs */
+ struct dentry *mana_port_debugfs;
};
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -461,6 +575,9 @@ int mana_detach(struct net_device *ndev, bool from_close);
int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);
+int mana_rdma_probe(struct gdma_dev *gd);
+void mana_rdma_remove(struct gdma_dev *gd);
+
void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
u32 flags);
@@ -469,9 +586,17 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
-void mana_query_gf_stats(struct mana_port_context *apc);
+int mana_query_gf_stats(struct mana_context *ac);
+int mana_query_link_cfg(struct mana_port_context *apc);
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping);
+void mana_query_phy_stats(struct mana_port_context *apc);
+int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
+void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
+extern struct dentry *mana_debugfs_root;
/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff
@@ -493,6 +618,9 @@ enum mana_command_code {
MANA_FENCE_RQ = 0x20006,
MANA_CONFIG_VPORT_RX = 0x20007,
MANA_QUERY_VPORT_CONFIG = 0x20008,
+ MANA_QUERY_LINK_CONFIG = 0x2000A,
+ MANA_SET_BW_CLAMP = 0x2000B,
+ MANA_QUERY_PHY_STAT = 0x2000c,
/* Privileged commands for the PF mode */
MANA_REGISTER_FILTER = 0x28000,
@@ -501,6 +629,35 @@ enum mana_command_code {
MANA_DEREGISTER_HW_PORT = 0x28004,
};
+/* Query Link Configuration */
+struct mana_query_link_config_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+}; /* HW DATA */
+
+struct mana_query_link_config_resp {
+ struct gdma_resp_hdr hdr;
+ u32 qos_speed_mbps;
+ u8 qos_unconfigured;
+ u8 reserved1[3];
+ u32 link_speed_mbps;
+ u8 reserved2[4];
+}; /* HW DATA */
+
+/* Set Bandwidth Clamp */
+struct mana_set_bw_clamp_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ enum TRI_STATE enable_clamping;
+ u32 link_speed_mbps;
+}; /* HW DATA */
+
+struct mana_set_bw_clamp_resp {
+ struct gdma_resp_hdr hdr;
+ u8 qos_unconfigured;
+ u8 reserved[7];
+}; /* HW DATA */
+
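/*
 * Example sketch (not part of the patch): issuing a bandwidth clamp
 * over the HW channel; mana_gd_init_req_hdr() is assumed to be the
 * usual request-header helper, and error handling is elided.
 */
static int example_set_bw_clamp(struct gdma_context *gc,
				mana_handle_t vport, u32 speed_mbps)
{
	struct mana_set_bw_clamp_resp resp = {};
	struct mana_set_bw_clamp_req req = {};

	mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.enable_clamping = TRI_STATE_TRUE;
	req.link_speed_mbps = speed_mbps;

	return mana_gd_send_request(gc, sizeof(req), &req,
				    sizeof(resp), &resp);
}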
/* Query Device Configuration */
struct mana_query_device_cfg_req {
struct gdma_req_hdr hdr;
@@ -527,7 +684,8 @@ struct mana_query_device_cfg_resp {
u64 pf_cap_flags4;
u16 max_num_vports;
- u16 reserved;
+ u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
+ u8 reserved;
u32 max_num_eqs;
/* response v2: */
@@ -654,6 +812,74 @@ struct mana_query_gf_stat_resp {
u64 tx_err_gdma;
}; /* HW DATA */
+/* Query phy stats */
+struct mana_query_phy_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_phy_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+
+ /* Aggregate Drop Counters */
+ u64 rx_pkt_drop_phy;
+ u64 tx_pkt_drop_phy;
+
+ /* Per TC (Traffic Class) traffic counters */
+ u64 rx_pkt_tc0_phy;
+ u64 tx_pkt_tc0_phy;
+ u64 rx_pkt_tc1_phy;
+ u64 tx_pkt_tc1_phy;
+ u64 rx_pkt_tc2_phy;
+ u64 tx_pkt_tc2_phy;
+ u64 rx_pkt_tc3_phy;
+ u64 tx_pkt_tc3_phy;
+ u64 rx_pkt_tc4_phy;
+ u64 tx_pkt_tc4_phy;
+ u64 rx_pkt_tc5_phy;
+ u64 tx_pkt_tc5_phy;
+ u64 rx_pkt_tc6_phy;
+ u64 tx_pkt_tc6_phy;
+ u64 rx_pkt_tc7_phy;
+ u64 tx_pkt_tc7_phy;
+
+ u64 rx_byte_tc0_phy;
+ u64 tx_byte_tc0_phy;
+ u64 rx_byte_tc1_phy;
+ u64 tx_byte_tc1_phy;
+ u64 rx_byte_tc2_phy;
+ u64 tx_byte_tc2_phy;
+ u64 rx_byte_tc3_phy;
+ u64 tx_byte_tc3_phy;
+ u64 rx_byte_tc4_phy;
+ u64 tx_byte_tc4_phy;
+ u64 rx_byte_tc5_phy;
+ u64 tx_byte_tc5_phy;
+ u64 rx_byte_tc6_phy;
+ u64 tx_byte_tc6_phy;
+ u64 rx_byte_tc7_phy;
+ u64 tx_byte_tc7_phy;
+
+ /* Per TC (Traffic Class) pause counters */
+ u64 rx_pause_tc0_phy;
+ u64 tx_pause_tc0_phy;
+ u64 rx_pause_tc1_phy;
+ u64 tx_pause_tc1_phy;
+ u64 rx_pause_tc2_phy;
+ u64 tx_pause_tc2_phy;
+ u64 rx_pause_tc3_phy;
+ u64 tx_pause_tc3_phy;
+ u64 rx_pause_tc4_phy;
+ u64 tx_pause_tc4_phy;
+ u64 rx_pause_tc5_phy;
+ u64 tx_pause_tc5_phy;
+ u64 rx_pause_tc6_phy;
+ u64 tx_pause_tc6_phy;
+ u64 rx_pause_tc7_phy;
+ u64 tx_pause_tc7_phy;
+}; /* HW DATA */
+
/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
@@ -796,4 +1022,8 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker);
#endif /* _MANA_H */
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 7b17c52e8ce2..c3207ce98f07 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -69,7 +69,10 @@ struct mctp_sock {
/* bind() params */
unsigned int bind_net;
- mctp_eid_t bind_addr;
+ mctp_eid_t bind_local_addr;
+ mctp_eid_t bind_peer_addr;
+ unsigned int bind_peer_net;
+ bool bind_peer_set;
__u8 bind_type;
/* sendmsg()/recvmsg() uses struct sockaddr_mctp_ext */
@@ -183,8 +186,8 @@ struct mctp_sk_key {
struct mctp_skb_cb {
unsigned int magic;
unsigned int net;
- int ifindex; /* extended/direct addressing if set */
- mctp_eid_t src;
+ /* fields below provide extended addressing for ingress to recvmsg() */
+ int ifindex;
unsigned char halen;
unsigned char haddr[MAX_ADDR_LEN];
};
@@ -212,7 +215,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb)
BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb));
WARN_ON(cb->magic != 0x4d435450);
- return (void *)(skb->cb);
+ return cb;
}
/* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension,
@@ -222,6 +225,8 @@ struct mctp_flow {
struct mctp_sk_key *key;
};
+struct mctp_dst;
+
/* Route definition.
*
* These are held in the pernet->mctp.routes list, with RCU protection for
@@ -229,16 +234,25 @@ struct mctp_flow {
* dropped on NETDEV_UNREGISTER events.
*
* Updates to the route table are performed under rtnl; all reads under RCU,
- * so routes cannot be referenced over a RCU grace period. Specifically: A
- * caller cannot block between mctp_route_lookup and mctp_route_release()
+ * so routes cannot be referenced over an RCU grace period.
*/
struct mctp_route {
mctp_eid_t min, max;
unsigned char type;
+
unsigned int mtu;
- struct mctp_dev *dev;
- int (*output)(struct mctp_route *route,
+
+ enum {
+ MCTP_ROUTE_DIRECT,
+ MCTP_ROUTE_GATEWAY,
+ } dst_type;
+ union {
+ struct mctp_dev *dev;
+ struct mctp_fq_addr gateway;
+ };
+
+ int (*output)(struct mctp_dst *dst,
struct sk_buff *skb);
struct list_head list;
@@ -246,12 +260,35 @@ struct mctp_route {
struct rcu_head rcu;
};
+/* Route lookup result: dst. Represents the results of a routing decision,
+ * but is only held over the individual routing operation.
+ *
+ * Will typically be stored on the caller stack, and must be released after
+ * usage.
+ */
+struct mctp_dst {
+ struct mctp_dev *dev;
+ unsigned int mtu;
+ mctp_eid_t nexthop;
+
+ /* set for direct addressing */
+ unsigned char halen;
+ unsigned char haddr[MAX_ADDR_LEN];
+
+ int (*output)(struct mctp_dst *dst, struct sk_buff *skb);
+};
+
+int mctp_dst_from_extaddr(struct mctp_dst *dst, struct net *net, int ifindex,
+ unsigned char halen, const unsigned char *haddr);
+
/* route interfaces */
-struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
- mctp_eid_t daddr);
+int mctp_route_lookup(struct net *net, unsigned int dnet,
+ mctp_eid_t daddr, struct mctp_dst *dst);
+
+void mctp_dst_release(struct mctp_dst *dst);
/* always takes ownership of skb */
-int mctp_local_output(struct sock *sk, struct mctp_route *rt,
+int mctp_local_output(struct sock *sk, struct mctp_dst *dst,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
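/*
 * Example sketch (not part of the patch): the new lookup flow fills a
 * stack-allocated dst that must be released after the routing
 * operation; example_mctp_send() is hypothetical.
 */
static int example_mctp_send(struct net *net, struct sock *sk,
			     struct sk_buff *skb, unsigned int netid,
			     mctp_eid_t daddr, u8 req_tag)
{
	struct mctp_dst dst;
	int rc;

	rc = mctp_route_lookup(net, netid, daddr, &dst);
	if (rc) {
		kfree_skb(skb);
		return rc;
	}

	rc = mctp_local_output(sk, &dst, skb, daddr, req_tag);
	mctp_dst_release(&dst);
	return rc;
}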
void mctp_key_unref(struct mctp_sk_key *key);
@@ -295,7 +332,25 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev);
int mctp_routes_init(void);
void mctp_routes_exit(void);
-void mctp_device_init(void);
+int mctp_device_init(void);
void mctp_device_exit(void);
+/* MCTP IDs and Codes from DMTF specification
+ * "DSP0239 Management Component Transport Protocol (MCTP) IDs and Codes"
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0239_1.11.1.pdf
+ */
+enum mctp_phys_binding {
+ MCTP_PHYS_BINDING_UNSPEC = 0x00,
+ MCTP_PHYS_BINDING_SMBUS = 0x01,
+ MCTP_PHYS_BINDING_PCIE_VDM = 0x02,
+ MCTP_PHYS_BINDING_USB = 0x03,
+ MCTP_PHYS_BINDING_KCS = 0x04,
+ MCTP_PHYS_BINDING_SERIAL = 0x05,
+ MCTP_PHYS_BINDING_I3C = 0x06,
+ MCTP_PHYS_BINDING_MMBI = 0x07,
+ MCTP_PHYS_BINDING_PCC = 0x08,
+ MCTP_PHYS_BINDING_UCIE = 0x09,
+ MCTP_PHYS_BINDING_VENDOR = 0xFF,
+};
+
#endif /* __NET_MCTP_H */
diff --git a/include/net/mctpdevice.h b/include/net/mctpdevice.h
index 5c0d04b5c12c..957d9ef924c5 100644
--- a/include/net/mctpdevice.h
+++ b/include/net/mctpdevice.h
@@ -22,6 +22,7 @@ struct mctp_dev {
refcount_t refs;
unsigned int net;
+ enum mctp_phys_binding binding;
const struct mctp_netdev_ops *ops;
@@ -44,7 +45,8 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev);
struct mctp_dev *__mctp_dev_get(const struct net_device *dev);
int mctp_register_netdev(struct net_device *dev,
- const struct mctp_netdev_ops *ops);
+ const struct mctp_netdev_ops *ops,
+ enum mctp_phys_binding binding);
void mctp_unregister_netdev(struct net_device *dev);
void mctp_dev_hold(struct mctp_dev *mdev);
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 0bc4ab03f487..f7263fe2a2e4 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -14,6 +14,7 @@
struct mptcp_info;
struct mptcp_sock;
+struct mptcp_pm_addr_entry;
struct seq_file;
/* MPTCP sk_buff extension data */
@@ -100,17 +101,9 @@ struct mptcp_out_options {
#define MPTCP_SCHED_MAX 128
#define MPTCP_SCHED_BUF_MAX (MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX)
-#define MPTCP_SUBFLOWS_MAX 8
-
-struct mptcp_sched_data {
- bool reinject;
- u8 subflows;
- struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
-};
-
struct mptcp_sched_ops {
- int (*get_subflow)(struct mptcp_sock *msk,
- struct mptcp_sched_data *data);
+ int (*get_send)(struct mptcp_sock *msk);
+ int (*get_retrans)(struct mptcp_sock *msk);
char name[MPTCP_SCHED_NAME_MAX];
struct module *owner;
@@ -120,6 +113,19 @@ struct mptcp_sched_ops {
void (*release)(struct mptcp_sock *msk);
} ____cacheline_aligned_in_smp;
+#define MPTCP_PM_NAME_MAX 16
+#define MPTCP_PM_MAX 128
+#define MPTCP_PM_BUF_MAX (MPTCP_PM_NAME_MAX * MPTCP_PM_MAX)
+
+struct mptcp_pm_ops {
+ char name[MPTCP_PM_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+} ____cacheline_aligned_in_smp;
+
#ifdef CONFIG_MPTCP
void mptcp_init(void);
@@ -223,6 +229,8 @@ static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
return htonl(0u);
}
+
+void mptcp_active_detect_blackhole(struct sock *sk, bool expired);
#else
static inline void mptcp_init(void)
@@ -307,6 +315,8 @@ static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct reques
}
static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
+
+static inline void mptcp_active_detect_blackhole(struct sock *sk, bool expired) { }
#endif /* CONFIG_MPTCP */
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9bbdf6eaa942..d38783a2ce57 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -60,15 +60,6 @@ enum {
#include <net/neighbour.h>
-/* Set to 3 to get tracing... */
-#define ND_DEBUG 1
-
-#define ND_PRINTK(val, level, fmt, ...) \
-do { \
- if (val <= ND_DEBUG) \
- net_##level##_ratelimited(fmt, ##__VA_ARGS__); \
-} while (0)
-
struct ctl_table;
struct inet6_dev;
struct net_device;
@@ -147,11 +138,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
- * int (*is_useropt)(u8 nd_opt_type):
- * This function is called when IPv6 decide RA userspace options. if
- * this function returns 1 then the option given by nd_opt_type will
- * be handled as userspace option additional to the IPv6 options.
- *
* int (*parse_options)(const struct net_device *dev,
* struct nd_opt_hdr *nd_opt,
* struct ndisc_options *ndopts):
@@ -200,7 +186,6 @@ void __ndisc_fill_addr_option(struct sk_buff *skb, int type, const void *data,
* addresses. E.g. 802.15.4 6LoWPAN.
*/
struct ndisc_ops {
- int (*is_useropt)(u8 nd_opt_type);
int (*parse_options)(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts);
@@ -224,15 +209,6 @@ struct ndisc_ops {
};
#if IS_ENABLED(CONFIG_IPV6)
-static inline int ndisc_ops_is_useropt(const struct net_device *dev,
- u8 nd_opt_type)
-{
- if (dev->ndisc_ops && dev->ndisc_ops->is_useropt)
- return dev->ndisc_ops->is_useropt(nd_opt_type);
- else
- return 0;
-}
-
static inline int ndisc_ops_parse_options(const struct net_device *dev,
struct nd_opt_hdr *nd_opt,
struct ndisc_options *ndopts)
@@ -486,7 +462,7 @@ void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+int ndisc_ifinfo_sysctl_change(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0d28172193fa..2dfee6d4258a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -29,6 +29,7 @@
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>
+#include <net/neighbour_tables.h>
/*
* NUD stands for "neighbor unreachability detection"
@@ -91,15 +92,17 @@ struct neigh_parms {
static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
set_bit(index, p->data_state);
- p->data[index] = val;
+ WRITE_ONCE(p->data[index], val);
}
-#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
+#define __NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
+#define NEIGH_VAR(p, attr) READ_ONCE(__NEIGH_VAR(p, attr))
+#define NEIGH_VAR_PTR(p, attr) (&(__NEIGH_VAR(p, attr)))
/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
* In other cases, NEIGH_VAR_SET should be used.
*/
-#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
+#define NEIGH_VAR_INIT(p, attr, val) (__NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
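/*
 * Example sketch (not part of the patch): NEIGH_VAR() now expands to a
 * READ_ONCE(), pairing with the WRITE_ONCE() in neigh_var_set(), so
 * lockless readers see a consistent value.
 */
static inline int example_probe_delay(struct neigh_parms *p)
{
	/* expands to READ_ONCE(p->data[NEIGH_VAR_DELAY_PROBE_TIME]) */
	return NEIGH_VAR(p, DELAY_PROBE_TIME);
}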
static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
@@ -135,7 +138,8 @@ struct neigh_statistics {
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
- struct neighbour __rcu *next;
+ struct hlist_node hash;
+ struct hlist_node dev_list;
struct neigh_table *tbl;
struct neigh_parms *parms;
unsigned long confirmed;
@@ -174,12 +178,17 @@ struct neigh_ops {
};
struct pneigh_entry {
- struct pneigh_entry *next;
+ struct pneigh_entry __rcu *next;
possible_net_t net;
struct net_device *dev;
netdevice_tracker dev_tracker;
+ union {
+ struct list_head free_node;
+ struct rcu_head rcu;
+ };
u32 flags;
u8 protocol;
+ bool permanent;
u32 key[];
};
@@ -190,7 +199,7 @@ struct pneigh_entry {
#define NEIGH_NUM_HASH_RND 4
struct neigh_hash_table {
- struct neighbour __rcu **hash_buckets;
+ struct hlist_head *hash_heads;
unsigned int hash_shift;
__u32 hash_rnd[NEIGH_NUM_HASH_RND];
struct rcu_head rcu;
@@ -229,19 +238,12 @@ struct neigh_table {
atomic_t gc_entries;
struct list_head gc_list;
struct list_head managed_list;
- rwlock_t lock;
+ spinlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
struct neigh_hash_table __rcu *nht;
- struct pneigh_entry **phash_buckets;
-};
-
-enum {
- NEIGH_ARP_TABLE = 0,
- NEIGH_ND_TABLE = 1,
- NEIGH_DN_TABLE = 2,
- NEIGH_NR_TABLES,
- NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
+ struct mutex phash_lock;
+ struct pneigh_entry __rcu **phash_buckets;
};
static inline int neigh_parms_family(struct neigh_parms *p)
@@ -266,16 +268,24 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_EXT_LEARNED BIT(5)
#define NEIGH_UPDATE_F_ISROUTER BIT(6)
#define NEIGH_UPDATE_F_ADMIN BIT(7)
+#define NEIGH_UPDATE_F_EXT_VALIDATED BIT(8)
/* In-kernel representation for NDA_FLAGS_EXT flags: */
#define NTF_OLD_MASK 0xff
#define NTF_EXT_SHIFT 8
-#define NTF_EXT_MASK (NTF_EXT_MANAGED)
+#define NTF_EXT_MASK (NTF_EXT_MANAGED | NTF_EXT_EXT_VALIDATED)
#define NTF_MANAGED (NTF_EXT_MANAGED << NTF_EXT_SHIFT)
+#define NTF_EXT_VALIDATED (NTF_EXT_EXT_VALIDATED << NTF_EXT_SHIFT)
extern const struct nla_policy nda_policy[];
+#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash)
+#define neigh_for_each_in_bucket_rcu(pos, head) \
+ hlist_for_each_entry_rcu(pos, head, hash)
+#define neigh_for_each_in_bucket_safe(pos, tmp, head) \
+ hlist_for_each_entry_safe(pos, tmp, head, hash)
+
static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
return *(const u32 *)n->primary_key == *(const u32 *)pkey;
@@ -304,12 +314,9 @@ static inline struct neighbour *___neigh_lookup_noref(
u32 hash_val;
hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
- for (n = rcu_dereference(nht->hash_buckets[hash_val]);
- n != NULL;
- n = rcu_dereference(n->next)) {
+ neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val])
if (n->dev == dev && key_eq(n, pkey))
return n;
- }
return NULL;
}
@@ -350,7 +357,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
-bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
+bool neigh_remove_one(struct neighbour *ndel);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);
@@ -373,13 +380,20 @@ struct net *neigh_parms_net(const struct neigh_parms *parms)
unsigned long neigh_rand_reach_time(unsigned long base);
+static inline void neigh_set_reach_time(struct neigh_parms *p)
+{
+ unsigned long base = NEIGH_VAR(p, BASE_REACHABLE_TIME);
+
+ WRITE_ONCE(p->reachable_time, neigh_rand_reach_time(base));
+}
+
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
- const void *key, struct net_device *dev,
- int creat);
-struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
- const void *key, struct net_device *dev);
+ const void *key, struct net_device *dev);
+int pneigh_create(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev, u32 flags, u8 protocol,
+ bool permanent);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
struct net_device *dev);
@@ -412,12 +426,12 @@ void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
-int neigh_proc_dointvec(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_jiffies(const struct ctl_table *ctl, int write,
void *buffer,
size_t *lenp, loff_t *ppos);
-int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
+int neigh_proc_dointvec_ms_jiffies(const struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
diff --git a/include/net/neighbour_tables.h b/include/net/neighbour_tables.h
new file mode 100644
index 000000000000..bcffbe8f7601
--- /dev/null
+++ b/include/net/neighbour_tables.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_NEIGHBOUR_TABLES_H
+#define _NET_NEIGHBOUR_TABLES_H
+
+enum {
+ NEIGH_ARP_TABLE = 0,
+ NEIGH_ND_TABLE = 1,
+ NEIGH_NR_TABLES,
+ NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
+};
+
+#endif
diff --git a/include/net/net_debug.h b/include/net/net_debug.h
index 1e74684cbbdb..47f7a4a878b9 100644
--- a/include/net/net_debug.h
+++ b/include/net/net_debug.h
@@ -27,7 +27,7 @@ void netdev_info(const struct net_device *dev, const char *format, ...);
#define netdev_level_once(level, dev, fmt, ...) \
do { \
- static bool __section(".data.once") __print_once; \
+ static bool __section(".data..once") __print_once; \
\
if (!__print_once) { \
__print_once = true; \
@@ -149,9 +149,11 @@ do { \
#if defined(CONFIG_DEBUG_NET)
-#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond))
+#define DEBUG_NET_WARN_ONCE(cond, format...) ((void)WARN_ONCE(cond, format))
#else
#define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define DEBUG_NET_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
#endif /* _LINUX_NET_DEBUG_H */
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 20c34bd7a077..cb664f6e3558 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -80,8 +80,12 @@ struct net {
* or to unregister pernet ops
* (pernet_ops_rwsem write locked).
*/
+ struct llist_node defer_free_list;
struct llist_node cleanup_list; /* namespaces on death row */
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
+
#ifdef CONFIG_KEYS
struct key_tag *key_domain; /* Key domain of operation tag */
#endif
@@ -188,6 +192,10 @@ struct net {
#if IS_ENABLED(CONFIG_SMC)
struct netns_smc smc;
#endif
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+ /* Move to a better place when the config guard is removed. */
+ struct mutex rtnl_mutex;
+#endif
} __randomize_layout;
#include <linux/seq_file_net.h>
@@ -196,7 +204,7 @@ struct net {
extern struct net init_net;
#ifdef CONFIG_NET_NS
-struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+struct net *copy_net_ns(u64 flags, struct user_namespace *user_ns,
struct net *old_net);
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
@@ -205,10 +213,12 @@ void net_ns_barrier(void);
struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
+extern struct task_struct *cleanup_net_task;
+
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
-static inline struct net *copy_net_ns(unsigned long flags,
+static inline struct net *copy_net_ns(u64 flags,
struct user_namespace *user_ns, struct net *old_net)
{
if (flags & CLONE_NEWNET)
@@ -252,10 +262,15 @@ void ipx_unregister_sysctl(void);
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);
+static inline struct net *to_net_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct net, ns);
+}
+
/* Try using get_net_track() instead */
static inline struct net *get_net(struct net *net)
{
- refcount_inc(&net->ns.count);
+ ns_ref_inc(net);
return net;
}
@@ -266,7 +281,7 @@ static inline struct net *maybe_get_net(struct net *net)
* exists. If the reference count is zero this
* function fails and returns NULL.
*/
- if (!refcount_inc_not_zero(&net->ns.count))
+ if (!ns_ref_get(net))
net = NULL;
return net;
}
@@ -274,7 +289,7 @@ static inline struct net *maybe_get_net(struct net *net)
/* Try using put_net_track() instead */
static inline void put_net(struct net *net)
{
- if (refcount_dec_and_test(&net->ns.count))
+ if (ns_ref_put(net))
__put_net(net);
}
@@ -286,10 +301,11 @@ int net_eq(const struct net *net1, const struct net *net2)
static inline int check_net(const struct net *net)
{
- return refcount_read(&net->ns.count) != 0;
+ return ns_ref_read(net) != 0;
}
void net_drop_ns(void *);
+void net_passive_dec(struct net *net);
#else
@@ -319,8 +335,23 @@ static inline int check_net(const struct net *net)
}
#define net_drop_ns NULL
+
+static inline void net_passive_dec(struct net *net)
+{
+ refcount_dec(&net->passive);
+}
#endif
+static inline void net_passive_inc(struct net *net)
+{
+ refcount_inc(&net->passive);
+}
+
+/* Returns true if the netns initialization is completed successfully */
+static inline bool net_initialized(const struct net *net)
+{
+ return READ_ONCE(net->list.next);
+}
static inline void __netns_tracker_alloc(struct net *net,
netns_tracker *tracker,
@@ -386,7 +417,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
}
-static inline struct net *read_pnet_rcu(possible_net_t *pnet)
+static inline struct net *read_pnet_rcu(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
return rcu_dereference(pnet->net);
@@ -449,10 +480,10 @@ struct pernet_operations {
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
/* Following method is called with RTNL held. */
- void (*exit_batch_rtnl)(struct list_head *net_exit_list,
- struct list_head *dev_kill_list);
- unsigned int *id;
- size_t size;
+ void (*exit_rtnl)(struct net *net,
+ struct list_head *dev_kill_list);
+ unsigned int * const id;
+ const size_t size;
};
/*
diff --git a/include/net/net_shaper.h b/include/net/net_shaper.h
new file mode 100644
index 000000000000..5c3f49b52fe9
--- /dev/null
+++ b/include/net/net_shaper.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_SHAPER_H_
+#define _NET_SHAPER_H_
+
+#include <linux/types.h>
+
+#include <uapi/linux/net_shaper.h>
+
+struct net_device;
+struct devlink;
+struct netlink_ext_ack;
+
+enum net_shaper_binding_type {
+ NET_SHAPER_BINDING_TYPE_NETDEV,
+ /* NET_SHAPER_BINDING_TYPE_DEVLINK_PORT */
+};
+
+struct net_shaper_binding {
+ enum net_shaper_binding_type type;
+ union {
+ struct net_device *netdev;
+ struct devlink *devlink;
+ };
+};
+
+struct net_shaper_handle {
+ enum net_shaper_scope scope;
+ u32 id;
+};
+
+/**
+ * struct net_shaper - represents a shaping node on the NIC H/W;
+ * zeroed fields are considered not set.
+ * @parent: Unique identifier for the shaper parent, usually implied
+ * @handle: Unique identifier for this shaper
+ * @metric: Specifies whether the rate limits refer to PPS or BPS
+ * @bw_min: Minimum guaranteed rate for this shaper
+ * @bw_max: Maximum peak rate allowed for this shaper
+ * @burst: Maximum burst for the peek rate of this shaper
+ * @priority: Scheduling priority for this shaper
+ * @weight: Scheduling weight for this shaper
+ */
+struct net_shaper {
+ struct net_shaper_handle parent;
+ struct net_shaper_handle handle;
+ enum net_shaper_metric metric;
+ u64 bw_min;
+ u64 bw_max;
+ u64 burst;
+ u32 priority;
+ u32 weight;
+
+ /* private: */
+ u32 leaves; /* accounted only for NODE scope */
+ struct rcu_head rcu;
+};
+
+/**
+ * struct net_shaper_ops - Operations on device H/W shapers
+ *
+ * The operations apply to both net_device and devlink objects.
+ * The initial shaping configuration at device initialization is empty:
+ * it does not constrain the rate in any way.
+ * The network core keeps track of the applied user-configuration in
+ * the net_device or devlink structure.
+ * The operations are serialized via a per device lock.
+ *
+ * Devices not supporting any kind of nesting should not provide the
+ * group operation.
+ *
+ * Each shaper is uniquely identified within the device with a 'handle'
+ * comprising the shaper scope and a scope-specific id.
+ */
+struct net_shaper_ops {
+ /**
+ * @group: create the specified shapers scheduling group
+ *
+ * Nests the @leaves shapers under the @node shaper.
+ * All the shapers belong to the device specified by @binding.
+ * The @leaves array size is specified by @leaves_count.
+ * Creates the @leaves and the @node shaper as needed; if they already
+ * exist, links them together in the desired way.
+ * @leaves scope must be NET_SHAPER_SCOPE_QUEUE.
+ */
+ int (*group)(struct net_shaper_binding *binding, int leaves_count,
+ const struct net_shaper *leaves,
+ const struct net_shaper *node,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @set: Updates the specified shaper
+ *
+ * Updates or creates the @shaper on the device specified by @binding.
+ */
+ int (*set)(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @delete: Removes the specified shaper
+ *
+ * Removes the shaper configuration as identified by the given @handle
+ * on the device specified by @binding, restoring the default behavior.
+ */
+ int (*delete)(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @capabilities: get the shaper features supported by the device
+ *
+ * Fills the bitmask @cap with the supported capabilities for the
+ * specified @scope and device specified by @binding.
+ */
+ void (*capabilities)(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope, unsigned long *cap);
+};
+
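/*
 * Example sketch (not part of the patch): a driver supporting only
 * per-queue rate limiting implements @set (and typically @delete and
 * @capabilities) while leaving @group unset; all drv_* names are
 * hypothetical.
 */
static int drv_shaper_set(struct net_shaper_binding *binding,
			  const struct net_shaper *shaper,
			  struct netlink_ext_ack *extack)
{
	if (shaper->handle.scope != NET_SHAPER_SCOPE_QUEUE) {
		NL_SET_ERR_MSG_MOD(extack, "only queue scope is supported");
		return -EOPNOTSUPP;
	}
	/* program shaper->bw_max for queue shaper->handle.id here */
	return 0;
}

static const struct net_shaper_ops drv_shaper_ops = {
	.set = drv_shaper_set,
	/* .delete and .capabilities omitted for brevity */
};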
+#endif
diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h
new file mode 100644
index 000000000000..3d3aef80beac
--- /dev/null
+++ b/include/net/netdev_lock.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_NETDEV_LOCK_H
+#define _NET_NETDEV_LOCK_H
+
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+static inline bool netdev_trylock(struct net_device *dev)
+{
+ return mutex_trylock(&dev->lock);
+}
+
+static inline void netdev_assert_locked(const struct net_device *dev)
+{
+ lockdep_assert_held(&dev->lock);
+}
+
+static inline void
+netdev_assert_locked_or_invisible(const struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_assert_locked(dev);
+}
+
+static inline bool netdev_need_ops_lock(const struct net_device *dev)
+{
+ bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ ret |= !!dev->netdev_ops->net_shaper_ops;
+#endif
+
+ return ret;
+}
+
+static inline void netdev_lock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_lock(dev);
+}
+
+static inline void netdev_unlock_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+}
+
+static inline void netdev_lock_ops_to_full(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_assert_locked(dev);
+ else
+ netdev_lock(dev);
+}
+
+static inline void netdev_unlock_full_to_ops(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_assert_locked(dev);
+ else
+ netdev_unlock(dev);
+}
+
+static inline void netdev_ops_assert_locked(const struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ lockdep_assert_held(&dev->lock);
+ else
+ ASSERT_RTNL();
+}
+
+static inline void
+netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
+{
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING)
+ netdev_ops_assert_locked(dev);
+}
+
+static inline void netdev_lock_ops_compat(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_lock(dev);
+ else
+ rtnl_lock();
+}
+
+static inline void netdev_unlock_ops_compat(struct net_device *dev)
+{
+ if (netdev_need_ops_lock(dev))
+ netdev_unlock(dev);
+ else
+ rtnl_unlock();
+}
+
+static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
+ const struct lockdep_map *b)
+{
+ if (a == b)
+ return 0;
+
+ /* Allow locking multiple devices only under rtnl_lock,
+ * the exact order doesn't matter.
+ * Note that upper devices don't lock their ops, so nesting
+ * mostly happens in batched device removal for now.
+ */
+ return lockdep_rtnl_is_held() ? -1 : 1;
+}
+
+#define netdev_lockdep_set_classes(dev) \
+{ \
+ static struct lock_class_key qdisc_tx_busylock_key; \
+ static struct lock_class_key qdisc_xmit_lock_key; \
+ static struct lock_class_key dev_addr_list_lock_key; \
+ static struct lock_class_key dev_instance_lock_key; \
+ unsigned int i; \
+ \
+ (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
+ lockdep_set_class(&(dev)->addr_list_lock, \
+ &dev_addr_list_lock_key); \
+ lockdep_set_class(&(dev)->lock, \
+ &dev_instance_lock_key); \
+ lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL); \
+ for (i = 0; i < (dev)->num_tx_queues; i++) \
+ lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
+ &qdisc_xmit_lock_key); \
+}
+
+#define netdev_lock_dereference(p, dev) \
+ rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))
+
+int netdev_debug_event(struct notifier_block *nb, unsigned long event,
+ void *ptr);
+
+#endif
diff --git a/include/net/netdev_netlink.h b/include/net/netdev_netlink.h
new file mode 100644
index 000000000000..075962dbe743
--- /dev/null
+++ b/include/net/netdev_netlink.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_NETDEV_NETLINK_H
+#define __NET_NETDEV_NETLINK_H
+
+#include <linux/list.h>
+
+struct netdev_nl_sock {
+ struct mutex lock;
+ struct list_head bindings;
+};
+
+#endif /* __NET_NETDEV_NETLINK_H */
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index a8a7e48dfa6c..cd00e0406cf4 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -4,6 +4,16 @@
#include <linux/netdevice.h>
+/**
+ * struct netdev_config - queue-related configuration for a netdev
+ * @hds_thresh: HDS Threshold value.
+ * @hds_config: HDS value from userspace.
+ */
+struct netdev_config {
+ u32 hds_thresh;
+ u8 hds_config;
+};
+
/* See the netdev.yaml spec for definition of each statistic */
struct netdev_queue_stats_rx {
u64 bytes;
@@ -13,6 +23,7 @@ struct netdev_queue_stats_rx {
u64 hw_drops;
u64 hw_drop_overruns;
+ u64 csum_complete;
u64 csum_unnecessary;
u64 csum_none;
u64 csum_bad;
@@ -62,6 +73,8 @@ struct netdev_queue_stats_tx {
* statistics will not generally add up to the total number of events for
* the device. The @get_base_stats callback allows filling in the delta
* between events for currently live queues and overall device history.
+ * @get_base_stats can also be used to report any miscellaneous packets
+ * transferred outside of the main set of queues used by the networking stack.
* When the statistics for the entire device are queried, first @get_base_stats
* is issued to collect the delta, and then a series of per-queue callbacks.
* Only statistics which are set in @get_base_stats will be reported
@@ -72,9 +85,11 @@ struct netdev_queue_stats_tx {
* for some of the events is not maintained, and reliable "total" cannot
* be provided).
*
+ * Ops are called under the instance lock if netdev_need_ops_lock()
+ * returns true, otherwise under rtnl_lock.
* Device drivers can assume that when collecting total device stats,
* the @get_base_stats and subsequent per-queue calls are performed
- * "atomically" (without releasing the rtnl_lock).
+ * "atomically" (without releasing the relevant lock).
*
* Device drivers are encouraged to reset the per-queue statistics when
* number of queues change. This is because the primary use case for
@@ -90,6 +105,12 @@ struct netdev_stat_ops {
struct netdev_queue_stats_tx *tx);
};
+void netdev_stat_queue_sum(struct net_device *netdev,
+ int rx_start, int rx_end,
+ struct netdev_queue_stats_rx *rx_sum,
+ int tx_start, int tx_end,
+ struct netdev_queue_stats_tx *tx_sum);
+
/**
* struct netdev_queue_mgmt_ops - netdev ops for queue management
*
@@ -105,6 +126,13 @@ struct netdev_stat_ops {
*
* @ndo_queue_stop: Stop the RX queue at the specified index. The stopped
* queue's memory is written at the specified address.
+ *
+ * @ndo_queue_get_dma_dev: Get dma device for zero-copy operations to be used
+ * for this queue. Return NULL on error.
+ *
+ * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
+ * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
+ * be called for an interface which is open.
*/
struct netdev_queue_mgmt_ops {
size_t ndo_queue_mem_size;
@@ -119,8 +147,12 @@ struct netdev_queue_mgmt_ops {
int (*ndo_queue_stop)(struct net_device *dev,
void *per_queue_mem,
int idx);
+ struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
+ int idx);
};
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+
/**
* DOC: Lockless queue stopping / waking helpers.
*
@@ -263,28 +295,39 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
#define netif_subqueue_try_stop(dev, idx, get_desc, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_try_stop(txq, get_desc, start_thrs); \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_try_stop(_txq, get_desc, start_thrs); \
})
+static inline void netif_subqueue_sent(const struct net_device *dev,
+ unsigned int idx, unsigned int bytes)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, idx);
+ netdev_tx_sent_queue(txq, bytes);
+}
+
#define netif_subqueue_maybe_stop(dev, idx, get_desc, stop_thrs, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_maybe_stop(_txq, get_desc, stop_thrs, start_thrs); \
})
#define netif_subqueue_completed_wake(dev, idx, pkts, bytes, \
get_desc, start_thrs) \
({ \
- struct netdev_queue *txq; \
+ struct netdev_queue *_txq; \
\
- txq = netdev_get_tx_queue(dev, idx); \
- netif_txq_completed_wake(txq, pkts, bytes, \
+ _txq = netdev_get_tx_queue(dev, idx); \
+ netif_txq_completed_wake(_txq, pkts, bytes, \
get_desc, start_thrs); \
})
+struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
+
#endif
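
Annotation: the netif_subqueue_* hunks above rename the macro-local `txq` to `_txq`. The reason is macro hygiene: in a statement-expression macro, a caller argument that mentions the same name as the macro's local silently binds to the local. A userspace reproduction of that bug class (illustrative names, compiles with only a self-init warning):

#include <stdio.h>

#define BROKEN_TWICE(x)	({ int val = (x); val * 2; })
#define FIXED_TWICE(x)	({ int _val = (x); _val * 2; })

int main(void)
{
	int val = 21;

	/* In BROKEN_TWICE, (x) expands to 'val', which now names the
	 * uninitialized macro-local, not the caller's variable. */
	printf("broken: %d\n", BROKEN_TWICE(val));	/* garbage, not 42 */
	printf("fixed:  %d\n", FIXED_TWICE(val));	/* 42 */
	return 0;
}

The underscore prefix makes a collision with a caller's identifier unlikely, which is exactly what the rename in the diff buys.
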
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index aa1716fb0e53..8cdcd138b33f 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <net/xdp.h>
+#include <net/page_pool/types.h>
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
@@ -15,16 +16,18 @@ struct netdev_rx_queue {
struct rps_dev_flow_table __rcu *rps_flow_table;
#endif
struct kobject kobj;
+ const struct attribute_group **groups;
struct net_device *dev;
netdevice_tracker dev_tracker;
+ /* All fields below are "ops protected",
+ * see comment about net_device::lock
+ */
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
- /* NAPI instance for the queue
- * Readers and writers must hold RTNL
- */
struct napi_struct *napi;
+ struct pp_memory_provider_params mp_params;
} ____cacheline_aligned_in_smp;
/*
@@ -43,7 +46,6 @@ __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
return dev->_rx + rxq;
}
-#ifdef CONFIG_SYSFS
static inline unsigned int
get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
{
@@ -53,5 +55,7 @@ get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
BUG_ON(index >= dev->num_rx_queues);
return index;
}
-#endif
+
+int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq);
+
#endif
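
Annotation: get_netdev_rx_queue_index() above derives the index by subtracting the array base `dev->_rx` from the element pointer. Minimal sketch of that pointer-difference idiom (names invented for illustration):

#include <assert.h>
#include <stdio.h>

struct rxq { int napi_id; };

static unsigned int rxq_index(const struct rxq *base, const struct rxq *q)
{
	return (unsigned int)(q - base);	/* element count, not bytes */
}

int main(void)
{
	struct rxq queues[4];
	unsigned int idx = rxq_index(queues, &queues[2]);

	assert(idx == 2);
	printf("index %u\n", idx);
	return 0;
}
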
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 2c8c2b023848..8d65ffbf57de 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,9 +13,6 @@
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp;
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp;
#endif
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index c653fcb88354..09de2f2686b5 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -10,14 +10,6 @@
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct net *net, struct sock *, struct sk_buff *oldskb,
int hook);
-const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *_oth, int hook);
-struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int ttl);
-void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
- const struct tcphdr *oth);
-
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index d729344ba644..94ec0b9f2838 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -9,16 +9,6 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char cod
unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook);
-const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *otcph,
- unsigned int *otcplen, int hook);
-struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int hoplimit);
-void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- const struct tcphdr *oth, unsigned int otcplen);
-
struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cba3ccf03fcc..aa0a7c82199e 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -18,7 +18,6 @@
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
-#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
@@ -31,7 +30,6 @@ struct nf_ct_udp {
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
- struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct nf_ct_udp udp;
@@ -204,8 +202,7 @@ bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple);
void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb,
- u32 extra_jiffies, bool do_acct);
+ u32 extra_jiffies, unsigned int bytes);
/* Refresh conntrack for this many jiffies and do accounting */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -213,15 +210,14 @@ static inline void nf_ct_refresh_acct(struct nf_conn *ct,
const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
+ __nf_ct_refresh_acct(ct, ctinfo, extra_jiffies, skb->len);
}
/* Refresh conntrack for this many jiffies */
static inline void nf_ct_refresh(struct nf_conn *ct,
- const struct sk_buff *skb,
u32 extra_jiffies)
{
- __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, false);
+ __nf_ct_refresh_acct(ct, 0, extra_jiffies, 0);
}
/* kill conntrack and do accounting */
@@ -308,22 +304,23 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
/* use after obtaining a reference count */
static inline bool nf_ct_should_gc(const struct nf_conn *ct)
{
- return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
- !nf_ct_is_dying(ct);
+ if (!nf_ct_is_confirmed(ct))
+ return false;
+
+ /* load ct->timeout after is_confirmed() test.
+ * Pairs with __nf_conntrack_confirm() which:
+ * 1. Increases ct->timeout value
+ * 2. Inserts ct into rcu hlist
+ * 3. Sets the confirmed bit
+ * 4. Unlocks the hlist lock
+ */
+ smp_acquire__after_ctrl_dep();
+
+ return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
}
#define NF_CT_DAY (86400 * HZ)
-/* Set an arbitrary timeout large enough not to ever expire, this save
- * us a check for the IPS_OFFLOAD_BIT from the packet path via
- * nf_ct_is_expired().
- */
-static inline void nf_ct_offload_timeout(struct nf_conn *ct)
-{
- if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
-}
-
struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
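
Annotation: the nf_ct_should_gc() rework above is a publication-ordering pattern: test the confirmed flag first, then promote the control dependency to an acquire so the timeout read cannot observe a pre-publication value. A userspace analogue using C11 atomics, as a sketch (the kernel primitive is smp_acquire__after_ctrl_dep(); everything else below is invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

struct entry {
	_Atomic bool confirmed;
	_Atomic unsigned long timeout;
};

static void publish(struct entry *e, unsigned long t)
{
	atomic_store_explicit(&e->timeout, t, memory_order_relaxed);
	atomic_store_explicit(&e->confirmed, true, memory_order_release);
}

static bool expired(struct entry *e, unsigned long now)
{
	if (!atomic_load_explicit(&e->confirmed, memory_order_relaxed))
		return false;
	/* counterpart of smp_acquire__after_ctrl_dep(): the fence gives
	 * the flag load acquire semantics before we read the data. */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&e->timeout, memory_order_relaxed) <= now;
}

int main(void)
{
	struct entry e = { 0 };

	publish(&e, 100);
	return expired(&e, 200) ? 0 : 1;
}
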
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index e227d997fc71..52a06de41aa0 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -15,20 +15,17 @@ struct nf_conncount_list {
unsigned int count; /* length of list */
};
-struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
- unsigned int keylen);
-void nf_conncount_destroy(struct net *net, unsigned int family,
- struct nf_conncount_data *data);
-
-unsigned int nf_conncount_count(struct net *net,
- struct nf_conncount_data *data,
- const u32 *key,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
-
-int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int keylen);
+void nf_conncount_destroy(struct net *net, struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count_skb(struct net *net,
+ const struct sk_buff *skb,
+ u16 l3num,
+ struct nf_conncount_data *data,
+ const u32 *key);
+
+int nf_conncount_add_skb(struct net *net, const struct sk_buff *skb,
+ u16 l3num, struct nf_conncount_list *list);
void nf_conncount_list_init(struct nf_conncount_list *list);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 0c1dac318e02..8dcf7c371ee9 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -12,6 +12,7 @@
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <asm/local64.h>
enum nf_ct_ecache_state {
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
@@ -20,6 +21,9 @@ enum nf_ct_ecache_state {
struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ local64_t timestamp; /* event timestamp, in nanoseconds */
+#endif
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 missed; /* missed events */
@@ -108,6 +112,14 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
if (e == NULL)
return;
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ /* renew only if this is the first cached event, so that the
+ * timestamp reflects the first, not the last, generated event.
+ */
+ if (local64_read(&e->timestamp) && READ_ONCE(e->cache) == 0)
+ local64_set(&e->timestamp, ktime_get_real_ns());
+#endif
+
set_bit(event, &e->cache);
#endif
}
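
Annotation: the ecache hunk above stamps only the first event of a batch — the timestamp is refreshed when the pending-event bitmap is still empty, and the non-zero timestamp test apparently doubles as the "timestamping enabled" marker. A simplified single-threaded sketch of that logic (names and types reduced for illustration):

#include <stdio.h>
#include <time.h>

struct ecache {
	unsigned long cache;	/* pending-event bitmap */
	long long timestamp;	/* ns; 0 treated as "off" here */
};

static void cache_event(struct ecache *e, int event)
{
	if (e->timestamp && e->cache == 0) {	/* first event of a batch */
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		e->timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}
	e->cache |= 1UL << event;
}

int main(void)
{
	struct ecache e = { .cache = 0, .timestamp = 1 };

	cache_event(&e, 3);
	cache_event(&e, 5);	/* timestamp unchanged: batch already open */
	printf("stamp %lld, cache %#lx\n", e.timestamp, e.cache);
	return 0;
}
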
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 1f47bef51722..cd5020835a6d 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -30,7 +30,7 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
- /* convert protoinfo to nfnetink attributes */
+ /* convert protoinfo to nfnetlink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy);
@@ -117,11 +117,6 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state);
-int nf_conntrack_dccp_packet(struct nf_conn *ct,
- struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- const struct nf_hook_state *state);
int nf_conntrack_sctp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
@@ -137,7 +132,6 @@ void nf_conntrack_generic_init_net(struct net *net);
void nf_conntrack_tcp_init_net(struct net *net);
void nf_conntrack_udp_init_net(struct net *net);
void nf_conntrack_gre_init_net(struct net *net);
-void nf_conntrack_dccp_init_net(struct net *net);
void nf_conntrack_sctp_init_net(struct net *net);
void nf_conntrack_icmp_init_net(struct net *net);
void nf_conntrack_icmpv6_init_net(struct net *net);
@@ -223,13 +217,6 @@ static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct)
}
#endif
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp;
-}
-#endif
-
#ifdef CONFIG_NF_CT_PROTO_SCTP
static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
{
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 9abb7ee40d72..b09c11c048d5 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -107,6 +107,19 @@ enum flow_offload_xmit_type {
#define NF_FLOW_TABLE_ENCAP_MAX 2
+struct flow_offload_tunnel {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+
+ u8 l3_proto;
+};
+
struct flow_offload_tuple {
union {
struct in_addr src_v4;
@@ -130,22 +143,25 @@ struct flow_offload_tuple {
__be16 proto;
} encap[NF_FLOW_TABLE_ENCAP_MAX];
+ struct flow_offload_tunnel tun;
+
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
u8 dir:2,
xmit_type:3,
encap_num:2,
+ tun_num:2,
in_vlan_ingress:2;
u16 mtu;
union {
struct {
struct dst_entry *dst_cache;
+ u32 ifidx;
u32 dst_cookie;
};
struct {
u32 ifidx;
- u32 hw_ifidx;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
} out;
@@ -163,6 +179,7 @@ struct flow_offload_tuple_rhash {
enum nf_flow_flags {
NF_FLOW_SNAT,
NF_FLOW_DNAT,
+ NF_FLOW_CLOSING,
NF_FLOW_TEARDOWN,
NF_FLOW_HW,
NF_FLOW_HW_DYING,
@@ -205,7 +222,9 @@ struct nf_flow_route {
u16 id;
__be16 proto;
} encap[NF_FLOW_TABLE_ENCAP_MAX];
+ struct flow_offload_tunnel tun;
u8 num_encaps:2,
+ num_tuns:2,
ingress_vlans:2;
} in;
struct {
@@ -221,6 +240,12 @@ struct nf_flow_route {
struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+struct nft_flowtable;
+struct nft_pktinfo;
+int nft_flow_route(const struct nft_pktinfo *pkt, const struct nf_conn *ct,
+ struct nf_flow_route *route, enum ip_conntrack_dir dir,
+ struct nft_flowtable *ft);
+
static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
flow_setup_cb_t *cb, void *cb_priv)
@@ -305,11 +330,26 @@ struct flow_ports {
__be16 source, dest;
};
+struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev);
+int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+
unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
+#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
+ (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
+extern int nf_flow_register_bpf(void);
+#else
+static inline int nf_flow_register_bpf(void)
+{
+ return 0;
+}
+#endif
+
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
@@ -354,7 +394,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
- if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN))
return false;
*inner_proto = __nf_flow_pppoe_proto(skb);
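
Annotation: the one-line pskb_may_pull() fix above is a classic bounds-check bug: the PPPoE session header sits after the Ethernet header, so the pull must cover ETH_HLEN + PPPOE_SES_HLEN, not just the inner header length. Generic restatement of the rule "validate offset plus length, not length alone" (illustrative code, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool can_read(size_t buf_len, size_t offset, size_t want)
{
	return buf_len >= offset + want;	/* account for the offset */
}

int main(void)
{
	size_t eth_hlen = 14, pppoe_ses_hlen = 8;

	/* A 20-byte buffer only looks big enough for the 8-byte header
	 * if the 14 bytes of Ethernet header in front are ignored. */
	printf("wrong check: %d\n", can_read(20, 0, pppoe_ses_hlen));        /* 1 */
	printf("right check: %d\n", can_read(20, eth_hlen, pppoe_ses_hlen)); /* 0 */
	return 0;
}
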
diff --git a/include/net/netfilter/nf_hooks_lwtunnel.h b/include/net/netfilter/nf_hooks_lwtunnel.h
index 52e27920f829..cef7a4eb8f97 100644
--- a/include/net/netfilter/nf_hooks_lwtunnel.h
+++ b/include/net/netfilter/nf_hooks_lwtunnel.h
@@ -2,6 +2,6 @@
#include <linux/types.h>
#ifdef CONFIG_SYSCTL
-int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
+int nf_hooks_lwtunnel_sysctl_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e55eedc84ed7..00506792a06d 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -59,6 +59,9 @@ extern int sysctl_nf_log_all_netns;
int nf_log_register(u_int8_t pf, struct nf_logger *logger);
void nf_log_unregister(struct nf_logger *logger);
+/* Check if any logger is registered for a given protocol family. */
+bool nf_log_is_registered(u_int8_t pf);
+
int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger);
void nf_log_unset(struct net *net, const struct nf_logger *logger);
diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h
index 7c669792fb9c..f1db33bc6bf8 100644
--- a/include/net/netfilter/nf_reject.h
+++ b/include/net/netfilter/nf_reject.h
@@ -34,7 +34,6 @@ static inline bool nf_reject_verify_csum(struct sk_buff *skb, int dataoff,
/* Protocols with partial checksums. */
case IPPROTO_UDPLITE:
- case IPPROTO_DCCP:
return false;
}
return true;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2796153b03da..fab7dc73f738 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -2,7 +2,7 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -209,6 +209,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
+ * @reg_inited: bitmap of initialised registers
*/
struct nft_ctx {
struct net *net;
@@ -221,6 +222,7 @@ struct nft_ctx {
u8 family;
u8 level;
bool report;
+ DECLARE_BITMAP(reg_inited, NFT_REG32_NUM);
};
enum nft_data_desc_flags {
@@ -254,7 +256,8 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
-int nft_parse_register_load(const struct nlattr *attr, u8 *sreg, u32 len);
+int nft_parse_register_load(const struct nft_ctx *ctx,
+ const struct nlattr *attr, u8 *sreg, u32 len);
int nft_parse_register_store(const struct nft_ctx *ctx,
const struct nlattr *attr, u8 *dreg,
const struct nft_data *data,
@@ -311,6 +314,7 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
/**
* enum nft_iter_type - nftables set iterator type
*
+ * @NFT_ITER_UNSPEC: unspecified, to catch errors
* @NFT_ITER_READ: read-only iteration over set elements
* @NFT_ITER_UPDATE: iteration under mutex to update set element state
*/
@@ -438,6 +442,9 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @ksize: kernel set size
+ * @usize: userspace set size
+ * @adjust_maxsize: delta to adjust maximum set size
* @commit: commit set elements
* @abort: abort set elements
* @privsize: function to return size of set private data
@@ -452,19 +459,13 @@ struct nft_set_ext;
* control plane functions.
*/
struct nft_set_ops {
- bool (*lookup)(const struct net *net,
+ const struct nft_set_ext * (*lookup)(const struct net *net,
const struct nft_set *set,
+ const u32 *key);
+ const struct nft_set_ext * (*update)(struct nft_set *set,
const u32 *key,
- const struct nft_set_ext **ext);
- bool (*update)(struct nft_set *set,
- const u32 *key,
- struct nft_elem_priv *
- (*new)(struct nft_set *,
- const struct nft_expr *,
- struct nft_regs *),
const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_set_ext **ext);
+ struct nft_regs *regs);
bool (*delete)(const struct nft_set *set,
const u32 *key);
@@ -491,6 +492,9 @@ struct nft_set_ops {
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
+ u32 (*ksize)(u32 size);
+ u32 (*usize)(u32 size);
+ u32 (*adjust_maxsize)(const struct nft_set *set);
void (*commit)(struct nft_set *set);
void (*abort)(const struct nft_set *set);
u64 (*privsize)(const struct nlattr * const nla[],
@@ -552,6 +556,7 @@ struct nft_set_elem_expr {
* @size: maximum set size
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
+ * @in_update_walk: true during ->walk() in transaction phase
* @use: number of rules references to this set
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
@@ -586,6 +591,7 @@ struct nft_set {
u32 size;
u8 field_len[NFT_REG32_COUNT];
u8 field_count;
+ bool in_update_walk;
u32 use;
atomic_t nelems;
u32 ndeact;
@@ -619,6 +625,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline enum nft_data_types nft_set_datatype(const struct nft_set *set)
+{
+ return set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
static inline bool nft_set_gc_is_pending(const struct nft_set *s)
{
return refcount_read(&s->refs) != 1;
@@ -678,9 +689,8 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* @NFT_SET_EXT_DATA: mapping data
* @NFT_SET_EXT_FLAGS: element flags
* @NFT_SET_EXT_TIMEOUT: element timeout
- * @NFT_SET_EXT_EXPIRATION: element expiration time
* @NFT_SET_EXT_USERDATA: user data associated with the element
- * @NFT_SET_EXT_EXPRESSIONS: expressions assiciated with the element
+ * @NFT_SET_EXT_EXPRESSIONS: expressions associated with the element
* @NFT_SET_EXT_OBJREF: stateful object reference associated with element
* @NFT_SET_EXT_NUM: number of extension types
*/
@@ -690,7 +700,6 @@ enum nft_set_extensions {
NFT_SET_EXT_DATA,
NFT_SET_EXT_FLAGS,
NFT_SET_EXT_TIMEOUT,
- NFT_SET_EXT_EXPIRATION,
NFT_SET_EXT_USERDATA,
NFT_SET_EXT_EXPRESSIONS,
NFT_SET_EXT_OBJREF,
@@ -726,15 +735,18 @@ struct nft_set_ext_tmpl {
/**
* struct nft_set_ext - set extensions
*
- * @genmask: generation mask
+ * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
* @offset: offsets of individual extension types
* @data: beginning of extension data
+ *
+ * This structure must be aligned to word size, otherwise atomic bitops
+ * on genmask field can cause alignment failure on some archs.
*/
struct nft_set_ext {
u8 genmask;
u8 offset[NFT_SET_EXT_NUM];
char data[];
-};
+} __aligned(BITS_PER_LONG / 8);
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
{
@@ -802,14 +814,14 @@ static inline u8 *nft_set_ext_flags(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_FLAGS);
}
-static inline u64 *nft_set_ext_timeout(const struct nft_set_ext *ext)
-{
- return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
-}
+struct nft_timeout {
+ u64 timeout;
+ u64 expiration;
+};
-static inline u64 *nft_set_ext_expiration(const struct nft_set_ext *ext)
+static inline struct nft_timeout *nft_set_ext_timeout(const struct nft_set_ext *ext)
{
- return nft_set_ext(ext, NFT_SET_EXT_EXPIRATION);
+ return nft_set_ext(ext, NFT_SET_EXT_TIMEOUT);
}
static inline struct nft_userdata *nft_set_ext_userdata(const struct nft_set_ext *ext)
@@ -825,8 +837,11 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
u64 tstamp)
{
- return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+ if (!nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) ||
+ READ_ONCE(nft_set_ext_timeout(ext)->timeout) == 0)
+ return false;
+
+ return time_after_eq64(tstamp, READ_ONCE(nft_set_ext_timeout(ext)->expiration));
}
static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
@@ -954,8 +969,7 @@ struct nft_expr_ops {
const struct nft_expr *expr,
bool reset);
int (*validate)(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool (*reduce)(struct nft_regs_track *track,
const struct nft_expr *expr);
bool (*gc)(struct net *net,
@@ -1171,7 +1185,7 @@ static inline bool nft_chain_is_bound(struct nft_chain *chain)
int nft_chain_add(struct nft_table *table, struct nft_chain *chain);
void nft_chain_del(struct nft_chain *chain);
-void nf_tables_chain_destroy(struct nft_ctx *ctx);
+void nf_tables_chain_destroy(struct nft_chain *chain);
struct nft_stats {
u64 bytes;
@@ -1181,10 +1195,17 @@ struct nft_stats {
struct nft_hook {
struct list_head list;
- struct nf_hook_ops ops;
+ struct list_head ops_list;
struct rcu_head rcu;
+ char ifname[IFNAMSIZ];
+ u8 ifnamelen;
};
+struct nf_hook_ops *nft_hook_find_ops(const struct nft_hook *hook,
+ const struct net_device *dev);
+struct nf_hook_ops *nft_hook_find_ops_rcu(const struct nft_hook *hook,
+ const struct net_device *dev);
+
/**
* struct nft_base_chain - nf_tables base chain
*
@@ -1218,8 +1239,6 @@ static inline bool nft_is_base_chain(const struct nft_chain *chain)
return chain->flags & NFT_CHAIN_BASE;
}
-int __nft_release_basechain(struct nft_ctx *ctx);
-
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
static inline bool nft_use_inc(u32 *use)
@@ -1454,7 +1473,8 @@ struct nft_flowtable {
struct nf_flowtable data;
};
-struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
+struct nft_flowtable *nft_flowtable_lookup(const struct net *net,
+ const struct nft_table *table,
const struct nlattr *nla,
u8 genmask);
@@ -1608,41 +1628,68 @@ static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
}
/**
- * struct nft_trans - nf_tables object update in transaction
+ * struct nft_trans - nf_tables object update in transaction
*
- * @list: used internally
- * @binding_list: list of objects with possible bindings
- * @msg_type: message type
- * @put_net: ctx->net needs to be put
- * @ctx: transaction context
- * @data: internal information related to the transaction
+ * @list: used internally
+ * @net: struct net
+ * @table: struct nft_table the object resides in
+ * @msg_type: message type
+ * @seq: netlink sequence number
+ * @flags: modifiers to new request
+ * @report: notify via unicast netlink message
+ * @put_net: net needs to be put
+ *
+ * This is the information common to all objects in the transaction,
+ * this must always be the first member of derived sub-types.
*/
struct nft_trans {
struct list_head list;
- struct list_head binding_list;
+ struct net *net;
+ struct nft_table *table;
int msg_type;
- bool put_net;
- struct nft_ctx ctx;
- char data[];
+ u32 seq;
+ u16 flags;
+ u8 report:1;
+ u8 put_net:1;
+};
+
+/**
+ * struct nft_trans_binding - nf_tables object with binding support in transaction
+ * @nft_trans: base structure, MUST be first member
+ * @binding_list: list of objects with possible bindings
+ *
+ * This is the base type used by objects that can be bound to a chain.
+ */
+struct nft_trans_binding {
+ struct nft_trans nft_trans;
+ struct list_head binding_list;
};
struct nft_trans_rule {
+ struct nft_trans nft_trans;
struct nft_rule *rule;
+ struct nft_chain *chain;
struct nft_flow_rule *flow;
u32 rule_id;
bool bound;
};
-#define nft_trans_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->rule)
-#define nft_trans_flow_rule(trans) \
- (((struct nft_trans_rule *)trans->data)->flow)
-#define nft_trans_rule_id(trans) \
- (((struct nft_trans_rule *)trans->data)->rule_id)
-#define nft_trans_rule_bound(trans) \
- (((struct nft_trans_rule *)trans->data)->bound)
+#define nft_trans_container_rule(trans) \
+ container_of(trans, struct nft_trans_rule, nft_trans)
+#define nft_trans_rule(trans) \
+ nft_trans_container_rule(trans)->rule
+#define nft_trans_flow_rule(trans) \
+ nft_trans_container_rule(trans)->flow
+#define nft_trans_rule_id(trans) \
+ nft_trans_container_rule(trans)->rule_id
+#define nft_trans_rule_bound(trans) \
+ nft_trans_container_rule(trans)->bound
+#define nft_trans_rule_chain(trans) \
+ nft_trans_container_rule(trans)->chain
struct nft_trans_set {
+ struct nft_trans_binding nft_trans_binding;
+ struct list_head list_trans_newset;
struct nft_set *set;
u32 set_id;
u32 gc_int;
@@ -1652,100 +1699,132 @@ struct nft_trans_set {
u32 size;
};
-#define nft_trans_set(trans) \
- (((struct nft_trans_set *)trans->data)->set)
-#define nft_trans_set_id(trans) \
- (((struct nft_trans_set *)trans->data)->set_id)
-#define nft_trans_set_bound(trans) \
- (((struct nft_trans_set *)trans->data)->bound)
-#define nft_trans_set_update(trans) \
- (((struct nft_trans_set *)trans->data)->update)
-#define nft_trans_set_timeout(trans) \
- (((struct nft_trans_set *)trans->data)->timeout)
-#define nft_trans_set_gc_int(trans) \
- (((struct nft_trans_set *)trans->data)->gc_int)
-#define nft_trans_set_size(trans) \
- (((struct nft_trans_set *)trans->data)->size)
+#define nft_trans_container_set(t) \
+ container_of(t, struct nft_trans_set, nft_trans_binding.nft_trans)
+#define nft_trans_set(trans) \
+ nft_trans_container_set(trans)->set
+#define nft_trans_set_id(trans) \
+ nft_trans_container_set(trans)->set_id
+#define nft_trans_set_bound(trans) \
+ nft_trans_container_set(trans)->bound
+#define nft_trans_set_update(trans) \
+ nft_trans_container_set(trans)->update
+#define nft_trans_set_timeout(trans) \
+ nft_trans_container_set(trans)->timeout
+#define nft_trans_set_gc_int(trans) \
+ nft_trans_container_set(trans)->gc_int
+#define nft_trans_set_size(trans) \
+ nft_trans_container_set(trans)->size
struct nft_trans_chain {
+ struct nft_trans_binding nft_trans_binding;
struct nft_chain *chain;
- bool update;
char *name;
struct nft_stats __percpu *stats;
u8 policy;
+ bool update;
bool bound;
u32 chain_id;
struct nft_base_chain *basechain;
struct list_head hook_list;
};
-#define nft_trans_chain(trans) \
- (((struct nft_trans_chain *)trans->data)->chain)
-#define nft_trans_chain_update(trans) \
- (((struct nft_trans_chain *)trans->data)->update)
-#define nft_trans_chain_name(trans) \
- (((struct nft_trans_chain *)trans->data)->name)
-#define nft_trans_chain_stats(trans) \
- (((struct nft_trans_chain *)trans->data)->stats)
-#define nft_trans_chain_policy(trans) \
- (((struct nft_trans_chain *)trans->data)->policy)
-#define nft_trans_chain_bound(trans) \
- (((struct nft_trans_chain *)trans->data)->bound)
-#define nft_trans_chain_id(trans) \
- (((struct nft_trans_chain *)trans->data)->chain_id)
-#define nft_trans_basechain(trans) \
- (((struct nft_trans_chain *)trans->data)->basechain)
-#define nft_trans_chain_hooks(trans) \
- (((struct nft_trans_chain *)trans->data)->hook_list)
+#define nft_trans_container_chain(t) \
+ container_of(t, struct nft_trans_chain, nft_trans_binding.nft_trans)
+#define nft_trans_chain(trans) \
+ nft_trans_container_chain(trans)->chain
+#define nft_trans_chain_update(trans) \
+ nft_trans_container_chain(trans)->update
+#define nft_trans_chain_name(trans) \
+ nft_trans_container_chain(trans)->name
+#define nft_trans_chain_stats(trans) \
+ nft_trans_container_chain(trans)->stats
+#define nft_trans_chain_policy(trans) \
+ nft_trans_container_chain(trans)->policy
+#define nft_trans_chain_bound(trans) \
+ nft_trans_container_chain(trans)->bound
+#define nft_trans_chain_id(trans) \
+ nft_trans_container_chain(trans)->chain_id
+#define nft_trans_basechain(trans) \
+ nft_trans_container_chain(trans)->basechain
+#define nft_trans_chain_hooks(trans) \
+ nft_trans_container_chain(trans)->hook_list
struct nft_trans_table {
+ struct nft_trans nft_trans;
bool update;
};
-#define nft_trans_table_update(trans) \
- (((struct nft_trans_table *)trans->data)->update)
+#define nft_trans_container_table(trans) \
+ container_of(trans, struct nft_trans_table, nft_trans)
+#define nft_trans_table_update(trans) \
+ nft_trans_container_table(trans)->update
+
+enum nft_trans_elem_flags {
+ NFT_TRANS_UPD_TIMEOUT = (1 << 0),
+ NFT_TRANS_UPD_EXPIRATION = (1 << 1),
+};
+
+struct nft_elem_update {
+ u64 timeout;
+ u64 expiration;
+ u8 flags;
+};
+
+struct nft_trans_one_elem {
+ struct nft_elem_priv *priv;
+ struct nft_elem_update *update;
+};
struct nft_trans_elem {
+ struct nft_trans nft_trans;
struct nft_set *set;
- struct nft_elem_priv *elem_priv;
bool bound;
+ unsigned int nelems;
+ struct nft_trans_one_elem elems[] __counted_by(nelems);
};
-#define nft_trans_elem_set(trans) \
- (((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem_priv(trans) \
- (((struct nft_trans_elem *)trans->data)->elem_priv)
-#define nft_trans_elem_set_bound(trans) \
- (((struct nft_trans_elem *)trans->data)->bound)
+#define nft_trans_container_elem(t) \
+ container_of(t, struct nft_trans_elem, nft_trans)
+#define nft_trans_elem_set(trans) \
+ nft_trans_container_elem(trans)->set
+#define nft_trans_elem_set_bound(trans) \
+ nft_trans_container_elem(trans)->bound
struct nft_trans_obj {
+ struct nft_trans nft_trans;
struct nft_object *obj;
struct nft_object *newobj;
bool update;
};
-#define nft_trans_obj(trans) \
- (((struct nft_trans_obj *)trans->data)->obj)
-#define nft_trans_obj_newobj(trans) \
- (((struct nft_trans_obj *)trans->data)->newobj)
-#define nft_trans_obj_update(trans) \
- (((struct nft_trans_obj *)trans->data)->update)
+#define nft_trans_container_obj(t) \
+ container_of(t, struct nft_trans_obj, nft_trans)
+#define nft_trans_obj(trans) \
+ nft_trans_container_obj(trans)->obj
+#define nft_trans_obj_newobj(trans) \
+ nft_trans_container_obj(trans)->newobj
+#define nft_trans_obj_update(trans) \
+ nft_trans_container_obj(trans)->update
struct nft_trans_flowtable {
+ struct nft_trans nft_trans;
struct nft_flowtable *flowtable;
- bool update;
struct list_head hook_list;
u32 flags;
+ bool update;
};
-#define nft_trans_flowtable(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flowtable)
-#define nft_trans_flowtable_update(trans) \
- (((struct nft_trans_flowtable *)trans->data)->update)
-#define nft_trans_flowtable_hooks(trans) \
- (((struct nft_trans_flowtable *)trans->data)->hook_list)
-#define nft_trans_flowtable_flags(trans) \
- (((struct nft_trans_flowtable *)trans->data)->flags)
+#define nft_trans_container_flowtable(t) \
+ container_of(t, struct nft_trans_flowtable, nft_trans)
+#define nft_trans_flowtable(trans) \
+ nft_trans_container_flowtable(trans)->flowtable
+#define nft_trans_flowtable_update(trans) \
+ nft_trans_container_flowtable(trans)->update
+#define nft_trans_flowtable_hooks(trans) \
+ nft_trans_container_flowtable(trans)->hook_list
+#define nft_trans_flowtable_flags(trans) \
+ nft_trans_container_flowtable(trans)->flags
#define NFT_TRANS_GC_BATCHCOUNT 256
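
Annotation: the transaction refactor above replaces the old `char data[]` tail casts with typed subtypes that embed struct nft_trans as their first member and recover the subtype via container_of(). Freestanding sketch of that embedding pattern (container_of reimplemented here for userspace; all other names invented):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct trans { int msg_type; };

struct trans_rule {
	struct trans base;	/* must stay the first member */
	int rule_id;
};

static int rule_id_of(struct trans *t)
{
	return container_of(t, struct trans_rule, base)->rule_id;
}

int main(void)
{
	struct trans_rule r = { .base = { .msg_type = 1 }, .rule_id = 42 };

	printf("rule id %d\n", rule_id_of(&r.base));
	return 0;
}

Unlike the `->data` casts, the container_of form lets the compiler see the real subtype layout, which is what makes annotations like __counted_by(nelems) in the elem subtype possible.
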
@@ -1759,6 +1838,33 @@ struct nft_trans_gc {
struct rcu_head rcu;
};
+static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+{
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWRULE:
+ case NFT_MSG_DELRULE:
+ case NFT_MSG_DESTROYRULE:
+ ctx->chain = nft_trans_rule_chain(trans);
+ break;
+ case NFT_MSG_NEWCHAIN:
+ case NFT_MSG_DELCHAIN:
+ case NFT_MSG_DESTROYCHAIN:
+ ctx->chain = nft_trans_chain(trans);
+ break;
+ default:
+ ctx->chain = NULL;
+ break;
+ }
+
+ ctx->net = trans->net;
+ ctx->table = trans->table;
+ ctx->family = trans->table->family;
+ ctx->report = trans->report;
+ ctx->flags = trans->flags;
+ ctx->seq = trans->seq;
+}
+
struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
unsigned int gc_seq, gfp_t gfp);
void nft_trans_gc_destroy(struct nft_trans_gc *trans);
@@ -1786,7 +1892,7 @@ void nft_chain_filter_fini(void);
void __init nft_chain_route_init(void);
void nft_chain_route_fini(void);
-void nf_tables_trans_destroy_flush_work(void);
+void nf_tables_trans_destroy_flush_work(struct net *net);
int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
__be64 nf_jiffies64_to_msecs(u64 input);
@@ -1800,15 +1906,17 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re
struct nftables_pernet {
struct list_head tables;
struct list_head commit_list;
+ struct list_head destroy_list;
+ struct list_head commit_set_list;
struct list_head binding_list;
struct list_head module_list;
struct list_head notify_list;
struct mutex commit_mutex;
u64 table_handle;
u64 tstamp;
- unsigned int base_seq;
unsigned int gc_seq;
u8 validate_state;
+ struct work_struct destroy_work;
};
extern unsigned int nf_tables_net_id;
@@ -1826,11 +1934,6 @@ static inline u64 nft_net_tstamp(const struct net *net)
#define __NFT_REDUCE_READONLY 1UL
#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
-static inline bool nft_reduce_is_readonly(const struct nft_expr *expr)
-{
- return expr->ops->reduce == NFT_REDUCE_READONLY;
-}
-
void nft_reg_track_update(struct nft_regs_track *track,
const struct nft_expr *expr, u8 dreg, u8 len);
void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len);
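
Annotation: struct nft_set_ext above gained __aligned(BITS_PER_LONG / 8) because atomic bitops on the u8 genmask operate on a full machine word, so the structure must start word-aligned. A C11 sketch of forcing that alignment on an otherwise byte-aligned struct (illustrative, userspace):

#include <stdalign.h>
#include <stdio.h>

struct ext_unaligned {
	unsigned char genmask;
	unsigned char offset[4];
};

struct ext_aligned {
	alignas(sizeof(long)) unsigned char genmask;
	unsigned char offset[4];
};

int main(void)
{
	printf("unaligned: align %zu\n", alignof(struct ext_unaligned)); /* 1 */
	printf("aligned:   align %zu\n", alignof(struct ext_aligned));   /* 8 on LP64 */
	return 0;
}
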
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index ff27cb2e1662..b8df5acbb723 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -73,7 +73,7 @@ struct nft_ct {
struct nft_payload {
enum nft_payload_bases base:8;
- u8 offset;
+ u16 offset;
u8 len;
u8 dreg;
};
@@ -94,34 +94,35 @@ extern const struct nft_set_type nft_set_pipapo_type;
extern const struct nft_set_type nft_set_pipapo_avx2_type;
#ifdef CONFIG_MITIGATION_RETPOLINE
-bool nft_rhash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_hash_lookup_fast(const struct net *net,
- const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_hash_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-bool nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
-#else
-static inline bool
-nft_set_do_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext)
-{
- return set->ops->lookup(net, set, key, ext);
-}
+const struct nft_set_ext *
+nft_rhash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_hash_lookup_fast(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+const struct nft_set_ext *
+nft_hash_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
#endif
+const struct nft_set_ext *
+nft_set_do_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
+
/* called from nft_pipapo_avx2.c */
-bool nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
+const struct nft_set_ext *
+nft_pipapo_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
/* called from nft_set_pipapo.c */
-bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
- const u32 *key, const struct nft_set_ext **ext);
+const struct nft_set_ext *
+nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
+ const u32 *key);
void nft_counter_init_seqcount(void);
@@ -161,6 +162,7 @@ enum {
};
struct nft_inner_tun_ctx {
+ unsigned long cookie;
u16 type;
u16 inner_tunoff;
u16 inner_lloff;
@@ -180,4 +182,7 @@ void nft_objref_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
void nft_objref_map_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
+struct nft_elem_priv *nft_dynset_new(struct nft_set *set,
+ const struct nft_expr *expr,
+ struct nft_regs *regs);
#endif /* _NET_NF_TABLES_CORE_H */
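
Annotation: the lookup prototypes above all change shape from "bool result plus const struct nft_set_ext ** out parameter" to a single nullable pointer return. Sketch of the two shapes side by side (table and names invented for illustration):

#include <stdio.h>

struct ext { int value; };

static struct ext table[] = { { 10 }, { 20 } };

/* old shape: success flag plus out parameter */
static int lookup_old(int key, const struct ext **ext)
{
	if (key < 0 || key > 1)
		return 0;
	*ext = &table[key];
	return 1;
}

/* new shape: NULL means "not found"; one value to test and use */
static const struct ext *lookup_new(int key)
{
	return (key < 0 || key > 1) ? NULL : &table[key];
}

int main(void)
{
	const struct ext *e = lookup_new(1);

	if (e)
		printf("found %d\n", e->value);
	if (lookup_old(0, &e))
		printf("found %d\n", e->value);
	return 0;
}

The pointer return also keeps the result in a single register, which matters for the retpoline-era indirect-call paths these helpers serve.
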
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 60a7d0ce3080..fcf967286e37 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -19,7 +19,7 @@ static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt)
static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
{
struct iphdr *iph, _iph;
- u32 len, thoff;
+ u32 len, thoff, skb_len;
iph = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*iph), &_iph);
@@ -30,8 +30,10 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
- if (pkt->skb->len < len)
+ thoff = iph->ihl * 4;
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+
+ if (skb_len < len)
return -1;
else if (len < thoff)
return -1;
@@ -40,7 +42,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
pkt->flags = NFT_PKTINFO_L4PROTO;
pkt->tprot = iph->protocol;
- pkt->thoff = thoff;
+ pkt->thoff = skb_network_offset(pkt->skb) + thoff;
pkt->fragoff = ntohs(iph->frag_off) & IP_OFFSET;
return 0;
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 467d59b9e533..a0633eeaec97 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -31,8 +31,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
struct ipv6hdr *ip6h, _ip6h;
unsigned int thoff = 0;
unsigned short frag_off;
+ u32 pkt_len, skb_len;
int protohdr;
- u32 pkt_len;
ip6h = skb_header_pointer(pkt->skb, skb_network_offset(pkt->skb),
sizeof(*ip6h), &_ip6h);
@@ -43,7 +43,8 @@ static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt)
return -1;
pkt_len = ntohs(ip6h->payload_len);
- if (pkt_len + sizeof(*ip6h) > pkt->skb->len)
+ skb_len = pkt->skb->len - skb_network_offset(pkt->skb);
+ if (pkt_len + sizeof(*ip6h) > skb_len)
return -1;
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index faa108b1ba67..06985530517b 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -36,6 +36,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
* nf_tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
+ * @net: The network namespace.
* @skb: The skb being processed.
* @laddr: IPv4 address to redirect to or zero.
* @lport: TCP port to redirect to or zero.
@@ -48,7 +49,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
*
* nf_tproxy_handle_time_wait4() consumes the socket reference passed in.
*
- * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * Returns: the listener socket if there's one, the TIME_WAIT socket if
* no such listener is found, or NULL if the TCP header is incomplete.
*/
struct sock *
@@ -107,7 +108,7 @@ nf_tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
*
* nf_tproxy_handle_time_wait6() consumes the socket reference passed in.
*
- * Returns the listener socket if there's one, the TIME_WAIT socket if
+ * Returns: the listener socket if there's one, the TIME_WAIT socket if
* no such listener is found, or NULL if the TCP header is incomplete.
*/
struct sock *
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 167640b843ef..7370fba844ef 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -2,6 +2,7 @@
#ifndef _NFT_FIB_H_
#define _NFT_FIB_H_
+#include <net/l3mdev.h>
#include <net/netfilter/nf_tables.h>
struct nft_fib {
@@ -18,12 +19,39 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
+static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt)
+{
+ const struct net_device *indev = nft_in(pkt);
+ const struct sock *sk;
+
+ switch (nft_hook(pkt)) {
+ case NF_INET_PRE_ROUTING:
+ case NF_INET_INGRESS:
+ case NF_INET_LOCAL_IN:
+ break;
+ default:
+ return false;
+ }
+
+ sk = pkt->skb->sk;
+ if (sk && sk_fullsock(sk))
+ return sk->sk_rx_dst_ifindex == indev->ifindex;
+
+ return nft_fib_is_loopback(pkt->skb, indev);
+}
+
+static inline int nft_fib_l3mdev_master_ifindex_rcu(const struct nft_pktinfo *pkt,
+ const struct net_device *iif)
+{
+ const struct net_device *dev = iif ? iif : pkt->skb->dev;
+
+ return l3mdev_master_ifindex_rcu(dev);
+}
+
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
-int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
- const struct nft_data **data);
-
+int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr);
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index ba1238f12a48..d602263590fe 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -41,8 +41,7 @@ void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
int nft_meta_set_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
bool nft_meta_get_reduce(struct nft_regs_track *track,
const struct nft_expr *expr);
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 6d9ba62efd75..19060212988a 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -15,8 +15,7 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
int nft_reject_validate(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nft_data **data);
+ const struct nft_expr *expr);
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index 654bc777d2a7..02914b1df38b 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -30,7 +30,7 @@ struct calipso_doi;
/*
* NetLabel - A management interface for maintaining network packet label
- * mapping tables for explicit packet labling protocols.
+ * mapping tables for explicit packet labeling protocols.
*
* Network protocols such as CIPSO and RIPSO require a label translation layer
* to convert the label on the packet into something meaningful on the host
@@ -97,7 +97,7 @@ struct calipso_doi;
/* NetLabel audit information */
struct netlbl_audit {
- u32 secid;
+ struct lsm_prop prop;
kuid_t loginuid;
unsigned int sessionid;
};
@@ -208,6 +208,7 @@ struct netlbl_lsm_secattr {
* struct netlbl_calipso_ops - NetLabel CALIPSO operations
* @doi_add: add a CALIPSO DOI
* @doi_free: free a CALIPSO DOI
+ * @doi_remove: remove a CALIPSO DOI
* @doi_getdef: returns a reference to a DOI
* @doi_putdef: releases a reference of a DOI
* @doi_walk: enumerate the DOI list
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e78ce008e07c..1a8356ca4b78 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -68,6 +68,8 @@
* nlmsg_for_each_msg() loop over all messages
* nlmsg_validate() validate netlink message incl. attrs
* nlmsg_for_each_attr() loop over all attributes
+ * nlmsg_for_each_attr_type() loop over all attributes with the
+ * given type
*
* Misc:
* nlmsg_report() report back to application?
@@ -118,6 +120,7 @@
* nla_nest_start(skb, type) start a nested attribute
* nla_nest_end(skb, nla) finalize a nested attribute
* nla_nest_cancel(skb, nla) cancel nested attribute construction
+ * nla_put_empty_nest(skb, type) create an empty nest
*
* Attribute Length Calculations:
* nla_attr_size(payload) length of attribute w/o padding
@@ -142,6 +145,8 @@
* nla_get_flag(nla) return 1 if flag is true
* nla_get_msecs(nla) get payload for a msecs attribute
*
+ * The same functions also exist with _default().
+ *
* Attribute Misc:
* nla_memcpy(dest, nla, count) copy attribute into memory
* nla_memcmp(nla, data, size) compare attribute with memory area
@@ -318,7 +323,13 @@ enum nla_policy_validation {
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
+ * NLA_U8, NLA_U16,
+ * NLA_U32, NLA_U64,
+ * NLA_S8, NLA_S16,
+ * NLA_S32, NLA_S64,
+ * NLA_MSECS,
* NLA_BINARY Validation function called for the attribute.
+ *
* All other Unused - but note that it's a union
*
* Example:
@@ -469,6 +480,7 @@ struct nla_policy {
.max = _len \
}
#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len)
/**
* struct nl_info - netlink source information
@@ -608,6 +620,22 @@ static inline int nlmsg_len(const struct nlmsghdr *nlh)
}
/**
+ * nlmsg_payload - message payload if the data fits in the len
+ * @nlh: netlink message header
+ * @len: struct length
+ *
+ * Returns: The netlink message payload/data if the length is sufficient,
+ * otherwise NULL.
+ */
+static inline void *nlmsg_payload(const struct nlmsghdr *nlh, size_t len)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(len))
+ return NULL;
+
+ return nlmsg_data(nlh);
+}
+
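
Annotation: nlmsg_payload() above folds the "is the message long enough?" check into the accessor so callers cannot forget it before touching the payload. Generic userspace sketch of the same length-checked accessor (struct layout invented for illustration):

#include <stdio.h>
#include <string.h>

struct msg {
	unsigned int len;	/* total length, header included */
	unsigned char data[64];
};

static void *msg_payload(struct msg *m, size_t want)
{
	if (m->len < sizeof(m->len) + want)
		return NULL;	/* too short: refuse instead of over-reading */
	return m->data;
}

int main(void)
{
	struct msg m = { .len = sizeof(m.len) + sizeof(unsigned int) };
	unsigned int v = 7;

	memcpy(m.data, &v, sizeof(v));
	void *p = msg_payload(&m, sizeof(v));
	if (p) {
		unsigned int out;

		memcpy(&out, p, sizeof(out));
		printf("payload %u\n", out);
	} else {
		printf("too short\n");
	}
	return 0;
}
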
+/**
* nlmsg_attrdata - head of attributes data
* @nlh: netlink message header
* @hdrlen: length of family specific header
@@ -646,7 +674,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
* @nlh: netlink message header
* @remaining: number of bytes remaining in message stream
*
- * Returns the next netlink message in the message stream and
+ * Returns: the next netlink message in the message stream and
* decrements remaining by the size of the current message.
*/
static inline struct nlmsghdr *
@@ -673,7 +701,7 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
* exceeding maxtype will be rejected, policy must be specified, attributes
* will be validated in the strictest way possible.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
@@ -698,7 +726,7 @@ static inline int nla_parse(struct nlattr **tb, int maxtype,
* exceeding maxtype will be ignored and attributes from the policy are not
* always strictly validated (only for new attributes).
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
@@ -723,7 +751,7 @@ static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
* exceeding maxtype will be rejected as well as trailing data, but the
* policy is not completely strictly validated (only for new attributes).
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
const struct nlattr *head,
@@ -827,10 +855,10 @@ nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
/**
* nlmsg_find_attr - find a specific attribute in a netlink message
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @attrtype: type of attribute to look for
*
- * Returns the first attribute which matches the specified type.
+ * Returns: the first attribute which matches the specified type.
*/
static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
int hdrlen, int attrtype)
@@ -849,9 +877,9 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in liberal mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_validate_deprecated(const struct nlattr *head, int len,
int maxtype,
@@ -872,9 +900,9 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation is done in strict mode.
- * See documenation of struct nla_policy for more details.
+ * See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
@@ -887,7 +915,7 @@ static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
/**
* nlmsg_validate_deprecated - validate a netlink message including attributes
* @nlh: netlinket message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
@@ -911,7 +939,7 @@ static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
* nlmsg_report - need to report back to application?
* @nlh: netlink message header
*
- * Returns 1 if a report back to the application is requested.
+ * Returns: 1 if a report back to the application is requested.
*/
static inline int nlmsg_report(const struct nlmsghdr *nlh)
{
@@ -922,7 +950,7 @@ static inline int nlmsg_report(const struct nlmsghdr *nlh)
* nlmsg_seq - return the seq number of netlink message
* @nlh: netlink message header
*
- * Returns 0 if netlink message is NULL
+ * Returns: 0 if netlink message is NULL
*/
static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
{
@@ -933,7 +961,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
* nlmsg_for_each_attr - iterate over a stream of attributes
* @pos: loop counter, set to current attribute
* @nlh: netlink message header
- * @hdrlen: length of familiy specific header
+ * @hdrlen: length of family specific header
* @rem: initialized to len, holds bytes currently remaining in stream
*/
#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
@@ -941,6 +969,18 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
nlmsg_attrlen(nlh, hdrlen), rem)
/**
+ * nlmsg_for_each_attr_type - iterate over a stream of attributes
+ * @pos: loop counter, set to the current attribute
+ * @type: required attribute type for @pos
+ * @nlh: netlink message header
+ * @hdrlen: length of the family specific header
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_attr_type(pos, type, nlh, hdrlen, rem) \
+ nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
+ if (nla_type(pos) == type)
+
+/**
* nlmsg_put - Add a new netlink message to an skb
* @skb: socket buffer to store message in
* @portid: netlink PORTID of requesting application
@@ -949,7 +989,7 @@ static inline u32 nlmsg_seq(const struct nlmsghdr *nlh)
* @payload: length of message payload
* @flags: message flags
*
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
@@ -968,7 +1008,7 @@ static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 portid, u32 se
*
* Append data to an existing nlmsg, used when constructing a message
* with multiple fixed-format headers (which is rare).
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the extra payload.
*/
static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
@@ -990,7 +1030,7 @@ static inline void *nlmsg_append(struct sk_buff *skb, u32 size)
* @payload: length of message payload
* @flags: message flags
*
- * Returns NULL if the tailroom of the skb is insufficient to store
+ * Returns: NULL if the tailroom of the skb is insufficient to store
* the message header and payload.
*/
static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
@@ -1034,7 +1074,7 @@ static inline struct sk_buff *nlmsg_new_large(size_t payload)
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
*
- * Corrects the netlink message header to include the appeneded
+ * Corrects the netlink message header to include the appended
* attributes. Only necessary if attributes have been added to
* the message.
*/
@@ -1047,7 +1087,7 @@ static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
* nlmsg_get_pos - return current position in netlink message
* @skb: socket buffer the message is stored in
*
- * Returns a pointer to the current tail of the message.
+ * Returns: a pointer to the current tail of the message.
*/
static inline void *nlmsg_get_pos(struct sk_buff *skb)
{
@@ -1273,7 +1313,7 @@ static inline int nla_ok(const struct nlattr *nla, int remaining)
* @nla: netlink attribute
* @remaining: number of bytes remaining in attribute stream
*
- * Returns the next netlink attribute in the attribute stream and
+ * Returns: the next netlink attribute in the attribute stream and
* decrements remaining by the size of the current attribute.
*/
static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
@@ -1289,7 +1329,7 @@ static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
* @nla: attribute containing the nested attributes
* @attrtype: type of attribute to look for
*
- * Returns the first attribute which matches the specified type.
+ * Returns: the first attribute which matches the specified type.
*/
static inline struct nlattr *
nla_find_nested(const struct nlattr *nla, int attrtype)
@@ -1695,6 +1735,20 @@ static inline u32 nla_get_u32(const struct nlattr *nla)
}
/**
+ * nla_get_u32_default - return payload of u32 attribute or default
+ * @nla: u32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u32 nla_get_u32_default(const struct nlattr *nla, u32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u32(nla);
+}
+
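For context, a sketch of the open-coded pattern this family of *_default() helpers replaces; tb[] is a parsed attribute table and the MYPROTO_* names are assumptions:

	/* Before: explicit presence check and fallback. */
	u32 mtu = MYPROTO_DEFAULT_MTU;

	if (tb[MYPROTO_ATTR_MTU])
		mtu = nla_get_u32(tb[MYPROTO_ATTR_MTU]);

	/* After: one call, same semantics. */
	mtu = nla_get_u32_default(tb[MYPROTO_ATTR_MTU], MYPROTO_DEFAULT_MTU);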
+/**
* nla_get_be32 - return payload of __be32 attribute
* @nla: __be32 netlink attribute
*/
@@ -1704,6 +1758,21 @@ static inline __be32 nla_get_be32(const struct nlattr *nla)
}
/**
+ * nla_get_be32_default - return payload of be32 attribute or default
+ * @nla: __be32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_be32_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be32(nla);
+}
+
+/**
* nla_get_le32 - return payload of __le32 attribute
* @nla: __le32 netlink attribute
*/
@@ -1713,6 +1782,21 @@ static inline __le32 nla_get_le32(const struct nlattr *nla)
}
/**
+ * nla_get_le32_default - return payload of le32 attribute or default
+ * @nla: __le32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le32 nla_get_le32_default(const struct nlattr *nla,
+ __le32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le32(nla);
+}
+
+/**
* nla_get_u16 - return payload of u16 attribute
* @nla: u16 netlink attribute
*/
@@ -1722,6 +1806,20 @@ static inline u16 nla_get_u16(const struct nlattr *nla)
}
/**
+ * nla_get_u16_default - return payload of u16 attribute or default
+ * @nla: u16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u16 nla_get_u16_default(const struct nlattr *nla, u16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u16(nla);
+}
+
+/**
* nla_get_be16 - return payload of __be16 attribute
* @nla: __be16 netlink attribute
*/
@@ -1731,6 +1829,21 @@ static inline __be16 nla_get_be16(const struct nlattr *nla)
}
/**
+ * nla_get_be16_default - return payload of be16 attribute or default
+ * @nla: __be16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be16 nla_get_be16_default(const struct nlattr *nla,
+ __be16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be16(nla);
+}
+
+/**
* nla_get_le16 - return payload of __le16 attribute
* @nla: __le16 netlink attribute
*/
@@ -1740,6 +1853,21 @@ static inline __le16 nla_get_le16(const struct nlattr *nla)
}
/**
+ * nla_get_le16_default - return payload of le16 attribute or default
+ * @nla: __le16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le16 nla_get_le16_default(const struct nlattr *nla,
+ __le16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le16(nla);
+}
+
+/**
* nla_get_u8 - return payload of u8 attribute
* @nla: u8 netlink attribute
*/
@@ -1749,6 +1877,20 @@ static inline u8 nla_get_u8(const struct nlattr *nla)
}
/**
+ * nla_get_u8_default - return payload of u8 attribute or default
+ * @nla: u8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u8(nla);
+}
+
+/**
* nla_get_u64 - return payload of u64 attribute
* @nla: u64 netlink attribute
*/
@@ -1762,6 +1904,20 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
}
/**
+ * nla_get_u64_default - return payload of u64 attribute or default
+ * @nla: u64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_u64_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_u64(nla);
+}
+
+/**
* nla_get_uint - return payload of uint attribute
* @nla: uint netlink attribute
*/
@@ -1773,6 +1929,20 @@ static inline u64 nla_get_uint(const struct nlattr *nla)
}
/**
+ * nla_get_uint_default - return payload of uint attribute or default
+ * @nla: uint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline u64 nla_get_uint_default(const struct nlattr *nla, u64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_uint(nla);
+}
+
+/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
@@ -1786,6 +1956,21 @@ static inline __be64 nla_get_be64(const struct nlattr *nla)
}
/**
+ * nla_get_be64_default - return payload of be64 attribute or default
+ * @nla: __be64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be64 nla_get_be64_default(const struct nlattr *nla,
+ __be64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_be64(nla);
+}
+
+/**
* nla_get_le64 - return payload of __le64 attribute
* @nla: __le64 netlink attribute
*/
@@ -1795,6 +1980,21 @@ static inline __le64 nla_get_le64(const struct nlattr *nla)
}
/**
+ * nla_get_le64_default - return payload of le64 attribute or default
+ * @nla: __le64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __le64 nla_get_le64_default(const struct nlattr *nla,
+ __le64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_le64(nla);
+}
+
+/**
* nla_get_s32 - return payload of s32 attribute
* @nla: s32 netlink attribute
*/
@@ -1804,6 +2004,20 @@ static inline s32 nla_get_s32(const struct nlattr *nla)
}
/**
+ * nla_get_s32_default - return payload of s32 attribute or default
+ * @nla: s32 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s32 nla_get_s32_default(const struct nlattr *nla, s32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s32(nla);
+}
+
+/**
* nla_get_s16 - return payload of s16 attribute
* @nla: s16 netlink attribute
*/
@@ -1813,6 +2027,20 @@ static inline s16 nla_get_s16(const struct nlattr *nla)
}
/**
+ * nla_get_s16_default - return payload of s16 attribute or default
+ * @nla: s16 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s16 nla_get_s16_default(const struct nlattr *nla, s16 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s16(nla);
+}
+
+/**
* nla_get_s8 - return payload of s8 attribute
* @nla: s8 netlink attribute
*/
@@ -1822,6 +2050,20 @@ static inline s8 nla_get_s8(const struct nlattr *nla)
}
/**
+ * nla_get_s8_default - return payload of s8 attribute or default
+ * @nla: s8 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s8 nla_get_s8_default(const struct nlattr *nla, s8 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s8(nla);
+}
+
+/**
* nla_get_s64 - return payload of s64 attribute
* @nla: s64 netlink attribute
*/
@@ -1835,6 +2077,20 @@ static inline s64 nla_get_s64(const struct nlattr *nla)
}
/**
+ * nla_get_s64_default - return payload of s64 attribute or default
+ * @nla: s64 netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_s64_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_s64(nla);
+}
+
+/**
 * nla_get_sint - return payload of sint attribute
 * @nla: sint netlink attribute
*/
@@ -1846,6 +2102,20 @@ static inline s64 nla_get_sint(const struct nlattr *nla)
}
/**
+ * nla_get_sint_default - return payload of sint attribute or default
+ * @nla: sint netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline s64 nla_get_sint_default(const struct nlattr *nla, s64 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_sint(nla);
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
@@ -1858,7 +2128,7 @@ static inline int nla_get_flag(const struct nlattr *nla)
* nla_get_msecs - return payload of msecs attribute
* @nla: msecs netlink attribute
*
- * Returns the number of milliseconds in jiffies.
+ * Returns: the number of milliseconds in jiffies.
*/
static inline unsigned long nla_get_msecs(const struct nlattr *nla)
{
@@ -1868,6 +2138,21 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla)
}
/**
+ * nla_get_msecs_default - return payload of msecs attribute or default
+ * @nla: msecs netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline unsigned long nla_get_msecs_default(const struct nlattr *nla,
+ unsigned long defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_msecs(nla);
+}
+
+/**
* nla_get_in_addr - return payload of IPv4 address attribute
* @nla: IPv4 address netlink attribute
*/
@@ -1877,6 +2162,21 @@ static inline __be32 nla_get_in_addr(const struct nlattr *nla)
}
/**
+ * nla_get_in_addr_default - return payload of IPv4 address attribute or default
+ * @nla: IPv4 address netlink attribute, may be %NULL
+ * @defvalue: default value to use if @nla is %NULL
+ *
+ * Return: the value of the attribute, or the default value if not present
+ */
+static inline __be32 nla_get_in_addr_default(const struct nlattr *nla,
+ __be32 defvalue)
+{
+ if (!nla)
+ return defvalue;
+ return nla_get_in_addr(nla);
+}
+
+/**
* nla_get_in6_addr - return payload of IPv6 address attribute
* @nla: IPv6 address netlink attribute
*/
@@ -1920,7 +2220,7 @@ static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp)
* marked their nest attributes with NLA_F_NESTED flag. New APIs should use
* nla_nest_start() which sets the flag.
*
- * Returns the container attribute or NULL on error
+ * Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
int attrtype)
@@ -1941,7 +2241,7 @@ static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
* Unlike nla_nest_start_noflag(), mark the nest attribute with NLA_F_NESTED
* flag. This is the preferred function to use in new code.
*
- * Returns the container attribute or NULL on error
+ * Returns: the container attribute or NULL on error
*/
static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
@@ -1954,9 +2254,9 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
* @start: container attribute
*
 * Corrects the container attribute header to include all the
- * appeneded attributes.
+ * appended attributes.
*
- * Returns the total data length of the skb.
+ * Returns: the total data length of the skb.
*/
static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
{
@@ -1978,6 +2278,20 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
}
/**
+ * nla_put_empty_nest - Create an empty nest
+ * @skb: socket buffer the message is stored in
+ * @attrtype: attribute type of the container
+ *
+ * This function is a helper for creating empty nests.
+ *
+ * Returns: 0 when successful or -EMSGSIZE on failure.
+ */
+static inline int nla_put_empty_nest(struct sk_buff *skb, int attrtype)
+{
+ return nla_nest_start(skb, attrtype) ? 0 : -EMSGSIZE;
+}
+
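A minimal sketch of the intended use, e.g. signalling a capability by the container's mere presence in a dump reply (MYPROTO_ATTR_CAPS is hypothetical):

	if (nla_put_empty_nest(skb, MYPROTO_ATTR_CAPS))
		return -EMSGSIZE;	/* message full, caller retries */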
+/**
* __nla_validate_nested - Validate a stream of nested attributes
* @start: container attribute
* @maxtype: maximum attribute type to be expected
@@ -1987,9 +2301,9 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
*
* Validates all attributes in the nested attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
- * ignored. See documenation of struct nla_policy for more details.
+ * ignored. See documentation of struct nla_policy for more details.
*
- * Returns 0 on success or a negative error code.
+ * Returns: 0 on success or a negative error code.
*/
static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
const struct nla_policy *policy,
@@ -2022,7 +2336,7 @@ nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
* nla_need_padding_for_64bit - test 64-bit alignment of the next attribute
* @skb: socket buffer the message is stored in
*
- * Return true if padding is needed to align the next attribute (nla_data()) to
+ * Return: true if padding is needed to align the next attribute (nla_data()) to
* a 64-bit aligned area.
*/
static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
@@ -2049,7 +2363,7 @@ static inline bool nla_need_padding_for_64bit(struct sk_buff *skb)
* This will only be done in architectures which do not have
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS defined.
*
- * Returns zero on success or a negative error code.
+ * Returns: zero on success or a negative error code.
*/
static inline int nla_align_64bit(struct sk_buff *skb, int padattr)
{
diff --git a/include/net/netmem.h b/include/net/netmem.h
index d8b810245c1d..9e10f4ac50c3 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -8,34 +8,407 @@
#ifndef _NET_NETMEM_H
#define _NET_NETMEM_H
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <net/net_debug.h>
+
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc overlays space in struct page, its size must
+ * be checked until struct netmem_desc gets its own instance from slab,
+ * to avoid conflicting with other members within struct page.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
+/* net_iov */
+
+DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
+
+/* We overload the LSB of the struct page pointer to indicate whether it's
+ * a page or net_iov.
+ */
+#define NET_IOV 0x01UL
+
+enum net_iov_type {
+ NET_IOV_DMABUF,
+ NET_IOV_IOURING,
+};
+
+/* A memory descriptor representing abstract networking I/O vectors,
+ * generally for non-page memory that doesn't have a corresponding
+ * struct page and needs to be explicitly allocated through slab.
+ *
+ * net_iovs are allocated and used by networking code, and the size of
+ * the chunk is PAGE_SIZE.
+ *
+ * This memory can be any form of memory not backed by a struct page.
+ * Examples include imported dmabuf memory and imported io_uring memory.
+ * See net_iov_type for all the supported types.
+ *
+ * @pp_magic: pp field, similar to the one in struct page/struct
+ * netmem_desc.
+ * @pp: the pp this net_iov belongs to, if any.
+ * @dma_addr: the dma addrs of the net_iov. Needed for the network
+ * card to send/receive this net_iov.
+ * @pp_ref_count: the pp ref count of this net_iov, exactly the same
+ * usage as struct page/struct netmem_desc.
+ * @owner: the net_iov_area this net_iov belongs to, if any.
+ * @type: the type of the memory. Different types of net_iovs are
+ * supported.
+ */
+struct net_iov {
+ union {
+ struct netmem_desc desc;
+
+ /* XXX: The following part should be removed once all
+ * the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead
+ * of niov->pp.
+ */
+ struct {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+ };
+ };
+ struct net_iov_area *owner;
+ enum net_iov_type type;
+};
+
+struct net_iov_area {
+ /* Array of net_iovs for this area. */
+ struct net_iov *niovs;
+ size_t num_niovs;
+
+ /* Offset into the dma-buf where this chunk starts. */
+ unsigned long base_virtual;
+};
+
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
+ *
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
+ */
+#define NET_IOV_ASSERT_OFFSET(desc, iov) \
+ static_assert(offsetof(struct netmem_desc, desc) == \
+ offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
+NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
+NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
+NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NET_IOV_ASSERT_OFFSET
+
+static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov)
+{
+ return niov->owner;
+}
+
+static inline unsigned int net_iov_idx(const struct net_iov *niov)
+{
+ return niov - net_iov_owner(niov)->niovs;
+}
+
+/* netmem */
+
/**
* typedef netmem_ref - a nonexistent type marking a reference to generic
* network memory.
*
- * A netmem_ref currently is always a reference to a struct page. This
- * abstraction is introduced so support for new memory types can be added.
+ * A netmem_ref can be a struct page* or a struct net_iov* underneath.
*
* Use the supplied helpers to obtain the underlying memory pointer and fields.
*/
typedef unsigned long __bitwise netmem_ref;
-/* This conversion fails (returns NULL) if the netmem_ref is not struct page
- * backed.
+static inline bool netmem_is_net_iov(const netmem_ref netmem)
+{
+ return (__force unsigned long)netmem & NET_IOV;
+}
+
+/**
+ * __netmem_to_page - unsafely get pointer to the &page backing @netmem
+ * @netmem: netmem reference to convert
*
- * Currently struct page is the only possible netmem, and this helper never
- * fails.
+ * Unsafe version of netmem_to_page(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (no check for the LSB, no WARN). When @netmem points to IOV,
+ * provokes undefined behaviour.
+ *
+ * Return: pointer to the &page (garbage if @netmem is not page-backed).
*/
-static inline struct page *netmem_to_page(netmem_ref netmem)
+static inline struct page *__netmem_to_page(netmem_ref netmem)
{
return (__force struct page *)netmem;
}
-/* Converting from page to netmem is always safe, because a page can always be
- * a netmem.
+static inline struct page *netmem_to_page(netmem_ref netmem)
+{
+ if (WARN_ON_ONCE(netmem_is_net_iov(netmem)))
+ return NULL;
+
+ return __netmem_to_page(netmem);
+}
+
+static inline struct net_iov *netmem_to_net_iov(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return (struct net_iov *)((__force unsigned long)netmem &
+ ~NET_IOV);
+
+ DEBUG_NET_WARN_ON_ONCE(true);
+ return NULL;
+}
+
+static inline netmem_ref net_iov_to_netmem(struct net_iov *niov)
+{
+ return (__force netmem_ref)((unsigned long)niov | NET_IOV);
+}
+
+#define page_to_netmem(p) (_Generic((p), \
+ const struct page * : (__force const netmem_ref)(p), \
+ struct page * : (__force netmem_ref)(p)))
+
+/**
+ * virt_to_netmem - convert virtual memory pointer to a netmem reference
+ * @data: host memory pointer to convert
+ *
+ * Return: netmem reference to the &page backing this virtual address.
+ */
+static inline netmem_ref virt_to_netmem(const void *data)
+{
+ return page_to_netmem(virt_to_page(data));
+}
+
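To illustrate the conversion helpers above (a sketch, not part of the patch): page-backed references round-trip cleanly, while net_iov-backed ones fail the checked conversion:

static void netmem_conversion_sketch(struct page *page, struct net_iov *niov)
{
	netmem_ref pm = page_to_netmem(page);
	netmem_ref nm = net_iov_to_netmem(niov);

	WARN_ON(netmem_to_page(pm) != page);	/* page-backed: succeeds */
	WARN_ON(netmem_to_page(nm) != NULL);	/* net_iov: WARNs, returns NULL */
}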
+static inline int netmem_ref_count(netmem_ref netmem)
+{
+ /* The non-pp refcount of net_iov is always 1. On net_iov, we only
+ * support pp refcounting which uses the pp_ref_count field.
+ */
+ if (netmem_is_net_iov(netmem))
+ return 1;
+
+ return page_ref_count(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_pfn_trace(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return 0;
+
+ return page_to_pfn(netmem_to_page(netmem));
+}
+
+/* XXX: How to extract netmem_desc from page must be changed, once
+ * netmem_desc no longer overlays on page and will be allocated through
+ * slab.
+ */
+#define __pp_page_to_nmdesc(p) (_Generic((p), \
+ const struct page * : (const struct netmem_desc *)(p), \
+ struct page * : (struct netmem_desc *)(p)))
+
+/* CAUTION: Check if the page is a pp page before calling this helper or
+ * know it's a pp page.
+ */
+#define pp_page_to_nmdesc(p) \
+({ \
+ DEBUG_NET_WARN_ON_ONCE(!page_pool_page_is_pp(p)); \
+ __pp_page_to_nmdesc(p); \
+})
+
+/**
+ * __netmem_to_nmdesc - unsafely get pointer to the &netmem_desc backing
+ * @netmem
+ * @netmem: netmem reference to convert
+ *
+ * Unsafe version that can be used only when @netmem is always backed by
+ * system memory, performs faster and generates smaller object code (no
+ * check for the LSB, no WARN). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the &netmem_desc (garbage if @netmem is not backed
+ * by system memory).
+ */
+static inline struct netmem_desc *__netmem_to_nmdesc(netmem_ref netmem)
+{
+ return (__force struct netmem_desc *)netmem;
+}
+
+/* netmem_to_nmdesc - convert netmem_ref to struct netmem_desc * for
+ * access to common fields.
+ * @netmem: netmem reference to get netmem_desc.
+ *
+ * All the sub types of netmem_ref (netmem_desc, net_iov) have the same
+ * pp, pp_magic, dma_addr, and pp_ref_count fields via netmem_desc.
+ *
+ * Return: pointer to the struct netmem_desc, regardless of the
+ * underlying type.
+ */
+static inline struct netmem_desc *netmem_to_nmdesc(netmem_ref netmem)
+{
+ void *p = (void *)((__force unsigned long)netmem & ~NET_IOV);
+
+ if (netmem_is_net_iov(netmem))
+ return &((struct net_iov *)p)->desc;
+
+ return __pp_page_to_nmdesc((struct page *)p);
+}
+
+/**
+ * __netmem_get_pp - unsafely get pointer to the &page_pool backing @netmem
+ * @netmem: netmem reference to get the pointer from
+ *
+ * Unsafe version of netmem_get_pp(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (avoids clearing the LSB). When @netmem points to IOV,
+ * provokes invalid memory access.
+ *
+ * Return: pointer to the &page_pool (garbage if @netmem is not page-backed).
+ */
+static inline struct page_pool *__netmem_get_pp(netmem_ref netmem)
+{
+ return __netmem_to_nmdesc(netmem)->pp;
+}
+
+static inline struct page_pool *netmem_get_pp(netmem_ref netmem)
+{
+ return netmem_to_nmdesc(netmem)->pp;
+}
+
+static inline atomic_long_t *netmem_get_pp_ref_count_ref(netmem_ref netmem)
+{
+ return &netmem_to_nmdesc(netmem)->pp_ref_count;
+}
+
+static inline bool netmem_is_pref_nid(netmem_ref netmem, int pref_nid)
+{
+ /* NUMA node preference only makes sense if we're allocating
+ * system memory. Memory providers (which give us net_iovs)
+ * choose for us.
+ */
+ if (netmem_is_net_iov(netmem))
+ return true;
+
+ return page_to_nid(netmem_to_page(netmem)) == pref_nid;
+}
+
+static inline netmem_ref netmem_compound_head(netmem_ref netmem)
+{
+ /* niov are never compounded */
+ if (netmem_is_net_iov(netmem))
+ return netmem;
+
+ return page_to_netmem(compound_head(netmem_to_page(netmem)));
+}
+
+/**
+ * __netmem_address - unsafely get pointer to the memory backing @netmem
+ * @netmem: netmem reference to get the pointer for
+ *
+ * Unsafe version of netmem_address(). When @netmem is always page-backed,
+ * e.g. when it's a header buffer, performs faster and generates smaller
+ * object code (no check for the LSB). When @netmem points to IOV, provokes
+ * undefined behaviour.
+ *
+ * Return: pointer to the memory (garbage if @netmem is not page-backed).
+ */
+static inline void *__netmem_address(netmem_ref netmem)
+{
+ return page_address(__netmem_to_page(netmem));
+}
+
+static inline void *netmem_address(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return NULL;
+
+ return __netmem_address(netmem);
+}
+
+/**
+ * netmem_is_pfmemalloc - check if @netmem was allocated under memory pressure
+ * @netmem: netmem reference to check
+ *
+ * Return: true if @netmem is page-backed and the page was allocated under
+ * memory pressure, false otherwise.
*/
-static inline netmem_ref page_to_netmem(struct page *page)
+static inline bool netmem_is_pfmemalloc(netmem_ref netmem)
+{
+ if (netmem_is_net_iov(netmem))
+ return false;
+
+ return page_is_pfmemalloc(netmem_to_page(netmem));
+}
+
+static inline unsigned long netmem_get_dma_addr(netmem_ref netmem)
+{
+ return netmem_to_nmdesc(netmem)->dma_addr;
+}
+
+void get_netmem(netmem_ref netmem);
+void put_netmem(netmem_ref netmem);
+
+#define netmem_dma_unmap_addr_set(NETMEM, PTR, ADDR_NAME, VAL) \
+ do { \
+ if (!netmem_is_net_iov(NETMEM)) \
+ dma_unmap_addr_set(PTR, ADDR_NAME, VAL); \
+ else \
+ dma_unmap_addr_set(PTR, ADDR_NAME, 0); \
+ } while (0)
+
+static inline void netmem_dma_unmap_page_attrs(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
- return (__force netmem_ref)page;
+ if (!addr)
+ return;
+
+ dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
#endif /* _NET_NETMEM_H */
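A hedged sketch of how a driver TX path might pair the two helpers above; the buffer struct and its dma_addr field are assumptions:

	/* At map time: net_iov-backed buffers record a zero address ... */
	netmem_dma_unmap_addr_set(netmem, buf, dma_addr, mapping);

	/* ... so the unmap at completion time quietly becomes a no-op. */
	netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(buf, dma_addr),
				    buf->len, DMA_TO_DEVICE, 0);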
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index bae914815aa3..ab74b5ed0b01 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -7,9 +7,6 @@
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-#include <linux/netfilter/nf_conntrack_dccp.h>
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
#include <linux/netfilter/nf_conntrack_sctp.h>
#endif
@@ -50,13 +47,6 @@ struct nf_icmp_net {
unsigned int timeout;
};
-#ifdef CONFIG_NF_CT_PROTO_DCCP
-struct nf_dccp_net {
- u8 dccp_loose;
- unsigned int dccp_timeout[CT_DCCP_MAX + 1];
-};
-#endif
-
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net {
unsigned int timeouts[SCTP_CONNTRACK_MAX];
@@ -82,9 +72,6 @@ struct nf_ip_net {
struct nf_udp_net udp;
struct nf_icmp_net icmp;
struct nf_icmp_net icmpv6;
-#ifdef CONFIG_NF_CT_PROTO_DCCP
- struct nf_dccp_net dccp;
-#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
struct nf_sctp_net sctp;
#endif
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 78214f1b43a2..9ef3d70e5e9c 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -13,8 +13,11 @@ struct netns_core {
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
+ int sysctl_txq_reselection;
int sysctl_optmem_max;
u8 sysctl_txrehash;
+ u8 sysctl_tstamp_allow_data;
+ u8 sysctl_bypass_prot_mem;
#ifdef CONFIG_PROC_FS
struct prot_inuse __percpu *prot_inuse;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c356c458b340..2dbd46fc4734 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,18 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+struct sysctl_fib_multipath_hash_seed {
+ u32 user_seed;
+ u32 mp_seed;
+};
+#endif
+
+struct udp_tunnel_gro {
+ struct sock __rcu *sk;
+ struct hlist_head list;
+};
+
struct netns_ipv4 {
/* Cacheline organization can be found documented in
* Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
@@ -62,20 +74,28 @@ struct netns_ipv4 {
/* TXRX readonly hotpath cache lines */
__cacheline_group_begin(netns_ipv4_read_txrx);
- u8 sysctl_tcp_moderate_rcvbuf;
__cacheline_group_end(netns_ipv4_read_txrx);
/* RX readonly hotpath cache line */
__cacheline_group_begin(netns_ipv4_read_rx);
+ u8 sysctl_tcp_moderate_rcvbuf;
u8 sysctl_ip_early_demux;
u8 sysctl_tcp_early_demux;
+ u8 sysctl_tcp_l3mdev_accept;
+ /* 3 bytes hole, try to pack */
int sysctl_tcp_reordering;
int sysctl_tcp_rmem[3];
+ int sysctl_tcp_rcvbuf_low_rtt;
__cacheline_group_end(netns_ipv4_read_rx);
struct inet_timewait_death_row tcp_death_row;
struct udp_table *udp_table;
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+	/* Not in a pernet subsys because it needs to be available at GRO stage */
+ struct udp_tunnel_gro udp_tunnel_gro[2];
+#endif
+
#ifdef CONFIG_SYSCTL
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
@@ -102,6 +122,9 @@ struct netns_ipv4 {
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
+ struct hlist_head *fib_info_hash;
+ unsigned int fib_info_hash_bits;
+ unsigned int fib_info_cnt;
struct sock *mc_autojoin_sk;
@@ -113,9 +136,13 @@ struct netns_ipv4 {
u8 sysctl_icmp_echo_ignore_broadcasts;
u8 sysctl_icmp_ignore_bogus_error_responses;
u8 sysctl_icmp_errors_use_inbound_ifaddr;
+ u8 sysctl_icmp_errors_extension_mask;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-
+ int sysctl_icmp_msgs_per_sec;
+ int sysctl_icmp_msgs_burst;
+ atomic_t icmp_global_credit;
+ u32 icmp_global_stamp;
u32 ip_rt_min_pmtu;
int ip_rt_mtu_expires;
int ip_rt_min_advmss;
@@ -123,6 +150,8 @@ struct netns_ipv4 {
struct local_ports ip_local_ports;
u8 sysctl_tcp_ecn;
+ u8 sysctl_tcp_ecn_option;
+ u8 sysctl_tcp_ecn_option_beacon;
u8 sysctl_tcp_ecn_fallback;
u8 sysctl_ip_default_ttl;
@@ -141,9 +170,6 @@ struct netns_ipv4 {
u8 sysctl_fwmark_reflect;
u8 sysctl_tcp_fwmark_accept;
-#ifdef CONFIG_NET_L3_MASTER_DEV
- u8 sysctl_tcp_l3mdev_accept;
-#endif
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
@@ -166,10 +192,13 @@ struct netns_ipv4 {
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
+ unsigned int sysctl_tcp_tw_reuse_delay;
int sysctl_tcp_fin_timeout;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
+ int sysctl_tcp_rto_min_us;
+ int sysctl_tcp_rto_max_ms;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
@@ -193,6 +222,7 @@ struct netns_ipv4 {
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
unsigned int sysctl_tcp_child_ehash_entries;
+ int sysctl_tcp_comp_sack_rtt_percent;
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
int sysctl_max_syn_backlog;
@@ -226,6 +256,7 @@ struct netns_ipv4 {
int sysctl_igmp_qrv;
struct ping_group_range ping_group_range;
+ u16 ping_port_rover;
atomic_t dev_addr_genid;
@@ -245,18 +276,22 @@ struct netns_ipv4 {
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed;
u32 sysctl_fib_multipath_hash_fields;
u8 sysctl_fib_multipath_use_neigh;
u8 sysctl_fib_multipath_hash_policy;
#endif
struct fib_notifier_ops *notifier_ops;
- unsigned int fib_seq; /* protected by rtnl_mutex */
+ unsigned int fib_seq; /* writes protected by rtnl_mutex */
struct fib_notifier_ops *ipmr_notifier_ops;
unsigned int ipmr_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
siphash_key_t ip_id_key;
+ struct hlist_head *inet_addr_lst;
+ struct delayed_work addr_chk_work;
};
+
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 5f2cfd84570a..08d2ecc96e2b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -56,6 +56,7 @@ struct netns_sysctl_ipv6 {
u8 skip_notify_on_dev_down;
u8 fib_notify_on_flag_change;
u8 icmpv6_error_anycast_as_unicast;
+ u8 icmpv6_errors_extension_mask;
};
struct netns_ipv6 {
@@ -72,6 +73,7 @@ struct netns_ipv6 {
struct rt6_statistics *rt6_stats;
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
+ spinlock_t fib_table_hash_lock;
struct fib6_table *fib6_main_tbl;
struct list_head fib6_walkers;
rwlock_t fib6_walker_lock;
diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h
index 1db8f9aaddb4..89555f90b97b 100644
--- a/include/net/netns/mctp.h
+++ b/include/net/netns/mctp.h
@@ -6,19 +6,25 @@
#ifndef __NETNS_MCTP_H__
#define __NETNS_MCTP_H__
+#include <linux/hash.h>
+#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#define MCTP_BINDS_BITS 7
+
struct netns_mctp {
/* Only updated under RTNL, entries freed via RCU */
struct list_head routes;
- /* Bound sockets: list of sockets bound by type.
- * This list is updated from non-atomic contexts (under bind_lock),
- * and read (under rcu) in packet rx
+ /* Bound sockets: hash table of sockets, keyed by
+ * (type, src_eid, dest_eid).
+ * Specific src_eid/dest_eid entries also have an entry for
+	 * MCTP_ADDR_ANY. This table is updated from non-atomic contexts
+ * (under bind_lock), and read (under rcu) in packet rx.
*/
struct mutex bind_lock;
- struct hlist_head binds;
+ DECLARE_HASHTABLE(binds, MCTP_BINDS_BITS);
/* tag allocations. This list is read and updated from atomic contexts,
* but elements are free()ed after a RCU grace-period
@@ -34,4 +40,10 @@ struct netns_mctp {
struct list_head neighbours;
};
+static inline u32 mctp_bind_hash(u8 type, u8 local_addr, u8 peer_addr)
+{
+ return hash_32(type | (u32)local_addr << 8 | (u32)peer_addr << 16,
+ MCTP_BINDS_BITS);
+}
+
#endif /* __NETNS_MCTP_H__ */
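For illustration only (the entry type and its hash node member are assumptions), an rx-side lookup would walk a single bucket of the new table under RCU:

	struct mctp_sock *msk;	/* hypothetical bound-socket type */

	rcu_read_lock();
	hash_for_each_possible_rcu(net->mctp.binds, msk, binds_node,
				   mctp_bind_hash(type, local_eid, peer_eid)) {
		/* compare msk's bound (type, eids) against the packet */
	}
	rcu_read_unlock();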
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index 19ad2574b267..6682e51513ef 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -16,6 +16,7 @@ struct netns_mpls {
int default_ttl;
size_t platform_labels;
struct mpls_route __rcu * __rcu *platform_label;
+ struct mutex platform_mutex;
struct ctl_table_header *ctl;
};
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 02bbdc577f8e..a6a0bf4a247e 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -15,6 +15,9 @@ struct netns_nf {
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
+#ifdef CONFIG_LWTUNNEL
+ struct ctl_table_header *nf_lwtnl_dir_header;
+#endif
#endif
struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index cc8060c017d5..99dd166c5d07 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -3,6 +3,7 @@
#define _NETNS_NFTABLES_H_
struct netns_nftables {
+ unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index 7eff3d981b89..c0f97f36389e 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -75,8 +75,8 @@ struct netns_sctp {
/* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable;
- /* The namespace default hmac alg */
- char *sctp_hmac_alg;
+ /* Whether cookie authentication is enabled(1) or not(0) */
+ int cookie_auth_enable;
/* Valid.Cookie.Life - 60 seconds */
unsigned int valid_cookie_life;
@@ -125,14 +125,14 @@ struct netns_sctp {
int pf_expose;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
*/
int sndbuf_policy;
/*
- * Policy for preforming sctp/socket accounting
+ * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_rcvbuf
* 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes
*/
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index fc752a50f91b..ed24c9f638ee 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -17,6 +17,9 @@ struct netns_smc {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *smc_hdr;
#endif
+#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF)
+ struct smc_hs_ctrl __rcu *hs_ctrl;
+#endif /* CONFIG_SMC_HS_CTRL_BPF */
unsigned int sysctl_autocorking_size;
unsigned int sysctl_smcr_buf_type;
int sysctl_smcr_testlink_time;
@@ -24,5 +27,7 @@ struct netns_smc {
int sysctl_rmem;
int sysctl_max_links_per_lgr;
int sysctl_max_conns_per_lgr;
+ unsigned int sysctl_smcr_max_send_wr;
+ unsigned int sysctl_smcr_max_recv_wr;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 423b52eca908..23dd647fe024 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -43,6 +43,7 @@ struct netns_xfrm {
struct hlist_head __rcu *state_bysrc;
struct hlist_head __rcu *state_byspi;
struct hlist_head __rcu *state_byseq;
+ struct hlist_head __percpu *state_cache_input;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
@@ -51,7 +52,6 @@ struct netns_xfrm {
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
unsigned int idx_generator;
- struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
@@ -83,6 +83,7 @@ struct netns_xfrm {
spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
+ struct delayed_work nat_keepalive_work;
};
#endif
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 68463aebcc05..572e69cda476 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,7 +105,7 @@ struct nh_grp_entry_stats {
struct nh_grp_entry {
struct nexthop *nh;
struct nh_grp_entry_stats __percpu *stats;
- u8 weight;
+ u16 weight;
union {
struct {
@@ -152,6 +152,8 @@ struct nexthop {
u8 protocol; /* app managing this nh */
u8 nh_flags;
bool is_group;
+ bool dead;
+ spinlock_t lock; /* protect dead and f6i_list */
refcount_t refcnt;
struct rcu_head rcu;
@@ -192,7 +194,7 @@ struct nh_notifier_single_info {
};
struct nh_notifier_grp_entry_info {
- u8 weight;
+ u16 weight;
struct nh_notifier_single_info nh;
};
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index e82f55f543bb..09efcaed7c3f 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -332,7 +332,7 @@ struct nci_core_init_rsp_1 {
__le32 nfcc_features;
__u8 num_supported_rf_interfaces;
__u8 supported_rf_interfaces[]; /* variable size array */
- /* continuted in nci_core_init_rsp_2 */
+ /* continued in nci_core_init_rsp_2 */
} __packed;
struct nci_core_init_rsp_2 {
@@ -475,7 +475,7 @@ struct nci_rf_discover_ntf {
#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
struct activation_params_nfca_poll_iso_dep {
__u8 rats_res_len;
- __u8 rats_res[20];
+ __u8 rats_res[NFC_ATS_MAXSIZE];
};
struct activation_params_nfcb_poll_iso_dep {
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index ea8595651c38..664d5058e66e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -52,7 +52,7 @@ enum nci_state {
#define NCI_RF_DISC_SELECT_TIMEOUT 5000
#define NCI_RF_DEACTIVATE_TIMEOUT 30000
#define NCI_CMD_TIMEOUT 5000
-#define NCI_DATA_TIMEOUT 700
+#define NCI_DATA_TIMEOUT 3000
struct nci_dev;
@@ -265,6 +265,10 @@ struct nci_dev {
/* stored during intf_activated_ntf */
__u8 remote_gb[NFC_MAX_GT_LEN];
__u8 remote_gb_len;
+
+ /* stored during intf_activated_ntf */
+ __u8 target_ats[NFC_ATS_MAXSIZE];
+ __u8 target_ats_len;
};
/* ----- NCI Devices ----- */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 3d07abacf08b..127e6c7d910d 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -80,12 +80,14 @@ struct nfc_ops {
#define NFC_ATR_REQ_GT_OFFSET 14
/**
- * struct nfc_target - NFC target descriptiom
+ * struct nfc_target - NFC target description
*
* @sens_res: 2 bytes describing the target SENS_RES response, if the target
* is a type A one. The %sens_res most significant byte must be byte 2
* as described by the NFC Forum digital specification (i.e. the platform
* configuration one) while %sens_res least significant byte is byte 1.
+ * @ats_len: length of Answer To Select in bytes
+ * @ats: Answer To Select returned by an ISO 14443 Type A target upon activation
*/
struct nfc_target {
u32 idx;
@@ -105,6 +107,8 @@ struct nfc_target {
u8 is_iso15693;
u8 iso15693_dsfid;
u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
+ u8 ats_len;
+ u8 ats[NFC_ATS_MAXSIZE];
};
/**
@@ -230,10 +234,10 @@ static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev,
}
/**
- * nfc_set_drvdata - set driver specifc data
+ * nfc_set_drvdata - set driver specific data
*
* @dev: The nfc device
- * @data: Pointer to driver specifc data
+ * @data: Pointer to driver specific data
*/
static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
{
@@ -241,7 +245,7 @@ static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data)
}
/**
- * nfc_get_drvdata - get driver specifc data
+ * nfc_get_drvdata - get driver specific data
*
* @dev: The nfc device
*/
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 4c752f799957..442822746e92 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -191,14 +191,12 @@ enum nl802154_iftype {
* @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
* @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
* nl802154_wpan_phy_tx_power
- * @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level
- * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maxmimum value for cca_ed_level
* @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
* @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
* @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
* @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
* @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
- * @NL802154_CAP_ATTR_MAX_MINBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
* @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
* @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
* @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
@@ -364,6 +362,7 @@ enum nl802154_cca_opts {
NL802154_CCA_OPT_ENERGY_CARRIER_AND,
NL802154_CCA_OPT_ENERGY_CARRIER_OR,
+ /* private: */
/* keep last */
__NL802154_CCA_OPT_ATTR_AFTER_LAST,
NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
diff --git a/include/net/p8022.h b/include/net/p8022.h
deleted file mode 100644
index a29e224ac498..000000000000
--- a/include/net/p8022.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NET_P8022_H
-#define _NET_P8022_H
-
-struct net_device;
-struct packet_type;
-struct sk_buff;
-
-struct datalink_proto *
-register_8022_client(unsigned char type,
- int (*func)(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *pt,
- struct net_device *orig_dev));
-void unregister_8022_client(struct datalink_proto *proto);
-#endif
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 873631c79ab1..3247026e096a 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -55,6 +55,8 @@
#include <linux/dma-mapping.h>
#include <net/page_pool/types.h>
+#include <net/net_debug.h>
+#include <net/netmem.h>
#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
@@ -102,8 +104,7 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
*
* Get a page fragment from the page allocator or page_pool caches.
*
- * Return:
- * Return allocated page fragment, otherwise return NULL.
+ * Return: allocated page fragment on success, NULL otherwise.
*/
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
unsigned int *offset,
@@ -114,22 +115,22 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
return page_pool_alloc_frag(pool, offset, size, gfp);
}
-static inline struct page *page_pool_alloc(struct page_pool *pool,
- unsigned int *offset,
- unsigned int *size, gfp_t gfp)
+static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
{
unsigned int max_size = PAGE_SIZE << pool->p.order;
- struct page *page;
+ netmem_ref netmem;
if ((*size << 1) > max_size) {
*size = max_size;
*offset = 0;
- return page_pool_alloc_pages(pool, gfp);
+ return page_pool_alloc_netmems(pool, gfp);
}
- page = page_pool_alloc_frag(pool, offset, *size, gfp);
- if (unlikely(!page))
- return NULL;
+ netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp);
+ if (unlikely(!netmem))
+ return 0;
/* There is very likely not enough space for another fragment, so append
* the remaining size to the current fragment to avoid truesize
@@ -140,7 +141,30 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
pool->frag_offset = max_size;
}
- return page;
+ return netmem;
+}
+
+static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmem(pool, offset, size, gfp);
+}
+
+static inline netmem_ref page_pool_dev_alloc_netmems(struct page_pool *pool)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+
+ return page_pool_alloc_netmems(pool, gfp);
+}
+
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp));
}
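A sketch of a netmem-aware RX refill built on the new entry points; the descriptor layout and MYDRV_RX_BUF_LEN are assumptions, not part of this patch:

static int mydrv_refill_one(struct page_pool *pool, struct mydrv_rx_desc *d)
{
	unsigned int offset, size = MYDRV_RX_BUF_LEN;
	netmem_ref netmem;

	netmem = page_pool_dev_alloc_netmem(pool, &offset, &size);
	if (!netmem)
		return -ENOMEM;

	/* The DMA address comes from the pool; no page pointer needed,
	 * so this also works for unreadable (net_iov backed) memory.
	 */
	d->addr = page_pool_get_dma_addr_netmem(netmem) + offset;
	d->len = size;
	return 0;
}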
/**
@@ -153,8 +177,7 @@ static inline struct page *page_pool_alloc(struct page_pool *pool,
* depending on the requested size in order to allocate memory with least memory
* utilization and performance penalty.
*
- * Return:
- * Return allocated page or page fragment, otherwise return NULL.
+ * Return: allocated page or page fragment on success, NULL otherwise.
*/
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
unsigned int *offset,
@@ -188,8 +211,7 @@ static inline void *page_pool_alloc_va(struct page_pool *pool,
* This is just a thin wrapper around the page_pool_alloc() API, and
* it returns va of the allocated page or page fragment.
*
- * Return:
- * Return the va for the allocated page or page fragment, otherwise return NULL.
+ * Return: va of the allocated page or page fragment on success, NULL otherwise.
*/
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
unsigned int *size)
@@ -212,6 +234,11 @@ page_pool_get_dma_dir(const struct page_pool *pool)
return pool->p.dma_dir;
}
+static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr)
+{
+ atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr);
+}
+
/**
* page_pool_fragment_page() - split a fresh page into fragments
* @page: page to split
@@ -232,11 +259,12 @@ page_pool_get_dma_dir(const struct page_pool *pool)
*/
static inline void page_pool_fragment_page(struct page *page, long nr)
{
- atomic_long_set(&page->pp_ref_count, nr);
+ page_pool_fragment_netmem(page_to_netmem(page), nr);
}
-static inline long page_pool_unref_page(struct page *page, long nr)
+static inline long page_pool_unref_netmem(netmem_ref netmem, long nr)
{
+ atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem);
long ret;
/* If nr == pp_ref_count then we have cleared all remaining
@@ -253,19 +281,19 @@ static inline long page_pool_unref_page(struct page *page, long nr)
* initially, and only overwrite it when the page is partitioned into
* more than one piece.
*/
- if (atomic_long_read(&page->pp_ref_count) == nr) {
+ if (atomic_long_read(pp_ref_count) == nr) {
/* As we have ensured nr is always one for constant case using
* the BUILD_BUG_ON(), only need to handle the non-constant case
* here for pp_ref_count draining, which is a rare case.
*/
BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
if (!__builtin_constant_p(nr))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return 0;
}
- ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ ret = atomic_long_sub_return(nr, pp_ref_count);
WARN_ON(ret < 0);
/* We are the last user here too, reset pp_ref_count back to 1 to
@@ -274,20 +302,46 @@ static inline long page_pool_unref_page(struct page *page, long nr)
* page_pool_unref_page() currently.
*/
if (unlikely(!ret))
- atomic_long_set(&page->pp_ref_count, 1);
+ atomic_long_set(pp_ref_count, 1);
return ret;
}
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+ return page_pool_unref_netmem(page_to_netmem(page), nr);
+}
+
+static inline void page_pool_ref_netmem(netmem_ref netmem)
+{
+ atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
+}
+
static inline void page_pool_ref_page(struct page *page)
{
- atomic_long_inc(&page->pp_ref_count);
+ page_pool_ref_netmem(page_to_netmem(page));
}
-static inline bool page_pool_is_last_ref(struct page *page)
+static inline bool page_pool_unref_and_test(netmem_ref netmem)
{
	/* If page_pool_unref_netmem() returns 0, we were the last user */
- return page_pool_unref_page(page, 1) == 0;
+ return page_pool_unref_netmem(netmem, 1) == 0;
+}
+
+static inline void page_pool_put_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+ * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_unref_and_test(netmem))
+ return;
+
+ page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct);
+#endif
}
/**
@@ -308,15 +362,15 @@ static inline void page_pool_put_page(struct page_pool *pool,
unsigned int dma_sync_size,
bool allow_direct)
{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
-#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_ref(page))
- return;
+ page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size,
+ allow_direct);
+}
- page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
-#endif
+static inline void page_pool_put_full_netmem(struct page_pool *pool,
+ netmem_ref netmem,
+ bool allow_direct)
+{
+ page_pool_put_netmem(pool, netmem, -1, allow_direct);
}
/**
@@ -331,7 +385,7 @@ static inline void page_pool_put_page(struct page_pool *pool,
static inline void page_pool_put_full_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
- page_pool_put_page(pool, page, -1, allow_direct);
+ page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct);
}
/**
@@ -348,6 +402,12 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
page_pool_put_full_page(pool, page, true);
}
+static inline void page_pool_recycle_direct_netmem(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ page_pool_put_full_netmem(pool, netmem, true);
+}
+
#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
(sizeof(dma_addr_t) > sizeof(unsigned long))
@@ -365,6 +425,16 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}
+static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem)
+{
+ dma_addr_t ret = netmem_get_dma_addr(netmem);
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
/**
* page_pool_get_dma_addr() - Retrieve the stored DMA address.
* @page: page allocated from a page pool
@@ -374,27 +444,16 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va,
*/
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
- ret <<= PAGE_SHIFT;
-
- return ret;
+ return page_pool_get_dma_addr_netmem(page_to_netmem(page));
}
-static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool,
+ const dma_addr_t dma_addr,
+ u32 offset, u32 dma_sync_size)
{
- if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
- page->dma_addr = addr >> PAGE_SHIFT;
-
- /* We assume page alignment to shave off bottom bits,
- * if this "compression" doesn't work we need to drop.
- */
- return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
- }
-
- page->dma_addr = addr;
- return false;
+ dma_sync_single_range_for_cpu(pool->p.dev, dma_addr,
+ offset + pool->p.offset, dma_sync_size,
+ page_pool_get_dma_dir(pool));
}
/**
@@ -413,10 +472,26 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
const struct page *page,
u32 offset, u32 dma_sync_size)
{
- dma_sync_single_range_for_cpu(pool->p.dev,
- page_pool_get_dma_addr(page),
- offset + pool->p.offset, dma_sync_size,
- page_pool_get_dma_dir(pool));
+ __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset,
+ dma_sync_size);
+}
+
+static inline void
+page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
+ const netmem_ref netmem, u32 offset,
+ u32 dma_sync_size)
+{
+ if (!pool->dma_sync_for_cpu)
+ return;
+
+ __page_pool_dma_sync_for_cpu(pool,
+ page_pool_get_dma_addr_netmem(netmem),
+ offset, dma_sync_size);
+}
+
+static inline void page_pool_get(struct page_pool *pool)
+{
+ refcount_inc(&pool->user_cnt);
}
static inline bool page_pool_put(struct page_pool *pool)
@@ -430,4 +505,21 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_update_nid(pool, new_nid);
}
+/**
+ * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
+ * @pool: queried page pool
+ *
+ * Check if page pool will return buffers which are unreadable to the CPU /
+ * kernel. This will only be the case if user space bound a memory provider (mp)
+ * which returns unreadable memory to the queue served by the page pool.
+ * If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound
+ * this helper will return false. See also netif_rxq_has_unreadable_mp().
+ *
+ * Return: true if memory allocated by the page pool may be unreadable
+ */
+static inline bool page_pool_is_unreadable(struct page_pool *pool)
+{
+ return !!pool->mp_ops;
+}
+
#endif /* _NET_PAGE_POOL_HELPERS_H */
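
Taken together, the netmem conversions above let a driver run its whole rx buffer cycle on netmem_ref handles instead of struct page pointers. Below is a minimal sketch of such a path; the mydev_* names and the descriptor plumbing are hypothetical, and only the page_pool_* calls reflect this header.

#include <net/page_pool/helpers.h>

/* Sketch of an rx refill/teardown cycle, assuming the pool was created with
 * PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV.
 */
static netmem_ref mydev_rx_refill(struct page_pool *pool)
{
	netmem_ref netmem;

	netmem = page_pool_alloc_netmems(pool, GFP_ATOMIC | __GFP_NOWARN);
	if (!netmem)
		return 0;

	/* The pool pre-mapped the buffer; fetch the address for the
	 * (hypothetical) rx descriptor.
	 */
	/* mydev_desc_set_addr(desc, page_pool_get_dma_addr_netmem(netmem)); */
	return netmem;
}

static void mydev_rx_drop(struct page_pool *pool, netmem_ref netmem,
			  u32 len, bool in_napi)
{
	/* No-op unless the pool set dma_sync_for_cpu. */
	page_pool_dma_sync_netmem_for_cpu(pool, netmem, 0, len);

	/* Return the whole buffer to the pool's cache/ring. */
	page_pool_put_full_netmem(pool, netmem, in_napi);
}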
diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h
new file mode 100644
index 000000000000..ada4f968960a
--- /dev/null
+++ b/include/net/page_pool/memory_provider.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H
+#define _NET_PAGE_POOL_MEMORY_PROVIDER_H
+
+#include <net/netmem.h>
+#include <net/page_pool/types.h>
+
+struct netdev_rx_queue;
+struct netlink_ext_ack;
+struct sk_buff;
+
+struct memory_provider_ops {
+ netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp);
+ bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem);
+ int (*init)(struct page_pool *pool);
+ void (*destroy)(struct page_pool *pool);
+ int (*nl_fill)(void *mp_priv, struct sk_buff *rsp,
+ struct netdev_rx_queue *rxq);
+ void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq);
+};
+
+bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr);
+void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov);
+void net_mp_niov_clear_page_pool(struct net_iov *niov);
+
+int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+ struct pp_memory_provider_params *p);
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack);
+void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
+ struct pp_memory_provider_params *old_p);
+void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *old_p);
+
+/**
+ * net_mp_netmem_place_in_cache() - give a netmem to a page pool
+ * @pool: the page pool to place the netmem into
+ * @netmem: netmem to give
+ *
+ * Push an accounted netmem into the page pool's allocation cache. The caller
+ * must ensure that there is space in the cache. It should only be called off
+ * the mp_ops->alloc_netmems() path.
+ */
+static inline void net_mp_netmem_place_in_cache(struct page_pool *pool,
+ netmem_ref netmem)
+{
+ pool->alloc.cache[pool->alloc.count++] = netmem;
+}
+
+#endif
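
To make the ops contract concrete, here is a skeleton provider; everything named toy_* is invented for illustration. A real provider (devmem TCP, io_uring zero-copy rx) also manages net_iov state via the net_mp_niov_* helpers above and gets bound to a queue through struct pp_memory_provider_params and __net_mp_open_rxq().

#include <net/page_pool/memory_provider.h>

static netmem_ref toy_mp_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	/* Hand out provider-owned netmems. A batching provider may refill
	 * the pool cache with net_mp_netmem_place_in_cache() and return one
	 * of the cached entries instead.
	 */
	return 0;	/* sketch: no buffers available */
}

static bool toy_mp_release_netmem(struct page_pool *pool, netmem_ref netmem)
{
	/* Return true when the provider has taken the netmem back. */
	return true;
}

static int toy_mp_init(struct page_pool *pool)
{
	return 0;	/* sketch: validate pool params here */
}

static void toy_mp_destroy(struct page_pool *pool)
{
}

static const struct memory_provider_ops toy_mp_ops = {
	.alloc_netmems	= toy_mp_alloc_netmems,
	.release_netmem	= toy_mp_release_netmem,
	.init		= toy_mp_init,
	.destroy	= toy_mp_destroy,
	/* .nl_fill and .uninstall omitted in this sketch */
};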
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index b088d131aeb0..1509a536cb85 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -6,6 +6,8 @@
#include <linux/dma-direction.h>
#include <linux/ptr_ring.h>
#include <linux/types.h>
+#include <linux/xarray.h>
+#include <net/netmem.h>
#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
* map/unmap
@@ -19,8 +21,21 @@
* device driver responsibility
*/
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
+
+/* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting
+ * this must be able to support unreadable netmem, where netmem_address() would
+ * return NULL. This flag should not be set for header page_pools.
+ *
+ * If the driver sets PP_FLAG_ALLOW_UNREADABLE_NETMEM, it should also set
+ * page_pool_params.slow.queue_idx.
+ */
+#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3)
+
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
- PP_FLAG_SYSTEM_POOL)
+ PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM)
+
+/* Index limit to stay within PP_DMA_INDEX_BITS for DMA indices */
+#define PP_DMA_INDEX_LIMIT XA_LIMIT(1, BIT(PP_DMA_INDEX_BITS) - 1)
/*
* Fast allocation side cache array/stack
@@ -40,22 +55,25 @@
#define PP_ALLOC_CACHE_REFILL 64
struct pp_alloc_cache {
u32 count;
- struct page *cache[PP_ALLOC_CACHE_SIZE];
+ netmem_ref cache[PP_ALLOC_CACHE_SIZE];
};
/**
* struct page_pool_params - page pool parameters
+ * @fast: params accessed frequently on hotpath
* @order: 2^order pages on allocation
* @pool_size: size of the ptr_ring
* @nid: NUMA node id to allocate pages from
* @dev: device, for DMA pre-mapping purposes
- * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
* @napi: NAPI which is the sole consumer of pages, otherwise NULL
* @dma_dir: DMA mapping direction
* @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
* @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- * @netdev: corresponding &net_device for Netlink introspection
- * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL
+ * @slow: params with slowpath access only (initialization and Netlink)
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
+ * @queue_idx: queue idx this page_pool is being created for.
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL,
+ * PP_FLAG_ALLOW_UNREADABLE_NETMEM.
*/
struct page_pool_params {
struct_group_tagged(page_pool_params_fast, fast,
@@ -70,9 +88,10 @@ struct page_pool_params {
);
struct_group_tagged(page_pool_params_slow, slow,
struct net_device *netdev;
+ unsigned int queue_idx;
unsigned int flags;
/* private: used by test code only */
- void (*init_callback)(struct page *page, void *arg);
+ void (*init_callback)(netmem_ref netmem, void *arg);
void *init_arg;
);
};
@@ -127,6 +146,23 @@ struct page_pool_stats {
};
#endif
+/* The whole frag API block must stay within one cacheline. On 32-bit systems,
+ * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``.
+ * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``.
+ * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that
+ * one for simplicity.
+ * Having it aligned to a cacheline boundary may be excessive and brings no
+ * benefit.
+ */
+#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long))
+
+struct memory_provider_ops;
+
+struct pp_memory_provider_params {
+ void *mp_priv;
+ const struct memory_provider_ops *mp_ops;
+};
+
struct page_pool {
struct page_pool_params_fast p;
@@ -135,24 +171,17 @@ struct page_pool {
bool has_init_callback:1; /* slow::init_callback is set */
bool dma_map:1; /* Perform DMA mapping */
- bool dma_sync:1; /* Perform DMA sync */
+ bool dma_sync:1; /* Perform DMA sync for device */
+ bool dma_sync_for_cpu:1; /* Perform DMA sync for cpu */
#ifdef CONFIG_PAGE_POOL_STATS
bool system:1; /* This is a global percpu pool */
#endif
- /* The following block must stay within one cacheline. On 32-bit
- * systems, sizeof(long) == sizeof(int), so that the block size is
- * ``3 * sizeof(long)``. On 64-bit systems, the actual size is
- * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of
- * them is ``4 * sizeof(long)``, so just use that one for simplicity.
- * Having it aligned to a cacheline boundary may be excessive and
- * doesn't bring any good.
- */
- __cacheline_group_begin(frag) __aligned(4 * sizeof(long));
+ __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
long frag_users;
- struct page *frag_page;
+ netmem_ref frag_page;
unsigned int frag_offset;
- __cacheline_group_end(frag);
+ __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN);
struct delayed_work release_dw;
void (*disconnect)(void *pool);
@@ -193,6 +222,11 @@ struct page_pool {
*/
struct ptr_ring ring;
+ void *mp_priv;
+ const struct memory_provider_ops *mp_ops;
+
+ struct xarray dma_mapped;
+
#ifdef CONFIG_PAGE_POOL_STATS
/* recycle stats are per-cpu to avoid locking */
struct page_pool_recycle_stats __percpu *recycle_stats;
@@ -213,14 +247,17 @@ struct page_pool {
struct {
struct hlist_node list;
u64 detach_time;
- u32 napi_id;
u32 id;
} user;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp);
struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
unsigned int size, gfp_t gfp);
+netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool,
+ unsigned int *offset, unsigned int size,
+ gfp_t gfp);
struct page_pool *page_pool_create(const struct page_pool_params *params);
struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
int cpuid);
@@ -228,11 +265,13 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
struct xdp_mem_info;
#ifdef CONFIG_PAGE_POOL
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+ struct napi_struct *napi);
+void page_pool_disable_direct_recycling(struct page_pool *pool);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
const struct xdp_mem_info *mem);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count);
+void page_pool_put_netmem_bulk(netmem_ref *data, u32 count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
@@ -244,12 +283,14 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
{
}
-static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count)
+static inline void page_pool_put_netmem_bulk(netmem_ref *data, u32 count)
{
}
#endif
+void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
+ unsigned int dma_sync_size,
+ bool allow_direct);
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size,
bool allow_direct);
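
Pulling the new flag, the fast/slow parameter split and the queue binding together: a driver opting in to unreadable netmem might fill its params as below. This is a sketch and the wrapper is hypothetical; note the requirement above that queue_idx be set alongside PP_FLAG_ALLOW_UNREADABLE_NETMEM.

#include <net/page_pool/types.h>

static struct page_pool *mydev_create_payload_pool(struct net_device *netdev,
						   struct device *dma_dev,
						   unsigned int rxq_idx,
						   unsigned int ring_size)
{
	struct page_pool_params pp = {
		/* fast: hot-path parameters */
		.order		= 0,
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		/* slow: init / Netlink-only parameters */
		.netdev		= netdev,
		.queue_idx	= rxq_idx,
		.flags		= PP_FLAG_DMA_MAP |
				  PP_FLAG_ALLOW_UNREADABLE_NETMEM,
	};

	return page_pool_create(&pp);
}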
diff --git a/include/net/pfcp.h b/include/net/pfcp.h
index af14f970b80e..639553797d3e 100644
--- a/include/net/pfcp.h
+++ b/include/net/pfcp.h
@@ -45,7 +45,7 @@ struct pfcphdr_session {
reserved:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved:4,
- message_priprity:4;
+ message_priority:4;
#else
#error "Please fix <asm/byteorder>"
#endif
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index e9dc8dca5817..37a3e83531c6 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -11,13 +11,13 @@
#define PN_DEV_H
#include <linux/list.h>
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
struct net;
struct phonet_device_list {
struct list_head list;
- struct mutex lock;
+ spinlock_t lock;
};
struct phonet_device_list *phonet_device_list(struct net *net);
@@ -38,11 +38,11 @@ int phonet_address_add(struct net_device *dev, u8 addr);
int phonet_address_del(struct net_device *dev, u8 addr);
u8 phonet_address_get(struct net_device *dev, u8 addr);
int phonet_address_lookup(struct net *net, u8 addr);
-void phonet_address_notify(int event, struct net_device *dev, u8 addr);
+void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr);
int phonet_route_add(struct net_device *dev, u8 daddr);
int phonet_route_del(struct net_device *dev, u8 daddr);
-void rtm_phonet_notify(int event, struct net_device *dev, u8 dst);
+void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst);
struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr);
struct net_device *phonet_route_output(struct net *net, u8 daddr);
diff --git a/include/net/ping.h b/include/net/ping.h
index bc7779262e60..05bfd594a64c 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -54,12 +54,11 @@ struct pingfakehdr {
};
int ping_get_port(struct sock *sk, unsigned short ident);
-int ping_hash(struct sock *sk);
void ping_unhash(struct sock *sk);
int ping_init_sock(struct sock *sk);
void ping_close(struct sock *sk, long timeout);
-int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int ping_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
void ping_err(struct sk_buff *skb, int offset, u32 info);
int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd,
struct sk_buff *);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 41297bd38dff..99ac747b7906 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -75,11 +75,11 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
}
#ifdef CONFIG_NET_CLS_ACT
-DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
+DECLARE_STATIC_KEY_FALSE(tcf_sw_enabled_key);
static inline bool tcf_block_bypass_sw(struct tcf_block *block)
{
- return block && block->bypass_wanted;
+ return block && !atomic_read(&block->useswcnt);
}
#endif
@@ -319,7 +319,7 @@ tcf_exts_hw_stats_update(const struct tcf_exts *exts,
* tcf_exts_has_actions - check if at least one action is present
* @exts: tc filter extensions handle
*
- * Returns true if at least one action is present.
+ * Returns: true if at least one action is present.
*/
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
@@ -491,7 +491,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
struct tcf_pkt_info *);
/**
- * tcf_em_tree_match - evaulate an ematch tree
+ * tcf_em_tree_match - evaluate an ematch tree
*
* @skb: socket buffer of the packet in question
* @tree: ematch tree to be used for evaluation
@@ -501,7 +501,7 @@ int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
* through all ematches respecting their logic relations returning
* as soon as the result is obvious.
*
- * Returns 1 if the ematch tree as-one matches, no ematches are configured
+ * Returns: 1 if the ematch tree as-one matches, no ematches are configured
* or ematch is not enabled in the kernel, otherwise 0 is returned.
*/
static inline int tcf_em_tree_match(struct sk_buff *skb,
@@ -536,6 +536,8 @@ static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
case TCF_LAYER_NETWORK:
return skb_network_header(skb);
case TCF_LAYER_TRANSPORT:
+ if (!skb_transport_header_was_set(skb))
+ break;
return skb_transport_header(skb);
}
@@ -755,10 +757,20 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->chain_index = tp->chain->index;
cls_common->protocol = tp->protocol;
cls_common->prio = tp->prio >> 16;
+ cls_common->skip_sw = tc_skip_sw(flags);
if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
cls_common->extack = extack;
}
+static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
+{
+ if (tp->usesw)
+ return;
+ if (tc_skip_sw(flags) && tc_in_hw(flags))
+ return;
+ tp->usesw = true;
+}
+
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d7b7b6cd4aa1..e703c507d0da 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -25,11 +25,6 @@ struct qdisc_walker {
const struct Qdisc * : (const void *)&q->privdata, \
struct Qdisc * : (void *)&q->privdata)
-static inline struct Qdisc *qdisc_from_priv(void *priv)
-{
- return container_of(priv, struct Qdisc, privdata);
-}
-
/*
Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
@@ -48,7 +43,6 @@ static inline struct Qdisc *qdisc_from_priv(void *priv)
*/
typedef u64 psched_time_t;
-typedef long psched_tdiff_t;
/* Avoid doing 64 bit divide */
#define PSCHED_SHIFT 6
@@ -114,19 +108,19 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct netlink_ext_ack *extack);
void qdisc_put_rtab(struct qdisc_rate_table *tab);
void qdisc_put_stab(struct qdisc_size_table *tab);
-void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
spinlock_t *root_lock, bool validate);
void __qdisc_run(struct Qdisc *q);
-static inline void qdisc_run(struct Qdisc *q)
+static inline struct sk_buff *qdisc_run(struct Qdisc *q)
{
if (qdisc_run_begin(q)) {
__qdisc_run(q);
- qdisc_run_end(q);
+ return qdisc_run_end(q);
}
+ return NULL;
}
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
@@ -290,4 +284,28 @@ static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
return true;
}
+static inline void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
+{
+ if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
+ pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+ txt, qdisc->ops->id, qdisc->handle >> 16);
+ qdisc->flags |= TCQ_F_WARN_NONWC;
+ }
+}
+
+static inline unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = sch->ops->peek(sch);
+ if (unlikely(skb == NULL)) {
+ qdisc_warn_nonwc("qdisc_peek_len", sch);
+ return 0;
+ }
+ len = qdisc_pkt_len(skb);
+
+ return len;
+}
+
#endif
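
With the signature change above, qdisc_run() now hands back the chain of skbs that ->dequeue() parked in q->to_free (for qdiscs flagged TCQ_F_DEQUEUE_DROPS), so the caller frees them after leaving the running section. A hedged sketch of a call site; kfree_skb_list() is the stock skb helper and the wrapper is hypothetical:

#include <net/pkt_sched.h>

static void mydev_run_qdisc(struct Qdisc *q)
{
	struct sk_buff *to_free;

	to_free = qdisc_run(q);		/* NULL when nothing was dropped */
	if (unlikely(to_free))
		kfree_skb_list(to_free);
}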
diff --git a/include/net/proto_memory.h b/include/net/proto_memory.h
index a6ab2f4f5e28..ad6d703ce6fe 100644
--- a/include/net/proto_memory.h
+++ b/include/net/proto_memory.h
@@ -31,10 +31,13 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
if (!sk->sk_prot->memory_pressure)
return false;
- if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ if (mem_cgroup_sk_enabled(sk) &&
+ mem_cgroup_sk_under_memory_pressure(sk))
return true;
+ if (sk->sk_bypass_prot_mem)
+ return false;
+
return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
diff --git a/include/net/psample.h b/include/net/psample.h
index 0509d2d6be67..5071b5fc2b59 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -24,7 +24,10 @@ struct psample_metadata {
u8 out_tc_valid:1,
out_tc_occ_valid:1,
latency_valid:1,
- unused:5;
+ rate_as_probability:1,
+ unused:4;
+ const u8 *user_cookie;
+ u32 user_cookie_len;
};
struct psample_group *psample_group_get(struct net *net, u32 group_num);
@@ -35,13 +38,15 @@ struct sk_buff;
#if IS_ENABLED(CONFIG_PSAMPLE)
-void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
- u32 sample_rate, const struct psample_metadata *md);
+void psample_sample_packet(struct psample_group *group,
+ const struct sk_buff *skb, u32 sample_rate,
+ const struct psample_metadata *md);
#else
static inline void psample_sample_packet(struct psample_group *group,
- struct sk_buff *skb, u32 sample_rate,
+ const struct sk_buff *skb,
+ u32 sample_rate,
const struct psample_metadata *md)
{
}
diff --git a/include/net/psp.h b/include/net/psp.h
new file mode 100644
index 000000000000..33bb4d1dc46e
--- /dev/null
+++ b/include/net/psp.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_ALL_H
+#define __NET_PSP_ALL_H
+
+#include <uapi/linux/psp.h>
+#include <net/psp/functions.h>
+#include <net/psp/types.h>
+
+/* Do not add any code here. Put it in the sub-headers instead. */
+
+#endif /* __NET_PSP_ALL_H */
diff --git a/include/net/psp/functions.h b/include/net/psp/functions.h
new file mode 100644
index 000000000000..c5c23a54774e
--- /dev/null
+++ b/include/net/psp/functions.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_HELPERS_H
+#define __NET_PSP_HELPERS_H
+
+#include <linux/skbuff.h>
+#include <linux/rcupdate.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/psp/types.h>
+
+struct inet_timewait_sock;
+
+/* Driver-facing API */
+struct psp_dev *
+psp_dev_create(struct net_device *netdev, struct psp_dev_ops *psd_ops,
+ struct psp_dev_caps *psd_caps, void *priv_ptr);
+void psp_dev_unregister(struct psp_dev *psd);
+bool psp_dev_encapsulate(struct net *net, struct sk_buff *skb, __be32 spi,
+ u8 ver, __be16 sport);
+int psp_dev_rcv(struct sk_buff *skb, u16 dev_id, u8 generation, bool strip_icv);
+
+/* Kernel-facing API */
+void psp_assoc_put(struct psp_assoc *pas);
+
+static inline void *psp_assoc_drv_data(struct psp_assoc *pas)
+{
+ return pas->drv_data;
+}
+
+#if IS_ENABLED(CONFIG_INET_PSP)
+unsigned int psp_key_size(u32 version);
+void psp_sk_assoc_free(struct sock *sk);
+void psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk);
+void psp_twsk_assoc_free(struct inet_timewait_sock *tw);
+void psp_reply_set_decrypted(const struct sock *sk, struct sk_buff *skb);
+
+static inline struct psp_assoc *psp_sk_assoc(const struct sock *sk)
+{
+ return rcu_dereference_check(sk->psp_assoc, lockdep_sock_is_held(sk));
+}
+
+static inline void
+psp_enqueue_set_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+ struct psp_assoc *pas;
+
+ pas = psp_sk_assoc(sk);
+ if (pas && pas->tx.spi)
+ skb->decrypted = 1;
+}
+
+static inline unsigned long
+__psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two,
+ unsigned long diffs)
+{
+ struct psp_skb_ext *a, *b;
+
+ a = skb_ext_find(one, SKB_EXT_PSP);
+ b = skb_ext_find(two, SKB_EXT_PSP);
+
+ diffs |= (!!a) ^ (!!b);
+ if (!diffs && unlikely(a))
+ diffs |= memcmp(a, b, sizeof(*a));
+ return diffs;
+}
+
+static inline bool
+psp_is_allowed_nondata(struct sk_buff *skb, struct psp_assoc *pas)
+{
+ bool fin = !!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN);
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+ u32 seq = TCP_SKB_CB(skb)->seq;
+ bool pure_fin;
+
+ pure_fin = fin && end_seq - seq == 1;
+
+ return seq == end_seq || (pure_fin && seq == pas->upgrade_seq);
+}
+
+static inline bool
+psp_pse_matches_pas(struct psp_skb_ext *pse, struct psp_assoc *pas)
+{
+ return pse && pas->rx.spi == pse->spi &&
+ pas->generation == pse->generation &&
+ pas->version == pse->version &&
+ pas->dev_id == pse->dev_id;
+}
+
+static inline enum skb_drop_reason
+__psp_sk_rx_policy_check(struct sk_buff *skb, struct psp_assoc *pas)
+{
+ struct psp_skb_ext *pse = skb_ext_find(skb, SKB_EXT_PSP);
+
+ if (!pas)
+ return pse ? SKB_DROP_REASON_PSP_INPUT : 0;
+
+ if (likely(psp_pse_matches_pas(pse, pas))) {
+ if (unlikely(!pas->peer_tx))
+ pas->peer_tx = 1;
+
+ return 0;
+ }
+
+ if (!pse) {
+ if (!pas->tx.spi ||
+ (!pas->peer_tx && psp_is_allowed_nondata(skb, pas)))
+ return 0;
+ }
+
+ return SKB_DROP_REASON_PSP_INPUT;
+}
+
+static inline enum skb_drop_reason
+psp_sk_rx_policy_check(struct sock *sk, struct sk_buff *skb)
+{
+ return __psp_sk_rx_policy_check(skb, psp_sk_assoc(sk));
+}
+
+static inline enum skb_drop_reason
+psp_twsk_rx_policy_check(struct inet_timewait_sock *tw, struct sk_buff *skb)
+{
+ return __psp_sk_rx_policy_check(skb, rcu_dereference(tw->psp_assoc));
+}
+
+static inline struct psp_assoc *psp_sk_get_assoc_rcu(const struct sock *sk)
+{
+ struct psp_assoc *pas;
+ int state;
+
+ state = READ_ONCE(sk->sk_state);
+ if (!sk_is_inet(sk) || state == TCP_NEW_SYN_RECV)
+ return NULL;
+
+ pas = state == TCP_TIME_WAIT ?
+ rcu_dereference(inet_twsk(sk)->psp_assoc) :
+ rcu_dereference(sk->psp_assoc);
+ return pas;
+}
+
+static inline struct psp_assoc *psp_skb_get_assoc_rcu(struct sk_buff *skb)
+{
+ if (!skb->decrypted || !skb->sk)
+ return NULL;
+
+ return psp_sk_get_assoc_rcu(skb->sk);
+}
+
+static inline unsigned int psp_sk_overhead(const struct sock *sk)
+{
+ int psp_encap = sizeof(struct udphdr) + PSP_HDR_SIZE + PSP_TRL_SIZE;
+ bool has_psp = rcu_access_pointer(sk->psp_assoc);
+
+ return has_psp ? psp_encap : 0;
+}
+#else
+static inline void psp_sk_assoc_free(struct sock *sk) { }
+static inline void
+psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk) { }
+static inline void psp_twsk_assoc_free(struct inet_timewait_sock *tw) { }
+static inline void
+psp_reply_set_decrypted(const struct sock *sk, struct sk_buff *skb) { }
+
+static inline struct psp_assoc *psp_sk_assoc(const struct sock *sk)
+{
+ return NULL;
+}
+
+static inline void
+psp_enqueue_set_decrypted(struct sock *sk, struct sk_buff *skb) { }
+
+static inline unsigned long
+__psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two,
+ unsigned long diffs)
+{
+ return diffs;
+}
+
+static inline enum skb_drop_reason
+psp_sk_rx_policy_check(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline enum skb_drop_reason
+psp_twsk_rx_policy_check(struct inet_timewait_sock *tw, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline struct psp_assoc *psp_skb_get_assoc_rcu(struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline unsigned int psp_sk_overhead(const struct sock *sk)
+{
+ return 0;
+}
+#endif
+
+static inline unsigned long
+psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two)
+{
+ return __psp_skb_coalesce_diff(one, two, 0);
+}
+
+#endif /* __NET_PSP_HELPERS_H */
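
As a rough illustration of how the policy helpers compose on the receive side, the sketch below validates an incoming skb against the socket's association; the wrapper is illustrative and not the actual TCP call site:

#include <net/psp/functions.h>

static enum skb_drop_reason myproto_psp_rx_check(struct sock *sk,
						 struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	/* 0 means accept; SKB_DROP_REASON_PSP_INPUT means the skb's PSP
	 * extension did not match the association (or was required but
	 * missing) and the packet must be dropped.
	 */
	reason = psp_sk_rx_policy_check(sk, skb);
	if (reason)
		return reason;

	/* Accepted packets continue up the stack as usual. */
	return 0;
}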
diff --git a/include/net/psp/types.h b/include/net/psp/types.h
new file mode 100644
index 000000000000..25a9096d4e7d
--- /dev/null
+++ b/include/net/psp/types.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_H
+#define __NET_PSP_H
+
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+
+struct netlink_ext_ack;
+
+#define PSP_DEFAULT_UDP_PORT 1000
+
+struct psphdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 crypt_offset;
+ u8 verfl;
+ __be32 spi;
+ __be64 iv;
+ __be64 vc[]; /* optional */
+};
+
+#define PSP_ENCAP_HLEN (sizeof(struct udphdr) + sizeof(struct psphdr))
+
+#define PSP_SPI_KEY_ID GENMASK(30, 0)
+#define PSP_SPI_KEY_PHASE BIT(31)
+
+#define PSPHDR_CRYPT_OFFSET GENMASK(5, 0)
+
+#define PSPHDR_VERFL_SAMPLE BIT(7)
+#define PSPHDR_VERFL_DROP BIT(6)
+#define PSPHDR_VERFL_VERSION GENMASK(5, 2)
+#define PSPHDR_VERFL_VIRT BIT(1)
+#define PSPHDR_VERFL_ONE BIT(0)
+
+#define PSP_HDRLEN_NOOPT ((sizeof(struct psphdr) - 8) / 8)
+
+/**
+ * struct psp_dev_config - PSP device configuration
+ * @versions: PSP versions enabled on the device
+ */
+struct psp_dev_config {
+ u32 versions;
+};
+
+/**
+ * struct psp_dev - PSP device struct
+ * @main_netdev: original netdevice of this PSP device
+ * @ops: driver callbacks
+ * @caps: device capabilities
+ * @drv_priv: driver priv pointer
+ * @lock: instance lock, protects all fields
+ * @refcnt: reference count for the instance
+ * @id: instance id
+ * @generation: current generation of the device key
+ * @config: current device configuration
+ * @active_assocs: list of registered associations
+ * @prev_assocs: associations which use old (but still usable)
+ * device key
+ * @stale_assocs: associations which use a rotated out key
+ *
+ * @stats: statistics maintained by the core
+ * @stats.rotations: See stats attr key-rotations
+ * @stats.stales: See stats attr stale-events
+ *
+ * @rcu: RCU head for freeing the structure
+ */
+struct psp_dev {
+ struct net_device *main_netdev;
+
+ struct psp_dev_ops *ops;
+ struct psp_dev_caps *caps;
+ void *drv_priv;
+
+ struct mutex lock;
+ refcount_t refcnt;
+
+ u32 id;
+
+ u8 generation;
+
+ struct psp_dev_config config;
+
+ struct list_head active_assocs;
+ struct list_head prev_assocs;
+ struct list_head stale_assocs;
+
+ struct {
+ unsigned long rotations;
+ unsigned long stales;
+ } stats;
+
+ struct rcu_head rcu;
+};
+
+#define PSP_GEN_VALID_MASK 0x7f
+
+/**
+ * struct psp_dev_caps - PSP device capabilities
+ */
+struct psp_dev_caps {
+ /**
+ * @versions: mask of supported PSP versions
+ * Set this field to 0 to indicate PSP is not supported at all.
+ */
+ u32 versions;
+
+ /**
+ * @assoc_drv_spc: size of driver-specific state in Tx assoc
+ * Determines the size of struct psp_assoc::drv_data
+ */
+ u32 assoc_drv_spc;
+};
+
+#define PSP_MAX_KEY 32
+
+#define PSP_HDR_SIZE 16 /* We don't support optional fields, yet */
+#define PSP_TRL_SIZE 16 /* AES-GCM/GMAC trailer size */
+
+struct psp_skb_ext {
+ __be32 spi;
+ u16 dev_id;
+ u8 generation;
+ u8 version;
+};
+
+struct psp_key_parsed {
+ __be32 spi;
+ u8 key[PSP_MAX_KEY];
+};
+
+struct psp_assoc {
+ struct psp_dev *psd;
+
+ u16 dev_id;
+ u8 generation;
+ u8 version;
+ u8 peer_tx;
+
+ u32 upgrade_seq;
+
+ struct psp_key_parsed tx;
+ struct psp_key_parsed rx;
+
+ refcount_t refcnt;
+ struct rcu_head rcu;
+ struct work_struct work;
+ struct list_head assocs_list;
+
+ u8 drv_data[] __aligned(8);
+};
+
+struct psp_dev_stats {
+ union {
+ struct {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_auth_fail;
+ u64 rx_error;
+ u64 rx_bad;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_error;
+ };
+ DECLARE_FLEX_ARRAY(u64, required);
+ };
+};
+
+/**
+ * struct psp_dev_ops - netdev driver facing PSP callbacks
+ */
+struct psp_dev_ops {
+ /**
+ * @set_config: set configuration of a PSP device
+ * Driver can inspect @psd->config for the previous configuration.
+ * Core will update @psd->config with @config on success.
+ */
+ int (*set_config)(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @key_rotate: rotate the device key
+ */
+ int (*key_rotate)(struct psp_dev *psd, struct netlink_ext_ack *extack);
+
+ /**
+ * @rx_spi_alloc: allocate an Rx SPI+key pair
+ * Allocate an Rx SPI and resulting derived key.
+ * This key should remain valid until key rotation.
+ */
+ int (*rx_spi_alloc)(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @tx_key_add: add a Tx key to the device
+ * Install an association in the device. Core will allocate space
+ * for the driver to use at drv_data.
+ */
+ int (*tx_key_add)(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack);
+ /**
+ * @tx_key_del: remove a Tx key from the device
+ * Remove an association from the device.
+ */
+ void (*tx_key_del)(struct psp_dev *psd, struct psp_assoc *pas);
+
+ /**
+ * @get_stats: get statistics from the device
+ * Stats required by the spec must be maintained and filled in.
+ * Stats must be filled in member-by-member, never memset the struct.
+ */
+ void (*get_stats)(struct psp_dev *psd, struct psp_dev_stats *stats);
+};
+
+#endif /* __NET_PSP_H */
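
For a feel of the driver-facing side (see psp_dev_create() in psp/functions.h above), a NIC driver fills in psp_dev_ops and psp_dev_caps and registers at probe time. Everything prefixed mynic_ is hypothetical, and the versions bit is an assumption:

#include <net/psp.h>

static int mynic_psp_set_config(struct psp_dev *psd,
				struct psp_dev_config *conf,
				struct netlink_ext_ack *extack)
{
	/* Apply conf->versions to HW; on success the core copies *conf
	 * into psd->config.
	 */
	return 0;
}

static int mynic_psp_key_rotate(struct psp_dev *psd,
				struct netlink_ext_ack *extack)
{
	return 0;	/* rotate the device master key in HW */
}

static struct psp_dev_ops mynic_psp_ops = {
	.set_config	= mynic_psp_set_config,
	.key_rotate	= mynic_psp_key_rotate,
	/* .rx_spi_alloc, .tx_key_add, .tx_key_del, .get_stats omitted */
};

static struct psp_dev_caps mynic_psp_caps = {
	.versions	= BIT(0),	/* assumption: lowest PSP version only */
	.assoc_drv_spc	= 16,		/* bytes of per-assoc driver state */
};

/* In probe:
 *	psd = psp_dev_create(netdev, &mynic_psp_ops, &mynic_psp_caps, priv);
 */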
diff --git a/include/net/raw.h b/include/net/raw.h
index 32a61481a253..66c0ffeada2e 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -81,6 +81,7 @@ struct raw_sock {
struct inet_sock inet;
struct icmp_filter filter;
u32 ipmr_table;
+ struct numa_drop_counters drop_counters;
};
#define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)
diff --git a/include/net/red.h b/include/net/red.h
index 802287d52c9e..159a09359fc0 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -40,7 +40,7 @@
max_P should be small (not 1), usually 0.01..0.02 is good value.
max_P is chosen as a number, so that max_P/(th_max-th_min)
- is a negative power of two in order arithmetics to contain
+ is a negative power of two in order for the arithmetic to contain
only shifts.
@@ -159,7 +159,7 @@ static inline u32 red_maxp(u8 Plog)
static inline void red_set_vars(struct red_vars *v)
{
/* Reset average queue length, the value is strictly bound
- * to the parameters below, reseting hurts a bit but leaving
+ * to the parameters below, resetting hurts a bit but leaving
* it might result in an unreasonable qavg for a while. --TGR
*/
v->qavg = 0;
@@ -340,7 +340,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
{
/*
* NOTE: v->qavg is fixed point number with point at Wlog.
- * The formula below is equvalent to floating point
+ * The formula below is equivalent to floating point
* version:
*
* qavg = qavg*(1-W) + backlog*W;
@@ -375,7 +375,7 @@ static inline int red_mark_probability(const struct red_parms *p,
OK. qR is random number in the interval
(0..1/max_P)*(qth_max-qth_min)
i.e. 0..(2^Plog). If we used floating point
- arithmetics, it would be: (2^Plog)*rnd_num,
+ arithmetic, it would be: (2^Plog)*rnd_num,
where rnd_num is less 1.
Taking into account, that qavg have fixed
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index ebf9e028d1ef..6633627f6e76 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -71,8 +71,6 @@ enum environment_cap {
* CRDA and can be used by other regulatory requests. When
* the last request is not yet processed we must yield until it
* is processed before processing any new requests.
- * @country_ie_checksum: checksum of the last processed and accepted
- * country IE
* @country_ie_env: lets us know if the AP is telling us we are outdoor,
* indoor, or if it doesn't matter
* @list: used to insert into the reg_requests_list linked list
@@ -123,7 +121,7 @@ struct regulatory_request {
* @REGULATORY_DISABLE_BEACON_HINTS: enable this if your driver needs to
* ensure that passive scan flags and beaconing flags may not be lifted by
* cfg80211 due to regulatory beacon hints. For more information on beacon
- * hints read the documenation for regulatory_hint_found_beacon()
+ * hints read the documentation for regulatory_hint_found_beacon()
* @REGULATORY_COUNTRY_IE_FOLLOW_POWER: for devices that have a preference
* that even though they may have programmed their own custom power
* setting prior to wiphy registration, they want to ensure their channel
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index d88c0dfc2d46..9b9e04f6bb89 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -30,19 +30,14 @@ struct request_sock_ops {
unsigned int obj_size;
struct kmem_cache *slab;
char *slab_name;
- int (*rtx_syn_ack)(const struct sock *sk,
- struct request_sock *req);
void (*send_ack)(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(const struct sock *sk,
struct sk_buff *skb,
enum sk_rst_reason reason);
void (*destructor)(struct request_sock *req);
- void (*syn_ack_timeout)(const struct request_sock *req);
};
-int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
-
struct saved_syn {
u32 mac_hdrlen;
u32 network_hdrlen;
@@ -128,39 +123,6 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb,
return sk;
}
-static inline struct request_sock *
-reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener,
- bool attach_listener)
-{
- struct request_sock *req;
-
- req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
- if (!req)
- return NULL;
- req->rsk_listener = NULL;
- if (attach_listener) {
- if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
- kmem_cache_free(ops->slab, req);
- return NULL;
- }
- req->rsk_listener = sk_listener;
- }
- req->rsk_ops = ops;
- req_to_sk(req)->sk_prot = sk_listener->sk_prot;
- sk_node_init(&req_to_sk(req)->sk_node);
- sk_tx_queue_clear(req_to_sk(req));
- req->saved_syn = NULL;
- req->syncookie = 0;
- req->timeout = 0;
- req->num_timeout = 0;
- req->num_retrans = 0;
- req->sk = NULL;
- refcount_set(&req->rsk_refcnt, 0);
-
- return req;
-}
-#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__))
-
static inline void __reqsk_free(struct request_sock *req)
{
req->rsk_ops->destructor(req);
@@ -172,14 +134,14 @@ static inline void __reqsk_free(struct request_sock *req)
static inline void reqsk_free(struct request_sock *req)
{
- WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
+ DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
__reqsk_free(req);
}
static inline void reqsk_put(struct request_sock *req)
{
if (refcount_dec_and_test(&req->rsk_refcnt))
- reqsk_free(req);
+ __reqsk_free(req);
}
/*
@@ -222,8 +184,8 @@ struct fastopen_queue {
struct request_sock_queue {
spinlock_t rskq_lock;
u8 rskq_defer_accept;
+ u8 synflood_warned;
- u32 synflood_warned;
atomic_t qlen;
atomic_t young;
@@ -285,4 +247,16 @@ static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
return atomic_read(&queue->young);
}
+/* RFC 7323 2.3 Using the Window Scale Option
+ * The window field (SEG.WND) of every outgoing segment, with the
+ * exception of <SYN> segments, MUST be right-shifted by
+ * Rcv.Wind.Shift bits.
+ *
+ * This means the SEG.WND carried in SYNACK can not exceed 65535.
+ * We use this property to harden TCP stack while in NEW_SYN_RECV state.
+ */
+static inline u32 tcp_synack_window(const struct request_sock *req)
+{
+ return min(req->rsk_rcv_wnd, 65535U);
+}
#endif /* _REQUEST_SOCK_H */
diff --git a/include/net/rose.h b/include/net/rose.h
index 23267b4efcfa..2b5491bbf39a 100644
--- a/include/net/rose.h
+++ b/include/net/rose.h
@@ -8,6 +8,7 @@
#ifndef _ROSE_H
#define _ROSE_H
+#include <linux/refcount.h>
#include <linux/rose.h>
#include <net/ax25.h>
#include <net/sock.h>
@@ -96,7 +97,7 @@ struct rose_neigh {
ax25_cb *ax25;
struct net_device *dev;
unsigned short count;
- unsigned short use;
+ refcount_t use;
unsigned int number;
char restarted;
char dce_mode;
@@ -151,6 +152,21 @@ struct rose_sock {
#define rose_sk(sk) ((struct rose_sock *)(sk))
+static inline void rose_neigh_hold(struct rose_neigh *rose_neigh)
+{
+ refcount_inc(&rose_neigh->use);
+}
+
+static inline void rose_neigh_put(struct rose_neigh *rose_neigh)
+{
+ if (refcount_dec_and_test(&rose_neigh->use)) {
+ if (rose_neigh->ax25)
+ ax25_cb_put(rose_neigh->ax25);
+ kfree(rose_neigh->digipeat);
+ kfree(rose_neigh);
+ }
+}
+
/* af_rose.c */
extern ax25_address rose_callsign;
extern int sysctl_rose_restart_request_timeout;
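
Every stored rose_neigh pointer must now be balanced with the helpers above; the final rose_neigh_put() frees the neighbour (and drops its ax25 ref). A minimal sketch with a hypothetical holder structure:

#include <net/rose.h>

struct my_rose_holder {
	struct rose_neigh *neigh;
};

static void my_rose_attach(struct my_rose_holder *h, struct rose_neigh *neigh)
{
	rose_neigh_hold(neigh);
	h->neigh = neigh;
}

static void my_rose_detach(struct my_rose_holder *h)
{
	if (h->neigh) {
		rose_neigh_put(h->neigh);	/* final put frees the neigh */
		h->neigh = NULL;
	}
}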
diff --git a/include/net/route.h b/include/net/route.h
index 93833cfe9c96..f90106f383c5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -27,6 +27,8 @@
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/inet_dscp.h>
+#include <net/sock.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -45,7 +47,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk)
static inline __u8 ip_sock_rt_tos(const struct sock *sk)
{
- return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
+ return READ_ONCE(inet_sk(sk)->tos) & INET_DSCP_MASK;
}
struct ip_tunnel_info;
@@ -128,6 +130,33 @@ struct in_device;
int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
+
+static inline void inet_sk_init_flowi4(const struct inet_sock *inet,
+ struct flowi4 *fl4)
+{
+ const struct ip_options_rcu *ip4_opt;
+ const struct sock *sk;
+ __be32 daddr;
+
+ rcu_read_lock();
+ ip4_opt = rcu_dereference(inet->inet_opt);
+
+ /* Source routing option overrides the socket destination address */
+ if (ip4_opt && ip4_opt->opt.srr)
+ daddr = ip4_opt->opt.faddr;
+ else
+ daddr = inet->inet_daddr;
+ rcu_read_unlock();
+
+ sk = &inet->sk;
+ flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
+ ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
+ sk->sk_protocol, inet_sk_flowi_flags(sk), daddr,
+ inet->inet_saddr, inet->inet_dport,
+ inet->inet_sport, sk_uid(sk));
+ security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
+}
+
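
A hedged usage sketch for the helper just added: a connected socket's output path can build its flow key in one call and route with it (the wrapper is illustrative):

#include <net/route.h>

static struct rtable *myproto_route_connected(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 fl4;

	inet_sk_init_flowi4(inet, &fl4);
	return ip_route_output_flow(sock_net(sk), &fl4, sk);
}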
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
const struct sk_buff *skb);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *flp,
@@ -155,12 +184,12 @@ static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4
* structure is only partially set, it may bypass some fib-rules.
*/
static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
- __be32 saddr, u8 tos, int oif,
- __u8 scope)
+ __be32 saddr, dscp_t dscp,
+ int oif, __u8 scope)
{
struct flowi4 fl4 = {
.flowi4_oif = oif,
- .flowi4_tos = tos,
+ .flowi4_dscp = dscp,
.flowi4_scope = scope,
.daddr = daddr,
.saddr = saddr,
@@ -184,43 +213,34 @@ static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi
return ip_route_output_flow(net, fl4, sk);
}
-static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 *fl4,
- __be32 daddr, __be32 saddr,
- __be32 gre_key, __u8 tos, int oif)
-{
- memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = oif;
- fl4->daddr = daddr;
- fl4->saddr = saddr;
- fl4->flowi4_tos = tos;
- fl4->flowi4_proto = IPPROTO_GRE;
- fl4->fl4_gre_key = gre_key;
- return ip_route_output_key(net, fl4);
-}
-int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev,
- struct in_device *in_dev, u32 *itag);
-int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin);
-int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin,
- const struct sk_buff *hint);
-
-static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
+enum skb_drop_reason
+ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ struct in_device *in_dev, u32 *itag);
+enum skb_drop_reason
+ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev);
+enum skb_drop_reason
+ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ dscp_t dscp, struct net_device *dev,
+ const struct sk_buff *hint);
+
+static inline enum skb_drop_reason
+ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, dscp_t dscp,
+ struct net_device *devin)
{
- int err;
+ enum skb_drop_reason reason;
rcu_read_lock();
- err = ip_route_input_noref(skb, dst, src, tos, devin);
- if (!err) {
+ reason = ip_route_input_noref(skb, dst, src, dscp, devin);
+ if (!reason) {
skb_dst_force(skb);
if (!skb_dst(skb))
- err = -EINVAL;
+ reason = SKB_DROP_REASON_NOT_SPECIFIED;
}
rcu_read_unlock();
- return err;
+ return reason;
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
@@ -265,8 +285,6 @@ static inline void ip_rt_put(struct rtable *rt)
dst_release(&rt->dst);
}
-#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
-
extern const __u8 ip_tos2prio[16];
static inline char rt_tos2priority(u8 tos)
@@ -308,9 +326,12 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
if (inet_test_bit(TRANSPARENT, sk))
flow_flags |= FLOWI_FLAG_ANYSRC;
+ if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !sport)
+ flow_flags |= FLOWI_FLAG_ANY_SPORT;
+
flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk), protocol, flow_flags, dst,
- src, dport, sport, sk->sk_uid);
+ src, dport, sport, sk_uid(sk));
}
static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
@@ -364,10 +385,15 @@ static inline int inet_iif(const struct sk_buff *skb)
static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
- struct net *net = dev_net(dst->dev);
- if (hoplimit == 0)
+ if (hoplimit == 0) {
+ const struct net *net;
+
+ rcu_read_lock();
+ net = dst_dev_net_rcu(dst);
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
+ rcu_read_unlock();
+ }
return hoplimit;
}
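
Since the input-path routines above now return an skb_drop_reason rather than an errno, callers treat zero (SKB_NOT_DROPPED_YET) as success and hand any other value to kfree_skb_reason(). A minimal sketch of the new convention (wrapper illustrative):

#include <net/route.h>

static bool mydemo_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       dscp_t dscp, struct net_device *dev)
{
	enum skb_drop_reason reason;

	reason = ip_route_input(skb, daddr, saddr, dscp, dev);
	if (reason) {
		kfree_skb_reason(skb, reason);	/* consumes the skb */
		return false;
	}
	return true;
}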
diff --git a/include/net/rps.h b/include/net/rps.h
index a93401d23d66..f1794cd2e7fb 100644
--- a/include/net/rps.h
+++ b/include/net/rps.h
@@ -25,13 +25,16 @@ struct rps_map {
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
+ * tail pointer for that CPU's input queue at the time of last enqueue, a
+ * hardware filter index, and the hash of the flow if aRFS is enabled.
*/
struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
+#ifdef CONFIG_RFS_ACCEL
+ u32 hash;
+#endif
};
#define RPS_NO_FILTER 0xffff
@@ -39,7 +42,7 @@ struct rps_dev_flow {
* The rps_dev_flow_table structure contains a table of flow mappings.
*/
struct rps_dev_flow_table {
- unsigned int mask;
+ u8 log;
struct rcu_head rcu;
struct rps_dev_flow flows[];
};
@@ -57,9 +60,10 @@ struct rps_dev_flow_table {
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
- u32 mask;
+ struct rcu_head rcu;
+ u32 mask;
- u32 ents[] ____cacheline_aligned_in_smp;
+ u32 ents[] ____cacheline_aligned_in_smp;
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
@@ -81,11 +85,8 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
WRITE_ONCE(table->ents[index], val);
}
-#endif /* CONFIG_RPS */
-
-static inline void sock_rps_record_flow_hash(__u32 hash)
+static inline void _sock_rps_record_flow_hash(__u32 hash)
{
-#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
if (!hash)
@@ -95,30 +96,84 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
if (sock_flow_table)
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
+}
+
+static inline void _sock_rps_record_flow(const struct sock *sk)
+{
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+ */
+ _sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+ }
+}
+
+static inline void _sock_rps_delete_flow(const struct sock *sk)
+{
+ struct rps_sock_flow_table *table;
+ u32 hash, index;
+
+ hash = READ_ONCE(sk->sk_rxhash);
+ if (!hash)
+ return;
+
+ rcu_read_lock();
+ table = rcu_dereference(net_hotdata.rps_sock_flow_table);
+ if (table) {
+ index = hash & table->mask;
+ if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)
+ WRITE_ONCE(table->ents[index], RPS_NO_CPU);
+ }
+ rcu_read_unlock();
+}
+#endif /* CONFIG_RPS */
+
+static inline bool rfs_is_needed(void)
+{
+#ifdef CONFIG_RPS
+ return static_branch_unlikely(&rfs_needed);
+#else
+ return false;
+#endif
+}
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_record_flow_hash(hash);
#endif
}
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
- */
- if (sk->sk_state == TCP_ESTABLISHED) {
- /* This READ_ONCE() is paired with the WRITE_ONCE()
- * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
- */
- sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
- }
- }
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_record_flow(sk);
+#endif
+}
+
+static inline void sock_rps_delete_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_delete_flow(sk);
#endif
}
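
The refactor above keeps only the static-key test (rfs_is_needed()) in the public wrappers and moves the real work into the _sock_rps_* helpers, so the no-RFS case costs a single predicted branch. Typical call sites are unchanged; a sketch:

#include <net/rps.h>

static void myproto_rx_hint(struct sock *sk)
{
	/* Records sk->sk_rxhash in the sock flow table when RFS is on. */
	sock_rps_record_flow(sk);
}

static void myproto_unhash(struct sock *sk)
{
	/* Clears the flow table entry so aRFS stops steering to this CPU. */
	sock_rps_delete_flow(sk);
}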
diff --git a/include/net/rstreason.h b/include/net/rstreason.h
index 2575c85d7f7a..979ac87b5d99 100644
--- a/include/net/rstreason.h
+++ b/include/net/rstreason.h
@@ -17,6 +17,12 @@
FN(TCP_ABORT_ON_DATA) \
FN(TCP_TIMEWAIT_SOCKET) \
FN(INVALID_SYN) \
+ FN(TCP_ABORT_ON_CLOSE) \
+ FN(TCP_ABORT_ON_LINGER) \
+ FN(TCP_ABORT_ON_MEMORY) \
+ FN(TCP_STATE) \
+ FN(TCP_KEEPALIVE_TIMEOUT) \
+ FN(TCP_DISCONNECT_WITH_DATA) \
FN(MPTCP_RST_EUNSPEC) \
FN(MPTCP_RST_EMPTCP) \
FN(MPTCP_RST_ERESOURCE) \
@@ -30,7 +36,7 @@
/**
* enum sk_rst_reason - the reasons of socket reset
*
- * The reasons of sk reset, which are used in DCCP/TCP/MPTCP protocols.
+ * The reasons of sk reset, which are used in TCP/MPTCP protocols.
*
* There are three parts in order:
* 1) skb drop reasons: relying on drop reasons for such as passive reset
@@ -84,6 +90,39 @@ enum sk_rst_reason {
* an error, send a reset"
*/
SK_RST_REASON_INVALID_SYN,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_CLOSE: abort on close
+ * corresponding to LINUX_MIB_TCPABORTONCLOSE
+ */
+ SK_RST_REASON_TCP_ABORT_ON_CLOSE,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_LINGER: abort on linger
+ * corresponding to LINUX_MIB_TCPABORTONLINGER
+ */
+ SK_RST_REASON_TCP_ABORT_ON_LINGER,
+ /**
+ * @SK_RST_REASON_TCP_ABORT_ON_MEMORY: abort on memory
+ * corresponding to LINUX_MIB_TCPABORTONMEMORY
+ */
+ SK_RST_REASON_TCP_ABORT_ON_MEMORY,
+ /**
+ * @SK_RST_REASON_TCP_STATE: abort on tcp state
+ * Please see RFC 9293 for all possible reset conditions
+ */
+ SK_RST_REASON_TCP_STATE,
+ /**
+ * @SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT: keepalive timeout
+ * When all the keepalive probes have been exhausted (the keepalive
+ * timeout has fired), we have to reset the connection
+ */
+ SK_RST_REASON_TCP_KEEPALIVE_TIMEOUT,
+ /**
+ * @SK_RST_REASON_TCP_DISCONNECT_WITH_DATA: disconnect when write
+ * queue is not empty
+ * It means the user has written data into the write queue when
+ * disconnecting, so we have to send an RST.
+ */
+ SK_RST_REASON_TCP_DISCONNECT_WITH_DATA,
/* Copy from include/uapi/linux/mptcp.h.
* These reset fields will not be changed since they adhere to
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3bfb80bad173..ec65a8cebb99 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -3,6 +3,7 @@
#define __NET_RTNETLINK_H
#include <linux/rtnetlink.h>
+#include <linux/srcu.h>
#include <net/netlink.h>
typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
@@ -11,8 +12,11 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
+#define RTNL_FLAG_DOIT_PERNET RTNL_FLAG_DOIT_UNLOCKED
+#define RTNL_FLAG_DOIT_PERNET_WIP RTNL_FLAG_DOIT_UNLOCKED
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
};
enum rtnl_kinds {
@@ -28,13 +32,35 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype)
return msgtype & RTNL_KIND_MASK;
}
-void rtnl_register(int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_register_module(struct module *owner, int protocol, int msgtype,
- rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
-int rtnl_unregister(int protocol, int msgtype);
+/**
+ * struct rtnl_msg_handler - rtnetlink message type and handlers
+ *
+ * @owner: NULL for built-in, THIS_MODULE for module
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ */
+struct rtnl_msg_handler {
+ struct module *owner;
+ int protocol;
+ int msgtype;
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+ int flags;
+};
+
void rtnl_unregister_all(int protocol);
+int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n);
+void __rtnl_unregister_many(const struct rtnl_msg_handler *handlers, int n);
+
+#define rtnl_register_many(handlers) \
+ __rtnl_register_many(handlers, ARRAY_SIZE(handlers))
+#define rtnl_unregister_many(handlers) \
+ __rtnl_unregister_many(handlers, ARRAY_SIZE(handlers))
+
static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
{
if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
@@ -44,11 +70,47 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
}
/**
+ * struct rtnl_newlink_params - parameters of rtnl_link_ops::newlink()
+ *
+ * @src_net: Source netns of rtnetlink socket
+ * @link_net: Link netns by IFLA_LINK_NETNSID, NULL if not specified
+ * @peer_net: Peer netns
+ * @tb: IFLA_* attributes
+ * @data: IFLA_INFO_DATA attributes
+ */
+struct rtnl_newlink_params {
+ struct net *src_net;
+ struct net *link_net;
+ struct net *peer_net;
+ struct nlattr **tb;
+ struct nlattr **data;
+};
+
+/* Get effective link netns from newlink params. Generally, this is link_net
+ * and falls back to src_net. But for compatibility, a driver may choose to
+ * use dev_net(dev) instead.
+ */
+static inline struct net *rtnl_newlink_link_net(struct rtnl_newlink_params *p)
+{
+ return p->link_net ? : p->src_net;
+}
+
+/* Get peer netns from newlink params. Fallback to link netns if peer netns is
+ * not specified explicitly.
+ */
+static inline struct net *rtnl_newlink_peer_net(struct rtnl_newlink_params *p)
+{
+ return p->peer_net ? : rtnl_newlink_link_net(p);
+}
+
+/**
* struct rtnl_link_ops - rtnetlink link operations
*
- * @list: Used internally
+ * @list: Used internally, protected by link_ops_mutex and SRCU
+ * @srcu: Used internally
* @kind: Identifier
* @netns_refund: Physical device, move to init_net on netns exit
+ * @peer_type: Peer device specific netlink attribute number (e.g. VETH_INFO_PEER)
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
@@ -77,6 +139,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*/
struct rtnl_link_ops {
struct list_head list;
+ struct srcu_struct srcu;
const char *kind;
@@ -89,16 +152,15 @@ struct rtnl_link_ops {
void (*setup)(struct net_device *dev);
bool netns_refund;
+ const u16 peer_type;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack);
- int (*newlink)(struct net *src_net,
- struct net_device *dev,
- struct nlattr *tb[],
- struct nlattr *data[],
+ int (*newlink)(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
int (*changelink)(struct net_device *dev,
struct nlattr *tb[],
@@ -137,16 +199,14 @@ struct rtnl_link_ops {
int *prividx, int attr);
};
-int __rtnl_link_register(struct rtnl_link_ops *ops);
-void __rtnl_link_unregister(struct rtnl_link_ops *ops);
-
int rtnl_link_register(struct rtnl_link_ops *ops);
void rtnl_link_unregister(struct rtnl_link_ops *ops);
/**
* struct rtnl_af_ops - rtnetlink address family operations
*
- * @list: Used internally
+ * @list: Used internally, protected by RTNL and SRCU
+ * @srcu: Used internally
* @family: Address family
* @fill_link_af: Function to fill IFLA_AF_SPEC with address family
* specific netlink attributes.
@@ -159,6 +219,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops);
*/
struct rtnl_af_ops {
struct list_head list;
+ struct srcu_struct srcu;
+
int family;
int (*fill_link_af)(struct sk_buff *skb,
@@ -178,7 +240,7 @@ struct rtnl_af_ops {
size_t (*get_stats_af_size)(const struct net_device *dev);
};
-void rtnl_af_register(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
void rtnl_af_unregister(struct rtnl_af_ops *ops);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
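
With the table-driven registration above, users declare one array and register or unregister everything in one call. A sketch of the intended pattern (the message types and callbacks are illustrative):

#include <net/rtnetlink.h>

static int myfam_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct netlink_ext_ack *extack)
{
	return 0;
}

static int myfam_dumpaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

static const struct rtnl_msg_handler myfam_rtnl_msg_handlers[] = {
	{ .protocol = PF_UNSPEC, .msgtype = RTM_NEWADDR,
	  .doit = myfam_newaddr },
	{ .protocol = PF_UNSPEC, .msgtype = RTM_GETADDR,
	  .dumpit = myfam_dumpaddr },
};

/* init:	err = rtnl_register_many(myfam_rtnl_msg_handlers);
 * exit:	rtnl_unregister_many(myfam_rtnl_msg_handlers);
 */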
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 79edd5b5e3c9..c3a7268b567e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -41,13 +41,6 @@ enum qdisc_state_t {
__QDISC_STATE_DRAINING,
};
-enum qdisc_state2_t {
- /* Only for !TCQ_F_NOLOCK qdisc. Never access it directly.
- * Use qdisc_run_begin/end() or qdisc_is_running() instead.
- */
- __QDISC_STATE2_RUNNING,
-};
-
#define QDISC_STATE_MISSED BIT(__QDISC_STATE_MISSED)
#define QDISC_STATE_DRAINING BIT(__QDISC_STATE_DRAINING)
@@ -95,6 +88,8 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_DEQUEUE_DROPS 0x400 /* ->dequeue() can drop packets in q->to_free */
+
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -110,20 +105,30 @@ struct Qdisc {
int pad;
refcount_t refcnt;
- /*
- * For performance sake on SMP, we put highly modified fields at the end
- */
- struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
- struct qdisc_skb_head q;
- struct gnet_stats_basic_sync bstats;
- struct gnet_stats_queue qstats;
- int owner;
- unsigned long state;
- unsigned long state2; /* must be written under qdisc spinlock */
- struct Qdisc *next_sched;
- struct sk_buff_head skb_bad_txq;
-
- spinlock_t busylock ____cacheline_aligned_in_smp;
+ /* Cache line potentially dirtied in dequeue() or __netif_reschedule(). */
+ __cacheline_group_begin(Qdisc_read_mostly) ____cacheline_aligned;
+ struct sk_buff_head gso_skb;
+ struct Qdisc *next_sched;
+ struct sk_buff_head skb_bad_txq;
+ __cacheline_group_end(Qdisc_read_mostly);
+
+ /* Fields dirtied in dequeue() fast path. */
+ __cacheline_group_begin(Qdisc_write) ____cacheline_aligned;
+ struct qdisc_skb_head q;
+ unsigned long state;
+ struct gnet_stats_basic_sync bstats;
+ bool running; /* must be written under qdisc spinlock */
+
+ /* Note : we only change qstats.backlog in fast path. */
+ struct gnet_stats_queue qstats;
+
+ struct sk_buff *to_free;
+ __cacheline_group_end(Qdisc_write);
+
+
+ atomic_long_t defer_count ____cacheline_aligned_in_smp;
+ struct llist_head defer_list;
+
spinlock_t seqlock;
struct rcu_head rcu;
@@ -168,7 +173,7 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK)
return spin_is_locked(&qdisc->seqlock);
- return test_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ return READ_ONCE(qdisc->running);
}
static inline bool nolock_qdisc_is_empty(const struct Qdisc *qdisc)
@@ -211,11 +216,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
*/
return spin_trylock(&qdisc->seqlock);
}
- return !__test_and_set_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ if (READ_ONCE(qdisc->running))
+ return false;
+ WRITE_ONCE(qdisc->running, true);
+ return true;
}
-static inline void qdisc_run_end(struct Qdisc *qdisc)
+static inline struct sk_buff *qdisc_run_end(struct Qdisc *qdisc)
{
+ struct sk_buff *to_free = NULL;
+
if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
@@ -228,9 +238,16 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
if (unlikely(test_bit(__QDISC_STATE_MISSED,
&qdisc->state)))
__netif_schedule(qdisc);
- } else {
- __clear_bit(__QDISC_STATE2_RUNNING, &qdisc->state2);
+ return NULL;
+ }
+
+ if (qdisc->flags & TCQ_F_DEQUEUE_DROPS) {
+ to_free = qdisc->to_free;
+ if (to_free)
+ qdisc->to_free = NULL;
}
+ WRITE_ONCE(qdisc->running, false);
+ return to_free;
}
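/* A minimal caller-side sketch of the new qdisc_run_end() contract,
 * assuming a __dev_xmit_skb()-style path that owns the qdisc root lock;
 * the function name and surrounding logic here are illustrative, not
 * part of this patch.
 */
static void example_xmit_tail(struct Qdisc *q, spinlock_t *root_lock)
{
	struct sk_buff *to_free;

	spin_lock(root_lock);
	/* ... enqueue/dequeue work under the qdisc root lock ... */
	to_free = qdisc_run_end(q);	/* drains q->to_free for TCQ_F_DEQUEUE_DROPS */
	spin_unlock(root_lock);

	/* Packets dropped inside ->dequeue() are freed outside the lock. */
	tcf_kfree_skb_list(to_free);
}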
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -425,19 +442,23 @@ struct tcf_proto {
spinlock_t lock;
bool deleting;
bool counted;
+ bool usesw;
refcount_t refcnt;
struct rcu_head rcu;
struct hlist_node destroy_ht_node;
};
struct qdisc_skb_cb {
- struct {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
- };
+ unsigned int pkt_len;
+ u16 pkt_segs;
+ u16 tc_classid;
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
+
+ u16 slave_dev_queue_mapping;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
@@ -474,9 +495,7 @@ struct tcf_block {
struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
- bool bypass_wanted;
- atomic_t filtercnt; /* Number of filters */
- atomic_t skipswcnt; /* Number of skip_sw filters */
+ atomic_t useswcnt;
	atomic_t offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
@@ -804,6 +823,14 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
return false;
}
+/* "noqueue" qdisc identified by not having any enqueue, see noqueue_init() */
+static inline bool qdisc_txq_has_no_queue(const struct netdev_queue *txq)
+{
+ struct Qdisc *qdisc = rcu_access_pointer(txq->qdisc);
+
+ return qdisc->enqueue == NULL;
+}
+
/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
@@ -822,6 +849,15 @@ static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
return qdisc_skb_cb(skb)->pkt_len;
}
+static inline unsigned int qdisc_pkt_segs(const struct sk_buff *skb)
+{
+ u32 pkt_segs = qdisc_skb_cb(skb)->pkt_segs;
+
+ DEBUG_NET_WARN_ON_ONCE(pkt_segs !=
+ (skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1));
+ return pkt_segs;
+}
+
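/* Sketch of the enqueue-side counterpart implied by the warning above:
 * pkt_segs is expected to be cached once, before the skb enters the
 * qdisc; the helper name is illustrative.
 */
static inline void example_cache_pkt_segs(struct sk_buff *skb)
{
	qdisc_skb_cb(skb)->pkt_segs =
		skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}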
/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
__NET_XMIT_STOLEN = 0x00010000,
@@ -848,12 +884,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch, to_free);
}
static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
- __u64 bytes, __u32 packets)
+ __u64 bytes, __u64 packets)
{
u64_stats_update_begin(&bstats->syncp);
u64_stats_add(&bstats->bytes, bytes);
@@ -864,9 +899,7 @@ static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
static inline void bstats_update(struct gnet_stats_basic_sync *bstats,
const struct sk_buff *skb)
{
- _bstats_update(bstats,
- qdisc_pkt_len(skb),
- skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
+ _bstats_update(bstats, qdisc_pkt_len(skb), qdisc_pkt_segs(skb));
}
static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
@@ -967,14 +1000,6 @@ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
*backlog = qstats.backlog;
}
-static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
-{
- __u32 qlen, backlog;
-
- qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
- qdisc_tree_reduce_backlog(sch, qlen, backlog);
-}
-
static inline void qdisc_purge_queue(struct Qdisc *sch)
{
__u32 qlen, backlog;
@@ -1033,6 +1058,26 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
return skb;
}
+static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&sch->gso_skb);
+ if (skb) {
+ sch->q.qlen--;
+ qdisc_qstats_backlog_dec(sch, skb);
+ return skb;
+ }
+ if (direct) {
+ skb = __qdisc_dequeue_head(&sch->q);
+ if (skb)
+ qdisc_qstats_backlog_dec(sch, skb);
+ return skb;
+ } else {
+ return sch->dequeue(sch);
+ }
+}
+
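/* Hedged sketch of the intended use: trimming a qdisc to a smaller limit
 * from ->change(), under RTNL. "direct" is true for fifo-style qdiscs
 * storing packets in sch->q; rtnl_kfree_skbs() is from linux/rtnetlink.h.
 */
static void example_trim_to_limit(struct Qdisc *sch, unsigned int limit)
{
	unsigned int drop_len = 0, drop_count = 0;
	struct sk_buff *skb;

	while (sch->q.qlen > limit) {
		skb = qdisc_dequeue_internal(sch, true);
		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		drop_count++;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
}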
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
@@ -1049,11 +1094,8 @@ struct tc_skb_cb {
struct qdisc_skb_cb qdisc_cb;
u32 drop_reason;
- u16 zone; /* Only valid if post_ct = true */
+ u16 zone; /* Only valid if qdisc_skb_cb(skb)->post_ct = true */
u16 mru;
- u8 post_ct:1;
- u8 post_ct_snat:1;
- u8 post_ct_dnat:1;
};
static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
@@ -1076,6 +1118,28 @@ static inline void tcf_set_drop_reason(const struct sk_buff *skb,
tc_skb_cb(skb)->drop_reason = reason;
}
+static inline void tcf_kfree_skb_list(struct sk_buff *skb)
+{
+ while (unlikely(skb)) {
+ struct sk_buff *next = skb->next;
+
+ prefetch(next);
+ kfree_skb_reason(skb, tcf_get_drop_reason(skb));
+ skb = next;
+ }
+}
+
+static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
+ enum skb_drop_reason reason)
+{
+ DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
+ DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
+
+ tcf_set_drop_reason(skb, reason);
+ skb->next = q->to_free;
+ q->to_free = skb;
+}
+
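/* Hedged sketch of a ->dequeue() using the deferred-drop helper; a real
 * user must set TCQ_F_DEQUEUE_DROPS at init time, and
 * example_skb_expired() stands in for a real drop decision.
 */
static bool example_skb_expired(const struct sk_buff *skb);

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = __qdisc_dequeue_head(&sch->q)) != NULL) {
		qdisc_qstats_backlog_dec(sch, skb);
		if (!example_skb_expired(skb))
			return skb;
		/* Chained on q->to_free, freed after qdisc_run_end(). */
		qdisc_dequeue_drop(sch, skb, SKB_DROP_REASON_QDISC_DROP);
	}
	return NULL;
}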
/* Instead of calling kfree_skb() while root qdisc lock is held,
* queue the skb for future freeing at end of __dev_xmit_skb()
*/
@@ -1246,6 +1310,14 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
+static inline int qdisc_drop_reason(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff **to_free,
+ enum skb_drop_reason reason)
+{
+ tcf_set_drop_reason(skb, reason);
+ return qdisc_drop(skb, sch, to_free);
+}
+
static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
diff --git a/include/net/scm.h b/include/net/scm.h
index 0d35c7c77a74..c52519669349 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -69,7 +69,7 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co
static __inline__ void scm_set_cred(struct scm_cookie *scm,
struct pid *pid, kuid_t uid, kgid_t gid)
{
- scm->pid = get_pid(pid);
+ scm->pid = get_pid(pid);
scm->creds.pid = pid_vnr(pid);
scm->creds.uid = uid;
scm->creds.gid = gid;
@@ -78,7 +78,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
{
put_pid(scm->pid);
- scm->pid = NULL;
+ scm->pid = NULL;
}
static __inline__ void scm_destroy(struct scm_cookie *scm)
@@ -102,123 +102,10 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
return __scm_send(sock, msg, scm);
}
-#ifdef CONFIG_SECURITY_NETWORK
-static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
-{
- char *secdata;
- u32 seclen;
- int err;
-
- if (test_bit(SOCK_PASSSEC, &sock->flags)) {
- err = security_secid_to_secctx(scm->secid, &secdata, &seclen);
-
- if (!err) {
- put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata);
- security_release_secctx(secdata, seclen);
- }
- }
-}
-
-static inline bool scm_has_secdata(struct socket *sock)
-{
- return test_bit(SOCK_PASSSEC, &sock->flags);
-}
-#else
-static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
-{ }
-
-static inline bool scm_has_secdata(struct socket *sock)
-{
- return false;
-}
-#endif /* CONFIG_SECURITY_NETWORK */
-
-static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
-{
- struct file *pidfd_file = NULL;
- int len, pidfd;
-
- /* put_cmsg() doesn't return an error if CMSG is truncated,
- * that's why we need to opencode these checks here.
- */
- if (msg->msg_flags & MSG_CMSG_COMPAT)
- len = sizeof(struct compat_cmsghdr) + sizeof(int);
- else
- len = sizeof(struct cmsghdr) + sizeof(int);
-
- if (msg->msg_controllen < len) {
- msg->msg_flags |= MSG_CTRUNC;
- return;
- }
-
- if (!scm->pid)
- return;
-
- pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file);
-
- if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) {
- if (pidfd_file) {
- put_unused_fd(pidfd);
- fput(pidfd_file);
- }
-
- return;
- }
-
- if (pidfd_file)
- fd_install(pidfd, pidfd_file);
-}
-
-static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags) ||
- scm->fp || scm_has_secdata(sock))
- msg->msg_flags |= MSG_CTRUNC;
- scm_destroy(scm);
- return false;
- }
-
- if (test_bit(SOCK_PASSCRED, &sock->flags)) {
- struct user_namespace *current_ns = current_user_ns();
- struct ucred ucreds = {
- .pid = scm->creds.pid,
- .uid = from_kuid_munged(current_ns, scm->creds.uid),
- .gid = from_kgid_munged(current_ns, scm->creds.gid),
- };
- put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
- }
-
- scm_passec(sock, msg, scm);
-
- if (scm->fp)
- scm_detach_fds(msg, scm);
-
- return true;
-}
-
-static inline void scm_recv(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!__scm_recv_common(sock, msg, scm, flags))
- return;
-
- scm_destroy_cred(scm);
-}
-
-static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
-{
- if (!__scm_recv_common(sock, msg, scm, flags))
- return;
-
- if (test_bit(SOCK_PASSPIDFD, &sock->flags))
- scm_pidfd_recv(msg, scm);
-
- scm_destroy_cred(scm);
-}
+void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags);
+void scm_recv_unix(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags);
static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
unsigned int flags)
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index d4b3b2dcd15b..6f2cd562b1de 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -22,16 +22,11 @@ struct sctp_endpoint;
struct sctp_association;
struct sctp_authkey;
struct sctp_hmacalgo;
-struct crypto_shash;
-/*
- * Define a generic struct that will hold all the info
- * necessary for an HMAC transform
- */
+/* Defines an HMAC algorithm supported by SCTP chunk authentication */
struct sctp_hmac {
- __u16 hmac_id; /* one of the above ids */
- char *hmac_name; /* name for loading */
- __u16 hmac_len; /* length of the signature */
+ __u16 hmac_id; /* one of SCTP_AUTH_HMAC_ID_* */
+ __u16 hmac_len; /* length of the HMAC value in bytes */
};
/* This is a generic structure that contains authentication bytes used
@@ -77,10 +72,9 @@ struct sctp_shared_key *sctp_auth_get_shkey(
int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
struct sctp_association *asoc,
gfp_t gfp);
-int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
-void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
-struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
-struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
+const struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
+const struct sctp_hmac *
+sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
struct sctp_hmac_algo_param *hmacs);
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index f514a0aa849e..654d37ec0402 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -15,8 +15,6 @@
* Dinakaran Joseph
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Rewritten to use libcrc32c by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
*/
@@ -25,42 +23,18 @@
#include <linux/types.h>
#include <linux/sctp.h>
-#include <linux/crc32c.h>
-#include <linux/crc32.h>
-
-static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
-{
- /* This uses the crypto implementation of crc32c, which is either
- * implemented w/ hardware support or resolves to __crc32c_le().
- */
- return (__force __wsum)crc32c((__force __u32)sum, buff, len);
-}
-
-static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
- int offset, int len)
-{
- return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
- (__force __u32)csum2, len);
-}
-
-static const struct skb_checksum_ops sctp_csum_ops = {
- .update = sctp_csum_update,
- .combine = sctp_csum_combine,
-};
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
__le32 old = sh->checksum;
- __wsum new;
+ u32 new;
sh->checksum = 0;
- new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0,
- &sctp_csum_ops);
+ new = ~skb_crc32c(skb, offset, skb->len - offset, ~0);
sh->checksum = old;
-
- return cpu_to_le32((__force __u32)new);
+ return cpu_to_le32(new);
}
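/* A small receive-side sketch of the simplified helper in use; "offset"
 * is the SCTP header offset within skb->data, as above. The function
 * name is illustrative.
 */
static bool example_sctp_csum_ok(struct sk_buff *skb, unsigned int offset)
{
	const struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);

	/* skb_crc32c() walks frags, so nonlinear skbs work too. */
	return sh->checksum == sctp_compute_cksum(skb, offset);
}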
#endif /* __sctp_checksum_h__ */
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 5859e0a16a58..ae3376ba0b99 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -296,9 +296,8 @@ enum { SCTP_MAX_GABS = 16 };
*/
#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
-#define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. */
-
-#define SCTP_SIGNATURE_SIZE 20 /* size of a SLA-1 signature */
+#define SCTP_COOKIE_KEY_SIZE 32 /* size of cookie HMAC key */
+#define SCTP_COOKIE_MAC_SIZE 32 /* size of HMAC field in cookies */
#define SCTP_COOKIE_MULTIPLE 32 /* Pad out our cookie to make our hash
* functions simpler to write.
@@ -417,16 +416,12 @@ enum {
SCTP_AUTH_HMAC_ID_RESERVED_0,
SCTP_AUTH_HMAC_ID_SHA1,
SCTP_AUTH_HMAC_ID_RESERVED_2,
-#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
SCTP_AUTH_HMAC_ID_SHA256,
-#endif
__SCTP_AUTH_HMAC_MAX
};
#define SCTP_AUTH_HMAC_ID_MAX __SCTP_AUTH_HMAC_MAX - 1
#define SCTP_AUTH_NUM_HMACS __SCTP_AUTH_HMAC_MAX
-#define SCTP_SHA1_SIG_SIZE 20
-#define SCTP_SHA256_SIG_SIZE 32
/* SCTP-AUTH, Section 3.2
* The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH chunks
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a2310fa995f6..58242b37b47a 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -28,7 +28,7 @@
#define __net_sctp_h__
/* Header Strategy.
- * Start getting some control over the header file depencies:
+ * Start getting some control over the header file dependencies:
* includes
* constants
* structs
@@ -85,7 +85,7 @@ void sctp_udp_sock_stop(struct net *net);
/*
* sctp/socket.c
*/
-int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+int sctp_inet_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
int addr_len, int flags);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
@@ -94,8 +94,7 @@ void sctp_data_ready(struct sock *sk);
__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
-void sctp_copy_sock(struct sock *newsk, struct sock *sk,
- struct sctp_association *asoc);
+
extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *);
@@ -364,8 +363,6 @@ sctp_assoc_to_state(const struct sctp_association *asoc)
/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
-int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
-
/* A macro to walk a list of skbs. */
#define sctp_skb_for_each(pos, head, tmp) \
skb_queue_walk_safe(head, pos, tmp)
@@ -636,7 +633,7 @@ static inline void sctp_transport_pl_reset(struct sctp_transport *t)
}
} else {
if (t->pl.state != SCTP_PL_DISABLED) {
- if (del_timer(&t->probe_timer))
+ if (timer_delete(&t->probe_timer))
sctp_transport_put(t);
t->pl.state = SCTP_PL_DISABLED;
}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 64c42bd56bb2..3bfd261a53cc 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -161,7 +161,6 @@ const struct sctp_sm_table_entry *sctp_sm_lookup_event(
enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype);
-int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
gfp_t gfp);
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index 572d73fdcd5e..77806ef1cb70 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -35,10 +35,10 @@ struct sctp_sched_ops {
struct sctp_chunk *(*dequeue)(struct sctp_outq *q);
/* Called only if the chunk fit the packet */
void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk);
- /* Sched all chunks already enqueued */
- void (*sched_all)(struct sctp_stream *steam);
- /* Unched all chunks already enqueued */
- void (*unsched_all)(struct sctp_stream *steam);
+ /* Schedule all chunks already enqueued */
+ void (*sched_all)(struct sctp_stream *stream);
+ /* Unschedule all chunks already enqueued */
+ void (*unsched_all)(struct sctp_stream *stream);
};
int sctp_sched_set_sched(struct sctp_association *asoc,
@@ -52,10 +52,10 @@ void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch);
void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
-struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+const struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
void sctp_sched_ops_register(enum sctp_sched_type sched,
- struct sctp_sched_ops *sched_ops);
+ const struct sctp_sched_ops *sched_ops);
void sctp_sched_ops_prio_init(void);
void sctp_sched_ops_rr_init(void);
void sctp_sched_ops_fc_init(void);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f24a1bbcb3ef..affee44bd38e 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -32,6 +32,7 @@
#ifndef __sctp_structs_h__
#define __sctp_structs_h__
+#include <crypto/sha2.h>
#include <linux/ktime.h>
#include <linux/generic-radix-tree.h>
#include <linux/rhashtable-types.h>
@@ -51,9 +52,9 @@
* We should wean ourselves off this.
*/
union sctp_addr {
+ struct sockaddr_inet sa; /* Large enough for both address families */
struct sockaddr_in v4;
struct sockaddr_in6 v6;
- struct sockaddr sa;
};
/* Forward declarations for data structures. */
@@ -68,7 +69,6 @@ struct sctp_outq;
struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_ep_common;
-struct crypto_shash;
struct sctp_stream;
@@ -155,10 +155,6 @@ struct sctp_sock {
/* PF_ family specific functions. */
struct sctp_pf *pf;
- /* Access to HMAC transform. */
- struct crypto_shash *hmac;
- char *sctp_hmac_alg;
-
/* What is our base endpointer? */
struct sctp_endpoint *ep;
@@ -227,14 +223,11 @@ struct sctp_sock {
frag_interleave:1,
recvrcvinfo:1,
recvnxtinfo:1,
- data_ready_signalled:1;
+ data_ready_signalled:1,
+ cookie_auth_enable:1;
atomic_t pd_mode;
- /* Fields after this point will be skipped on copies, like on accept
- * and peeloff operations
- */
-
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
@@ -335,7 +328,7 @@ struct sctp_cookie {
/* The format of our cookie that we send to our peer. */
struct sctp_signed_cookie {
- __u8 signature[SCTP_SECRET_SIZE];
+ __u8 mac[SCTP_COOKIE_MAC_SIZE];
__u32 __pad; /* force sctp_cookie alignment to 64 bits */
struct sctp_cookie c;
} __packed;
@@ -500,9 +493,6 @@ struct sctp_pf {
int (*bind_verify) (struct sctp_sock *, union sctp_addr *);
int (*send_verify) (struct sctp_sock *, union sctp_addr *);
int (*supported_addrs)(const struct sctp_sock *, __be16 *);
- struct sock *(*create_accept_sk) (struct sock *sk,
- struct sctp_association *asoc,
- bool kern);
int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
@@ -521,7 +511,7 @@ struct sctp_datamsg {
refcount_t refcnt;
/* When is this message no longer interesting to the peer? */
unsigned long expires_at;
- /* Did the messenge fail to send? */
+ /* Did the message fail to send? */
int send_error;
u8 send_failed:1,
can_delay:1, /* should this message be Nagle delayed */
@@ -775,6 +765,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
+ __u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -784,7 +775,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
- __u32 rto_pending:1,
+ rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending
@@ -792,7 +783,7 @@ struct sctp_transport {
*/
hb_sent:1,
- /* Is the Path MTU update pending on this tranport */
+ /* Is the Path MTU update pending on this transport */
pmtu_pending:1,
dst_pending_confirm:1, /* need to confirm neighbour */
@@ -1078,7 +1069,7 @@ struct sctp_outq {
struct list_head out_chunk_list;
/* Stream scheduler being used */
- struct sctp_sched_ops *sched;
+ const struct sctp_sched_ops *sched;
unsigned int out_qlen; /* Total length of queued data chunks. */
@@ -1223,7 +1214,7 @@ enum sctp_endpoint_type {
};
/*
- * A common base class to bridge the implmentation view of a
+ * A common base class to bridge the implementation view of a
* socket (usually listening) endpoint versus an association's
* local endpoint.
* This common structure is useful for several purposes:
@@ -1306,33 +1297,15 @@ struct sctp_endpoint {
/* This is really a list of struct sctp_association entries. */
struct list_head asocs;
- /* Secret Key: A secret key used by this endpoint to compute
- * the MAC. This SHOULD be a cryptographic quality
- * random number with a sufficient length.
- * Discussion in [RFC1750] can be helpful in
- * selection of the key.
- */
- __u8 secret_key[SCTP_SECRET_SIZE];
-
- /* digest: This is a digest of the sctp cookie. This field is
- * only used on the receive path when we try to validate
- * that the cookie has not been tampered with. We put
- * this here so we pre-allocate this once and can re-use
- * on every receive.
- */
- __u8 *digest;
-
+ /* Cookie authentication key used by this endpoint */
+ struct hmac_sha256_key cookie_auth_key;
+
/* sendbuf acct. policy. */
__u32 sndbuf_policy;
/* rcvbuf acct. policy. */
__u32 rcvbuf_policy;
- /* SCTP AUTH: array of the HMACs that will be allocated
- * we need this per association so that we don't serialize
- */
- struct crypto_shash **auth_hmacs;
-
/* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
struct sctp_hmac_algo_param *auth_hmacs_list;
@@ -1353,7 +1326,7 @@ struct sctp_endpoint {
struct rcu_head rcu;
};
-/* Recover the outter endpoint structure. */
+/* Recover the outer endpoint structure. */
static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
{
struct sctp_endpoint *ep;
@@ -1906,7 +1879,7 @@ struct sctp_association {
__u32 rwnd_over;
	/* Keeps track of rwnd pressure. This happens when we have
-	 * a window, but not recevie buffer (i.e small packets). This one
+	 * a window, but not receive buffer (i.e. small packets). This one
	 * is released slowly (1 PMTU at a time).
*/
__u32 rwnd_press;
@@ -1994,7 +1967,7 @@ struct sctp_association {
/* ADDIP Section 5.2 Upon reception of an ASCONF Chunk.
*
- * This is needed to implement itmes E1 - E4 of the updated
+ * This is needed to implement items E1 - E4 of the updated
* spec. Here is the justification:
*
* Since the peer may bundle multiple ASCONF chunks toward us,
@@ -2005,7 +1978,7 @@ struct sctp_association {
/* These ASCONF chunks are waiting to be sent.
*
- * These chunaks can't be pushed to outqueue until receiving
+ * These chunks can't be pushed to outqueue until receiving
* ASCONF_ACK for the previous ASCONF indicated by
* addip_last_asconf, so as to guarantee that only one ASCONF
* is in flight at any time.
@@ -2059,13 +2032,13 @@ struct sctp_association {
struct sctp_transport *new_transport;
/* SCTP AUTH: list of the endpoint shared keys. These
- * keys are provided out of band by the user applicaton
+ * keys are provided out of band by the user application
* and can't change during the lifetime of the association
*/
struct list_head endpoint_shared_keys;
/* SCTP AUTH:
- * The current generated assocaition shared key (secret)
+ * The current generated association shared key (secret)
*/
struct sctp_auth_bytes *asoc_shared_key;
struct sctp_shared_key *shkey;
@@ -2121,7 +2094,7 @@ enum {
SCTP_ASSOC_EYECATCHER = 0xa550c123,
};
-/* Recover the outter association structure. */
+/* Recover the outer association structure. */
static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
{
struct sctp_association *asoc;
@@ -2151,8 +2124,6 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
const union sctp_addr *address,
const gfp_t gfp,
const int peer_state);
-void sctp_assoc_del_peer(struct sctp_association *asoc,
- const union sctp_addr *addr);
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer);
void sctp_assoc_control_transport(struct sctp_association *asoc,
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 21e7fa2a1813..cddebafb9f77 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -16,9 +16,5 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
u32 secure_tcpv6_ts_off(const struct net *net,
const __be32 *saddr, const __be32 *daddr);
-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport);
#endif /* _NET_SECURE_SEQ */
diff --git a/include/net/seg6.h b/include/net/seg6.h
index af668f17b398..82b3fbbcbb93 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -52,10 +52,17 @@ static inline struct seg6_pernet_data *seg6_pernet(struct net *net)
extern int seg6_init(void);
extern void seg6_exit(void);
+#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
extern int seg6_iptunnel_init(void);
extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
+#else
+static inline int seg6_iptunnel_init(void) { return 0; }
+static inline void seg6_iptunnel_exit(void) {}
+static inline int seg6_local_init(void) { return 0; }
+static inline void seg6_local_exit(void) {}
+#endif
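/* Hedged sketch: with the !CONFIG_IPV6_SEG6_LWTUNNEL stubs above, the
 * core init path can call the sub-module initializers unconditionally;
 * the unwind order below is illustrative.
 */
static int __init example_seg6_modules_init(void)
{
	int err;

	err = seg6_iptunnel_init();
	if (err)
		return err;

	err = seg6_local_init();
	if (err)
		seg6_iptunnel_exit();

	return err;
}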
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 2b5d2ee5613e..e9f41725933e 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -9,6 +9,8 @@
#ifndef _NET_SEG6_HMAC_H
#define _NET_SEG6_HMAC_H
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
@@ -19,7 +21,6 @@
#include <linux/seg6_hmac.h>
#include <linux/rhashtable-types.h>
-#define SEG6_HMAC_MAX_DIGESTSIZE 160
#define SEG6_HMAC_RING_SIZE 256
struct seg6_hmac_info {
@@ -27,16 +28,15 @@ struct seg6_hmac_info {
struct rcu_head rcu;
u32 hmackeyid;
+ /* The raw key, kept only so it can be returned to userspace */
char secret[SEG6_HMAC_SECRET_LEN];
u8 slen;
u8 alg_id;
-};
-
-struct seg6_hmac_algo {
- u8 alg_id;
- char name[64];
- struct crypto_shash * __percpu *tfms;
- struct shash_desc * __percpu *shashs;
+ /* The prepared key, which the calculations actually use */
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha256_key sha256;
+ } key;
};
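/* Hedged sketch of filling the new key union when a seg6_hmac_info is
 * installed, assuming the lib/crypto prepared-key helpers
 * (hmac_sha1_preparekey()/hmac_sha256_preparekey()) that pair with
 * struct hmac_sha1_key/hmac_sha256_key; SEG6_HMAC_ALGO_* come from the
 * UAPI header.
 */
static void example_seg6_prepare_key(struct seg6_hmac_info *hinfo)
{
	switch (hinfo->alg_id) {
	case SEG6_HMAC_ALGO_SHA1:
		hmac_sha1_preparekey(&hinfo->key.sha1,
				     hinfo->secret, hinfo->slen);
		break;
	case SEG6_HMAC_ALGO_SHA256:
		hmac_sha256_preparekey(&hinfo->key.sha256,
				       hinfo->secret, hinfo->slen);
		break;
	}
}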
extern int seg6_hmac_compute(struct seg6_hmac_info *hinfo,
@@ -49,9 +49,12 @@ extern int seg6_hmac_info_del(struct net *net, u32 key);
extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
struct ipv6_sr_hdr *srh);
extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
-extern int seg6_hmac_init(void);
-extern void seg6_hmac_exit(void);
+#ifdef CONFIG_IPV6_SEG6_HMAC
extern int seg6_hmac_net_init(struct net *net);
extern void seg6_hmac_net_exit(struct net *net);
+#else
+static inline int seg6_hmac_net_init(struct net *net) { return 0; }
+static inline void seg6_hmac_net_exit(struct net *net) {}
+#endif
#endif
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 3fab9dec2ec4..888c1ce6f527 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
+ local_lock_t bh_lock;
struct ipv6_sr_hdr *srh;
u16 hdrlen;
bool valid;
diff --git a/include/net/selftests.h b/include/net/selftests.h
index e65e8d230d33..c36e07406ad4 100644
--- a/include/net/selftests.h
+++ b/include/net/selftests.h
@@ -3,9 +3,48 @@
#define _NET_SELFTESTS
#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct net_packet_attrs {
+ const unsigned char *src;
+ const unsigned char *dst;
+ u32 ip_src;
+ u32 ip_dst;
+ bool tcp;
+ u16 sport;
+ u16 dport;
+ int timeout;
+ int size;
+ int max_size;
+ u8 id;
+ u16 queue_mapping;
+ bool bad_csum;
+};
+
+struct net_test_priv {
+ struct net_packet_attrs *packet;
+ struct packet_type pt;
+ struct completion comp;
+ int double_vlan;
+ int vlan_id;
+ int ok;
+};
+
+struct netsfhdr {
+ __be32 version;
+ __be64 magic;
+ u8 id;
+} __packed;
+
+#define NET_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
+ sizeof(struct netsfhdr))
+#define NET_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
+#define NET_LB_TIMEOUT msecs_to_jiffies(200)
#if IS_ENABLED(CONFIG_NET_SELFTESTS)
+struct sk_buff *net_test_get_skb(struct net_device *ndev, u8 id,
+ struct net_packet_attrs *attr);
void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
int net_selftest_get_count(void);
@@ -13,6 +52,12 @@ void net_selftest_get_strings(u8 *data);
#else
+static inline struct sk_buff *net_test_get_skb(struct net_device *ndev, u8 id,
+ struct net_packet_attrs *attr)
+{
+ return NULL;
+}
+
static inline void net_selftest(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
diff --git a/include/net/smc.h b/include/net/smc.h
index db84e4e35080..bfdc4c41f019 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -15,8 +15,10 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include "linux/ism.h"
+#include <linux/dibs.h>
+struct tcp_sock;
+struct inet_request_sock;
struct sock;
#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
@@ -27,62 +29,15 @@ struct smc_hashinfo {
};
/* SMCD/ISM device driver interface */
-struct smcd_dmb {
- u64 dmb_tok;
- u64 rgid;
- u32 dmb_len;
- u32 sba_idx;
- u32 vlan_valid;
- u32 vlan_id;
- void *cpu_addr;
- dma_addr_t dma_addr;
-};
-
-#define ISM_EVENT_DMB 0
-#define ISM_EVENT_GID 1
-#define ISM_EVENT_SWR 2
-
#define ISM_RESERVED_VLANID 0x1FFF
-#define ISM_ERROR 0xFFFF
-
-struct smcd_dev;
-
struct smcd_gid {
u64 gid;
u64 gid_ext;
};
-struct smcd_ops {
- int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid);
- int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
- void *client);
- int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
- int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data,
- unsigned int size);
- int (*supports_v2)(void);
- void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
- u16 (*get_chid)(struct smcd_dev *dev);
- struct device* (*get_dev)(struct smcd_dev *dev);
-
- /* optional operations */
- int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*set_vlan_required)(struct smcd_dev *dev);
- int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info);
- int (*support_dmb_nocopy)(struct smcd_dev *dev);
- int (*attach_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
- int (*detach_dmb)(struct smcd_dev *dev, u64 token);
-};
-
struct smcd_dev {
- const struct smcd_ops *ops;
- void *priv;
- void *client;
+ struct dibs_dev *dibs;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
@@ -97,4 +52,55 @@ struct smcd_dev {
u8 going_away : 1;
};
+#define SMC_HS_CTRL_NAME_MAX 16
+
+enum {
+ /* ops can be inherited from init_net */
+ SMC_HS_CTRL_FLAG_INHERITABLE = 0x1,
+
+ SMC_HS_CTRL_ALL_FLAGS = SMC_HS_CTRL_FLAG_INHERITABLE,
+};
+
+struct smc_hs_ctrl {
+ /* private */
+
+ struct list_head list;
+ struct module *owner;
+
+ /* public */
+
+ /* unique name */
+ char name[SMC_HS_CTRL_NAME_MAX];
+ int flags;
+
+ /* Invoked before computing the SMC option for SYN packets.
+ * The return value controls whether SMC options are set:
+ * return 0 to disable SMC, or any other value to enable it.
+ */
+ int (*syn_option)(struct tcp_sock *tp);
+
+ /* Invoked before setting up SMC options for SYN-ACK packets.
+ * The return value controls whether SMC options are echoed:
+ * return 0 to disable SMC, or any other value to enable it.
+ */
+ int (*synack_option)(const struct tcp_sock *tp,
+ struct inet_request_sock *ireq);
+};
+
+#if IS_ENABLED(CONFIG_SMC_HS_CTRL_BPF)
+#define smc_call_hsbpf(init_val, tp, func, ...) ({ \
+ typeof(init_val) __ret = (init_val); \
+ struct smc_hs_ctrl *ctrl; \
+ rcu_read_lock(); \
+ ctrl = rcu_dereference(sock_net((struct sock *)(tp))->smc.hs_ctrl); \
+ if (ctrl && ctrl->func) \
+ __ret = ctrl->func(tp, ##__VA_ARGS__); \
+ rcu_read_unlock(); \
+ __ret; \
+})
+#else
+#define smc_call_hsbpf(init_val, tp, ...) ({ (void)(tp); (init_val); })
+#endif /* CONFIG_SMC_HS_CTRL_BPF */
+
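/* Hedged sketch: how TCP SYN-side code might consult a registered
 * handshake control; an init_val of 1 keeps SMC advertised when no
 * hs_ctrl is installed. The caller name is illustrative.
 */
static bool example_should_advertise_smc(struct tcp_sock *tp)
{
	return smc_call_hsbpf(1, tp, syn_option) != 0;
}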
#endif /* _SMC_H */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 468a67836e2f..584e70742e9b 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -36,11 +36,6 @@ struct snmp_mib {
.entry = _entry, \
}
-#define SNMP_MIB_SENTINEL { \
- .name = NULL, \
- .entry = 0, \
-}
-
/*
* We use unsigned longs for most mibs but u64 for ipstats.
*/
@@ -159,7 +154,7 @@ struct linux_tls_mib {
#define __SNMP_ADD_STATS64(mib, field, addend) \
do { \
- __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
@@ -176,8 +171,7 @@ struct linux_tls_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
do { \
- __typeof__(*mib) *ptr; \
- ptr = raw_cpu_ptr((mib)); \
+ TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
diff --git a/include/net/sock.h b/include/net/sock.h
index 5f4d0629348f..aafe8bdb2c0f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -118,6 +118,7 @@ typedef __u64 __bitwise __addrpair;
* @skc_reuseport: %SO_REUSEPORT setting
* @skc_ipv6only: socket is IPV6 only
* @skc_net_refcnt: socket is using net ref counting
+ * @skc_bypass_prot_mem: bypass the per-protocol memory accounting for skb
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
@@ -174,6 +175,7 @@ struct sock_common {
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
unsigned char skc_net_refcnt:1;
+ unsigned char skc_bypass_prot_mem:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
@@ -249,6 +251,7 @@ struct sk_filter;
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
+ * @psp_assoc: PSP association, if socket is PSP-secured
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -282,9 +285,11 @@ struct sk_filter;
* @sk_err_soft: errors that don't cause failure but are the cause of a
* persistent failure not just 'timed out'
* @sk_drops: raw/udp drops counter
+ * @sk_drop_counters: optional pointer to numa_drop_counters
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
+ * @sk_ino: inode number (zero if orphaned)
* @sk_prefer_busy_poll: prefer busypolling over softirq processing
* @sk_busy_poll_budget: napi processing budget when busypolling
* @sk_priority: %SO_PRIORITY setting
@@ -300,15 +305,19 @@ struct sk_filter;
* @sk_txrehash: enable TX hash rethink
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
+ * @tcp_retransmit_timer: tcp retransmit timer
+ * @mptcp_retransmit_timer: mptcp retransmit timer
* @sk_stamp: time stamp of last packet received
* @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING flags
+ * @sk_bpf_cb_flags: used in bpf_setsockopt()
* @sk_use_task_frag: allow sk_page_frag() to use current->task_frag.
* Sockets that can be used under memory reclaim should
* set this to false.
* @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
* for timestamping
* @sk_tskey: counter to disambiguate concurrent tstamp requests
+ * @sk_tx_queue_mapping_jiffies: time in jiffies of last @sk_tx_queue_mapping refresh.
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
* @sk_socket: Identd and reporting IO signals
* @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
@@ -336,7 +345,16 @@ struct sk_filter;
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
+ * @sk_scm_recv_flags: all flags used by scm_recv()
+ * @sk_scm_credentials: flagged by SO_PASSCRED to recv SCM_CREDENTIALS
+ * @sk_scm_security: flagged by SO_PASSSEC to recv SCM_SECURITY
+ * @sk_scm_pidfd: flagged by SO_PASSPIDFD to recv SCM_PIDFD
+ * @sk_scm_rights: flagged by SO_PASSRIGHTS to recv SCM_RIGHTS
+ * @sk_scm_unused: unused flags for scm_recv()
* @ns_tracker: tracker for netns reference
+ * @sk_user_frags: xarray of pages the user is holding a reference on.
+ * @sk_owner: reference to the real owner of the socket that calls
+ * sock_lock_init_class_and_name().
*/
struct sock {
/*
@@ -367,6 +385,7 @@ struct sock {
#define sk_reuseport __sk_common.skc_reuseport
#define sk_ipv6only __sk_common.skc_ipv6only
#define sk_net_refcnt __sk_common.skc_net_refcnt
+#define sk_bypass_prot_mem __sk_common.skc_bypass_prot_mem
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
@@ -433,10 +452,15 @@ struct sock {
__cacheline_group_begin(sock_read_rxtx);
int sk_err;
struct socket *sk_socket;
+#ifdef CONFIG_MEMCG
struct mem_cgroup *sk_memcg;
+#endif
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_assoc __rcu *psp_assoc;
+#endif
__cacheline_group_end(sock_read_rxtx);
__cacheline_group_begin(sock_write_rxtx);
@@ -449,7 +473,7 @@ struct sock {
__cacheline_group_begin(sock_write_tx);
int sk_write_pending;
atomic_t sk_omem_alloc;
- int sk_sndbuf;
+ int sk_err_soft;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
@@ -459,21 +483,28 @@ struct sock {
struct rb_root tcp_rtx_queue;
};
struct sk_buff_head sk_write_queue;
- u32 sk_dst_pending_confirm;
- u32 sk_pacing_status; /* see enum sk_pacing */
struct page_frag sk_frag;
- struct timer_list sk_timer;
-
+ union {
+ struct timer_list sk_timer;
+ struct timer_list tcp_retransmit_timer;
+ struct timer_list mptcp_retransmit_timer;
+ };
unsigned long sk_pacing_rate; /* bytes per second */
atomic_t sk_zckey;
atomic_t sk_tskey;
+ unsigned long sk_tx_queue_mapping_jiffies;
__cacheline_group_end(sock_write_tx);
__cacheline_group_begin(sock_read_tx);
+ u32 sk_dst_pending_confirm;
+ u32 sk_pacing_status; /* see enum sk_pacing */
unsigned long sk_max_pacing_rate;
long sk_sndtimeo;
u32 sk_priority;
u32 sk_mark;
+ kuid_t sk_uid;
+ u16 sk_protocol;
+ u16 sk_type;
struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
#ifdef CONFIG_SOCK_VALIDATE_XMIT
@@ -486,6 +517,7 @@ struct sock {
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
u32 sk_txhash;
+ int sk_sndbuf;
u8 sk_pacing_shift;
bool sk_use_task_frag;
__cacheline_group_end(sock_read_tx);
@@ -499,15 +531,12 @@ struct sock {
sk_no_check_tx : 1,
sk_no_check_rx : 1;
u8 sk_shutdown;
- u16 sk_type;
- u16 sk_protocol;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
- kuid_t sk_uid;
+ unsigned long sk_ino;
spinlock_t sk_peer_lock;
int sk_bind_phc;
struct pid *sk_peer_pid;
@@ -519,11 +548,23 @@ struct sock {
#endif
int sk_disconnects;
- u8 sk_txrehash;
+ union {
+ u8 sk_txrehash;
+ u8 sk_scm_recv_flags;
+ struct {
+ u8 sk_scm_credentials : 1,
+ sk_scm_security : 1,
+ sk_scm_pidfd : 1,
+ sk_scm_rights : 1,
+ sk_scm_unused : 4;
+ };
+ };
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
sk_txtime_report_errors : 1,
sk_txtime_unused : 6;
+#define SK_BPF_CB_FLAG_TEST(SK, FLAG) ((SK)->sk_bpf_cb_flags & (FLAG))
+ u8 sk_bpf_cb_flags;
void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -540,8 +581,19 @@ struct sock {
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
+ struct numa_drop_counters *sk_drop_counters;
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
+ struct xarray sk_user_frags;
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+ struct module *sk_owner;
+#endif
+};
+
+struct sock_bh_locked {
+ struct sock *sock;
+ local_lock_t bh_lock;
};
enum sk_pacing {
@@ -786,11 +838,9 @@ static inline bool sk_del_node_init(struct sock *sk)
{
bool rc = __sk_del_node_init(sk);
- if (rc) {
- /* paranoid for a while -acme */
- WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ if (rc)
__sock_put(sk);
- }
+
return rc;
}
#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
@@ -808,14 +858,25 @@ static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
bool rc = __sk_nulls_del_node_init_rcu(sk);
- if (rc) {
- /* paranoid for a while -acme */
- WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ if (rc)
__sock_put(sk);
- }
+
return rc;
}
+static inline bool sk_nulls_replace_node_init_rcu(struct sock *old,
+ struct sock *new)
+{
+ if (sk_hashed(old)) {
+ hlist_nulls_replace_init_rcu(&old->sk_nulls_node,
+ &new->sk_nulls_node);
+ __sock_put(old);
+ return true;
+ }
+
+ return false;
+}
+
static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
hlist_add_head(&sk->sk_node, list);
@@ -887,6 +948,8 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
+#define sk_for_each_bound_safe(__sk, tmp, list) \
+ hlist_for_each_entry_safe(__sk, tmp, list, sk_bind_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -944,9 +1007,17 @@ enum sock_flags {
SOCK_XDP, /* XDP is attached */
SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */
+ SOCK_RCVPRIORITY, /* Receive SO_PRIORITY ancillary data with packet */
+ SOCK_TIMESTAMPING_ANY, /* Copy of sk_tsflags & TSFLAGS_ANY */
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+/*
+ * The highest bit of sk_tsflags is reserved for kernel-internal
+ * SOCKCM_FLAG_TS_OPT_ID. A check in net/core/sock.c ensures that
+ * SOF_TIMESTAMPING* values do not reach this reserved area.
+ */
+#define SOCKCM_FLAG_TS_OPT_ID BIT(31)
static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk)
{
@@ -1208,10 +1279,10 @@ struct proto {
void (*close)(struct sock *sk,
long timeout);
int (*pre_connect)(struct sock *sk,
- struct sockaddr *uaddr,
+ struct sockaddr_unsized *uaddr,
int addr_len);
int (*connect)(struct sock *sk,
- struct sockaddr *uaddr,
+ struct sockaddr_unsized *uaddr,
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
@@ -1240,9 +1311,9 @@ struct proto {
size_t len, int flags, int *addr_len);
void (*splice_eof)(struct socket *sock);
int (*bind)(struct sock *sk,
- struct sockaddr *addr, int addr_len);
+ struct sockaddr_unsized *addr, int addr_len);
int (*bind_add)(struct sock *sk,
- struct sockaddr *addr, int addr_len);
+ struct sockaddr_unsized *addr, int addr_len);
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
@@ -1268,10 +1339,6 @@ struct proto {
unsigned int inuse_idx;
#endif
-#if IS_ENABLED(CONFIG_MPTCP)
- int (*forward_alloc_get)(const struct sock *sk);
-#endif
-
bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
@@ -1306,8 +1373,6 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- unsigned int __percpu *orphan_count;
-
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1332,15 +1397,6 @@ int sock_load_diag_module(int family, int protocol);
INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
-static inline int sk_forward_alloc_get(const struct sock *sk)
-{
-#if IS_ENABLED(CONFIG_MPTCP)
- if (sk->sk_prot->forward_alloc_get)
- return sk->sk_prot->forward_alloc_get(sk);
-#endif
- return READ_ONCE(sk->sk_forward_alloc);
-}
-
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
@@ -1457,6 +1513,10 @@ static inline int __sk_prot_rehash(struct sock *sk)
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
+/**
+ * define SOCK_CONNECT_BIND - &sock->sk_userlocks flag for auto-bind at connect() time
+ */
+#define SOCK_CONNECT_BIND 16
struct socket_alloc {
struct socket socket;
@@ -1512,7 +1572,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
}
static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
{
int delta;
@@ -1520,7 +1580,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
return true;
delta = size - sk->sk_forward_alloc;
return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
- skb_pfmemalloc(skb);
+ pfmemalloc;
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, const struct sk_buff *skb, int size)
+{
+ return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
}
static inline int sk_unused_reserved_mem(const struct sock *sk)
@@ -1570,6 +1636,37 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
sk_mem_reclaim(sk);
}
+void __sk_charge(struct sock *sk, gfp_t gfp);
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING) && IS_ENABLED(CONFIG_MODULES)
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+ __module_get(owner);
+ sk->sk_owner = owner;
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+ sk->sk_owner = NULL;
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+ module_put(sk->sk_owner);
+}
+#else
+static inline void sk_owner_set(struct sock *sk, struct module *owner)
+{
+}
+
+static inline void sk_owner_clear(struct sock *sk)
+{
+}
+
+static inline void sk_owner_put(struct sock *sk)
+{
+}
+#endif
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -1579,13 +1676,14 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
*/
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
+ sk_owner_set(sk, THIS_MODULE); \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
spin_lock_init(&(sk)->sk_lock.slock); \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
- sizeof((sk)->sk_lock)); \
+ sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
@@ -1619,7 +1717,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
* lock_sock_fast - fast version of lock_sock
* @sk: socket
*
- * This version should be used for very small section, where process wont block
+ * This version should be used for very small sections, where the process won't block
* return false if fast path is taken:
*
* sk_lock.slock locked, owned = 0, BH disabled
@@ -1729,9 +1827,14 @@ static inline bool sock_allow_reclassification(const struct sock *csk)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
void sk_free(struct sock *sk);
+void sk_net_refcnt_upgrade(struct sock *sk);
void sk_destruct(struct sock *sk);
-struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
-void sk_free_unlock_clone(struct sock *sk);
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority, bool lock);
+
+static inline struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+{
+ return sk_clone(sk, priority, true);
+}
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority);
@@ -1745,6 +1848,15 @@ void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
void sock_pfree(struct sk_buff *skb);
+
+static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ }
+}
#else
#define sock_edemux sock_efree
#endif
@@ -1774,6 +1886,8 @@ static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
}
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void *sock_kmemdup(struct sock *sk, const void *src,
+ int size, gfp_t priority);
void sock_kfree_s(struct sock *sk, void *mem, int size);
void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
@@ -1789,13 +1903,18 @@ struct sockcm_cookie {
u64 transmit_time;
u32 mark;
u32 tsflags;
+ u32 ts_opt_id;
+ u32 priority;
+ u32 dmabuf_id;
};
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
*sockc = (struct sockcm_cookie) {
- .tsflags = READ_ONCE(sk->sk_tsflags)
+ .mark = READ_ONCE(sk->sk_mark),
+ .tsflags = READ_ONCE(sk->sk_tsflags),
+ .priority = READ_ONCE(sk->sk_priority),
};
}
@@ -1808,8 +1927,8 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
* Functions to fill in entries in struct proto_ops when a protocol
* does not implement a particular function.
*/
-int sock_no_bind(struct socket *, struct sockaddr *, int);
-int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_bind(struct socket *sock, struct sockaddr_unsized *saddr, int len);
+int sock_no_connect(struct socket *sock, struct sockaddr_unsized *saddr, int len, int flags);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, struct proto_accept_arg *);
int sock_no_getname(struct socket *, struct sockaddr *, int);
@@ -1899,7 +2018,15 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* Paired with READ_ONCE() in sk_tx_queue_get() and
* other WRITE_ONCE() because socket lock might be not held.
*/
- WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ if (READ_ONCE(sk->sk_tx_queue_mapping) != tx_queue) {
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+ WRITE_ONCE(sk->sk_tx_queue_mapping_jiffies, jiffies);
+ return;
+ }
+
+ /* Refresh sk_tx_queue_mapping_jiffies if too old. */
+ if (time_is_before_jiffies(READ_ONCE(sk->sk_tx_queue_mapping_jiffies) + HZ))
+ WRITE_ONCE(sk->sk_tx_queue_mapping_jiffies, jiffies);
}
#define NO_QUEUE_MAPPING USHRT_MAX
@@ -1912,19 +2039,7 @@ static inline void sk_tx_queue_clear(struct sock *sk)
WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
-static inline int sk_tx_queue_get(const struct sock *sk)
-{
- if (sk) {
- /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
- * and sk_tx_queue_set().
- */
- int val = READ_ONCE(sk->sk_tx_queue_mapping);
-
- if (val != NO_QUEUE_MAPPING)
- return val;
- }
- return -1;
-}
+int sk_tx_queue_get(const struct sock *sk);
static inline void __sk_rx_queue_set(struct sock *sk,
const struct sk_buff *skb,
@@ -1975,6 +2090,13 @@ static inline int sk_rx_queue_get(const struct sock *sk)
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk->sk_socket = sock;
+ if (sock) {
+ WRITE_ONCE(sk->sk_uid, SOCK_INODE(sock)->i_uid);
+ WRITE_ONCE(sk->sk_ino, SOCK_INODE(sock)->i_ino);
+ } else {
+ /* Note: sk_uid is unchanged. */
+ WRITE_ONCE(sk->sk_ino, 0);
+ }
}
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
@@ -2005,18 +2127,25 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
rcu_assign_pointer(sk->sk_wq, &parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
- sk->sk_uid = SOCK_INODE(parent)->i_uid;
security_sock_graft(sk, parent);
write_unlock_bh(&sk->sk_callback_lock);
}
-kuid_t sock_i_uid(struct sock *sk);
-unsigned long __sock_i_ino(struct sock *sk);
-unsigned long sock_i_ino(struct sock *sk);
+static inline unsigned long sock_i_ino(const struct sock *sk)
+{
+ /* Paired with WRITE_ONCE() in sock_graft() and sock_orphan() */
+ return READ_ONCE(sk->sk_ino);
+}
+
+static inline kuid_t sk_uid(const struct sock *sk)
+{
+ /* Paired with WRITE_ONCE() in sockfs_setattr() */
+ return READ_ONCE(sk->sk_uid);
+}
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
{
- return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
+ return sk ? sk_uid(sk) : make_kuid(net->user_ns, 0);
}
static inline u32 net_tx_rndhash(void)
@@ -2063,17 +2192,10 @@ sk_dst_get(const struct sock *sk)
static inline void __dst_negative_advice(struct sock *sk)
{
- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
-
- if (dst && dst->ops->negative_advice) {
- ndst = dst->ops->negative_advice(dst);
+ struct dst_entry *dst = __sk_dst_get(sk);
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
- WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
+ if (dst && dst->ops->negative_advice)
+ dst->ops->negative_advice(sk, dst);
}
static inline void dst_negative_advice(struct sock *sk)
@@ -2102,7 +2224,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk_tx_queue_clear(sk);
WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst)));
dst_release(old_dst);
}
@@ -2203,6 +2325,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
return 0;
}
+#define SK_WMEM_ALLOC_BIAS 1
/**
* sk_wmem_alloc_get - returns write allocations
* @sk: socket
@@ -2211,7 +2334,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
*/
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
- return refcount_read(&sk->sk_wmem_alloc) - 1;
+ return refcount_read(&sk->sk_wmem_alloc) - SK_WMEM_ALLOC_BIAS;
}
/**
@@ -2273,7 +2396,7 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
}
/**
- * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * sock_poll_wait - wrapper for the poll_wait call.
* @filp: file
* @sock: socket to wait on
* @p: poll_table
@@ -2283,15 +2406,12 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
{
- if (!poll_does_not_wait(p)) {
- poll_wait(filp, &sock->wq.wait, p);
- /* We need to be sure we are in sync with the
- * socket flags modification.
- *
- * This memory barrier is paired in the wq_has_sleeper.
- */
- smp_mb();
- }
+ /* Provides a barrier we need to be sure we are in sync
+ * with the socket flags modification.
+ *
+ * This memory barrier is paired in the wq_has_sleeper.
+ */
+ poll_wait(filp, &sock->wq.wait, p);
}
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
@@ -2499,12 +2619,16 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+static inline bool __sock_writeable(const struct sock *sk, int wmem_alloc)
+{
+ return wmem_alloc < (READ_ONCE(sk->sk_sndbuf) >> 1);
+}
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
static inline bool sock_writeable(const struct sock *sk)
{
- return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
+ return __sock_writeable(sk, refcount_read(&sk->sk_wmem_alloc));
}
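
Factoring out __sock_writeable() lets a caller that has already read sk_wmem_alloc reuse the value; sock_writeable() itself keeps the old semantics (writeable while less than half of sk_sndbuf is in flight). A poll-style usage sketch (function name hypothetical):

	/* Illustrative only: report POLLOUT from the default write policy. */
	static __poll_t example_poll_out(const struct sock *sk)
	{
		return sock_writeable(sk) ? (EPOLLOUT | EPOLLWRNORM) : 0;
	}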
static inline gfp_t gfp_any(void)
@@ -2517,14 +2641,62 @@ static inline gfp_t gfp_memcg_charge(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
+{
+ return sk->sk_memcg;
+}
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+ return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
+}
+
+static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
+
+#ifdef CONFIG_MEMCG_V1
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return !!memcg->tcpmem_pressure;
+#endif /* CONFIG_MEMCG_V1 */
+
+ do {
+ if (time_before64(get_jiffies_64(),
+ mem_cgroup_get_socket_pressure(memcg))) {
+ memcg_memory_event(mem_cgroup_from_sk(sk),
+ MEMCG_SOCK_THROTTLED);
+ return true;
+ }
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ return false;
+}
+#else
+static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
+{
+ return NULL;
+}
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
+{
+ return false;
+}
+#endif
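
The !CONFIG_MEMCG stubs compile to constant false, so callers can test cgroup pressure unconditionally; tcp_under_memory_pressure() later in this patch follows exactly this shape. In sketch form (function name hypothetical):

	/* Illustrative only: cgroup-local pressure first, global state second. */
	static bool example_under_pressure(const struct sock *sk)
	{
		if (mem_cgroup_sk_enabled(sk) &&
		    mem_cgroup_sk_under_memory_pressure(sk))
			return true;

		return READ_ONCE(tcp_memory_pressure);
	}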
+
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_rcvtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_rcvtimeo);
}
static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
- return noblock ? 0 : sk->sk_sndtimeo;
+ return noblock ? 0 : READ_ONCE(sk->sk_sndtimeo);
}
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
@@ -2548,10 +2720,10 @@ struct sock_skb_cb {
/* Store sock_skb_cb at the end of skb->cb[] so protocol families
* using skb->cb[] would keep using it directly and utilize its
- * alignement guarantee.
+ * alignment guarantee.
*/
-#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
- sizeof(struct sock_skb_cb)))
+#define SOCK_SKB_CB_OFFSET (sizeof_field(struct sk_buff, cb) - \
+ sizeof(struct sock_skb_cb))
#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
SOCK_SKB_CB_OFFSET))
@@ -2559,18 +2731,53 @@ struct sock_skb_cb {
#define sock_skb_cb_check_size(size) \
BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+static inline void sk_drops_add(struct sock *sk, int segs)
+{
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc)
+ numa_drop_add(ndc, segs);
+ else
+ atomic_add(segs, &sk->sk_drops);
+}
+
+static inline void sk_drops_inc(struct sock *sk)
+{
+ sk_drops_add(sk, 1);
+}
+
+static inline int sk_drops_read(const struct sock *sk)
+{
+ const struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc) {
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&sk->sk_drops));
+ return numa_drop_read(ndc);
+ }
+ return atomic_read(&sk->sk_drops);
+}
+
+static inline void sk_drops_reset(struct sock *sk)
+{
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc)
+ numa_drop_reset(ndc);
+ atomic_set(&sk->sk_drops, 0);
+}
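
numa_drop_add(), numa_drop_read() and numa_drop_reset() come from elsewhere in the tree; a plausible shape, assuming one atomic counter per NUMA node to avoid cross-node cache-line bouncing (the struct and helpers below are assumptions for illustration, not the actual implementation):

	/* Assumed sketch of per-node drop counters. */
	struct example_numa_drop_counters {
		atomic_t drops[MAX_NUMNODES];
	};

	static inline void example_numa_drop_add(struct example_numa_drop_counters *ndc,
						 int val)
	{
		/* Each CPU touches only its own node's counter. */
		atomic_add(val, &ndc->drops[numa_node_id()]);
	}

	static inline int example_numa_drop_read(const struct example_numa_drop_counters *ndc)
	{
		int node, total = 0;

		for (node = 0; node < MAX_NUMNODES; node++)
			total += atomic_read(&ndc->drops[node]);
		return total;
	}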
+
static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
- atomic_read(&sk->sk_drops) : 0;
+ sk_drops_read(sk) : 0;
}
-static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+static inline void sk_drops_skbadd(struct sock *sk, const struct sk_buff *skb)
{
int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
- atomic_add(segs, &sk->sk_drops);
+ sk_drops_add(sk, segs);
}
static inline ktime_t sock_read_timestamp(struct sock *sk)
@@ -2606,6 +2813,10 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
+bool skb_has_tx_timestamp(struct sk_buff *skb, const struct sock *sk);
+int skb_get_tx_timestamp(struct sk_buff *skb, struct sock *sk,
+ struct timespec64 *ts);
+
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
@@ -2640,12 +2851,13 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
{
#define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_RCVMARK))
+ (1UL << SOCK_RCVMARK) | \
+ (1UL << SOCK_RCVPRIORITY) | \
+ (1UL << SOCK_TIMESTAMPING_ANY))
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS ||
- READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
+ if (READ_ONCE(sk->sk_flags) & FLAGS_RECV_CMSGS)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
@@ -2653,39 +2865,46 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
sock_write_timestamp(sk, 0);
}
-void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
+void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags);
/**
* _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
- * @tsflags: timestamping flags to use
+ * @sockc: pointer to socket cmsg cookie to get timestamping info
* @tx_flags: completed with instructions for time stamping
* @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void _sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags, __u32 *tskey)
{
+ __u32 tsflags = sockc->tsflags;
+
if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
- tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
+ if (tsflags & SOCKCM_FLAG_TS_OPT_ID)
+ *tskey = sockc->ts_opt_id;
+ else
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
+ }
}
- if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
- *tx_flags |= SKBTX_WIFI_STATUS;
}
-static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+static inline void sock_tx_timestamp(struct sock *sk,
+ const struct sockcm_cookie *sockc,
__u8 *tx_flags)
{
- _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+ _sock_tx_timestamp(sk, sockc, tx_flags, NULL);
}
-static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb,
+ const struct sockcm_cookie *sockc)
{
- _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags,
&skb_shinfo(skb)->tskey);
}
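
Passing the whole sockcm_cookie lets a user-supplied SCM_TS_OPT_ID (carried as SOCKCM_FLAG_TS_OPT_ID/ts_opt_id) override the per-socket sk_tskey counter. A typical datagram send-path sketch (function name hypothetical):

	/* Illustrative only: the cookie flows from cmsgs into skb tx metadata. */
	static int example_send_setup(struct sock *sk, struct msghdr *msg,
				      struct sk_buff *skb)
	{
		struct sockcm_cookie sockc;
		int err;

		sockcm_init(&sockc, sk);	/* seeds tsflags from the socket */
		if (msg->msg_controllen) {
			err = sock_cmsg_send(sk, msg, &sockc);
			if (err)
				return err;
		}
		skb_setup_tx_timestamp(skb, &sockc);
		return 0;
	}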
@@ -2710,9 +2929,26 @@ static inline bool sk_is_udp(const struct sock *sk)
sk->sk_protocol == IPPROTO_UDP;
}
+static inline bool sk_is_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX;
+}
+
static inline bool sk_is_stream_unix(const struct sock *sk)
{
- return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ return sk_is_unix(sk) && sk->sk_type == SOCK_STREAM;
+}
+
+static inline bool sk_is_vsock(const struct sock *sk)
+{
+ return sk->sk_family == AF_VSOCK;
+}
+
+static inline bool sk_may_scm_recv(const struct sock *sk)
+{
+ return (IS_ENABLED(CONFIG_UNIX) && sk->sk_family == AF_UNIX) ||
+ sk->sk_family == AF_NETLINK ||
+ (IS_ENABLED(CONFIG_BT) && sk->sk_family == AF_BLUETOOTH);
}
/**
@@ -2754,26 +2990,10 @@ sk_is_refcounted(struct sock *sk)
return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
}
-/* Checks if this SKB belongs to an HW offloaded socket
- * and whether any SW fallbacks are required based on dev.
- * Check decrypted mark in case skb_orphan() cleared socket.
- */
-static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
- struct net_device *dev)
+static inline bool
+sk_requests_wifi_status(struct sock *sk)
{
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sock *sk = skb->sk;
-
- if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
- skb = sk->sk_validate_xmit_skb(sk, dev, skb);
- } else if (unlikely(skb_is_decrypted(skb))) {
- pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
- kfree_skb(skb);
- skb = NULL;
- }
-#endif
-
- return skb;
+ return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS);
}
/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
@@ -2784,6 +3004,16 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
+/* This helper checks if a socket is a LISTEN, NEW_SYN_RECV or TIME_WAIT socket.
+ * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on SYNCOOKIE);
+ * TCP RST and ACK can be attached to TIME_WAIT.
+ */
+static inline bool sk_listener_or_tw(const struct sock *sk)
+{
+ return (1 << READ_ONCE(sk->sk_state)) &
+ (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT);
+}
+
void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
@@ -2802,14 +3032,12 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
*/
#define _SK_MEM_PACKETS 256
#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
-#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
-#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_WMEM_DEFAULT (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_DEFAULT (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-extern int sysctl_tstamp_allow_data;
-
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
@@ -2872,7 +3100,13 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
int sock_set_timestamping(struct sock *sk, int optname,
struct so_timestamping timestamping);
-void sock_enable_timestamps(struct sock *sk);
+#if defined(CONFIG_CGROUP_BPF)
+void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op);
+#else
+static inline void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
+{
+}
+#endif
void sock_no_linger(struct sock *sk);
void sock_set_keepalive(struct sock *sk);
void sock_set_priority(struct sock *sk, u32 priority);
@@ -2882,7 +3116,7 @@ void sock_set_reuseaddr(struct sock *sk);
void sock_set_reuseport(struct sock *sk);
void sock_set_sndtimeo(struct sock *sk, s64 secs);
-int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+int sock_bind_add(struct sock *sk, struct sockaddr_unsized *addr, int addr_len);
int sock_get_timeout(long timeo, void *optval, bool old_timeval);
int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
@@ -2893,8 +3127,11 @@ int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
static inline bool sk_is_readable(struct sock *sk)
{
- if (sk->sk_prot->sock_is_readable)
- return sk->sk_prot->sock_is_readable(sk);
+ const struct proto *prot = READ_ONCE(sk->sk_prot);
+
+ if (prot->sock_is_readable)
+ return prot->sock_is_readable(sk);
+
return false;
}
#endif /* _SOCK_H */
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 6ec140b0a61b..6e4faf3ee76f 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -26,7 +26,7 @@ struct sock_reuseport {
unsigned int bind_inany:1;
unsigned int has_conns:1;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
- struct sock *socks[]; /* array of sock pointers */
+ struct sock *socks[] __counted_by(max_socks);
};
extern int reuseport_alloc(struct sock *sk, bool bind_inany);
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 41e2ce9e9e10..0ed73e364faa 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -43,6 +43,8 @@ struct strparser;
struct strp_callbacks {
int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
+ int (*read_sock)(struct strparser *strp, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
int (*read_sock_done)(struct strparser *strp, int err);
void (*abort_parser)(struct strparser *strp, int err);
void (*lock)(struct strparser *strp);
@@ -112,8 +114,6 @@ static inline void strp_pause(struct strparser *strp)
/* May be called without holding lock for attached socket */
void strp_unpause(struct strparser *strp);
-/* Must be called with process lock held (lock_sock) */
-void __strp_unpause(struct strparser *strp);
static inline void save_strp_stats(struct strparser *strp,
struct strp_aggr_stats *agg_stats)
diff --git a/include/net/tc_act/tc_connmark.h b/include/net/tc_act/tc_connmark.h
index e8dd77a96748..a5ce83f3eea4 100644
--- a/include/net/tc_act/tc_connmark.h
+++ b/include/net/tc_act/tc_connmark.h
@@ -7,6 +7,7 @@
struct tcf_connmark_parms {
struct net *net;
u16 zone;
+ int action;
struct rcu_head rcu;
};
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 68269e4581b7..8d0c7a9f9345 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -8,6 +8,7 @@
struct tcf_csum_params {
u32 update_flags;
+ int action;
struct rcu_head rcu;
};
@@ -18,15 +19,6 @@ struct tcf_csum {
};
#define to_tcf_csum(a) ((struct tcf_csum *)a)
-static inline bool is_tcf_csum(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_CSUM)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_csum_update_flags(const struct tc_action *a)
{
u32 update_flags;
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index 77f87c622a2e..8b90c86c0b0d 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -13,7 +13,7 @@ struct tcf_ct_params {
struct nf_conntrack_helper *helper;
struct nf_conn *tmpl;
u16 zone;
-
+ int action;
u32 mark;
u32 mark_mask;
@@ -92,13 +92,4 @@ static inline void
tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
#endif
-static inline bool is_tcf_ct(const struct tc_action *a)
-{
-#if defined(CONFIG_NET_CLS_ACT) && IS_ENABLED(CONFIG_NF_CONNTRACK)
- if (a->ops && a->ops->id == TCA_ID_CT)
- return true;
-#endif
- return false;
-}
-
#endif /* __NET_TC_CT_H */
diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h
index f071c1d70a25..7fe01ab236da 100644
--- a/include/net/tc_act/tc_ctinfo.h
+++ b/include/net/tc_act/tc_ctinfo.h
@@ -7,6 +7,7 @@
struct tcf_ctinfo_params {
struct rcu_head rcu;
struct net *net;
+ int action;
u32 dscpmask;
u32 dscpstatemask;
u32 cpmarkmask;
@@ -18,9 +19,9 @@ struct tcf_ctinfo_params {
struct tcf_ctinfo {
struct tc_action common;
struct tcf_ctinfo_params __rcu *params;
- u64 stats_dscp_set;
- u64 stats_dscp_error;
- u64 stats_cpmark_set;
+ atomic64_t stats_dscp_set;
+ atomic64_t stats_dscp_error;
+ atomic64_t stats_cpmark_set;
};
enum {
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index c8fa11ebb397..c1a67149c6b6 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -51,15 +51,6 @@ struct tcf_gate {
#define to_gate(a) ((struct tcf_gate *)a)
-static inline bool is_tcf_gate(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_GATE)
- return true;
-#endif
- return false;
-}
-
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
s32 tcfg_prio;
diff --git a/include/net/tc_act/tc_mpls.h b/include/net/tc_act/tc_mpls.h
index 721de4f5733a..dd067bd4018d 100644
--- a/include/net/tc_act/tc_mpls.h
+++ b/include/net/tc_act/tc_mpls.h
@@ -10,6 +10,7 @@
struct tcf_mpls_params {
int tcfm_action;
u32 tcfm_label;
+ int action; /* tcf_action */
u8 tcfm_tc;
u8 tcfm_ttl;
u8 tcfm_bos;
@@ -27,15 +28,6 @@ struct tcf_mpls {
};
#define to_mpls(a) ((struct tcf_mpls *)a)
-static inline bool is_tcf_mpls(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_MPLS)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_mpls_action(const struct tc_action *a)
{
u32 tcfm_action;
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index c869274ac529..ae35f4009445 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -6,6 +6,7 @@
#include <net/act_api.h>
struct tcf_nat_parms {
+ int action;
__be32 old_addr;
__be32 new_addr;
__be32 mask;
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 83fe39931781..f58ee15cd858 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -14,6 +14,7 @@ struct tcf_pedit_key_ex {
struct tcf_pedit_parms {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
+ int action;
u32 tcfp_off_max_hint;
unsigned char tcfp_nkeys;
unsigned char tcfp_flags;
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 283bde711a42..a89fc8e68b1e 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -5,10 +5,11 @@
#include <net/act_api.h>
struct tcf_police_params {
+ int action;
int tcfp_result;
u32 tcfp_ewma_rate;
- s64 tcfp_burst;
u32 tcfp_mtu;
+ s64 tcfp_burst;
s64 tcfp_mtu_ptoks;
s64 tcfp_pkt_burst;
struct psched_ratecfg rate;
@@ -44,15 +45,6 @@ struct tc_police_compat {
struct tc_ratespec peakrate;
};
-static inline bool is_tcf_police(const struct tc_action *act)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (act->ops && act->ops->id == TCA_ID_POLICE)
- return true;
-#endif
- return false;
-}
-
static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
{
struct tcf_police *police = to_police(act);
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index b5d76305e854..abd163ca1864 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -17,15 +17,6 @@ struct tcf_sample {
};
#define to_sample(a) ((struct tcf_sample *)a)
-static inline bool is_tcf_sample(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return a->ops && a->ops->id == TCA_ID_SAMPLE;
-#else
- return false;
-#endif
-}
-
static inline __u32 tcf_sample_rate(const struct tc_action *a)
{
return to_sample(a)->rate;
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 9649600fb3dc..31b2cd0bebb5 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -12,6 +12,7 @@
#include <linux/tc_act/tc_skbedit.h>
struct tcf_skbedit_params {
+ int action;
u32 flags;
u32 priority;
u32 mark;
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
index 7c240d2fed4e..626704cd6241 100644
--- a/include/net/tc_act/tc_skbmod.h
+++ b/include/net/tc_act/tc_skbmod.h
@@ -12,6 +12,7 @@
struct tcf_skbmod_params {
struct rcu_head rcu;
u64 flags; /* up to 64 types of operations; extend if needed */
+ int action;
u8 eth_dst[ETH_ALEN];
u16 eth_type;
u8 eth_src[ETH_ALEN];
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 879fe8cff581..0f1925f97520 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -14,6 +14,7 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
+ int action;
struct metadata_dst *tcft_enc_metadata;
};
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 904eddfc1826..beadee41669a 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -10,6 +10,7 @@
#include <linux/tc_act/tc_vlan.h>
struct tcf_vlan_params {
+ int action;
int tcfv_action;
unsigned char tcfv_push_dst[ETH_ALEN];
unsigned char tcfv_push_src[ETH_ALEN];
@@ -26,15 +27,6 @@ struct tcf_vlan {
};
#define to_vlan(a) ((struct tcf_vlan *)a)
-static inline bool is_tcf_vlan(const struct tc_action *a)
-{
-#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->id == TCA_ID_VLAN)
- return true;
-#endif
- return false;
-}
-
static inline u32 tcf_vlan_action(const struct tc_action *a)
{
u32 tcfv_action;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 060e95b331a2..0deb5e9dd911 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -26,6 +26,7 @@
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
+#include <linux/bits.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -41,6 +42,7 @@
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
+#include <net/xfrm.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@@ -52,6 +54,16 @@ extern struct inet_hashinfo tcp_hashinfo;
DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);
+static inline void tcp_orphan_count_inc(void)
+{
+ this_cpu_inc(tcp_orphan_count);
+}
+
+static inline void tcp_orphan_count_dec(void)
+{
+ this_cpu_dec(tcp_orphan_count);
+}
+
DECLARE_PER_CPU(u32, tcp_tw_isn);
void tcp_time_wait(struct sock *sk, int state, int timeo);
@@ -88,6 +100,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE 14U
+/* Default sending frequency of accurate ECN option per RTT */
+#define TCP_ACCECN_OPTION_BEACON 3
+
/* urg_data states */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
@@ -143,8 +158,9 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCP_DELACK_MIN 4U
#define TCP_ATO_MIN 4U
#endif
-#define TCP_RTO_MAX ((unsigned)(120*HZ))
-#define TCP_RTO_MIN ((unsigned)(HZ/5))
+#define TCP_RTO_MAX_SEC 120
+#define TCP_RTO_MAX ((unsigned)(TCP_RTO_MAX_SEC * HZ))
+#define TCP_RTO_MIN ((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
@@ -200,6 +216,8 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
+#define TCPOPT_ACCECN0 172 /* 0xAC: Accurate ECN Order 0 */
+#define TCPOPT_ACCECN1 174 /* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -217,6 +235,7 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
+#define TCPOLEN_ACCECN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_EXP_SMC_BASE 6
@@ -230,6 +249,14 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
+#define TCPOLEN_ACCECN_PERFIELD 3
+
+/* Number of byte counters in the AccECN option, and its maximum size */
+#define TCP_ACCECN_NUMFIELDS 3
+#define TCP_ACCECN_MAXSIZE (TCPOLEN_ACCECN_BASE + \
+ TCPOLEN_ACCECN_PERFIELD * \
+ TCP_ACCECN_NUMFIELDS)
+#define TCP_ACCECN_SAFETY_SHIFT 1 /* SAFETY_FACTOR in accecn draft */
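
From the defines above, the maximum works out to an 11-byte option:

	/* TCP_ACCECN_MAXSIZE = TCPOLEN_ACCECN_BASE (kind + length = 2)
	 *		      + TCPOLEN_ACCECN_PERFIELD (3) * TCP_ACCECN_NUMFIELDS (3)
	 *		      = 11 bytes on the wire.
	 */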
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
@@ -264,7 +291,6 @@ extern long sysctl_tcp_mem[3];
#define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
-extern atomic_long_t tcp_memory_allocated;
DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
extern struct percpu_counter tcp_sockets_allocated;
@@ -273,10 +299,13 @@ extern unsigned long tcp_memory_pressure;
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
- if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ if (mem_cgroup_sk_enabled(sk) &&
+ mem_cgroup_sk_under_memory_pressure(sk))
return true;
+ if (sk->sk_bypass_prot_mem)
+ return false;
+
return READ_ONCE(tcp_memory_pressure);
}
/*
@@ -318,7 +347,7 @@ extern struct proto tcp_prot;
#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
-void tcp_tasklet_init(void);
+void tcp_tsq_work_init(void);
int tcp_v4_err(struct sk_buff *skb, u32);
@@ -344,6 +373,7 @@ void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
@@ -371,31 +401,71 @@ static inline void tcp_dec_quickack_mode(struct sock *sk)
}
}
-#define TCP_ECN_OK 1
-#define TCP_ECN_QUEUE_CWR 2
-#define TCP_ECN_DEMAND_CWR 4
-#define TCP_ECN_SEEN 8
+#define TCP_ECN_MODE_RFC3168 BIT(0)
+#define TCP_ECN_QUEUE_CWR BIT(1)
+#define TCP_ECN_DEMAND_CWR BIT(2)
+#define TCP_ECN_SEEN BIT(3)
+#define TCP_ECN_MODE_ACCECN BIT(4)
+
+#define TCP_ECN_DISABLED 0
+#define TCP_ECN_MODE_PENDING (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
+#define TCP_ECN_MODE_ANY (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
+
+static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
+{
+ return tp->ecn_flags & TCP_ECN_MODE_ANY;
+}
+
+static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
+}
+
+static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
+}
+
+static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
+{
+ return !tcp_ecn_mode_any(tp);
+}
+
+static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
+{
+ return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
+}
+
+static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
+{
+ tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
+ tp->ecn_flags |= mode;
+}
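
The two mode bits give four states, and tcp_ecn_mode_set() always clears both before installing the new one. A usage sketch (the call sites shown are assumptions for illustration):

	/* RFC3168 | ACCECN  ->  meaning
	 *    0        0         TCP_ECN_DISABLED
	 *    1        0         classic RFC 3168 ECN negotiated
	 *    0        1         Accurate ECN negotiated
	 *    1        1         TCP_ECN_MODE_PENDING: outcome not yet known
	 */
	tcp_ecn_mode_set(tp, TCP_ECN_MODE_PENDING);	/* e.g. while negotiating */
	tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);	/* e.g. peer confirmed AccECN */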
enum tcp_tw_status {
TCP_TW_SUCCESS = 0,
TCP_TW_RST = 1,
TCP_TW_ACK = 2,
- TCP_TW_SYN = 3
+ TCP_TW_SYN = 3,
+ TCP_TW_ACK_OOW = 4
};
enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
struct sk_buff *skb,
const struct tcphdr *th,
- u32 *tw_isn);
+ u32 *tw_isn,
+ enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req, bool fastopen,
- bool *lost_race);
+ bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_pacing_rate(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
@@ -415,6 +485,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen);
+void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -464,7 +535,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req_unhash,
bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
TCP_SYNACK_NORMAL,
@@ -666,7 +737,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority,
enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
-void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -677,11 +748,25 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
+void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);
+static inline void tcp_cleanup_skb(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ secpath_reset(skb);
+}
+
+static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
+{
+ DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
+ DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+}
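
The intended order is to strip the dst and secpath once, as early as possible, so the DEBUG_NET_WARN_ON_ONCE() checks can enforce that nothing still holds those references by queueing time. A sketch (function name hypothetical):

	/* Illustrative only: receive-path queueing with the new helpers. */
	static void example_queue_rcv(struct sock *sk, struct sk_buff *skb)
	{
		tcp_cleanup_skb(skb);		/* drop dst + secpath references */
		tcp_add_receive_queue(sk, skb);	/* warns if any reference survived */
	}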
+
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
@@ -728,6 +813,9 @@ void tcp_get_info(struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor, bool noack,
+ u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);
@@ -738,42 +826,27 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
-static inline void tcp_bound_rto(struct sock *sk)
+static inline unsigned int tcp_rto_max(const struct sock *sk)
{
- if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
- inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+ return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}
-static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
+static inline void tcp_bound_rto(struct sock *sk)
{
- return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
+ inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}
-static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
- /* mptcp hooks are only on the slow path */
- if (sk_is_mptcp((struct sock *)tp))
- return;
-
- tp->pred_flags = htonl((tp->tcp_header_len << 26) |
- ntohl(TCP_FLAG_ACK) |
- snd_wnd);
+ return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
-static inline void tcp_fast_path_on(struct tcp_sock *tp)
+static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
- __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
-}
+ u64 timeout = (u64)req->timeout << req->num_timeout;
-static inline void tcp_fast_path_check(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
- tp->rcv_wnd &&
- atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
- !tp->urg_data)
- tcp_fast_path_on(tp);
+ return (unsigned long)min_t(u64, timeout,
+ tcp_rto_max(req->rsk_listener));
}
u32 tcp_delack_max(const struct sock *sk);
@@ -782,7 +855,7 @@ u32 tcp_delack_max(const struct sock *sk);
static inline u32 tcp_rto_min(const struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = inet_csk(sk)->icsk_rto_min;
+ u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -910,16 +983,35 @@ static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
-#define TCPHDR_FIN 0x01
-#define TCPHDR_SYN 0x02
-#define TCPHDR_RST 0x04
-#define TCPHDR_PSH 0x08
-#define TCPHDR_ACK 0x10
-#define TCPHDR_URG 0x20
-#define TCPHDR_ECE 0x40
-#define TCPHDR_CWR 0x80
-
+#define TCPHDR_FIN BIT(0)
+#define TCPHDR_SYN BIT(1)
+#define TCPHDR_RST BIT(2)
+#define TCPHDR_PSH BIT(3)
+#define TCPHDR_ACK BIT(4)
+#define TCPHDR_URG BIT(5)
+#define TCPHDR_ECE BIT(6)
+#define TCPHDR_CWR BIT(7)
+#define TCPHDR_AE BIT(8)
+#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
+ TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
+ TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
+#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
+ TCPHDR_FLAGS_MASK)
+
+#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+#define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)
+
+#define TCP_ACCECN_CEP_ACE_MASK 0x7
+#define TCP_ACCECN_ACE_MAX_DELTA 6
+
+/* To avoid/detect middlebox interference, not all counters start at 0.
+ * See draft-ietf-tcpm-accurate-ecn for the latest values.
+ */
+#define TCP_ACCECN_CEP_INIT_OFFSET 5
+#define TCP_ACCECN_E1B_INIT_OFFSET 1
+#define TCP_ACCECN_E0B_INIT_OFFSET 1
+#define TCP_ACCECN_CEB_INIT_OFFSET 0
/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
@@ -953,14 +1045,16 @@ struct tcp_skb_cb {
u16 tcp_gso_size;
};
};
- __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+ __u16 tcp_flags; /* TCP header flags (tcp[12-13]) */
__u8 sacked; /* State flags for SACK. */
__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
- __u8 txstamp_ack:1, /* Record TX timestamp for ack? */
+#define TSTAMP_ACK_SK 0x1
+#define TSTAMP_ACK_BPF 0x2
+ __u8 txstamp_ack:2, /* Record TX timestamp for ack? */
eor:1, /* Is skb MSG_EOR marked? */
has_rxtstamp:1, /* SKB has a RX timestamp */
- unused:5;
+ unused:4;
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
@@ -1065,9 +1159,18 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
const struct sk_buff *from)
{
+ /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
- skb_pure_zcopy_same(to, from));
+ skb_pure_zcopy_same(to, from) &&
+ skb_frags_readable(to) == skb_frags_readable(from));
+}
+
+static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(mptcp_skb_can_collapse(to, from) &&
+ !skb_cmp_decrypted(to, from));
}
/* Events passed to congestion control interface */
@@ -1097,9 +1200,9 @@ enum tcp_ca_ack_event_flags {
#define TCP_CA_UNSPEC 0
/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
-#define TCP_CONG_NON_RESTRICTED 0x1
+#define TCP_CONG_NON_RESTRICTED BIT(0)
/* Requires ECN/ECT set on all packets */
-#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_NEEDS_ECN BIT(1)
#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
union tcp_cc_info;
@@ -1215,7 +1318,7 @@ extern struct tcp_congestion_ops tcp_reno;
struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
@@ -1413,10 +1516,12 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
unsigned long when,
- const unsigned long max_when)
+ bool pace_delay)
{
- inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
- max_when);
+ if (pace_delay)
+ when += tcp_pacing_delay(sk);
+ inet_csk_reset_xmit_timer(sk, what, when,
+ tcp_rto_max(sk));
}
/* Something is really bad, we could not queue an additional packet,
@@ -1445,7 +1550,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_probe0_base(sk), true);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -1477,7 +1582,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason);
-int tcp_filter(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);
@@ -1715,6 +1820,40 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+{
+ u32 ace;
+
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
+ ace = tcp_ecn_mode_accecn(tp) ?
+ ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
+ TCP_ACCECN_CEP_ACE_MASK) : 0;
+
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ (ace << 22) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
+}
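
With AccECN, the fast-path prediction word now also carries the expected ACE counter. The layout of pred_flags (host order, before htonl()), as a reading aid:

	/* tcp_header_len << 26  -- doff: length in bytes << 26 == (len / 4) << 28
	 * ace << 22             -- expected AE/CWR/ECE counter (AccECN mode only)
	 * TCP_FLAG_ACK          -- only a plain ACK matches the fast path
	 * snd_wnd               -- expected on-wire window in the low 16 bits
	 */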
+
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
+{
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
+}
+
+static inline void tcp_fast_path_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
+ tp->rcv_wnd &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ !tp->urg_data)
+ tcp_fast_path_on(tp);
+}
+
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
int mib_idx, u32 *last_oow_ack_time);
@@ -1728,14 +1867,8 @@ static inline void tcp_mib_init(struct net *net)
}
/* from STCP */
-static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
-{
- tp->lost_skb_hint = NULL;
-}
-
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
- tcp_clear_retrans_hints_partial(tp);
tp->retransmit_skb_hint = NULL;
}
@@ -1776,13 +1909,6 @@ struct tcp6_pseudohdr {
__be32 protocol; /* including padding */
};
-union tcp_md5sum_block {
- struct tcp4_pseudohdr ip4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct tcp6_pseudohdr ip6;
-#endif
-};
-
/*
* struct tcp_sigpool - per-CPU pool of ahash_requests
* @scratch: per-CPU temporary area, that can be used between
@@ -1807,7 +1933,7 @@ int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
* @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
* @c: returned tcp_sigpool for usage (uninitialized on failure)
*
- * Returns 0 on success, error otherwise.
+ * Returns: 0 on success, error otherwise.
*/
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
@@ -1817,8 +1943,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
/* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
- const struct sock *sk, const struct sk_buff *skb);
+void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen);
@@ -1854,13 +1980,8 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}
-enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location);
-
-
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
+void tcp_md5_destruct_sock(struct sock *sk);
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
@@ -1876,23 +1997,16 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
return NULL;
}
-static inline enum skb_drop_reason
-tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int l3index, const __u8 *hash_location)
+#define tcp_twsk_md5_key(twsk) NULL
+static inline void tcp_md5_destruct_sock(struct sock *sk)
{
- return SKB_NOT_DROPPED_YET;
}
-#define tcp_twsk_md5_key(twsk) NULL
#endif
-int tcp_md5_alloc_sigpool(void);
-void tcp_md5_release_sigpool(void);
-void tcp_md5_add_sigpool(void);
-extern int tcp_md5_sigpool_id;
-
-int tcp_md5_hash_key(struct tcp_sigpool *hp,
- const struct tcp_md5sig_key *key);
+struct md5_ctx;
+void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
+ unsigned int header_len);
+void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
@@ -2094,6 +2208,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
tcp_wmem_free_skb(sk, skb);
}
+static inline void tcp_write_collapse_fence(struct sock *sk)
+{
+ struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+}
+
static inline void tcp_push_pending_frames(struct sock *sk)
{
if (tcp_send_head(sk)) {
@@ -2191,7 +2313,6 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct tcphdr *th);
@@ -2234,7 +2355,7 @@ struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
const struct sock *addr_sk);
- int (*calc_md5_hash)(char *location,
+ void (*calc_md5_hash)(char *location,
const struct tcp_md5sig_key *md5,
const struct sock *sk,
const struct sk_buff *skb);
@@ -2262,7 +2383,7 @@ struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
const struct sock *addr_sk);
- int (*calc_md5_hash) (char *location,
+ void (*calc_md5_hash) (char *location,
const struct tcp_md5sig_key *md5,
const struct sock *sk,
const struct sk_buff *skb);
@@ -2369,21 +2490,15 @@ static inline void tcp_get_current_key(const struct sock *sk,
static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_MD5SIG
- if (static_branch_unlikely(&tcp_md5_needed.key) &&
- key->type == TCP_KEY_MD5)
- return true;
-#endif
+ if (static_branch_tcp_md5())
+ return key->type == TCP_KEY_MD5;
return false;
}
static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
-#ifdef CONFIG_TCP_AO
- if (static_branch_unlikely(&tcp_ao_needed.key) &&
- key->type == TCP_KEY_AO)
- return true;
-#endif
+ if (static_branch_tcp_ao())
+ return key->type == TCP_KEY_AO;
return false;
}
@@ -2431,14 +2546,35 @@ void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
+static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
+{
+ WARN_ONCE(cond,
+ "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
+ str,
+ tcp_snd_cwnd(tcp_sk(sk)),
+ tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
+ tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
+ tcp_sk(sk)->tlp_high_seq, sk->sk_state,
+ inet_csk(sk)->icsk_ca_state,
+ tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
+ inet_csk(sk)->icsk_pmtu_cookie);
+}
+
/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
const struct sk_buff *skb = tcp_rtx_queue_head(sk);
u32 rto = inet_csk(sk)->icsk_rto;
- u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
- return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ if (likely(skb)) {
+ u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
+
+ return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
+ } else {
+ tcp_warn_once(sk, 1, "rtx queue empty: ");
+ return jiffies_to_usecs(rto);
+ }
}
/*
@@ -2526,7 +2662,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
*/
static inline void tcp_listendrop(const struct sock *sk)
{
- atomic_inc(&((struct sock *)sk)->sk_drops);
+ sk_drops_inc((struct sock *)sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}
@@ -2551,8 +2687,8 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(struct sock *sk, struct sk_buff *skb);
- size_t (*get_info_size)(const struct sock *sk);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
+ size_t (*get_info_size)(const struct sock *sk, bool net_admin);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
const gfp_t priority);
@@ -2569,8 +2705,8 @@ void tcp_update_ulp(struct sock *sk, struct proto *p,
void (*write_space)(struct sock *sk));
#define MODULE_ALIAS_TCP_ULP(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+ MODULE_INFO(alias, name); \
+ MODULE_INFO(alias, "tcp-ulp-" name)
#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
@@ -2579,6 +2715,11 @@ struct sk_psock;
#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
+#ifdef CONFIG_BPF_STREAM_PARSER
+struct strparser;
+int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
+ sk_read_actor_t recv_actor);
+#endif /* CONFIG_BPF_STREAM_PARSER */
#endif /* CONFIG_BPF_SYSCALL */
#ifdef CONFIG_INET
@@ -2629,6 +2770,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
if (sk_fullsock(sk)) {
sock_ops.is_fullsock = 1;
+ sock_ops.is_locked_tcp_sock = 1;
sock_owned_by_me(sk);
}
@@ -2717,9 +2859,9 @@ extern struct static_key_false tcp_have_smc;
#endif
#if IS_ENABLED(CONFIG_TLS_DEVICE)
-void clean_acked_data_enable(struct inet_connection_sock *icsk,
+void clean_acked_data_enable(struct tcp_sock *tp,
void (*cad)(struct sock *sk, u32 ack_seq));
-void clean_acked_data_disable(struct inet_connection_sock *icsk);
+void clean_acked_data_disable(struct tcp_sock *tp);
void clean_acked_data_flush(void);
#endif
@@ -2795,66 +2937,9 @@ static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
return false;
}
-/* Called with rcu_read_lock() */
-static inline enum skb_drop_reason
-tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
- const struct sk_buff *skb,
- const void *saddr, const void *daddr,
- int family, int dif, int sdif)
-{
- const struct tcphdr *th = tcp_hdr(skb);
- const struct tcp_ao_hdr *aoh;
- const __u8 *md5_location;
- int l3index;
-
- /* Invalid option or two times meet any of auth options */
- if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
- tcp_hash_fail("TCP segment has incorrect auth options set",
- family, skb, "");
- return SKB_DROP_REASON_TCP_AUTH_HDR;
- }
-
- if (req) {
- if (tcp_rsk_used_ao(req) != !!aoh) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
- tcp_hash_fail("TCP connection can't start/end using TCP-AO",
- family, skb, "%s",
- !aoh ? "missing AO" : "AO signed");
- return SKB_DROP_REASON_TCP_AOFAILURE;
- }
- }
-
- /* sdif set, means packet ingressed via a device
- * in an L3 domain and dif is set to the l3mdev
- */
- l3index = sdif ? dif : 0;
-
- /* Fast path: unsigned segments */
- if (likely(!md5_location && !aoh)) {
- /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
- * for the remote peer. On TCP-AO established connection
- * the last key is impossible to remove, so there's
- * always at least one current_key.
- */
- if (tcp_ao_required(sk, saddr, family, l3index, true)) {
- tcp_hash_fail("AO hash is required, but not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_AONOTFOUND;
- }
- if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- tcp_hash_fail("MD5 Hash not found",
- family, skb, "L3 index %d", l3index);
- return SKB_DROP_REASON_TCP_MD5NOTFOUND;
- }
- return SKB_NOT_DROPPED_YET;
- }
-
- if (aoh)
- return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
-
- return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
- l3index, md5_location);
-}
+enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
+ const struct request_sock *req, const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif);
#endif /* _TCP_H */
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 471e177362b4..1e9e27d6e06b 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -19,6 +19,11 @@ struct tcp_ao_hdr {
u8 rnext_keyid;
};
+static inline u8 tcp_ao_hdr_maclen(const struct tcp_ao_hdr *aoh)
+{
+ return aoh->length - sizeof(struct tcp_ao_hdr);
+}
+
struct tcp_ao_counters {
atomic64_t pkt_good;
atomic64_t pkt_bad;
@@ -86,7 +91,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
struct tcp_ao_info {
/* List of tcp_ao_key's */
struct hlist_head head;
- /* current_key and rnext_key aren't maintained on listen sockets.
+ /* current_key and rnext_key are maintained on sockets
+ * in TCP_AO_ESTABLISHED states.
* Their purpose is to cache keys on established connections,
* saving needless lookups. Never dereference any of them from
* listen sockets.
@@ -124,7 +130,6 @@ struct tcp_ao_info {
u32 snd_sne;
u32 rcv_sne;
refcount_t refcnt; /* Protects twsk destruction */
- struct rcu_head rcu;
};
#ifdef CONFIG_TCP_MD5SIG
@@ -143,43 +148,6 @@ extern struct static_key_false_deferred tcp_ao_needed;
#define static_branch_tcp_ao() false
#endif
-static inline bool tcp_hash_should_produce_warnings(void)
-{
- return static_branch_tcp_md5() || static_branch_tcp_ao();
-}
-
-#define tcp_hash_fail(msg, family, skb, fmt, ...) \
-do { \
- const struct tcphdr *th = tcp_hdr(skb); \
- char hdr_flags[6]; \
- char *f = hdr_flags; \
- \
- if (!tcp_hash_should_produce_warnings()) \
- break; \
- if (th->fin) \
- *f++ = 'F'; \
- if (th->syn) \
- *f++ = 'S'; \
- if (th->rst) \
- *f++ = 'R'; \
- if (th->psh) \
- *f++ = 'P'; \
- if (th->ack) \
- *f++ = '.'; \
- *f = 0; \
- if ((family) == AF_INET) { \
- net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
- msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
- &ip_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } else { \
- net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
- msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
- &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
- hdr_flags, ##__VA_ARGS__); \
- } \
-} while (0)
-
#ifdef CONFIG_TCP_AO
/* TCP-AO structures and functions */
struct tcp4_ao_context {
@@ -201,9 +169,9 @@ struct tcp6_ao_context {
};
struct tcp_sigpool;
+/* Established states are fast-path and always have current_key/rnext_key set */
#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
- TCPF_CLOSE | TCPF_CLOSE_WAIT | \
- TCPF_LAST_ACK | TCPF_CLOSING)
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_ao_key *key, struct tcphdr *th,
@@ -214,7 +182,8 @@ int tcp_ao_hash_skb(unsigned short int family,
const u8 *tkey, int hash_offset, u32 sne);
int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
sockptr_t optval, int optlen);
-struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk,
+ struct tcp_ao_info *ao,
int sndid, int rcvid);
int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
struct request_sock *req, struct sk_buff *skb,
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
new file mode 100644
index 000000000000..f13e5cd2b1ac
--- /dev/null
+++ b/include/net/tcp_ecn.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _TCP_ECN_H
+#define _TCP_ECN_H
+
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/bitfield.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/inet_ecn.h>
+
+/* The highest ECN variant (Accurate ECN, ECN, or no ECN) that is
+ * attempted to be negotiated and requested for incoming connections
+ * and outgoing connections, respectively.
+ */
+enum tcp_ecn_mode {
+ TCP_ECN_IN_NOECN_OUT_NOECN = 0,
+ TCP_ECN_IN_ECN_OUT_ECN = 1,
+ TCP_ECN_IN_ECN_OUT_NOECN = 2,
+ TCP_ECN_IN_ACCECN_OUT_ACCECN = 3,
+ TCP_ECN_IN_ACCECN_OUT_ECN = 4,
+ TCP_ECN_IN_ACCECN_OUT_NOECN = 5,
+};
+
+/* AccECN option sending when AccECN has been successfully negotiated */
+enum tcp_accecn_option {
+ TCP_ACCECN_OPTION_DISABLED = 0,
+ TCP_ACCECN_OPTION_MINIMUM = 1,
+ TCP_ACCECN_OPTION_FULL = 2,
+};
+
+static inline void tcp_ecn_queue_cwr(struct tcp_sock *tp)
+{
+ /* Do not set CWR if in AccECN mode! */
+ if (tcp_ecn_mode_rfc3168(tp))
+ tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
+}
+
+static inline void tcp_ecn_accept_cwr(struct sock *sk,
+ const struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tcp_ecn_mode_rfc3168(tp) && tcp_hdr(skb)->cwr) {
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+
+ /* If the sender is telling us it has entered CWR, then its
+ * cwnd may be very low (even just 1 packet), so we should ACK
+ * immediately.
+ */
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ }
+}
+
+static inline void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
+{
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+}
+
+/* tp->accecn_fail_mode */
+#define TCP_ACCECN_ACE_FAIL_SEND BIT(0)
+#define TCP_ACCECN_ACE_FAIL_RECV BIT(1)
+#define TCP_ACCECN_OPT_FAIL_SEND BIT(2)
+#define TCP_ACCECN_OPT_FAIL_RECV BIT(3)
+
+static inline bool tcp_accecn_ace_fail_send(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_SEND;
+}
+
+static inline bool tcp_accecn_ace_fail_recv(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_RECV;
+}
+
+static inline bool tcp_accecn_opt_fail_send(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_OPT_FAIL_SEND;
+}
+
+static inline bool tcp_accecn_opt_fail_recv(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_OPT_FAIL_RECV;
+}
+
+static inline void tcp_accecn_fail_mode_set(struct tcp_sock *tp, u8 mode)
+{
+ tp->accecn_fail_mode |= mode;
+}
+
+#define TCP_ACCECN_OPT_NOT_SEEN 0x0
+#define TCP_ACCECN_OPT_EMPTY_SEEN 0x1
+#define TCP_ACCECN_OPT_COUNTER_SEEN 0x2
+#define TCP_ACCECN_OPT_FAIL_SEEN 0x3
+
+static inline u8 tcp_accecn_ace(const struct tcphdr *th)
+{
+ return (th->ae << 2) | (th->cwr << 1) | th->ece;
+}
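
The three header bits concatenate into one 3-bit counter, with AE as the most significant bit. For example:

	/* A header with AE=1, CWR=1, ECE=0 yields ace = 0b110 = 6,
	 * i.e. the peer is echoing a CE mark (see the lookup table below).
	 */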
+
+/* Infer the ECT value our SYN arrived with from the echoed ACE field */
+static inline int tcp_accecn_extract_syn_ect(u8 ace)
+{
+ /* Below is an excerpt from the 1st block of Table 2 of AccECN spec */
+ static const int ace_to_ecn[8] = {
+ INET_ECN_ECT_0, /* 0b000 (Undefined) */
+ INET_ECN_ECT_1, /* 0b001 (Undefined) */
+ INET_ECN_NOT_ECT, /* 0b010 (Not-ECT is received) */
+ INET_ECN_ECT_1, /* 0b011 (ECT-1 is received) */
+ INET_ECN_ECT_0, /* 0b100 (ECT-0 is received) */
+ INET_ECN_ECT_1, /* 0b101 (Reserved) */
+ INET_ECN_CE, /* 0b110 (CE is received) */
+ INET_ECN_ECT_1 /* 0b111 (Undefined) */
+ };
+
+ return ace_to_ecn[ace & 0x7];
+}
+
+/* Check ECN field transition to detect invalid transitions */
+static inline bool tcp_ect_transition_valid(u8 snt, u8 rcv)
+{
+ if (rcv == snt)
+ return true;
+
+ /* Non-ECT altered to something or something became non-ECT */
+ if (snt == INET_ECN_NOT_ECT || rcv == INET_ECN_NOT_ECT)
+ return false;
+ /* CE -> ECT(0/1)? */
+ if (snt == INET_ECN_CE)
+ return false;
+ return true;
+}
+
+static inline bool tcp_accecn_validate_syn_feedback(struct sock *sk, u8 ace,
+ u8 sent_ect)
+{
+ u8 ect = tcp_accecn_extract_syn_ect(ace);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
+ return true;
+
+ if (!tcp_ect_transition_valid(sent_ect, ect)) {
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
+ return false;
+ }
+
+ return true;
+}
+
+static inline void tcp_accecn_saw_opt_fail_recv(struct tcp_sock *tp,
+ u8 saw_opt)
+{
+ tp->saw_accecn_opt = saw_opt;
+ if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN)
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV);
+}
+
+/* Validate the 3rd ACK based on the ACE field, see Table 4 of AccECN spec */
+static inline void tcp_accecn_third_ack(struct sock *sk,
+ const struct sk_buff *skb, u8 sent_ect)
+{
+ u8 ace = tcp_accecn_ace(tcp_hdr(skb));
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ switch (ace) {
+ case 0x0:
+ /* Invalid value */
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
+ break;
+ case 0x7:
+ case 0x5:
+ case 0x1:
+ /* Unused but legal values */
+ break;
+ default:
+ /* Validation only applies to the first non-data packet */
+ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq &&
+ !TCP_SKB_CB(skb)->sacked &&
+ tcp_accecn_validate_syn_feedback(sk, ace, sent_ect)) {
+ if ((tcp_accecn_extract_syn_ect(ace) == INET_ECN_CE) &&
+ !tp->delivered_ce)
+ tp->delivered_ce++;
+ }
+ break;
+ }
+}
+
+/* Demand the minimum number of upcoming ACKs to carry the AccECN option */
+static inline void tcp_accecn_opt_demand_min(struct sock *sk,
+ u8 opt_demand_min)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u8 opt_demand;
+
+ opt_demand = max_t(u8, opt_demand_min, tp->accecn_opt_demand);
+ tp->accecn_opt_demand = opt_demand;
+}
+
+/* Maps IP ECN field ECT/CE code point to AccECN option field number, given
+ * we are sending fields with Accurate ECN Order 1: ECT(1), CE, ECT(0).
+ */
+static inline u8 tcp_ecnfield_to_accecn_optfield(u8 ecnfield)
+{
+ switch (ecnfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0; /* AccECN does not send counts of NOT_ECT */
+ case INET_ECN_ECT_1:
+ return 1;
+ case INET_ECN_CE:
+ return 2;
+ case INET_ECN_ECT_0:
+ return 3;
+ }
+ return 0;
+}
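+
+/* E.g. (illustrative): an ECT(0)-marked segment maps to option field 3,
+ * matching Order 1 above: field 1 = ECT(1), field 2 = CE, field 3 = ECT(0).
+ */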
+
+/* Maps IP ECN field ECT/CE code point to AccECN option field value offset.
+ * Some fields do not start from zero, so that middlebox zeroing can be
+ * detected.
+ */
+static inline u32 tcp_accecn_field_init_offset(u8 ecnfield)
+{
+ switch (ecnfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0; /* AccECN does not send counts of NOT_ECT */
+ case INET_ECN_ECT_1:
+ return TCP_ACCECN_E1B_INIT_OFFSET;
+ case INET_ECN_CE:
+ return TCP_ACCECN_CEB_INIT_OFFSET;
+ case INET_ECN_ECT_0:
+ return TCP_ACCECN_E0B_INIT_OFFSET;
+ }
+ return 0;
+}
+
+/* Maps AccECN option field #nr to IP ECN field ECT/CE bits */
+static inline unsigned int tcp_accecn_optfield_to_ecnfield(unsigned int option,
+ bool order)
+{
+ /* Based on Table 5 of the AccECN spec to map (option, order) to
+ * the corresponding ECN counters (ECT-1, ECT-0, or CE).
+ */
+ static const u8 optfield_lookup[2][3] = {
+ /* order = 0: 1st field ECT-0, 2nd field CE, 3rd field ECT-1 */
+ { INET_ECN_ECT_0, INET_ECN_CE, INET_ECN_ECT_1 },
+ /* order = 1: 1st field ECT-1, 2nd field CE, 3rd field ECT-0 */
+ { INET_ECN_ECT_1, INET_ECN_CE, INET_ECN_ECT_0 }
+ };
+
+ return optfield_lookup[order][option % 3];
+}
+
+/* Handles AccECN option ECT and CE 24-bit byte counters update into
+ * the u32 value in tcp_sock. As we're processing TCP options, it is
+ * safe to do a 32-bit read starting at from - 1 (still within the header).
+ */
+static inline s32 tcp_update_ecn_bytes(u32 *cnt, const char *from,
+ u32 init_offset)
+{
+ u32 truncated = (get_unaligned_be32(from - 1) - init_offset) &
+ 0xFFFFFFU;
+ u32 delta = (truncated - *cnt) & 0xFFFFFFU;
+
+ /* If delta has its highest (24th) bit set, the 24-bit value is
+ * negative; sign-extend it with sign_extend32(delta, 24 - 1) to
+ * recover the correct signed delta.
+ */
+ delta = sign_extend32(delta, 23);
+ *cnt += delta;
+ return (s32)delta;
+}
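+
+/* Worked example (illustrative): with *cnt == 0xFFFFF0 and a wire value
+ * of 0x000010 after subtracting init_offset, delta == (0x10 - 0xFFFFF0) &
+ * 0xFFFFFF == 0x20, which stays positive after sign extension, so *cnt
+ * correctly advances across the 24-bit wrap to 0x1000010.
+ */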
+
+/* Updates Accurate ECN received counters from the received IP ECN field */
+static inline void tcp_ecn_received_counters(struct sock *sk,
+ const struct sk_buff *skb, u32 len)
+{
+ u8 ecnfield = TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK;
+ u8 is_ce = INET_ECN_is_ce(ecnfield);
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool ecn_edge;
+
+ if (!INET_ECN_is_not_ect(ecnfield)) {
+ u32 pcount = is_ce * max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ /* As for accurate ECN, the TCP_ECN_SEEN flag is set by
+ * tcp_ecn_received_counters() when the ECN codepoint of
+ * received TCP data or ACK contains ECT(0), ECT(1), or CE.
+ */
+ if (!tcp_ecn_mode_rfc3168(tp))
+ tp->ecn_flags |= TCP_ECN_SEEN;
+
+ /* ACE counter tracks *all* segments including pure ACKs */
+ tp->received_ce += pcount;
+ tp->received_ce_pending = min(tp->received_ce_pending + pcount,
+ 0xfU);
+
+ if (len > 0) {
+ u8 minlen = tcp_ecnfield_to_accecn_optfield(ecnfield);
+ u32 oldbytes = tp->received_ecn_bytes[ecnfield - 1];
+ u32 bytes_mask = GENMASK_U32(31, 22);
+
+ tp->received_ecn_bytes[ecnfield - 1] += len;
+ tp->accecn_minlen = max_t(u8, tp->accecn_minlen,
+ minlen);
+
+ /* Send AccECN option at least once per 2^22-byte
+ * increase in any ECN byte counter.
+ */
+ if ((tp->received_ecn_bytes[ecnfield - 1] ^ oldbytes) &
+ bytes_mask) {
+ tcp_accecn_opt_demand_min(sk, 1);
+ }
+ }
+ }
+
+ ecn_edge = tp->prev_ecnfield != ecnfield;
+ if (ecn_edge || is_ce) {
+ tp->prev_ecnfield = ecnfield;
+ /* Demand Accurate ECN change-triggered ACKs. Two ACKs are
+ * demanded to indicate the ecnfield value unambiguously in
+ * the latter ACK.
+ */
+ if (tcp_ecn_mode_accecn(tp)) {
+ if (ecn_edge)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ tp->accecn_opt_demand = 2;
+ }
+ }
+}
+
+/* AccECN specification, 2.2: [...] A Data Receiver maintains four counters
+ * initialized at the start of the half-connection. [...] These byte counters
+ * reflect only the TCP payload length, excluding TCP header and TCP options.
+ */
+static inline void tcp_ecn_received_counters_payload(struct sock *sk,
+ const struct sk_buff *skb)
+{
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ tcp_ecn_received_counters(sk, skb, skb->len - th->doff * 4);
+}
+
+/* AccECN specification, 5.1: [...] a server can determine that it
+ * negotiated AccECN as [...] if the ACK contains an ACE field with
+ * the value 0b010 to 0b111 (decimal 2 to 7).
+ */
+static inline bool cookie_accecn_ok(const struct tcphdr *th)
+{
+ return tcp_accecn_ace(th) > 0x1;
+}
+
+/* Used to form the ACE flags for SYN/ACK */
+static inline u16 tcp_accecn_reflector_flags(u8 ect)
+{
+ /* TCP ACE flags of SYN/ACK are set based on IP-ECN received from SYN.
+ * Below is an excerpt from the 1st block of Table 2 of AccECN spec,
+ * in which TCP ACE flags are encoded as: (AE << 2) | (CWR << 1) | ECE
+ */
+ static const u8 ecn_to_ace_flags[4] = {
+ 0b010, /* Not-ECT is received */
+ 0b011, /* ECT(1) is received */
+ 0b100, /* ECT(0) is received */
+ 0b110 /* CE is received */
+ };
+
+ return FIELD_PREP(TCPHDR_ACE, ecn_to_ace_flags[ect & 0x3]);
+}
+
+/* AccECN specification, 3.1.2: If a TCP server that implements AccECN
+ * receives a SYN with the three TCP header flags (AE, CWR and ECE) set
+ * to any combination other than 000, 011 or 111, it MUST negotiate the
+ * use of AccECN as if they had been set to 111.
+ */
+static inline bool tcp_accecn_syn_requested(const struct tcphdr *th)
+{
+ u8 ace = tcp_accecn_ace(th);
+
+ return ace && ace != 0x3;
+}
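+
+/* E.g. (illustrative): a SYN with AE=0, CWR=1, ECE=0 (ace == 0x2) still
+ * requests AccECN per the MUST above, while ace == 0x3 is a classic
+ * RFC 3168 ECN SYN and returns false.
+ */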
+
+static inline void __tcp_accecn_init_bytes_counters(int *counter_array)
+{
+ BUILD_BUG_ON(INET_ECN_ECT_1 != 0x1);
+ BUILD_BUG_ON(INET_ECN_ECT_0 != 0x2);
+ BUILD_BUG_ON(INET_ECN_CE != 0x3);
+
+ counter_array[INET_ECN_ECT_1 - 1] = 0;
+ counter_array[INET_ECN_ECT_0 - 1] = 0;
+ counter_array[INET_ECN_CE - 1] = 0;
+}
+
+static inline void tcp_accecn_init_counters(struct tcp_sock *tp)
+{
+ tp->received_ce = 0;
+ tp->received_ce_pending = 0;
+ __tcp_accecn_init_bytes_counters(tp->received_ecn_bytes);
+ __tcp_accecn_init_bytes_counters(tp->delivered_ecn_bytes);
+ tp->accecn_minlen = 0;
+ tp->accecn_opt_demand = 0;
+ tp->est_ecnfield = 0;
+}
+
+/* Used for make_synack to form the ACE flags */
+static inline void tcp_accecn_echo_syn_ect(struct tcphdr *th, u8 ect)
+{
+ /* TCP ACE flags of SYN/ACK are set based on IP-ECN codepoint received
+ * from SYN. Below is an excerpt from Table 2 of the AccECN spec:
+ * +====================+====================================+
+ * | IP-ECN codepoint | Respective ACE flags on SYN/ACK |
+ * | received on SYN | AE CWR ECE |
+ * +====================+====================================+
+ * | Not-ECT | 0 1 0 |
+ * | ECT(1) | 0 1 1 |
+ * | ECT(0) | 1 0 0 |
+ * | CE | 1 1 0 |
+ * +====================+====================================+
+ */
+ th->ae = !!(ect & INET_ECN_ECT_0);
+ th->cwr = ect != INET_ECN_ECT_0;
+ th->ece = ect == INET_ECN_ECT_1;
+}
+
+static inline void tcp_accecn_set_ace(struct tcp_sock *tp, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ u32 wire_ace;
+
+ /* The final packet of the 3WHS or anything like it must reflect
+ * the SYN/ACK ECT instead of putting CEP into the ACE field;
+ * such cases show up in tcp_flags.
+ */
+ if (likely(!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACE))) {
+ wire_ace = tp->received_ce + TCP_ACCECN_CEP_INIT_OFFSET;
+ th->ece = !!(wire_ace & 0x1);
+ th->cwr = !!(wire_ace & 0x2);
+ th->ae = !!(wire_ace & 0x4);
+ tp->received_ce_pending = 0;
+ }
+}
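+
+/* Illustrative: assuming TCP_ACCECN_CEP_INIT_OFFSET == 5 (the initial ACE
+ * counter value from the AccECN spec) and received_ce == 0, wire_ace == 5,
+ * i.e. AE=1, CWR=0, ECE=1 on the wire.
+ */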
+
+static inline u8 tcp_accecn_option_init(const struct sk_buff *skb,
+ u8 opt_offset)
+{
+ u8 *ptr = skb_transport_header(skb) + opt_offset;
+ unsigned int optlen = ptr[1] - 2;
+
+ if (WARN_ON_ONCE(ptr[0] != TCPOPT_ACCECN0 && ptr[0] != TCPOPT_ACCECN1))
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+ ptr += 2;
+
+ /* Detect option zeroing: an AccECN connection "MAY check that the
+ * initial value of the EE0B field or the EE1B field is non-zero"
+ */
+ if (optlen < TCPOLEN_ACCECN_PERFIELD)
+ return TCP_ACCECN_OPT_EMPTY_SEEN;
+ if (get_unaligned_be24(ptr) == 0)
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+ if (optlen < TCPOLEN_ACCECN_PERFIELD * 3)
+ return TCP_ACCECN_OPT_COUNTER_SEEN;
+ ptr += TCPOLEN_ACCECN_PERFIELD * 2;
+ if (get_unaligned_be24(ptr) == 0)
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+
+ return TCP_ACCECN_OPT_COUNTER_SEEN;
+}
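+
+/* Option layout sketch (illustrative): kind (TCPOPT_ACCECN0/1) and length,
+ * followed by up to three TCPOLEN_ACCECN_PERFIELD-byte 24-bit counters;
+ * an option too short to hold a counter is "empty", and a zeroed first
+ * counter signals middlebox mangling.
+ */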
+
+/* See Table 2 of the AccECN draft */
+static inline void tcp_ecn_rcv_synack(struct sock *sk, const struct sk_buff *skb,
+ const struct tcphdr *th, u8 ip_dsfield)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u8 ace = tcp_accecn_ace(th);
+
+ switch (ace) {
+ case 0x0:
+ case 0x7:
+ /* +========+========+============+=============+
+ * | A | B | SYN/ACK | Feedback |
+ * | | | B->A | Mode of A |
+ * | | | AE CWR ECE | |
+ * +========+========+============+=============+
+ * | AccECN | No ECN | 0 0 0 | Not ECN |
+ * | AccECN | Broken | 1 1 1 | Not ECN |
+ * +========+========+============+=============+
+ */
+ tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
+ break;
+ case 0x1:
+ case 0x5:
+ /* +========+========+============+=============+
+ * | A | B | SYN/ACK | Feedback |
+ * | | | B->A | Mode of A |
+ * | | | AE CWR ECE | |
+ * +========+========+============+=============+
+ * | AccECN | Nonce | 1 0 1 | (Reserved) |
+ * | AccECN | ECN | 0 0 1 | Classic ECN |
+ * | Nonce | AccECN | 0 0 1 | Classic ECN |
+ * | ECN | AccECN | 0 0 1 | Classic ECN |
+ * +========+========+============+=============+
+ */
+ if (tcp_ecn_mode_pending(tp))
+ /* Downgrade from AccECN, or requested initially */
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ break;
+ default:
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ tp->syn_ect_rcv = ip_dsfield & INET_ECN_MASK;
+ if (tp->rx_opt.accecn &&
+ tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
+
+ tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
+ tp->accecn_opt_demand = 2;
+ }
+ if (INET_ECN_is_ce(ip_dsfield) &&
+ tcp_accecn_validate_syn_feedback(sk, ace,
+ tp->syn_ect_snt)) {
+ tp->received_ce++;
+ tp->received_ce_pending++;
+ }
+ break;
+ }
+}
+
+static inline void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th,
+ const struct sk_buff *skb)
+{
+ if (tcp_ecn_mode_pending(tp)) {
+ if (!tcp_accecn_syn_requested(th)) {
+ /* Downgrade to classic ECN feedback */
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ } else {
+ tp->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
+ INET_ECN_MASK;
+ tp->prev_ecnfield = tp->syn_ect_rcv;
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ }
+ }
+ if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || !th->cwr))
+ tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
+}
+
+static inline bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp,
+ const struct tcphdr *th)
+{
+ if (th->ece && !th->syn && tcp_ecn_mode_rfc3168(tp))
+ return true;
+ return false;
+}
+
+/* Packet ECN state for a SYN-ACK */
+static inline void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
+ if (tcp_ecn_disabled(tp))
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
+ else if (tcp_ca_needs_ecn(sk) ||
+ tcp_bpf_ca_needs_ecn(sk))
+ INET_ECN_xmit(sk);
+
+ if (tp->ecn_flags & TCP_ECN_MODE_ACCECN) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ACE;
+ TCP_SKB_CB(skb)->tcp_flags |=
+ tcp_accecn_reflector_flags(tp->syn_ect_rcv);
+ tp->syn_ect_snt = inet_sk(sk)->tos & INET_ECN_MASK;
+ }
+}
+
+/* Packet ECN state for a SYN. */
+static inline void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
+ bool use_ecn, use_accecn;
+ u8 tcp_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
+
+ use_accecn = tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ACCECN;
+ use_ecn = tcp_ecn == TCP_ECN_IN_ECN_OUT_ECN ||
+ tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ECN ||
+ tcp_ca_needs_ecn(sk) || bpf_needs_ecn || use_accecn;
+
+ if (!use_ecn) {
+ const struct dst_entry *dst = __sk_dst_get(sk);
+
+ if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
+ use_ecn = true;
+ }
+
+ tp->ecn_flags = 0;
+
+ if (use_ecn) {
+ if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
+ INET_ECN_xmit(sk);
+
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
+ if (use_accecn) {
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_AE;
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_PENDING);
+ tp->syn_ect_snt = inet_sk(sk)->tos & INET_ECN_MASK;
+ } else {
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ }
+ }
+}
+
+static inline void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
+{
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)) {
+ /* tp->ecn_flags are cleared at a later point in time when the
+ * SYN-ACK is ultimately received.
+ */
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ACE;
+ }
+}
+
+static inline void
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
+{
+ if (tcp_rsk(req)->accecn_ok)
+ tcp_accecn_echo_syn_ect(th, tcp_rsk(req)->syn_ect_rcv);
+ else if (inet_rsk(req)->ecn_ok)
+ th->ece = 1;
+}
+
+static inline bool tcp_accecn_option_beacon_check(const struct sock *sk)
+{
+ u32 ecn_beacon = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option_beacon);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!ecn_beacon)
+ return false;
+
+ return tcp_stamp_us_delta(tp->tcp_mstamp, tp->accecn_opt_tstamp) * ecn_beacon >=
+ (tp->srtt_us >> 3);
+}
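+
+/* Illustrative: with sysctl_tcp_ecn_option_beacon == 3 (assumed setting),
+ * the condition holds once the time since the last sent AccECN option
+ * exceeds srtt/3, i.e. the option is emitted at least ~3 times per RTT.
+ */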
+
+#endif /* _LINUX_TCP_ECN_H */
diff --git a/include/net/tcx.h b/include/net/tcx.h
index 72a3e75e539f..23a61af13547 100644
--- a/include/net/tcx.h
+++ b/include/net/tcx.h
@@ -13,14 +13,13 @@ struct mini_Qdisc;
struct tcx_entry {
struct mini_Qdisc __rcu *miniq;
struct bpf_mprog_bundle bundle;
- bool miniq_active;
+ u32 miniq_active;
struct rcu_head rcu;
};
struct tcx_link {
struct bpf_link link;
struct net_device *dev;
- u32 location;
};
static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
@@ -125,11 +124,16 @@ static inline void tcx_skeys_dec(bool ingress)
tcx_dec();
}
-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
- const bool active)
+static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry)
{
ASSERT_RTNL();
- tcx_entry(entry)->miniq_active = active;
+ tcx_entry(entry)->miniq_active++;
+}
+
+static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry)
+{
+ ASSERT_RTNL();
+ tcx_entry(entry)->miniq_active--;
}
static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 62b3e9f2aed4..0a85ac64a66d 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,13 +15,6 @@ struct timewait_sock_ops {
struct kmem_cache *twsk_slab;
char *twsk_slab_name;
unsigned int twsk_obj_size;
- void (*twsk_destructor)(struct sock *sk);
};
-static inline void twsk_destructor(struct sock *sk)
-{
- if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
- sk->sk_prot->twsk_prot->twsk_destructor(sk);
-}
-
#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index 3a33924db2bc..ebd2550280ae 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -53,12 +53,16 @@ struct tls_rec;
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
+/* Minimum record size limit as per RFC8449 */
+#define TLS_MIN_RECORD_SIZE_LIM ((size_t)1 << 6)
#define TLS_HEADER_SIZE 5
#define TLS_NONCE_OFFSET TLS_HEADER_SIZE
#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
+#define TLS_HANDSHAKE_KEYUPDATE 24 /* rfc8446 B.3: Key update */
+
#define TLS_AAD_SPACE_SIZE 13
#define TLS_MAX_IV_SIZE 16
@@ -130,6 +134,7 @@ struct tls_sw_context_rx {
u8 async_capable:1;
u8 zc_capable:1;
u8 reader_contended:1;
+ bool key_update_pending;
struct tls_strparser strp;
@@ -223,6 +228,7 @@ struct tls_context {
u8 rx_conf:3;
u8 zerocopy_sendfile:1;
u8 rx_no_pad:1;
+ u16 tx_max_payload_len;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@ -390,8 +396,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_tx(ctx);
@@ -399,8 +409,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
- struct tls_context *ctx = tls_get_ctx(sk);
+ struct tls_context *ctx;
+
+ if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk))
+ return false;
+ ctx = tls_get_ctx(sk);
if (!ctx)
return false;
return !!tls_sw_ctx_rx(ctx);
@@ -440,25 +454,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
-tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+ __be32 seq, u16 len)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
- atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
- rx_ctx->resync_async->loglen = 0;
- rx_ctx->resync_async->rcd_delta = 0;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
}
static inline void
-tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+ __be32 seq)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
- atomic64_set(&rx_ctx->resync_async->req,
- ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+ atomic64_set(&resync_async->req, 0);
}
static inline void
diff --git a/include/net/udp.h b/include/net/udp.h
index c4e05b14b648..a061d1b22ddc 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -50,39 +50,68 @@ struct udp_skb_cb {
#define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb))
/**
- * struct udp_hslot - UDP hash slot
+ * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4
*
* @head: head of list of sockets
+ * @nulls_head: head of list of sockets, only used by hash4
* @count: number of sockets in 'head' list
* @lock: spinlock protecting changes to head/count
*/
struct udp_hslot {
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ /* hash4 uses hlist_nulls to avoid moving wrongly onto another
+ * hlist, because rehash() can run concurrently with lookup().
+ */
+ struct hlist_nulls_head nulls_head;
+ };
int count;
spinlock_t lock;
-} __attribute__((aligned(2 * sizeof(long))));
+} __aligned(2 * sizeof(long));
+
+/**
+ * struct udp_hslot_main - UDP hash slot used by udp_table.hash2
+ *
+ * @hslot: basic hash slot
+ * @hash4_cnt: number of sockets in hslot4 of the same
+ * (local port, local address)
+ */
+struct udp_hslot_main {
+ struct udp_hslot hslot; /* must be the first member */
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ u32 hash4_cnt;
+#endif
+} __aligned(2 * sizeof(long));
+#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot))
/**
* struct udp_table - UDP table
*
* @hash: hash table, sockets are hashed on (local port)
* @hash2: hash table, sockets are hashed on (local port, local address)
+ * @hash4: hash table, connected sockets are hashed on
+ * (local port, local address, remote port, remote address)
* @mask: number of slots in hash tables, minus 1
* @log: log2(number of slots in hash table)
*/
struct udp_table {
struct udp_hslot *hash;
- struct udp_hslot *hash2;
+ struct udp_hslot_main *hash2;
+#if !IS_ENABLED(CONFIG_BASE_SMALL)
+ struct udp_hslot *hash4;
+#endif
unsigned int mask;
unsigned int log;
};
extern struct udp_table udp_table;
void udp_table_init(struct udp_table *, const char *);
static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
- struct net *net, unsigned int num)
+ const struct net *net,
+ unsigned int num)
{
return &table->hash[udp_hashfn(net, num, table->mask)];
}
+
/*
* For secondary hash, net_hash_mix() is performed before calling
* udp_hashslot2(), this explains difference with udp_hashslot()
@@ -90,12 +119,92 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
unsigned int hash)
{
- return &table->hash2[hash & table->mask];
+ return &table->hash2[hash & table->mask].hslot;
+}
+
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
+}
+
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ BUILD_BUG();
+ return NULL;
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return false;
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return 0;
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return false;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
+}
+
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+
+/* Must be called with table->hash2 initialized */
+static inline void udp_table_hash4_init(struct udp_table *table)
+{
+ table->hash4 = (void *)(table->hash2 + (table->mask + 1));
+ for (int i = 0; i <= table->mask; i++) {
+ table->hash2[i].hash4_cnt = 0;
+
+ INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i);
+ table->hash4[i].count = 0;
+ spin_lock_init(&table->hash4[i].lock);
+ }
}
+static inline struct udp_hslot *udp_hashslot4(struct udp_table *table,
+ unsigned int hash)
+{
+ return &table->hash4[hash & table->mask];
+}
+
+static inline bool udp_hashed4(const struct sock *sk)
+{
+ return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node);
+}
+
+static inline unsigned int udp_hash4_slot_size(void)
+{
+ return sizeof(struct udp_hslot);
+}
+
+static inline bool udp_has_hash4(const struct udp_hslot *hslot2)
+{
+ return UDP_HSLOT_MAIN(hslot2)->hash4_cnt;
+}
+
+static inline void udp_hash4_inc(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt++;
+}
+
+static inline void udp_hash4_dec(struct udp_hslot *hslot2)
+{
+ UDP_HSLOT_MAIN(hslot2)->hash4_cnt--;
+}
+#endif /* CONFIG_BASE_SMALL */
+
extern struct proto udp_prot;
-extern atomic_long_t udp_memory_allocated;
DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
/* sysctl variables for udp */
@@ -175,13 +284,28 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features, bool is_ipv6);
-static inline void udp_lib_init_sock(struct sock *sk)
+static inline int udp_lib_init_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
+ sk->sk_drop_counters = &up->drop_counters;
skb_queue_head_init(&up->reader_queue);
+ INIT_HLIST_NODE(&up->tunnel_list);
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
+
+ up->udp_prod_queue = kcalloc(nr_node_ids, sizeof(*up->udp_prod_queue),
+ GFP_KERNEL);
+ if (!up->udp_prod_queue)
+ return -ENOMEM;
+ for (int i = 0; i < nr_node_ids; i++)
+ init_llist_head(&up->udp_prod_queue[i].ll_root);
+ return 0;
+}
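+
+/* Note (illustrative): callers such as udp_init_sock() (assumed) must
+ * propagate the return value, as the per-NUMA-node producer queue
+ * allocation above can fail with -ENOMEM.
+ */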
+
+static inline void udp_drops_inc(struct sock *sk)
+{
+ numa_drop_add(&udp_sk(sk)->drop_counters, 1);
}
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
@@ -192,13 +316,29 @@ static inline int udp_lib_hash(struct sock *sk)
}
void udp_lib_unhash(struct sock *sk);
-void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4);
+u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport,
+ const __be32 faddr, const __be16 fport);
static inline void udp_lib_close(struct sock *sk, long timeout)
{
sk_common_release(sk);
}
+/* hash4 routines shared between UDPv4/6 */
+#if IS_ENABLED(CONFIG_BASE_SMALL)
+static inline void udp_lib_hash4(struct sock *sk, u16 hash)
+{
+}
+
+static inline void udp4_hash4(struct sock *sk)
+{
+}
+#else /* !CONFIG_BASE_SMALL */
+void udp_lib_hash4(struct sock *sk, u16 hash);
+void udp4_hash4(struct sock *sk);
+#endif /* CONFIG_BASE_SMALL */
+
int udp_lib_get_port(struct sock *sk, unsigned short snum,
unsigned int hash2_nulladdr);
@@ -231,7 +371,7 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
}
/* Since this is being sent on the wire obfuscate hash a bit
- * to minimize possbility that any useful information to an
+ * to minimize possibility that any useful information to an
* attacker is leaked. Only upper 16 bits are relevant in the
* computation for 16 bit port value.
*/
@@ -245,7 +385,7 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
-static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+static inline bool udp_sk_bound_dev_eq(const struct net *net, int bound_dev_if,
int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
@@ -271,7 +411,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
return __skb_recv_udp(sk, flags, &off, err);
}
-int udp_v4_early_demux(struct sk_buff *skb);
+enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
@@ -284,7 +424,7 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_init_sock(struct sock *sk);
-int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int udp_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
@@ -296,18 +436,19 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *));
-struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *udp4_lib_lookup(const struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif);
-struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+struct sock *__udp4_lib_lookup(const struct net *net, __be32 saddr,
+ __be16 sport,
__be32 daddr, __be16 dport, int dif, int sdif,
struct udp_table *tbl, struct sk_buff *skb);
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport);
-struct sock *udp6_lib_lookup(struct net *net,
+struct sock *udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif);
-struct sock *__udp6_lib_lookup(struct net *net,
+struct sock *__udp6_lib_lookup(const struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, int sdif, struct udp_table *tbl,
@@ -459,6 +600,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
{
netdev_features_t features = NETIF_F_SG;
struct sk_buff *segs;
+ int drop_count;
+
+ /*
+ * Segmentation in the UDP receive path is only for UDP GRO; drop
+ * UDP fragmentation offload (UFO) packets.
+ */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+ drop_count = 1;
+ goto drop;
+ }
/* Avoid csum recalculation by skb_segment unless userspace explicitly
* asks for the final checksum values
@@ -482,16 +633,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
*/
segs = __skb_gso_segment(skb, features, false);
if (IS_ERR_OR_NULL(segs)) {
- int segs_nr = skb_shinfo(skb)->gso_segs;
-
- atomic_add(segs_nr, &sk->sk_drops);
- SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
- kfree_skb(skb);
- return NULL;
+ drop_count = skb_shinfo(skb)->gso_segs;
+ goto drop;
}
consume_skb(skb);
return segs;
+
+drop:
+ sk_drops_add(sk, drop_count);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
+ kfree_skb(skb);
+ return NULL;
}
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index a93dc51f6323..9acef2fbd2fd 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -130,35 +130,20 @@ void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
-static inline void udp_tunnel_get_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
-}
-
-static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
-{
- ASSERT_RTNL();
- if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
- return;
- call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
-}
-
/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 tos, __u8 ttl,
__be16 df, __be16 src_port, __be16 dst_port,
- bool xnet, bool nocheck);
+ bool xnet, bool nocheck, u16 ipcb_flags);
-int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- __u8 prio, __u8 ttl, __be32 label,
- __be16 src_port, __be16 dst_port, bool nocheck);
+void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be32 label,
+ __be16 src_port, __be16 dst_port, bool nocheck,
+ u16 ip6cb_flags);
void udp_tunnel_sock_release(struct socket *sock);
@@ -191,6 +176,21 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
}
#endif
+#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
+void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
+void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
+#else
+static inline void udp_tunnel_update_gro_lookup(struct net *net,
+ struct sock *sk, bool add) {}
+static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
+#endif
+
+static inline void udp_tunnel_cleanup_gro(struct sock *sk)
+{
+ udp_tunnel_update_gro_rcv(sk, false);
+ udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
+}
+
static inline void udp_tunnel_encap_enable(struct sock *sk)
{
if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
@@ -206,19 +206,17 @@ static inline void udp_tunnel_encap_enable(struct sock *sk)
#define UDP_TUNNEL_NIC_MAX_TABLES 4
enum udp_tunnel_nic_info_flags {
- /* Device callbacks may sleep */
- UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0),
/* Device only supports offloads when it's open, all ports
* will be removed before close and re-added after open.
*/
- UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1),
+ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(0),
/* Device supports only IPv4 tunnels */
- UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2),
+ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(1),
/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
* This port must not be counted towards n_entries of any table.
* Driver will not receive any callback associated with port 4789.
*/
- UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2),
};
struct udp_tunnel_nic;
@@ -309,6 +307,9 @@ struct udp_tunnel_nic_ops {
size_t (*dump_size)(struct net_device *dev, unsigned int table);
int (*dump_write)(struct net_device *dev, unsigned int table,
struct sk_buff *skb);
+ void (*assert_locked)(struct net_device *dev);
+ void (*lock)(struct net_device *dev);
+ void (*unlock)(struct net_device *dev);
};
#ifdef CONFIG_INET
@@ -337,8 +338,28 @@ static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
unsigned int idx, u8 priv)
{
- if (udp_tunnel_nic_ops)
+ if (udp_tunnel_nic_ops) {
+ udp_tunnel_nic_ops->assert_locked(dev);
udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+ }
+}
+
+static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->assert_locked(dev);
+}
+
+static inline void udp_tunnel_nic_lock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->lock(dev);
+}
+
+static inline void udp_tunnel_nic_unlock(struct net_device *dev)
+{
+ if (udp_tunnel_nic_ops)
+ udp_tunnel_nic_ops->unlock(dev);
}
static inline void
@@ -380,17 +401,50 @@ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
+ size_t ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_size(dev, table);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_size(dev, table);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
}
static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
struct sk_buff *skb)
{
+ int ret;
+
if (!udp_tunnel_nic_ops)
return 0;
- return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+
+ udp_tunnel_nic_ops->lock(dev);
+ ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
+ udp_tunnel_nic_ops->unlock(dev);
+
+ return ret;
}
+
+static inline void udp_tunnel_get_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
+}
+
+static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
+ return;
+ udp_tunnel_nic_assert_locked(dev);
+ call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
+}
+
#endif
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
index cf8cc140d68d..c3f4cc206198 100644
--- a/include/net/vsock_addr.h
+++ b/include/net/vsock_addr.h
@@ -16,7 +16,7 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);
void vsock_addr_unbind(struct sockaddr_vm *addr);
bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
const struct sockaddr_vm *other);
-int vsock_addr_cast(const struct sockaddr *addr, size_t len,
+int vsock_addr_cast(const struct sockaddr_unsized *addr, size_t len,
struct sockaddr_vm **out_addr);
#endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 33ba6fc151cf..0ee50785f4f1 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -227,6 +227,7 @@ struct vxlan_config {
unsigned int addrmax;
bool no_share;
enum ifla_vxlan_df df;
+ struct vxlanhdr reserved_bits;
};
enum {
@@ -295,7 +296,7 @@ struct vxlan_dev {
struct vxlan_rdst default_dst; /* default destination */
struct timer_list age_timer;
- spinlock_t hash_lock[FDB_HASH_SIZE];
+ spinlock_t hash_lock;
unsigned int addrcnt;
struct gro_cells gro_cells;
@@ -303,9 +304,10 @@ struct vxlan_dev {
struct vxlan_vni_group __rcu *vnigrp;
- struct hlist_head fdb_head[FDB_HASH_SIZE];
+ struct rhashtable fdb_hash_tbl;
struct rhashtable mdb_tbl;
+ struct hlist_head fdb_list;
struct hlist_head mdb_list;
unsigned int mdb_seq;
};
@@ -330,6 +332,7 @@ struct vxlan_dev {
#define VXLAN_F_VNIFILTER 0x20000
#define VXLAN_F_MDB 0x40000
#define VXLAN_F_LOCALBYPASS 0x80000
+#define VXLAN_F_MC_ROUTE 0x100000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -351,7 +354,9 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA | \
VXLAN_F_VNIFILTER | \
- VXLAN_F_LOCALBYPASS)
+ VXLAN_F_LOCALBYPASS | \
+ VXLAN_F_MC_ROUTE | \
+ 0)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
diff --git a/include/net/x25.h b/include/net/x25.h
index 597eb53c471e..414f3fd99345 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -81,7 +81,7 @@ enum {
#define X25_DEFAULT_WINDOW_SIZE 2 /* Default Window Size */
#define X25_DEFAULT_PACKET_SIZE X25_PS128 /* Default Packet Size */
-#define X25_DEFAULT_THROUGHPUT 0x0A /* Deafult Throughput */
+#define X25_DEFAULT_THROUGHPUT 0x0A /* Default Throughput */
#define X25_DEFAULT_REVERSE 0x00 /* Default Reverse Charging */
#define X25_SMODULUS 8
@@ -203,7 +203,6 @@ void x25_send_frame(struct sk_buff *, struct x25_neigh *);
int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
void x25_establish_link(struct x25_neigh *);
-void x25_terminate_link(struct x25_neigh *);
/* x25_facilities.c */
int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
diff --git a/include/net/xdp.h b/include/net/xdp.h
index e6770dd40c91..aa742f413c35 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -11,6 +11,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* skb_shared_info */
+#include <net/page_pool/types.h>
+
/**
* DOC: XDP RX-queue information
*
@@ -62,7 +64,6 @@ struct xdp_rxq_info {
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
- unsigned int napi_id;
u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
@@ -75,6 +76,11 @@ enum xdp_buff_flags {
XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
* pressure
*/
+ /* frags have unreadable mem; this can't be true for real XDP packets,
+ * but drivers may use XDP helpers to construct Rx pkt state even when
+ * XDP program is not attached.
+ */
+ XDP_FLAGS_FRAGS_UNREADABLE = BIT(2),
};
struct xdp_buff {
@@ -84,11 +90,23 @@ struct xdp_buff {
void *data_hard_start;
struct xdp_rxq_info *rxq;
struct xdp_txq_info *txq;
- u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
- u32 flags; /* supported values defined in xdp_buff_flags */
+
+ union {
+ struct {
+ /* frame size to deduce data_hard_end/tailroom */
+ u32 frame_sz;
+ /* supported values defined in xdp_buff_flags */
+ u32 flags;
+ };
+
+#ifdef __LITTLE_ENDIAN
+ /* Used to micro-optimize xdp_init_buff(), don't use directly */
+ u64 frame_sz_flags_init;
+#endif
+ };
};
-static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
+static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)
{
return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}
@@ -103,22 +121,42 @@ static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}
-static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
+static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
- return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}
-static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
+static __always_inline void xdp_buff_set_frag_unreadable(struct xdp_buff *xdp)
{
- xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
+ xdp->flags |= XDP_FLAGS_FRAGS_UNREADABLE;
+}
+
+static __always_inline u32 xdp_buff_get_skb_flags(const struct xdp_buff *xdp)
+{
+ return xdp->flags;
+}
+
+static __always_inline void xdp_buff_clear_frag_pfmemalloc(struct xdp_buff *xdp)
+{
+ xdp->flags &= ~XDP_FLAGS_FRAGS_PF_MEMALLOC;
}
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
- xdp->frame_sz = frame_sz;
xdp->rxq = rxq;
+
+#ifdef __LITTLE_ENDIAN
+ /*
+ * Force the compilers to initialize ::flags and assign ::frame_sz with
+ * one write on 64-bit LE architectures as they're often unable to do
+ * it themselves.
+ */
+ xdp->frame_sz_flags_init = frame_sz;
+#else
+ xdp->frame_sz = frame_sz;
xdp->flags = 0;
+#endif
}
static __always_inline void
@@ -144,15 +182,16 @@ xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
static inline struct skb_shared_info *
-xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+xdp_get_shared_info_from_buff(const struct xdp_buff *xdp)
{
return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
-static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
+static __always_inline unsigned int
+xdp_get_buff_len(const struct xdp_buff *xdp)
{
unsigned int len = xdp->data_end - xdp->data;
- struct skb_shared_info *sinfo;
+ const struct skb_shared_info *sinfo;
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
@@ -163,45 +202,133 @@ out:
return len;
}
+void xdp_return_frag(netmem_ref netmem, const struct xdp_buff *xdp);
+
+/**
+ * __xdp_buff_add_frag - attach frag to &xdp_buff
+ * @xdp: XDP buffer to attach the frag to
+ * @netmem: network memory containing the frag
+ * @offset: offset at which the frag starts
+ * @size: size of the frag
+ * @truesize: total memory size occupied by the frag
+ * @try_coalesce: whether to try coalescing the frags (not valid for XSk)
+ *
+ * Attach frag to the XDP buffer. If it currently has no frags attached,
+ * initialize the related fields, otherwise check that the frag number
+ * didn't reach the limit of ``MAX_SKB_FRAGS``. If possible, try coalescing
+ * the frag with the previous one.
+ * The function doesn't check/update the pfmemalloc bit. Please use the
+ * non-underscored wrapper in drivers.
+ *
+ * Return: true on success, false if there's no space for the frag in
+ * the shared info struct.
+ */
+static inline bool __xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
+ u32 offset, u32 size, u32 truesize,
+ bool try_coalesce)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *prev;
+ u32 nr_frags;
+
+ if (!xdp_buff_has_frags(xdp)) {
+ xdp_buff_set_frags_flag(xdp);
+
+ nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ sinfo->xdp_frags_truesize = 0;
+
+ goto fill;
+ }
+
+ nr_frags = sinfo->nr_frags;
+ prev = &sinfo->frags[nr_frags - 1];
+
+ if (try_coalesce && netmem == skb_frag_netmem(prev) &&
+ offset == skb_frag_off(prev) + skb_frag_size(prev)) {
+ skb_frag_size_add(prev, size);
+ /* Guaranteed to only decrement the refcount */
+ xdp_return_frag(netmem, xdp);
+ } else if (unlikely(nr_frags == MAX_SKB_FRAGS)) {
+ return false;
+ } else {
+fill:
+ __skb_fill_netmem_desc_noacc(sinfo, nr_frags++, netmem,
+ offset, size);
+ }
+
+ sinfo->nr_frags = nr_frags;
+ sinfo->xdp_frags_size += size;
+ sinfo->xdp_frags_truesize += truesize;
+
+ return true;
+}
+
+/**
+ * xdp_buff_add_frag - attach frag to &xdp_buff
+ * @xdp: XDP buffer to attach the frag to
+ * @netmem: network memory containing the frag
+ * @offset: offset at which the frag starts
+ * @size: size of the frag
+ * @truesize: total memory size occupied by the frag
+ *
+ * Version of __xdp_buff_add_frag() which takes care of the pfmemalloc bit.
+ *
+ * Return: true on success, false if there's no space for the frag in
+ * the shared info struct.
+ */
+static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
+ u32 offset, u32 size, u32 truesize)
+{
+ if (!__xdp_buff_add_frag(xdp, netmem, offset, size, truesize, true))
+ return false;
+
+ if (unlikely(netmem_is_pfmemalloc(netmem)))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+ if (unlikely(netmem_is_net_iov(netmem)))
+ xdp_buff_set_frag_unreadable(xdp);
+
+ return true;
+}
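+
+/* Usage sketch (illustrative, hypothetical driver code): append an Rx
+ * fragment and drop the packet when the frag table is full:
+ *
+ *	if (!xdp_buff_add_frag(xdp, netmem, off, len, truesize))
+ *		goto drop;
+ */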
+
struct xdp_frame {
void *data;
- u16 len;
- u16 headroom;
+ u32 len;
+ u32 headroom;
u32 metasize; /* uses lower 8-bits */
/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
- * while mem info is valid on remote CPU.
+ * while mem_type is valid on remote CPU.
*/
- struct xdp_mem_info mem;
+ enum xdp_mem_type mem_type:32;
struct net_device *dev_rx; /* used by cpumap */
u32 frame_sz;
u32 flags; /* supported values defined in xdp_buff_flags */
};
-static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
+static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame)
{
return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}
-static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
+static __always_inline u32
+xdp_frame_get_skb_flags(const struct xdp_frame *frame)
{
- return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ return frame->flags;
}
#define XDP_BULK_QUEUE_SIZE 16
struct xdp_frame_bulk {
int count;
- void *xa;
- void *q[XDP_BULK_QUEUE_SIZE];
+ netmem_ref q[XDP_BULK_QUEUE_SIZE];
};
static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
- /* bq->count will be zero'ed when bq->xa gets updated */
- bq->xa = NULL;
+ bq->count = 0;
}
static inline struct skb_shared_info *
-xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+xdp_get_shared_info_from_frame(const struct xdp_frame *frame)
{
void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
@@ -223,33 +350,43 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
}
static inline void
-xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
- unsigned int size, unsigned int truesize,
- bool pfmemalloc)
+xdp_update_skb_frags_info(struct sk_buff *skb, u8 nr_frags,
+ unsigned int size, unsigned int truesize,
+ u32 xdp_flags)
{
- skb_shinfo(skb)->nr_frags = nr_frags;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ sinfo->nr_frags = nr_frags;
+ /*
+ * ``destructor_arg`` is unionized with ``xdp_frags_{,true}size``,
+ * reset it now that these fields are no longer used.
+ */
+ sinfo->destructor_arg = NULL;
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
- skb->pfmemalloc |= pfmemalloc;
+ skb->pfmemalloc |= !!(xdp_flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ skb->unreadable |= !!(xdp_flags & XDP_FLAGS_FRAGS_UNREADABLE);
}
/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
+struct sk_buff *xdp_build_skb_from_buff(const struct xdp_buff *xdp);
+struct sk_buff *xdp_build_skb_from_zc(struct xdp_buff *xdp);
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
-int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);
static inline
-void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
+void xdp_convert_frame_to_buff(const struct xdp_frame *frame,
+ struct xdp_buff *xdp)
{
xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
xdp->data = frame->data;
@@ -260,7 +397,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
}
static inline
-int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+int xdp_update_frame_from_buff(const struct xdp_buff *xdp,
struct xdp_frame *xdp_frame)
{
int metasize, headroom;
@@ -302,24 +439,33 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
return NULL;
- /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
- xdp_frame->mem = xdp->rxq->mem;
+ /* rxq only valid until napi_schedule ends, convert to xdp_mem_type */
+ xdp_frame->mem_type = xdp->rxq->mem.type;
return xdp_frame;
}
-void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
- struct xdp_buff *xdp);
+void __xdp_return(netmem_ref netmem, enum xdp_mem_type mem_type,
+ bool napi_direct, struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
-void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq);
-static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
+static inline void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
- struct skb_shared_info *sinfo;
+ if (unlikely(!bq->count))
+ return;
+
+ page_pool_put_netmem_bulk(bq->q, bq->count);
+ bq->count = 0;
+}
+
+static __always_inline unsigned int
+xdp_get_frame_len(const struct xdp_frame *xdpf)
+{
+ const struct skb_shared_info *sinfo;
unsigned int len = xdpf->len;
if (likely(!xdp_frame_has_frags(xdpf)))
@@ -351,6 +497,38 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);
+int xdp_reg_page_pool(struct page_pool *pool);
+void xdp_unreg_page_pool(const struct page_pool *pool);
+void xdp_rxq_info_attach_page_pool(struct xdp_rxq_info *xdp_rxq,
+ const struct page_pool *pool);
+
+/**
+ * xdp_rxq_info_attach_mem_model - attach registered mem info to RxQ info
+ * @xdp_rxq: XDP RxQ info to attach the memory info to
+ * @mem: already registered memory info
+ *
+ * If the driver registers its memory providers manually, it must use this
+ * function instead of xdp_rxq_info_reg_mem_model().
+ */
+static inline void
+xdp_rxq_info_attach_mem_model(struct xdp_rxq_info *xdp_rxq,
+ const struct xdp_mem_info *mem)
+{
+ xdp_rxq->mem = *mem;
+}
+
+/**
+ * xdp_rxq_info_detach_mem_model - detach registered mem info from RxQ info
+ * @xdp_rxq: XDP RxQ info to detach the memory info from
+ *
+ * If the driver registers its memory providers manually and then attaches it
+ * via xdp_rxq_info_attach_mem_model(), it must call this function before
+ * xdp_rxq_info_unreg().
+ */
+static inline void xdp_rxq_info_detach_mem_model(struct xdp_rxq_info *xdp_rxq)
+{
+ xdp_rxq->mem = (struct xdp_mem_info){ };
+}
/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.
@@ -477,8 +655,12 @@ struct xdp_metadata_ops {
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
void xdp_set_features_flag(struct net_device *dev, xdp_features_t val);
+void xdp_set_features_flag_locked(struct net_device *dev, xdp_features_t val);
void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg);
+void xdp_features_set_redirect_target_locked(struct net_device *dev,
+ bool support_sg);
void xdp_features_clear_redirect_target(struct net_device *dev);
+void xdp_features_clear_redirect_target_locked(struct net_device *dev);
#else
static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; }
static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; }
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 3d54de168a6d..23e8861e8b25 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -71,9 +71,6 @@ struct xdp_sock {
*/
u32 tx_budget_spent;
- /* Protects generic receive. */
- spinlock_t rx_lock;
-
/* Statistics */
u64 rx_dropped;
u64 rx_queue_full;
@@ -87,6 +84,7 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ u32 max_tx_budget;
/* Protects multiple processes in the control path */
struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
@@ -110,18 +108,24 @@ struct xdp_sock {
* indicates position where checksumming should start.
* csum_offset indicates position where checksum should be stored.
*
+ * void (*tmo_request_launch_time)(u64 launch_time, void *priv)
+ * Called when AF_XDP frame requested launch time HW offload support.
+ * launch_time indicates the PTP time at which the device can schedule the
+ * packet for transmission.
*/
struct xsk_tx_metadata_ops {
void (*tmo_request_timestamp)(void *priv);
u64 (*tmo_fill_timestamp)(void *priv);
void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
+ void (*tmo_request_launch_time)(u64 launch_time, void *priv);
};
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
-void __xsk_map_flush(void);
+void __xsk_map_flush(struct list_head *flush_list);
+INDIRECT_CALLABLE_DECLARE(void xsk_destruct_skb(struct sk_buff *));
/**
* xsk_tx_metadata_to_compl - Save enough relevant metadata information
@@ -162,6 +166,11 @@ static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
if (!meta)
return;
+ if (ops->tmo_request_launch_time)
+ if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
+ ops->tmo_request_launch_time(meta->request.launch_time,
+ priv);
+
if (ops->tmo_request_timestamp)
if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
ops->tmo_request_timestamp(priv);
@@ -206,9 +215,15 @@ static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
return -EOPNOTSUPP;
}
-static inline void __xsk_map_flush(void)
+static inline void __xsk_map_flush(struct list_head *flush_list)
+{
+}
+
+#ifdef CONFIG_MITIGATION_RETPOLINE
+static inline void xsk_destruct_skb(struct sk_buff *skb)
{
}
+#endif
static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
struct xsk_tx_metadata_compl *compl)
@@ -228,14 +243,4 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
}
#endif /* CONFIG_XDP_SOCKETS */
-
-#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
-bool xsk_map_check_flush(void);
-#else
-static inline bool xsk_map_check_flush(void)
-{
- return false;
-}
-#endif
-
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 0a5dca2b2b3f..242e34f771cc 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -12,6 +12,10 @@
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+#define NETDEV_XDP_ACT_XSK (NETDEV_XDP_ACT_BASIC | \
+ NETDEV_XDP_ACT_REDIRECT | \
+ NETDEV_XDP_ACT_XSK_ZEROCOPY)
+
struct xsk_cb_desc {
void *src;
u8 off;
@@ -59,15 +63,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
xp_fill_cb(pool, desc);
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- return pool->heads[0].xdp.rxq->napi_id;
-#else
- return 0;
-#endif
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -101,7 +96,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
-static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
return !xp_mb_desc(desc);
}
@@ -126,8 +121,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
if (likely(!xdp_buff_has_frags(xdp)))
goto out;
- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
- list_del(&pos->xskb_list_node);
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ list_del(&pos->list_node);
xp_free(pos);
}
@@ -136,34 +131,54 @@ out:
xp_free(xskb);
}
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+ struct xdp_buff *xdp)
{
- struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+ const void *data = xdp->data;
+ struct xdp_buff_xsk *frag;
+
+ if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
+ offset_in_page(data), xdp->data_end - data,
+ xdp->frame_sz, false))
+ return false;
+
+ frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+ list_add_tail(&frag->list_node, &frag->pool->xskb_list);
- list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
+ return true;
}
-static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
struct xdp_buff *ret = NULL;
struct xdp_buff_xsk *frag;
frag = list_first_entry_or_null(&xskb->pool->xskb_list,
- struct xdp_buff_xsk, xskb_list_node);
+ struct xdp_buff_xsk, list_node);
if (frag) {
- list_del(&frag->xskb_list_node);
+ list_del(&frag->list_node);
ret = &frag->xdp;
}
return ret;
}
-static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
- struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
- list_del(&xskb->xskb_list_node);
+ list_del(&xskb->list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
+{
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+ list_node);
+ return &frag->xdp;
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
@@ -172,7 +187,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
struct xdp_buff_xsk *frag;
frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
- xskb_list_node);
+ list_node);
return &frag->xdp;
}
@@ -195,30 +210,56 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return xp_raw_get_data(pool, addr);
}
+/**
+ * xsk_buff_raw_get_ctx - get &xdp_desc context
+ * @pool: XSk buff pool desc address belongs to
+ * @addr: desc address (from userspace)
+ *
+ * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for
+ * details.
+ *
+ * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata
+ * pointer, if it is present and valid (initialized to %NULL otherwise).
+ */
+static inline struct xdp_desc_ctx
+xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
+{
+ return xp_raw_get_ctx(pool, addr);
+}
+
#define XDP_TXMD_FLAGS_VALID ( \
XDP_TXMD_FLAGS_TIMESTAMP | \
XDP_TXMD_FLAGS_CHECKSUM | \
+ XDP_TXMD_FLAGS_LAUNCH_TIME | \
0)
-static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+static inline bool
+xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}
-static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+static inline struct xsk_tx_metadata *
+__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
{
struct xsk_tx_metadata *meta;
if (!pool->tx_metadata_len)
return NULL;
- meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
+ meta = data - pool->tx_metadata_len;
if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
return NULL; /* no way to signal the error to the user */
return meta;
}
+static inline struct xsk_tx_metadata *
+xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr));
+}
+
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -306,11 +347,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
{
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
- return 0;
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -337,7 +373,7 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
-static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
return false;
}
@@ -356,17 +392,24 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+static inline bool xsk_buff_add_frag(struct xdp_buff *head,
+ struct xdp_buff *xdp)
{
+ return false;
}
-static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
return NULL;
}
-static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
{
+ return NULL;
}
static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
@@ -389,12 +432,25 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return NULL;
}
+static inline struct xdp_desc_ctx
+xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr)
+{
+ return (struct xdp_desc_ctx){ };
+}
+
static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
return false;
}
-static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+static inline struct xsk_tx_metadata *
+__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data)
+{
+ return NULL;
+}
+
+static inline struct xsk_tx_metadata *
+xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
return NULL;
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 77ebf5bcf0b9..0a14daaa5dd4 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -19,6 +19,7 @@
#include <net/sock.h>
#include <net/dst.h>
+#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ipv6.h>
@@ -37,6 +38,7 @@
#define XFRM_PROTO_COMP 108
#define XFRM_PROTO_IPIP 4
#define XFRM_PROTO_IPV6 41
+#define XFRM_PROTO_IPTFS IPPROTO_AGGFRAG
#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
@@ -67,27 +69,27 @@
- instance of a transformer, struct xfrm_state (=SA)
- template to clone xfrm_state, struct xfrm_tmpl
- SPD is plain linear list of xfrm_policy rules, ordered by priority.
+   SPD is organized as a hash table (for policies that meet the minimum address
+   prefix length setting, net->xfrm.policy_hthresh). Other policies are stored
+   in lists, sorted into an rbtree ordered by destination and source address
+   networks. See net/xfrm/xfrm_policy.c for details.
+
(To be compatible with existing pfkeyv2 implementations,
many rules with priority of 0x7fffffff are allowed to exist and
such rules are ordered in an unpredictable way, thanks to bsd folks.)
- Lookup is plain linear search until the first match with selector.
-
If "action" is "block", then we prohibit the flow, otherwise:
if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
policy entry has list of up to XFRM_MAX_DEPTH transformations,
described by templates xfrm_tmpl. Each template is resolved
to a complete xfrm_state (see below) and we pack bundle of transformations
- to a dst_entry returned to requestor.
+ to a dst_entry returned to requester.
dst -. xfrm .-> xfrm_state #1
|---. child .-> dst -. xfrm .-> xfrm_state #2
|---. child .-> dst -. xfrm .-> xfrm_state #3
|---. child .-> NULL
- Bundles are cached at xrfm_policy struct (field ->bundles).
-
Resolution of xfrm_tmpl
-----------------------
@@ -145,8 +147,19 @@ enum {
};
struct xfrm_dev_offload {
+ /* The device for this offload.
+ * Device drivers should not use this directly, as that will prevent
+	 * them from working with a bonding device. Instead, the device passed
+ * to the add/delete callbacks should be used.
+ */
struct net_device *dev;
netdevice_tracker dev_tracker;
+ /* This is a private pointer used by the bonding driver (and eventually
+ * should be moved there). Device drivers should not use it.
+ * Protected by xfrm_state.lock AND bond.ipsec_lock in most cases,
+ * except in the .xdo_dev_state_del() flow, where only xfrm_state.lock
+ * is held.
+ */
struct net_device *real_dev;
unsigned long offload_handle;
u8 dir : 2;
@@ -178,13 +191,19 @@ struct xfrm_state {
struct hlist_node gclist;
struct hlist_node bydst;
};
- struct hlist_node bysrc;
+ union {
+ struct hlist_node dev_gclist;
+ struct hlist_node bysrc;
+ };
struct hlist_node byspi;
struct hlist_node byseq;
+ struct hlist_node state_cache;
+ struct hlist_node state_cache_input;
refcount_t refcnt;
spinlock_t lock;
+ u32 pcpu_num;
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
@@ -206,6 +225,7 @@ struct xfrm_state {
u16 family;
xfrm_address_t saddr;
int header_len;
+ int enc_hdr_len;
int trailer_len;
u32 extra_flags;
struct xfrm_mark smark;
@@ -227,7 +247,10 @@ struct xfrm_state {
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
- struct sock __rcu *encap_sk;
+
+ /* NAT keepalive */
+ u32 nat_keepalive_interval; /* seconds */
+ time64_t nat_keepalive_expiration;
/* Data for care-of address */
xfrm_address_t *coaddr;
@@ -292,6 +315,9 @@ struct xfrm_state {
* interpreted by xfrm_type methods. */
void *data;
u8 dir;
+
+ const struct xfrm_mode_cbs *mode_cbs;
+ void *mode_data;
};
static inline struct net *xs_net(struct xfrm_state *x)
@@ -342,20 +368,25 @@ struct xfrm_if_cb {
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);
+struct xfrm_dst_lookup_params {
+ struct net *net;
+ dscp_t dscp;
+ int oif;
+ xfrm_address_t *saddr;
+ xfrm_address_t *daddr;
+ u32 mark;
+ __u8 ipproto;
+ union flowi_uli uli;
+};
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
struct dst_ops *dst_ops;
- struct dst_entry *(*dst_lookup)(struct net *net,
- int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- u32 mark);
- int (*get_saddr)(struct net *net, int oif,
- xfrm_address_t *saddr,
- xfrm_address_t *daddr,
- u32 mark);
+ struct dst_entry *(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
+ int (*get_saddr)(xfrm_address_t *saddr,
+ const struct xfrm_dst_lookup_params *params);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
const struct flowi *fl);
@@ -410,7 +441,6 @@ int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
void xfrm_flush_gc(void);
-void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
struct module *owner;
@@ -443,6 +473,54 @@ struct xfrm_type_offload {
int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+void xfrm_set_type_offload(struct xfrm_state *x, bool try_load);
+static inline void xfrm_unset_type_offload(struct xfrm_state *x)
+{
+ if (!x->type_offload)
+ return;
+
+ module_put(x->type_offload->owner);
+ x->type_offload = NULL;
+}
+
+/**
+ * struct xfrm_mode_cbs - XFRM mode callbacks
+ * @owner: module owner or NULL
+ * @init_state: Add/init mode specific state in `xfrm_state *x`
+ * @clone_state: Copy mode specific values from `orig` to new state `x`
+ * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
+ * @user_init: Process mode specific netlink attributes from user
+ * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
+ * @sa_len: Return space required to store mode specific netlink attributes
+ * @get_inner_mtu: Return available payload space after removing encap overhead
+ * @input: Process received packet from SA using mode
+ * @output: Output given packet using mode
+ * @prepare_output: Add mode specific encapsulation to packet in skb. On return
+ * `transport_header` should point at ESP header, `network_header` should
+ *    point at the outer IP header and `mac_header` should point at the
+ * protocol/nexthdr field of the outer IP.
+ *
+ * One should examine and understand the specific uses of these callbacks in
+ * xfrm for further detail on how and when these functions are called. RTSL.
+ */
+struct xfrm_mode_cbs {
+ struct module *owner;
+ int (*init_state)(struct xfrm_state *x);
+ int (*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
+ void (*destroy_state)(struct xfrm_state *x);
+ int (*user_init)(struct net *net, struct xfrm_state *x,
+ struct nlattr **attrs,
+ struct netlink_ext_ack *extack);
+ int (*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
+ unsigned int (*sa_len)(const struct xfrm_state *x);
+ u32 (*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
+ int (*input)(struct xfrm_state *x, struct sk_buff *skb);
+ int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
+ int (*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
+};
+
+int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
+void xfrm_unregister_mode_cbs(u8 mode);
static inline int xfrm_af2proto(unsigned int family)
{
@@ -458,7 +536,8 @@ static inline int xfrm_af2proto(unsigned int family)
static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
- if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
+ if ((x->sel.family != AF_UNSPEC) ||
+ (ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
(ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
return &x->inner_mode;
else
@@ -519,11 +598,44 @@ struct xfrm_policy_queue {
unsigned long timeout;
};
+/**
+ * struct xfrm_policy - xfrm policy
+ * @xp_net: network namespace the policy lives in
+ * @bydst: hlist node for SPD hash table or rbtree list
+ * @byidx: hlist node for index hash table
+ * @state_cache_list: hlist head for policy cached xfrm states
+ * @lock: serialize changes to policy structure members
+ * @refcnt: reference count, freed once it reaches 0
+ * @pos: kernel internal tie-breaker to determine age of policy
+ * @timer: policy expiration timer
+ * @genid: generation, used to invalidate old policies
+ * @priority: priority, set by userspace
+ * @index: policy index (autogenerated)
+ * @if_id: virtual xfrm interface id
+ * @mark: packet mark
+ * @selector: selector
+ * @lft: lifetime configuration data
+ * @curlft: lifetime state
+ * @walk: list head on pernet policy list
+ * @polq: queue to hold packets while an acquire operation is in progress
+ * @bydst_reinsert: policy tree node needs to be merged
+ * @type: XFRM_POLICY_TYPE_MAIN or _SUB
+ * @action: XFRM_POLICY_ALLOW or _BLOCK
+ * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
+ * @xfrm_nr: number of used templates in @xfrm_vec
+ * @family: protocol family
+ * @security: SELinux security label
+ * @xfrm_vec: array of templates to resolve state
+ * @rcu: rcu head, used to defer memory release
+ * @xdo: hardware offload state
+ */
struct xfrm_policy {
possible_net_t xp_net;
struct hlist_node bydst;
struct hlist_node byidx;
+ struct hlist_head state_cache_list;
+
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
@@ -548,7 +660,6 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
- struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
struct xfrm_dev_offload xdo;
@@ -805,7 +916,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
xfrm_pol_put(pols[i]);
}
-void __xfrm_state_destroy(struct xfrm_state *, bool);
+void __xfrm_state_destroy(struct xfrm_state *);
static inline void __xfrm_state_put(struct xfrm_state *x)
{
@@ -815,13 +926,7 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
static inline void xfrm_state_put(struct xfrm_state *x)
{
if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x, false);
-}
-
-static inline void xfrm_state_put_sync(struct xfrm_state *x)
-{
- if (refcount_dec_and_test(&x->refcnt))
- __xfrm_state_destroy(x, true);
+ __xfrm_state_destroy(x);
}
static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1009,7 +1114,7 @@ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
struct xfrm_if_parms {
int link; /* ifindex of underlying L2 interface */
- u32 if_id; /* interface identifyer */
+ u32 if_id; /* interface identifier */
bool collect_md;
};
@@ -1176,9 +1281,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (xo) {
x = xfrm_input_state(skb);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
- return (xo->flags & CRYPTO_DONE) &&
- (xo->status & CRYPTO_SUCCESS);
+ if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ bool check = (xo->flags & CRYPTO_DONE) &&
+ (xo->status & CRYPTO_SUCCESS);
+
+			/* The packets here are plain ones and the secpath was
+			 * needed only to indicate that hardware has already
+			 * handled them, so nothing more needs to be done.
+			 *
+			 * Consume the secpath which was set by drivers.
+ */
+ secpath_reset(skb);
+ return check;
+ }
}
return __xfrm_check_nopolicy(net, skb, dir) ||
@@ -1588,7 +1703,7 @@ void xfrm_state_update_stats(struct net *net);
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
struct xfrm_dev_offload *xdo = &x->xso;
- struct net_device *dev = xdo->dev;
+ struct net_device *dev = READ_ONCE(xdo->dev);
if (dev && dev->xfrmdev_ops &&
dev->xfrmdev_ops->xdo_dev_state_update_stats)
@@ -1604,6 +1719,10 @@ int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
const xfrm_address_t *daddr, __be32 spi,
u8 proto, unsigned short family);
+struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
+ const xfrm_address_t *daddr,
+ __be32 spi, u8 proto,
+ unsigned short family);
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -1643,9 +1762,9 @@ struct xfrmk_spdinfo {
u32 spdhmcnt;
};
-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
bool task_valid);
@@ -1654,8 +1773,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
-int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
- struct netlink_ext_ack *extack);
+int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
@@ -1667,6 +1785,15 @@ int xfrm_trans_queue(struct sk_buff *skb,
struct sk_buff *));
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
+int xfrm4_tunnel_check_size(struct sk_buff *skb);
+#if IS_ENABLED(CONFIG_IPV6)
+int xfrm6_tunnel_check_size(struct sk_buff *skb);
+#else
+static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
+{
+ return -EMSGSIZE;
+}
+#endif
#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
@@ -1728,10 +1855,7 @@ static inline int xfrm_user_policy(struct sock *sk, int optname,
}
#endif
-struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
- const xfrm_address_t *saddr,
- const xfrm_address_t *daddr,
- int family, u32 mark);
+struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
@@ -1758,7 +1882,7 @@ int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
struct netlink_ext_ack *extack);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u32 if_id, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -1773,12 +1897,16 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
- struct xfrm_encap_tmpl *encap);
+ struct xfrm_encap_tmpl *encap,
+ struct net *net,
+ struct xfrm_user_offload *xuo,
+ struct netlink_ext_ack *extack);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k, struct net *net,
struct xfrm_encap_tmpl *encap, u32 if_id,
- struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack,
+ struct xfrm_user_offload *xuo);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
@@ -1946,13 +2074,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
struct xfrm_user_offload *xuo, u8 dir,
struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+void xfrm_dev_state_delete(struct xfrm_state *x);
+void xfrm_dev_state_free(struct xfrm_state *x);
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
struct xfrm_dev_offload *xso = &x->xso;
+ struct net_device *dev = READ_ONCE(xso->dev);
- if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
- xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
+ if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
+ dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
}
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
@@ -1973,28 +2104,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
return false;
}
-static inline void xfrm_dev_state_delete(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
-
- if (xso->dev)
- xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
-}
-
-static inline void xfrm_dev_state_free(struct xfrm_state *x)
-{
- struct xfrm_dev_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
-
- if (dev && dev->xfrmdev_ops) {
- if (dev->xfrmdev_ops->xdo_dev_state_free)
- dev->xfrmdev_ops->xdo_dev_state_free(x);
- xso->dev = NULL;
- xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
- netdev_put(dev, &xso->dev_tracker);
- }
-}
-
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
struct xfrm_dev_offload *xdo = &x->xdo;
@@ -2203,4 +2312,10 @@ static inline int register_xfrm_state_bpf(void)
}
#endif
+int xfrm_nat_keepalive_init(unsigned short family);
+void xfrm_nat_keepalive_fini(unsigned short family);
+int xfrm_nat_keepalive_net_init(struct net *net);
+int xfrm_nat_keepalive_net_fini(struct net *net);
+void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
+
#endif /* _NET_XFRM_H */
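
For context, a minimal sketch of how a mode module might register the new callbacks; the mode value XFRM_MODE_IPTFS is assumed from the IP-TFS series this header prepares for, and the callback bodies are placeholders:

	#include <linux/module.h>
	#include <net/xfrm.h>

	/* Placeholder callbacks; a real mode implements the full set. */
	static int mymode_init_state(struct xfrm_state *x) { return 0; }
	static int mymode_input(struct xfrm_state *x, struct sk_buff *skb)
	{ return -EOPNOTSUPP; }
	static int mymode_output(struct net *net, struct sock *sk,
				 struct sk_buff *skb)
	{ return -EOPNOTSUPP; }

	static const struct xfrm_mode_cbs mymode_cbs = {
		.owner		= THIS_MODULE,
		.init_state	= mymode_init_state,
		.input		= mymode_input,
		.output		= mymode_output,
	};

	static int __init mymode_init(void)
	{
		return xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &mymode_cbs);
	}
	module_init(mymode_init);

	static void __exit mymode_exit(void)
	{
		xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
	}
	module_exit(mymode_exit);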
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index bacb33f1e3e5..92a2358c6ce3 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -28,10 +28,8 @@ struct xdp_buff_xsk {
dma_addr_t dma;
dma_addr_t frame_dma;
struct xsk_buff_pool *pool;
- u64 orig_addr;
- struct list_head free_list_node;
- struct list_head xskb_list_node;
-};
+ struct list_head list_node;
+} __aligned_largest;
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
@@ -55,6 +53,8 @@ struct xsk_buff_pool {
refcount_t users;
struct xdp_umem *umem;
struct work_struct work;
+ /* Protects generic receive in shared and non-shared umem mode. */
+ spinlock_t rx_lock;
struct list_head free_list;
struct list_head xskb_list;
u32 heads_cnt;
@@ -78,17 +78,23 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u32 xdp_zc_max_segs;
u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool unaligned;
bool tx_sw_csum;
void *addrs;
- /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
- * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
- * sockets share a single cq when the same netdev and queue id is shared.
+ /* Mutual exclusion of the completion ring in the SKB mode.
+ * Protect: NAPI TX thread and sendmsg error paths in the SKB
+ * destructor callback.
+ */
+ spinlock_t cq_prod_lock;
+ /* Mutual exclusion of the completion ring in the SKB mode.
+	 * Protect: sockets that share a single cq when the same netdev
+	 * and queue id are shared.
*/
- spinlock_t cq_lock;
+ spinlock_t cq_cached_prod_lock;
struct xdp_buff_xsk *free_heads[];
};
@@ -120,7 +126,6 @@ void xp_free(struct xdp_buff_xsk *xskb);
static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
u64 addr)
{
- xskb->orig_addr = addr;
xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}
@@ -143,6 +148,14 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
+
+struct xdp_desc_ctx {
+ dma_addr_t dma;
+ struct xsk_tx_metadata *meta;
+};
+
+struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr);
+
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
return xskb->dma;
@@ -185,7 +198,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
-static inline bool xp_mb_desc(struct xdp_desc *desc)
+static inline bool xp_mb_desc(const struct xdp_desc *desc)
{
return desc->options & XDP_PKT_CONTD;
}
@@ -222,14 +235,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb)
xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
-static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
+ struct xsk_buff_pool *pool)
{
- u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ u64 orig_addr = xskb->xdp.data - pool->addrs;
+ u64 offset;
+
+ if (!pool->unaligned)
+ return orig_addr;
- offset += xskb->pool->headroom;
- if (!xskb->pool->unaligned)
- return xskb->orig_addr + offset;
- return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+ offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+ offset += pool->headroom;
+ orig_addr -= offset;
+ return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
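
For concreteness, a hedged walk-through of the reworked xp_get_handle() arithmetic, which now derives the original chunk address purely from pointers instead of the removed orig_addr field; example values are invented:

	static u64 example_handle(struct xdp_buff_xsk *xskb,
				  struct xsk_buff_pool *pool)
	{
		/* Say the chunk starts at umem offset 0x4000, pool->headroom
		 * is 256 and the driver pushed data 14 bytes past
		 * data_hard_start.
		 */
		u64 addr = xskb->xdp.data - pool->addrs;	/* 0x4000 + 270 */
		u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start
			     + pool->headroom;			/* 270 */
		u64 chunk = addr - offset;			/* 0x4000 */

		/* Unaligned mode packs the chunk address in the low bits and
		 * the in-chunk offset above XSK_UNALIGNED_BUF_OFFSET_SHIFT.
		 */
		return chunk + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	}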
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 7cf7dbbfa131..89aed99bfeae 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -227,12 +227,8 @@ struct pcmcia_socket {
/* socket drivers must define the resource operations type they use. There
- * are three options:
+ * are two options:
* - pccard_static_ops iomem and ioport areas are assigned statically
- * - pccard_iodyn_ops iomem areas is assigned statically, ioport
- * areas dynamically
- * If this option is selected, use
- * "select PCCARD_IODYN" in Kconfig.
* - pccard_nonstatic_ops iomem and ioport areas are assigned dynamically.
* If this option is selected, use
* "select PCCARD_NONSTATIC" in Kconfig.
@@ -240,13 +236,11 @@ struct pcmcia_socket {
*/
extern struct pccard_resource_ops pccard_static_ops;
#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
-extern struct pccard_resource_ops pccard_iodyn_ops;
extern struct pccard_resource_ops pccard_nonstatic_ops;
#else
/* If PCMCIA is not used, but only CARDBUS, these functions are not used
* at all. Therefore, do not use the large (240K!) rsrc_nonstatic module
*/
-#define pccard_iodyn_ops pccard_static_ops
#define pccard_nonstatic_ops pccard_static_ops
#endif
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 7c47151d5c72..eaecc3c5f772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
-#include <linux/mm.h>
/*
* MCE Extended Error Log trace event
@@ -168,11 +167,25 @@ TRACE_EVENT(mc_event,
* This event is generated when hardware detects an ARM processor error
* has occurred. UEFI 2.6 spec section N.2.4.4.
*/
+#define APEIL "ARM Processor Err Info data len"
+#define APEID "ARM Processor Err Info raw data"
+#define APECIL "ARM Processor Err Context Info data len"
+#define APECID "ARM Processor Err Context Info raw data"
+#define VSEIL "Vendor Specific Err Info data len"
+#define VSEID "Vendor Specific Err Info raw data"
TRACE_EVENT(arm_event,
- TP_PROTO(const struct cper_sec_proc_arm *proc),
+ TP_PROTO(const struct cper_sec_proc_arm *proc,
+ const u8 *pei_err,
+ const u32 pei_len,
+ const u8 *ctx_err,
+ const u32 ctx_len,
+ const u8 *oem,
+ const u32 oem_len,
+ u8 sev,
+ int cpu),
- TP_ARGS(proc),
+ TP_ARGS(proc, pei_err, pei_len, ctx_err, ctx_len, oem, oem_len, sev, cpu),
TP_STRUCT__entry(
__field(u64, mpidr)
@@ -180,6 +193,14 @@ TRACE_EVENT(arm_event,
__field(u32, running_state)
__field(u32, psci_state)
__field(u8, affinity)
+ __field(u32, pei_len)
+ __dynamic_array(u8, pei_buf, pei_len)
+ __field(u32, ctx_len)
+ __dynamic_array(u8, ctx_buf, ctx_len)
+ __field(u32, oem_len)
+ __dynamic_array(u8, oem_buf, oem_len)
+ __field(u8, sev)
+ __field(int, cpu)
),
TP_fast_assign(
@@ -199,12 +220,29 @@ TRACE_EVENT(arm_event,
__entry->running_state = ~0;
__entry->psci_state = ~0;
}
+ __entry->pei_len = pei_len;
+ memcpy(__get_dynamic_array(pei_buf), pei_err, pei_len);
+ __entry->ctx_len = ctx_len;
+ memcpy(__get_dynamic_array(ctx_buf), ctx_err, ctx_len);
+ __entry->oem_len = oem_len;
+ memcpy(__get_dynamic_array(oem_buf), oem, oem_len);
+ __entry->sev = sev;
+ __entry->cpu = cpu;
),
- TP_printk("affinity level: %d; MPIDR: %016llx; MIDR: %016llx; "
- "running state: %d; PSCI state: %d",
+ TP_printk("cpu: %d; error: %d; affinity level: %d; MPIDR: %016llx; MIDR: %016llx; "
+ "running state: %d; PSCI state: %d; "
+ "%s: %d; %s: %s; %s: %d; %s: %s; %s: %d; %s: %s",
+ __entry->cpu,
+ __entry->sev,
__entry->affinity, __entry->mpidr, __entry->midr,
- __entry->running_state, __entry->psci_state)
+ __entry->running_state, __entry->psci_state,
+ APEIL, __entry->pei_len, APEID,
+ __print_hex(__get_dynamic_array(pei_buf), __entry->pei_len),
+ APECIL, __entry->ctx_len, APECID,
+ __print_hex(__get_dynamic_array(ctx_buf), __entry->ctx_len),
+ VSEIL, __entry->oem_len, VSEID,
+ __print_hex(__get_dynamic_array(oem_buf), __entry->oem_len))
);
/*
@@ -252,6 +290,7 @@ TRACE_EVENT(non_standard_event,
__print_hex(__get_dynamic_array(buf), __entry->len))
);
+#ifdef CONFIG_PCIEAER
/*
* PCIe AER Trace event
*
@@ -309,7 +348,7 @@ TRACE_EVENT(aer_event,
__field( u32, status )
__field( u8, severity )
__field( u8, tlp_header_valid)
- __array( u32, tlp_header, 4 )
+ __array( u32, tlp_header, PCIE_STD_MAX_TLP_HEADERLOG)
),
TP_fast_assign(
@@ -318,10 +357,10 @@ TRACE_EVENT(aer_event,
__entry->severity = severity;
__entry->tlp_header_valid = tlp_header_valid;
if (tlp_header_valid) {
- __entry->tlp_header[0] = tlp->dw[0];
- __entry->tlp_header[1] = tlp->dw[1];
- __entry->tlp_header[2] = tlp->dw[2];
- __entry->tlp_header[3] = tlp->dw[3];
+ int i;
+
+ for (i = 0; i < PCIE_STD_MAX_TLP_HEADERLOG; i++)
+ __entry->tlp_header[i] = tlp->dw[i];
}
),
@@ -334,94 +373,10 @@ TRACE_EVENT(aer_event,
__print_flags(__entry->status, "|", aer_correctable_errors) :
__print_flags(__entry->status, "|", aer_uncorrectable_errors),
__entry->tlp_header_valid ?
- __print_array(__entry->tlp_header, 4, 4) :
+ __print_array(__entry->tlp_header, PCIE_STD_MAX_TLP_HEADERLOG, 4) :
"Not available")
);
-
-/*
- * memory-failure recovery action result event
- *
- * unsigned long pfn - Page Frame Number of the corrupted page
- * int type - Page types of the corrupted page
- * int result - Result of recovery action
- */
-
-#ifdef CONFIG_MEMORY_FAILURE
-#define MF_ACTION_RESULT \
- EM ( MF_IGNORED, "Ignored" ) \
- EM ( MF_FAILED, "Failed" ) \
- EM ( MF_DELAYED, "Delayed" ) \
- EMe ( MF_RECOVERED, "Recovered" )
-
-#define MF_PAGE_TYPE \
- EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
- EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
- EM ( MF_MSG_SLAB, "kernel slab page" ) \
- EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
- EM ( MF_MSG_HUGE, "huge page" ) \
- EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
- EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
- EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
- EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
- EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
- EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
- EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
- EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
- EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
- EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
- EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
- EM ( MF_MSG_BUDDY, "free buddy page" ) \
- EM ( MF_MSG_DAX, "dax page" ) \
- EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
- EMe ( MF_MSG_UNKNOWN, "unknown page" )
-
-/*
- * First define the enums in MM_ACTION_RESULT to be exported to userspace
- * via TRACE_DEFINE_ENUM().
- */
-#undef EM
-#undef EMe
-#define EM(a, b) TRACE_DEFINE_ENUM(a);
-#define EMe(a, b) TRACE_DEFINE_ENUM(a);
-
-MF_ACTION_RESULT
-MF_PAGE_TYPE
-
-/*
- * Now redefine the EM() and EMe() macros to map the enums to the strings
- * that will be printed in the output.
- */
-#undef EM
-#undef EMe
-#define EM(a, b) { a, b },
-#define EMe(a, b) { a, b }
-
-TRACE_EVENT(memory_failure_event,
- TP_PROTO(unsigned long pfn,
- int type,
- int result),
-
- TP_ARGS(pfn, type, result),
-
- TP_STRUCT__entry(
- __field(unsigned long, pfn)
- __field(int, type)
- __field(int, result)
- ),
-
- TP_fast_assign(
- __entry->pfn = pfn;
- __entry->type = type;
- __entry->result = result;
- ),
-
- TP_printk("pfn %#lx: recovery action for %s: %s",
- __entry->pfn,
- __print_symbolic(__entry->type, MF_PAGE_TYPE),
- __print_symbolic(__entry->result, MF_ACTION_RESULT)
- )
-);
-#endif /* CONFIG_MEMORY_FAILURE */
+#endif /* CONFIG_PCIEAER */
#endif /* _TRACE_HW_EVENT_MC_H */
/* This part must be outside protection */
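
A hedged sketch of what a call site for the extended arm_event trace point might now look like (the real caller is the APEI/GHES ARM error handler; extracting the buffers from the CPER record is elided):

	trace_arm_event(proc,
			pei_err, pei_len,	/* Processor Err Info */
			ctx_err, ctx_len,	/* Context Info */
			oem, oem_len,		/* Vendor Specific Info */
			sev, cpu);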
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 226ae3702d8a..2bf09b594d10 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -64,22 +64,6 @@ int ib_find_cached_pkey(struct ib_device *device,
u16 *index);
/**
- * ib_find_exact_cached_pkey - Returns the PKey table index where a specified
- * PKey value occurs. Comparison uses the FULL 16 bits (incl membership bit)
- * @device: The device to query.
- * @port_num: The port number of the device to search for the PKey.
- * @pkey: The PKey value to search for.
- * @index: The index into the cached PKey table where the PKey was found.
- *
- * ib_find_exact_cached_pkey() searches the specified PKey table in
- * the local software cache.
- */
-int ib_find_exact_cached_pkey(struct ib_device *device,
- u32 port_num,
- u16 pkey,
- u16 *index);
-
-/**
* ib_get_cached_lmc - Returns a cached lmc table entry
* @device: The device to query.
* @port_num: The port number of the device to query.
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index a2ac62b4a6cf..4808a355de41 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -271,7 +271,7 @@ struct ib_cm_event {
#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
/**
- * ib_cm_handler - User-defined callback to process communication events.
+ * typedef ib_cm_handler - User-defined callback to process communication events.
* @cm_id: Communication identifier associated with the reported event.
* @event: Information about the communication event.
*
@@ -480,23 +480,12 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len);
-#define IB_CM_MRA_FLAG_DELAY 0x80 /* Send MRA only after a duplicate msg */
-
/**
- * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
- * message.
+ * ib_prepare_cm_mra - Prepares to send a message receipt acknowledgment to a
+ * connection message in case duplicates are received.
* @cm_id: Connection identifier associated with the connection message.
- * @service_timeout: The lower 5-bits specify the maximum time required for
- * the sender to reply to the connection message. The upper 3-bits
- * specify additional control flags.
- * @private_data: Optional user-defined private data sent with the
- * message receipt acknowledgement.
- * @private_data_len: Size of the private data buffer, in bytes.
*/
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len);
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id);
/**
* ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index 8ae07c0ecdf7..1c4c1a69937a 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -7,7 +7,7 @@
#define IB_HDRS_H
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <rdma/ib_verbs.h>
#define IB_SEQ_NAK (3 << 29)
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 3f1b58d8b4bf..8bd0e1eb393b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -48,6 +48,7 @@
#define IB_MGMT_METHOD_REPORT 0x06
#define IB_MGMT_METHOD_REPORT_RESP 0x86
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
+#define IB_MGMT_METHOD_GET_TABLE 0x12
#define IB_MGMT_METHOD_RESP 0x80
#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 1838869aad28..b179e464e3d1 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -22,7 +22,4 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
struct sa_path_rec *src);
-void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
- struct ib_user_path_rec *src);
-
#endif /* IB_USER_MARSHALL_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b8c56d7dc35d..8266fab826a7 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -283,7 +283,4 @@ int ib_ud_header_init(int payload_bytes,
int ib_ud_header_pack(struct ib_ud_header *header,
void *buf);
-int ib_ud_header_unpack(void *buf,
- struct ib_ud_header *header);
-
#endif /* IB_PACK_H */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index b46353fc53bf..95e8924ad563 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -189,6 +189,20 @@ struct sa_path_rec {
u32 flags;
};
+struct sa_service_rec {
+ __be64 id;
+ __u8 gid[16];
+ __be16 pkey;
+ __u8 reserved[2];
+ __be32 lease;
+ __u8 key[16];
+ __u8 name[64];
+ __u8 data_8[16];
+ __be16 data_16[8];
+ __be32 data_32[4];
+ __be64 data_64[2];
+};
+
static inline enum ib_gid_type
sa_conv_pathrec_to_gid_type(struct sa_path_rec *rec)
{
@@ -417,6 +431,17 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
unsigned int num_prs, void *context),
void *context, struct ib_sa_query **query);
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query);
+
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
ib_sa_comp_mask comp_mask;
@@ -509,6 +534,18 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute);
/**
+ * ib_sa_pack_service - Convert a service record from struct ib_sa_service_rec
+ * to IB MAD wire format.
+ */
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute);
+
+/**
+ * ib_sa_unpack_service - Convert a service record from MAD format to struct
+ * ib_sa_service_rec.
+ */
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec);
+
+/**
* ib_sa_unpack_path - Convert a path record from MAD format to struct
* ib_sa_path_rec.
*/
diff --git a/include/rdma/ib_ucaps.h b/include/rdma/ib_ucaps.h
new file mode 100644
index 000000000000..d9f96be3a553
--- /dev/null
+++ b/include/rdma/ib_ucaps.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _IB_UCAPS_H_
+#define _IB_UCAPS_H_
+
+#define UCAP_ENABLED(ucaps, type) (!!((ucaps) & (1U << (type))))
+
+enum rdma_user_cap {
+ RDMA_UCAP_MLX5_CTRL_LOCAL,
+ RDMA_UCAP_MLX5_CTRL_OTHER_VHCA,
+ RDMA_UCAP_MAX
+};
+
+void ib_cleanup_ucaps(void);
+int ib_get_ucaps(int *fds, int fd_count, uint64_t *idx_mask);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int ib_create_ucap(enum rdma_user_cap type);
+void ib_remove_ucap(enum rdma_user_cap type);
+#else
+static inline int ib_create_ucap(enum rdma_user_cap type)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ib_remove_ucap(enum rdma_user_cap type) {}
+#endif /* CONFIG_INFINIBAND_USER_ACCESS */
+
+#endif /* _IB_UCAPS_H_ */
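
For illustration, a hedged sketch of how a driver (mlx5 in this series) might create a user capability and later gate a privileged operation on it; error handling is trimmed and the surrounding context is invented:

	/* At driver init: create the char device backing the cap. */
	ret = ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
	if (ret)
		return ret;

	/* In a verb handler: test the caps the user context was opened
	 * with (stored in the new ib_ucontext.enabled_caps field).
	 */
	if (!UCAP_ENABLED(ucontext->enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL))
		return -EPERM;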
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 565a85044541..0a8e092c0ea8 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -38,6 +38,7 @@ struct ib_umem_dmabuf {
unsigned long last_sg_trim;
void *private;
u8 pinned : 1;
+ u8 revoked : 1;
};
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
@@ -51,11 +52,15 @@ static inline int ib_umem_offset(struct ib_umem *umem)
return umem->address & ~PAGE_MASK;
}
+static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
+{
+ return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
+}
+
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
unsigned long pgsz)
{
- return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
- (pgsz - 1);
+ return ib_umem_start_dma_addr(umem) & (pgsz - 1);
}
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
@@ -134,14 +139,27 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
unsigned long pgsz_bitmap,
u64 pgoff_bitmask)
{
- struct scatterlist *sg = umem->sgt_append.sgt.sgl;
dma_addr_t dma_addr;
- dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+ dma_addr = ib_umem_start_dma_addr(umem);
return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
dma_addr & pgoff_bitmask);
}
+static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
+{
+ dma_addr_t dma_addr;
+ unsigned long pgsz;
+
+ /*
+ * Select the smallest aligned page that can contain the whole umem if
+	 * it were contiguous.
+ */
+ dma_addr = ib_umem_start_dma_addr(umem);
+ pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
+ return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
+}
+
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
unsigned long offset, size_t size,
int fd, int access,
@@ -150,9 +168,15 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
unsigned long offset,
size_t size, int fd,
int access);
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
+void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -196,12 +220,23 @@ ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
+ struct device *dma_device,
+ unsigned long offset, size_t size,
+ int fd, int access)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
+static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
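
To make the bit trick in ib_umem_is_contiguous() concrete, a hedged worked example with invented values:

	dma_addr_t dma_addr = 0x2000;
	size_t length = 0x3000;

	/* First byte 0x2000, last byte 0x4fff: 0x2000 ^ 0x4fff = 0x6fff,
	 * rounded up to 0x8000 -- the smallest aligned power-of-two page
	 * that could contain the whole umem. ib_umem_find_best_pgoff()
	 * then reports whether a single DMA block of that size really
	 * covers it, i.e. whether the umem is contiguous.
	 */
	unsigned long pgsz =
		roundup_pow_of_two((dma_addr ^ (length - 1 + dma_addr)) + 1);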
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 0844c1d05ac6..2a24bf791c10 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -8,23 +8,17 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
+#include <linux/hmm-dma.h>
struct ib_umem_odp {
struct ib_umem umem;
struct mmu_interval_notifier notifier;
struct pid *tgid;
- /* An array of the pfns included in the on-demand paging umem. */
- unsigned long *pfn_list;
+ struct hmm_dma_map map;
/*
- * An array with DMA addresses mapped for pfns in pfn_list.
- * The lower two bits designate access permissions.
- * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
- */
- dma_addr_t *dma_list;
- /*
- * The umem_mutex protects the page_list and dma_list fields of an ODP
+ * The umem_mutex protects the page_list field of an ODP
* umem, allowing only a single thread to map/unmap pages. The mutex
* also protects access to the mmu notifier counters.
*/
@@ -67,19 +61,6 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
umem_odp->page_shift;
}
-/*
- * The lower 2 bits of the DMA address signal the R/W permissions for
- * the entry. To upgrade the permissions, provide the appropriate
- * bitmask to the map_dma_pages function.
- *
- * Be aware that upgrading a mapped address might result in change of
- * the DMA address for the page.
- */
-#define ODP_READ_ALLOWED_BIT (1<<0ULL)
-#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
-
-#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_umem_odp *
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 477bf9dd5e71..6aad66bc5dd7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -42,6 +42,7 @@
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
+#include <linux/pci-tph.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -59,9 +60,6 @@ extern struct workqueue_struct *ib_comp_unbound_wq;
struct ib_ucq_object;
-__printf(3, 4) __cold
-void ibdev_printk(const char *level, const struct ib_device *ibdev,
- const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
@@ -317,17 +315,19 @@ enum ib_atomic_cap {
};
enum ib_odp_general_cap_bits {
- IB_ODP_SUPPORT = 1 << 0,
- IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
+ IB_ODP_SUPPORT = IB_UVERBS_ODP_SUPPORT,
+ IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
};
enum ib_odp_transport_cap_bits {
- IB_ODP_SUPPORT_SEND = 1 << 0,
- IB_ODP_SUPPORT_RECV = 1 << 1,
- IB_ODP_SUPPORT_WRITE = 1 << 2,
- IB_ODP_SUPPORT_READ = 1 << 3,
- IB_ODP_SUPPORT_ATOMIC = 1 << 4,
- IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
+ IB_ODP_SUPPORT_SEND = IB_UVERBS_ODP_SUPPORT_SEND,
+ IB_ODP_SUPPORT_RECV = IB_UVERBS_ODP_SUPPORT_RECV,
+ IB_ODP_SUPPORT_WRITE = IB_UVERBS_ODP_SUPPORT_WRITE,
+ IB_ODP_SUPPORT_READ = IB_UVERBS_ODP_SUPPORT_READ,
+ IB_ODP_SUPPORT_ATOMIC = IB_UVERBS_ODP_SUPPORT_ATOMIC,
+ IB_ODP_SUPPORT_SRQ_RECV = IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
+ IB_ODP_SUPPORT_FLUSH = IB_UVERBS_ODP_SUPPORT_FLUSH,
+ IB_ODP_SUPPORT_ATOMIC_WRITE = IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
};
struct ib_odp_caps {
@@ -522,6 +522,23 @@ enum ib_port_state {
IB_PORT_ACTIVE_DEFER = 5
};
+static inline const char *__attribute_const__
+ib_port_state_to_str(enum ib_port_state state)
+{
+ const char * const states[] = {
+ [IB_PORT_NOP] = "NOP",
+ [IB_PORT_DOWN] = "DOWN",
+ [IB_PORT_INIT] = "INIT",
+ [IB_PORT_ARMED] = "ARMED",
+ [IB_PORT_ACTIVE] = "ACTIVE",
+ [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
+ };
+
+ if (state < ARRAY_SIZE(states))
+ return states[state];
+ return "UNKNOWN";
+}
+
enum ib_port_phys_state {
IB_PORT_PHYS_STATE_SLEEP = 1,
IB_PORT_PHYS_STATE_POLLING = 2,
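
A one-line hedged usage sketch for the new helper; the logging context is invented:

	ibdev_info(ibdev, "port %u is now %s\n", port_num,
		   ib_port_state_to_str(attr.state));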
@@ -569,10 +586,10 @@ enum ib_stat_flag {
};
/**
- * struct rdma_stat_desc
- * @name - The name of the counter
- * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
- * @priv - Driver private information; Core code should not use
+ * struct rdma_stat_desc - description of one rdma stat/counter
+ * @name: The name of the counter
+ * @flags: Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
+ * @priv: Driver private information; Core code should not use
*/
struct rdma_stat_desc {
const char *name;
@@ -581,24 +598,24 @@ struct rdma_stat_desc {
};
/**
- * struct rdma_hw_stats
- * @lock - Mutex to protect parallel write access to lifespan and values
+ * struct rdma_hw_stats - collection of hardware stats and their management
+ * @lock: Mutex to protect parallel write access to lifespan and values
* of counters, which are 64bits and not guaranteed to be written
 *   atomically on 32-bit systems.
- * @timestamp - Used by the core code to track when the last update was
- * @lifespan - Used by the core code to determine how old the counters
+ * @timestamp: Used by the core code to track when the last update was
+ * @lifespan: Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
* their own value during their allocation routine.
- * @descs - Array of pointers to static descriptors used for the counters
+ * @descs: Array of pointers to static descriptors used for the counters
* in directory.
- * @is_disabled - A bitmap to indicate each counter is currently disabled
+ * @is_disabled: A bitmap to indicate each counter is currently disabled
* or not.
- * @num_counters - How many hardware counters there are. If name is
+ * @num_counters: How many hardware counters there are. If name is
* shorter than this number, a kernel oops will result. Driver authors
* are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
* in their code to prevent this.
- * @value - Array of u64 counters that are accessed by the sysfs code and
+ * @value: Array of u64 counters that are accessed by the sysfs code and
* filled in by the drivers get_stats routine
*/
struct rdma_hw_stats {
@@ -842,6 +859,7 @@ enum ib_rate {
IB_RATE_400_GBPS = 21,
IB_RATE_600_GBPS = 22,
IB_RATE_800_GBPS = 23,
+ IB_RATE_1600_GBPS = 25,
};
/**
@@ -1516,6 +1534,7 @@ struct ib_ucontext {
struct ib_uverbs_file *ufile;
struct ib_rdmacg_object cg_obj;
+ u64 enabled_caps;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -1788,6 +1807,7 @@ struct ib_qp {
struct list_head rdma_mrs;
struct list_head sig_mrs;
struct ib_srq *srq;
+ struct completion srq_completion;
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
struct list_head xrcd_list;
@@ -1797,6 +1817,7 @@ struct ib_qp {
struct ib_qp *real_qp;
struct ib_uqp_object *uobject;
void (*event_handler)(struct ib_event *, void *);
+ void (*registered_event_handler)(struct ib_event *, void *);
void *qp_context;
/* sgid_attrs associated with the AV's */
const struct ib_gid_attr *av_sgid_attr;
@@ -1827,6 +1848,27 @@ struct ib_dm {
atomic_t usecnt;
};
+/* bit values to mark existence of ib_dmah fields */
+enum {
+ IB_DMAH_CPU_ID_EXISTS,
+ IB_DMAH_MEM_TYPE_EXISTS,
+ IB_DMAH_PH_EXISTS,
+};
+
+struct ib_dmah {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
+ u32 cpu_id;
+ enum tph_mem_type mem_type;
+ atomic_t usecnt;
+ u8 ph;
+ u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
+};
+
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
@@ -1844,6 +1886,7 @@ struct ib_mr {
struct ib_dm *dm;
struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
+ struct ib_dmah *dmah;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2175,6 +2218,7 @@ struct ib_port_cache {
struct ib_gid_table *gid;
u8 lmc;
enum ib_port_state port_state;
+ enum ib_port_state last_port_state;
};
struct ib_port_immutable {
@@ -2254,7 +2298,9 @@ struct rdma_netdev_alloc_params {
struct ib_odp_counters {
atomic64_t faults;
+ atomic64_t faults_handled;
atomic64_t invalidations;
+ atomic64_t invalidations_handled;
atomic64_t prefetch;
};
@@ -2360,7 +2406,7 @@ struct ib_device_ops {
int (*modify_port)(struct ib_device *device, u32 port_num,
int port_modify_mask,
struct ib_port_modify *port_modify);
- /**
+ /*
* The following mandatory functions are used only at device
* registration. Keep functions such as these at the end of this
* structure to avoid cache line misses when accessing struct ib_device
@@ -2370,7 +2416,7 @@ struct ib_device_ops {
struct ib_port_immutable *immutable);
enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
u32 port_num);
- /**
+ /*
* When calling get_netdev, the HW vendor's driver should return the
* net device of device @device at port @port_num or NULL if such
* a net device doesn't exist. The vendor driver should call dev_hold
@@ -2380,7 +2426,7 @@ struct ib_device_ops {
*/
struct net_device *(*get_netdev)(struct ib_device *device,
u32 port_num);
- /**
+ /*
* rdma netdev operation
*
* Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
@@ -2394,14 +2440,14 @@ struct ib_device_ops {
int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type,
struct rdma_netdev_alloc_params *params);
- /**
+ /*
* query_gid should be return GID value for @device, when @port_num
* link layer is either IB or iWarp. It is no-op if @port_num port
* is RoCE link layer.
*/
int (*query_gid)(struct ib_device *device, u32 port_num, int index,
union ib_gid *gid);
- /**
+ /*
* When calling add_gid, the HW vendor's driver should add the gid
* of device of port at gid index available at @attr. Meta-info of
* that gid (for example, the network device related to this gid) is
@@ -2415,7 +2461,7 @@ struct ib_device_ops {
* roce_gid_table is used.
*/
int (*add_gid)(const struct ib_gid_attr *attr, void **context);
- /**
+ /*
* When calling del_gid, the HW vendor's driver should delete the
* gid of device @device at gid index gid_index of port port_num
* available in @attr.
@@ -2430,7 +2476,7 @@ struct ib_device_ops {
struct ib_udata *udata);
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
- /**
+ /*
* This will be called once refcount of an entry in mmap_xa reaches
* zero. The type of the memory that was mapped may differ between
* entries and is opaque to the rdma_user_mmap interface.
@@ -2463,18 +2509,33 @@ struct ib_device_ops {
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
+ int (*create_cq_umem)(struct ib_cq *cq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem,
+ struct uverbs_attr_bundle *attrs);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+ /*
+ * pre_destroy_cq - Prevent a cq from generating any new work
+ * completions, but not free any kernel resources
+ */
+ int (*pre_destroy_cq)(struct ib_cq *cq);
+ /*
+ * post_destroy_cq - Free all kernel resources
+ */
+ void (*post_destroy_cq)(struct ib_cq *cq);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
- struct ib_udata *udata);
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
int mr_access_flags, struct ib_pd *pd,
@@ -2538,6 +2599,9 @@ struct ib_device_ops {
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
+ int (*alloc_dmah)(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
@@ -2552,7 +2616,7 @@ struct ib_device_ops {
struct scatterlist *meta_sg, int meta_sg_nents,
unsigned int *meta_sg_offset);
- /**
+ /*
* alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
* fill in the driver initialized data. The struct is kfree()'ed by
* the sysfs core when the device is removed. A lifespan of -1 in the
@@ -2561,7 +2625,7 @@ struct ib_device_ops {
struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
u32 port_num);
- /**
+ /*
* get_hw_stats - Fill in the counter value(s) in the stats struct.
* @index - The index in the value array we wish to have updated, or
* num_counters if we want all stats updated
@@ -2576,14 +2640,14 @@ struct ib_device_ops {
int (*get_hw_stats)(struct ib_device *device,
struct rdma_hw_stats *stats, u32 port, int index);
- /**
+ /*
* modify_hw_stat - Modify the counter configuration
* @enable: true/false when enable/disable a counter
* Return codes - 0 on success or error code otherwise.
*/
int (*modify_hw_stat)(struct ib_device *device, u32 port,
unsigned int counter_index, bool enable);
- /**
+ /*
* Allows rdma drivers to add their own restrack attributes.
*/
int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
@@ -2619,33 +2683,39 @@ struct ib_device_ops {
u8 pdata_len);
int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
- /**
+ /*
* counter_bind_qp - Bind a QP to a counter.
* @counter - The counter to be bound. If counter->id is zero then
* the driver needs to allocate a new counter and set counter->id
*/
- int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
- /**
+ int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
+ u32 port);
+ /*
* counter_unbind_qp - Unbind the qp from the dynamically-allocated
* counter and bind it onto the default one
*/
- int (*counter_unbind_qp)(struct ib_qp *qp);
- /**
+ int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
+ /*
	 * counter_dealloc - De-allocate the hw counter
*/
int (*counter_dealloc)(struct rdma_counter *counter);
- /**
+ /*
* counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
* the driver initialized data.
*/
struct rdma_hw_stats *(*counter_alloc_stats)(
struct rdma_counter *counter);
- /**
+ /*
* counter_update_stats - Query the stats value of this counter
*/
int (*counter_update_stats)(struct rdma_counter *counter);
- /**
+ /*
+	 * counter_init - Initialize the driver-specific rdma counter struct.
+ */
+ void (*counter_init)(struct rdma_counter *counter);
+
+ /*
* Allows rdma drivers to add their own restrack attributes
* dumped via 'rdma stat' iproute2 command.
*/
@@ -2661,9 +2731,35 @@ struct ib_device_ops {
*/
int (*get_numa_node)(struct ib_device *dev);
+ /*
+ * add_sub_dev - Add a sub IB device
+ */
+ struct ib_device *(*add_sub_dev)(struct ib_device *parent,
+ enum rdma_nl_dev_type type,
+ const char *name);
+
+ /*
+ * del_sub_dev - Delete a sub IB device
+ */
+ void (*del_sub_dev)(struct ib_device *sub_dev);
+
+ /*
+	 * ufile_hw_cleanup - Attempt to clean up uobjects' HW resources inside
+ * the ufile.
+ */
+ void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
+
+ /*
+	 * report_port_event - Drivers need to implement this if they have
+	 * driver-private handling to perform when the link status changes.
+ */
+ void (*report_port_event)(struct ib_device *ibdev,
+ struct net_device *ndev, unsigned long event);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_dmah);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_qp);
@@ -2671,6 +2767,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
+ DECLARE_RDMA_OBJ_SIZE(rdma_counter);
};
struct ib_core_device {
@@ -2723,6 +2820,7 @@ struct ib_device {
* It is a NULL terminated array.
*/
const struct attribute_group *groups[4];
+ u8 hw_stats_attr_index;
u64 uverbs_cmd_mask;
@@ -2771,6 +2869,17 @@ struct ib_device {
char iw_ifname[IFNAMSIZ];
u32 iw_driver_flags;
u32 lag_flags;
+
+ /* A parent device has a list of sub-devices */
+ struct mutex subdev_lock;
+ struct list_head subdev_list_head;
+
+ /* A sub device has a type and a parent */
+ enum rdma_nl_dev_type type;
+ struct ib_device *parent;
+ struct list_head subdev_list;
+
+ enum rdma_nl_name_assign_type name_assign_type;
};
static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
@@ -2839,11 +2948,18 @@ struct ib_block_iter {
unsigned int __pg_bit; /* alignment of current block */
};
-struct ib_device *_ib_alloc_device(size_t size);
+struct ib_device *_ib_alloc_device(size_t size, struct net *net);
#define ib_alloc_device(drv_struct, member) \
container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- struct drv_struct, member))), \
+ struct drv_struct, member)), \
+ &init_net), \
+ struct drv_struct, member)
+
+#define ib_alloc_device_with_net(drv_struct, member, net) \
+ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct drv_struct, member)), net), \
struct drv_struct, member)
void ib_dealloc_device(struct ib_device *device);
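A hypothetical driver could use the new _with_net variant to bind the device to a caller-chosen namespace at allocation time rather than the &init_net default; the struct and names below are illustrative:

struct demo_dev {
	struct ib_device ibdev;	/* must be the member named in the macro */
	/* ... driver-private state ... */
};

static struct demo_dev *demo_alloc(struct net *net)
{
	return ib_alloc_device_with_net(demo_dev, ibdev, net);
}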
@@ -2923,6 +3039,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
size_t length, u32 min_pgoff,
u32 max_pgoff);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+void rdma_user_mmap_disassociate(struct ib_device *device);
+#else
+static inline void rdma_user_mmap_disassociate(struct ib_device *device)
+{
+}
+#endif
+
static inline int
rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
struct rdma_user_mmap_entry *entry,
@@ -3034,8 +3158,8 @@ static inline u32 rdma_start_port(const struct ib_device *device)
/**
* rdma_for_each_port - Iterate over all valid port numbers of the IB device
- * @device - The struct ib_device * to iterate over
- * @iter - The unsigned int to store the port number
+ * @device: The struct ib_device * to iterate over
+ * @iter: The unsigned int to store the port number
*/
#define rdma_for_each_port(device, iter) \
for (iter = rdma_start_port(device + \
@@ -3401,7 +3525,7 @@ static inline bool rdma_core_cap_opa_port(struct ib_device *device,
/**
* rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
* @device: Device
- * @port_num: Port number
+ * @port: Port number
* @mtu: enum value of MTU
*
* Return the MTU size supported by the port as an integer value. Will return
@@ -3419,7 +3543,7 @@ static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
/**
* rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
* @device: Device
- * @port_num: Port number
+ * @port: Port number
* @attr: port attribute
*
* Return the MTU size supported by the port as an integer value.
@@ -3796,7 +3920,7 @@ static inline int ib_destroy_qp(struct ib_qp *qp)
/**
* ib_open_qp - Obtain a reference to an existing sharable QP.
- * @xrcd - XRC domain
+ * @xrcd: XRC domain
* @qp_open_attr: Attributes identifying the QP to open.
*
* Returns a reference to a sharable QP.
@@ -4150,9 +4274,9 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
/**
* ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
* @dev: The device for which the DMA addresses are to be created
- * @sg: The sg_table object describing the buffer
+ * @sgt: The sg_table object describing the buffer
* @direction: The direction of the DMA
- * @attrs: Optional DMA attributes for the map operation
+ * @dma_attrs: Optional DMA attributes for the map operation
*/
static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
struct sg_table *sgt,
@@ -4296,8 +4420,8 @@ struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
* R_Key and L_Key.
- * @mr - struct ib_mr pointer to be updated.
- * @newkey - new key to be used.
+ * @mr: struct ib_mr pointer to be updated.
+ * @newkey: new key to be used.
*/
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
@@ -4308,7 +4432,7 @@ static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
/**
* ib_inc_rkey - increments the key portion of the given rkey. Can be used
* for calculating a new rkey for type 2 memory windows.
- * @rkey - the rkey to increment.
+ * @rkey: the rkey to increment.
*/
static inline u32 ib_inc_rkey(u32 rkey)
{
@@ -4402,7 +4526,7 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
/**
* ib_device_try_get: Hold a registration lock
- * device: The device to lock
+ * @dev: The device to lock
*
* A device under an active registration lock cannot become unregistered. It
* is only possible to obtain a registration lock on a device that is fully
@@ -4428,6 +4552,19 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
+struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
+ u32 port);
+int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
+ u32 *port);
+
+static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
+{
+ return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+}
+
+void ib_dispatch_port_state_event(struct ib_device *ibdev,
+ struct net_device *ndev);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
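A sketch of how the three new helpers could combine in a driver's link-change path; the function is illustrative, and ib_query_netdev_port() is assumed to return non-zero when @ndev does not back one of the device's ports:

static void demo_link_change(struct ib_device *ibdev, struct net_device *ndev)
{
	enum ib_port_state state = ib_get_curr_port_state(ndev);
	u32 port;

	if (ib_query_netdev_port(ibdev, ndev, &port))
		return;

	pr_debug("port %u is now %s\n", port,
		 state == IB_PORT_ACTIVE ? "active" : "down");

	/* Let the core translate the netdev state into an IB port event. */
	ib_dispatch_port_state_event(ibdev, ndev);
}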
@@ -4639,6 +4776,8 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
return RDMA_AH_ATTR_TYPE_OPA;
return RDMA_AH_ATTR_TYPE_IB;
}
+ if (dev->type == RDMA_DEVICE_TYPE_SMI)
+ return RDMA_AH_ATTR_TYPE_IB;
return RDMA_AH_ATTR_TYPE_UNDEFINED;
}
@@ -4694,13 +4833,29 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
*
- * @device: the rdma device
+ * @ibdev: the rdma device
*/
void rdma_roce_rescan_device(struct ib_device *ibdev);
+void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
+void roce_del_all_netdev_gids(struct ib_device *ib_dev,
+ u32 port, struct net_device *ndev);
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
+#else
+static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+ return 0;
+}
+static inline bool
+rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+ return false;
+}
+#endif
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
@@ -4731,7 +4886,7 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
/**
* ibdev_to_node - return the NUMA node for a given ib_device
- * @dev: device to get the NUMA node for.
+ * @ibdev: device to get the NUMA node for.
*/
static inline int ibdev_to_node(struct ib_device *ibdev)
{
@@ -4756,6 +4911,12 @@ static inline int ibdev_to_node(struct ib_device *ibdev)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+bool rdma_dev_has_raw_cap(const struct ib_device *dev);
+static inline struct net *rdma_dev_net(struct ib_device *device)
+{
+ return read_pnet(&device->coredev.rdma_net);
+}
+
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
@@ -4763,6 +4924,7 @@ bool rdma_dev_access_netns(const struct ib_device *device,
/**
* rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
* on the flow_label
+ * @fl: flow_label value
*
* This function will convert the 20 bit flow_label input to a valid RoCE v2
* UDP src port 14 bit value. All RoCE V2 drivers should use this same
@@ -4820,4 +4982,32 @@ static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port);
+
+/**
+ * ib_add_sub_device - Add a sub IB device on an existing one
+ *
+ * @parent: The IB device that needs to add a sub device
+ * @type: The type of the new sub device
+ * @name: The name of the new sub device
+ *
+ * Return 0 on success, an error code otherwise
+ */
+int ib_add_sub_device(struct ib_device *parent,
+ enum rdma_nl_dev_type type,
+ const char *name);
+
+
+/**
+ * ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get'
+ *
+ * @sub: The sub device that is going to be deleted
+ *
+ * Return 0 on success, an error code otherwise
+ */
+int ib_del_sub_device_and_put(struct ib_device *sub);
+
+static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
+{
+ ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
+}
+
#endif /* IB_VERBS_H */
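As a rough pairing sketch (not from the patch), a parent driver could create an SMI-type sub device and later delete it; "demo_smi0" and the function names are illustrative:

static int demo_add_smi(struct ib_device *parent)
{
	return ib_add_sub_device(parent, RDMA_DEVICE_TYPE_SMI, "demo_smi0");
}

/* Teardown side; the caller is expected to hold a 'get' on the sub device. */
static void demo_del_smi(struct ib_device *sub)
{
	ib_del_sub_device_and_put(sub);
}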
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
index 6a1115b02a0d..dcae154edc26 100644
--- a/include/rdma/iba.h
+++ b/include/rdma/iba.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static inline u32 _iba_get8(const u8 *ptr)
{
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 8a8ab2f793ab..9bd930a83e6e 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -33,7 +33,11 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_MULTICAST_JOIN,
RDMA_CM_EVENT_MULTICAST_ERROR,
RDMA_CM_EVENT_ADDR_CHANGE,
- RDMA_CM_EVENT_TIMEWAIT_EXIT
+ RDMA_CM_EVENT_TIMEWAIT_EXIT,
+ RDMA_CM_EVENT_ADDRINFO_RESOLVED,
+ RDMA_CM_EVENT_ADDRINFO_ERROR,
+ RDMA_CM_EVENT_USER,
+ RDMA_CM_EVENT_INTERNAL,
};
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
@@ -63,6 +67,9 @@ struct rdma_route {
* 2 - Both primary and alternate path are available
*/
int num_pri_alt_paths;
+
+ unsigned int num_service_recs;
+ struct sa_service_rec *service_recs;
};
struct rdma_conn_param {
@@ -93,6 +100,7 @@ struct rdma_cm_event {
union {
struct rdma_conn_param conn;
struct rdma_ud_param ud;
+ u64 arg;
} param;
struct rdma_ucm_ece ece;
};
@@ -198,6 +206,17 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms);
/**
+ * rdma_resolve_ib_service - Resolve the IB service record of the
+ * service with the given service ID or name.
+ *
+ * This function is optional in the rdma cm flow. It is called on the client
+ * side of a connection, before calling rdma_resolve_route. The resolution
+ * can be done once per rdma_cm_id.
+ */
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs);
+
+/**
* rdma_create_qp - Allocate a QP and associate it with the specified RDMA
* identifier.
*
@@ -388,6 +407,5 @@ void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
union ib_gid *dgid);
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *cm_id);
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res);
#endif /* RDMA_CM_H */
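Where the new call sits in the client-side sequence, compressed into one sketch; in real code each step completes asynchronously via its RDMA_CM event, which must be waited for before the next call, and the timeout values are arbitrary:

static int demo_client_setup(struct rdma_cm_id *id,
			     struct sockaddr *src, struct sockaddr *dst,
			     struct rdma_ucm_ib_service *ibs)
{
	int ret;

	ret = rdma_resolve_addr(id, src, dst, 2000);
	if (ret)
		return ret;

	/* Optional, once per cm_id: resolve the IB service record. */
	ret = rdma_resolve_ib_service(id, ibs);
	if (ret)
		return ret;

	return rdma_resolve_route(id, 2000);
}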
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index 45d5481a7846..4204d08a010a 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -23,6 +23,7 @@ struct rdma_counter_mode {
enum rdma_nl_counter_mode mode;
enum rdma_nl_counter_mask mask;
struct auto_mode_param param;
+ bool bind_opcnt;
};
struct rdma_port_counter {
@@ -47,9 +48,10 @@ void rdma_counter_init(struct ib_device *dev);
void rdma_counter_release(struct ib_device *dev);
int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
+ bool bind_opcnt,
struct netlink_ext_ack *extack);
int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
-int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force);
int rdma_counter_query_stats(struct rdma_counter *counter);
u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
@@ -61,7 +63,8 @@ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
- enum rdma_nl_counter_mask *mask);
+ enum rdma_nl_counter_mask *mask,
+ bool *opcnt);
int rdma_counter_modify(struct ib_device *dev, u32 port,
unsigned int index, bool enable);
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c2a79aeee113..326deaf56d5d 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -6,6 +6,8 @@
#include <linux/netlink.h>
#include <uapi/rdma/rdma_netlink.h>
+struct ib_device;
+
enum {
RDMA_NLDEV_ATTR_EMPTY_STRING = 1,
RDMA_NLDEV_ATTR_ENTRY_STRLEN = 16,
@@ -110,6 +112,16 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
*/
bool rdma_nl_chk_listeners(unsigned int group);
+/**
+ * rdma_nl_notify_event - Prepare and send an event message
+ * @ib: the IB device which triggered the event
+ * @port_num: the port number which triggered the event - 0 if unused
+ * @type: the event type
+ * Returns 0 on success or a negative error code
+ */
+int rdma_nl_notify_event(struct ib_device *ib, u32 port_num,
+ enum rdma_nl_notify_event_type type);
+
struct rdma_link_ops {
struct list_head list;
const char *type;
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index d67892944193..71140ea0aeb2 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -144,7 +144,7 @@
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
/**
- * rvt_ud_wr - IB UD work plus AH cache
+ * struct rvt_ud_wr - IB UD work plus AH cache
* @wr: valid IB work request
* @attr: pointer to an allocated AH attribute
*
@@ -184,10 +184,10 @@ struct rvt_swqe {
* struct rvt_krwq - kernel struct receive work request
* @p_lock: lock to protect producer of the kernel buffer
* @head: index of next entry to fill
- * @c_lock:lock to protect consumer of the kernel buffer
+ * @c_lock: lock to protect consumer of the kernel buffer
* @tail: index of next entry to pull
- * @count: count is aproximate of total receive enteries posted
- * @rvt_rwqe: struct of receive work request queue entry
+ * @count: approximate count of total receive entries posted
+ * @curr_wq: struct of receive work request queue entry
*
* This structure is used to contain the head pointer,
* tail pointer and receive work queue entries for kernel
@@ -309,10 +309,10 @@ struct rvt_ack_entry {
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
/**
- * rvt_operation_params - op table entry
- * @length - the length to copy into the swqe entry
- * @qpt_support - a bit mask indicating QP type support
- * @flags - RVT_OPERATION flags (see above)
+ * struct rvt_operation_params - op table entry
+ * @length: the length to copy into the swqe entry
+ * @qpt_support: a bit mask indicating QP type support
+ * @flags: RVT_OPERATION flags (see above)
*
* This supports table driven post send so that
 * the driver can have differing and potentially
@@ -552,7 +552,7 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
/**
* rvt_is_user_qp - return if this is user mode QP
- * @qp - the target QP
+ * @qp: the target QP
*/
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
@@ -561,7 +561,7 @@ static inline bool rvt_is_user_qp(struct rvt_qp *qp)
/**
* rvt_get_qp - get a QP reference
- * @qp - the QP to hold
+ * @qp: the QP to hold
*/
static inline void rvt_get_qp(struct rvt_qp *qp)
{
@@ -570,7 +570,7 @@ static inline void rvt_get_qp(struct rvt_qp *qp)
/**
* rvt_put_qp - release a QP reference
- * @qp - the QP to release
+ * @qp: the QP to release
*/
static inline void rvt_put_qp(struct rvt_qp *qp)
{
@@ -580,7 +580,7 @@ static inline void rvt_put_qp(struct rvt_qp *qp)
/**
* rvt_put_swqe - drop mr refs held by swqe
- * @wqe - the send wqe
+ * @wqe: the send wqe
*
* This drops any mr references held by the swqe
*/
@@ -597,8 +597,8 @@ static inline void rvt_put_swqe(struct rvt_swqe *wqe)
/**
* rvt_qp_wqe_reserve - reserve operation
- * @qp - the rvt qp
- * @wqe - the send wqe
+ * @qp: the rvt qp
+ * @wqe: the send wqe
*
* This routine used in post send to record
* a wqe relative reserved operation use.
@@ -612,8 +612,8 @@ static inline void rvt_qp_wqe_reserve(
/**
* rvt_qp_wqe_unreserve - clean reserved operation
- * @qp - the rvt qp
- * @flags - send wqe flags
+ * @qp: the rvt qp
+ * @flags: send wqe flags
*
* This decrements the reserve use count.
*
@@ -653,8 +653,8 @@ u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
/**
* rvt_div_round_up_mtu - round up divide
- * @qp - the qp pair
- * @len - the length
+ * @qp: the qp pair
+ * @len: the length
*
* Perform a shift based mtu round up divide
*/
@@ -664,8 +664,9 @@ static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
}
/**
- * @qp - the qp pair
- * @len - the length
+ * rvt_div_mtu - shift-based divide
+ * @qp: the qp pair
+ * @len: the length
*
* Perform a shift based mtu divide
*/
@@ -676,7 +677,7 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
/**
* rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
- * @timeout - timeout input(0 - 31).
+ * @timeout: timeout input(0 - 31).
*
* Return a timeout value in jiffies.
*/
@@ -690,7 +691,8 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
/**
* rvt_lookup_qpn - return the QP with the given QPN
- * @ibp: the ibport
+ * @rdi: rvt device info structure
+ * @rvp: the ibport
* @qpn: the QP number to look up
*
* The caller must hold the rcu_read_lock(), and keep the lock until
@@ -716,9 +718,9 @@ static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
}
/**
- * rvt_mod_retry_timer - mod a retry timer
- * @qp - the QP
- * @shift - timeout shift to wait for multiple packets
+ * rvt_mod_retry_timer_ext - mod a retry timer
+ * @qp: the QP
+ * @shift: timeout shift to wait for multiple packets
* Modify a potentially already running retry timer
*/
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
@@ -753,7 +755,7 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
}
/**
- * rvt_qp_sqwe_incr - increment ring index
+ * rvt_qp_swqe_incr - increment ring index
* @qp: the qp
* @val: the starting value
*
@@ -811,10 +813,10 @@ static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
/**
* rvt_qp_complete_swqe - insert send completion
- * @qp - the qp
- * @wqe - the send wqe
- * @opcode - wc operation (driver dependent)
- * @status - completion status
+ * @qp: the qp
+ * @wqe: the send wqe
+ * @opcode: wc operation (driver dependent)
+ * @status: completion status
*
* Update the s_last information, and then insert a send
* completion into the completion
@@ -891,7 +893,7 @@ void rvt_ruc_loopback(struct rvt_qp *qp);
/**
* struct rvt_qp_iter - the iterator for QPs
- * @qp - the current QP
+ * @qp: the current QP
*
* This structure defines the current iterator
* state for sequenced access to all QPs relative
@@ -913,7 +915,7 @@ struct rvt_qp_iter {
/**
* ib_cq_tail - Return tail index of cq buffer
- * @send_cq - The cq for send
+ * @send_cq: The cq for send
*
* This is called in qp_iter_print to get tail
* of cq buffer.
@@ -929,7 +931,7 @@ static inline u32 ib_cq_tail(struct ib_cq *send_cq)
/**
* ib_cq_head - Return head index of cq buffer
- * @send_cq - The cq for send
+ * @send_cq: The cq for send
*
* This is called in qp_iter_print to get head
* of cq buffer.
@@ -945,7 +947,7 @@ static inline u32 ib_cq_head(struct ib_cq *send_cq)
/**
* rvt_free_rq - free memory allocated for rvt_rq struct
- * @rvt_rq: request queue data structure
+ * @rq: request queue data structure
*
* This function should only be called if the rvt_mmap_info()
* has not succeeded.
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 0d69ded73bf2..8a9bcf77dace 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -57,6 +57,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_SRQ,
/**
+ * @RDMA_RESTRACK_DMAH: DMA handle
+ */
+ RDMA_RESTRACK_DMAH,
+ /**
	 * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index fe0512116958..555ea3d142a4 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -34,7 +34,7 @@
static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
{
if (IS_ERR(uobj))
- return NULL;
+ return ERR_CAST(uobj);
return uobj->object;
}
#define uobj_get_obj_read(_object, _type, _id, _attrs) \
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index ccd11631c167..26ba919ac245 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -134,6 +134,8 @@ static inline void uverbs_uobject_get(struct ib_uobject *uobject)
}
void uverbs_uobject_put(struct ib_uobject *uobject);
+int uverbs_try_lock_object(struct ib_uobject *uobj, enum rdma_lookup_mode mode);
+
struct uverbs_obj_fd_type {
/*
* In fd based objects, uverbs_obj_type_ops points to generic
@@ -150,6 +152,37 @@ struct uverbs_obj_fd_type {
int flags;
};
+struct ib_uverbs_file {
+ struct kref ref;
+ struct ib_uverbs_device *device;
+ struct mutex ucontext_lock;
+ /*
+ * ucontext must be accessed via ib_uverbs_get_ucontext() or with
+ * ucontext_lock held
+ */
+ struct ib_ucontext *ucontext;
+ struct ib_uverbs_async_event_file *default_async_file;
+ struct list_head list;
+
+ /*
+ * To access the uobjects list hw_destroy_rwsem must be held for write
+ * OR hw_destroy_rwsem held for read AND uobjects_lock held.
+	 * hw_destroy_rwsem should be held across any destruction of the HW
+ * object of an associated uobject.
+ */
+ struct rw_semaphore hw_destroy_rwsem;
+ spinlock_t uobjects_lock;
+ struct list_head uobjects;
+
+ struct mutex umap_lock;
+ struct list_head umaps;
+ struct page *disassociate_page;
+
+ struct xarray idr;
+
+ struct mutex disassociation_lock;
+};
+
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9705b2a98e49..0cef64366538 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -14,59 +14,22 @@
#include <rv/automata.h>
#include <linux/rv.h>
#include <linux/bug.h>
+#include <linux/sched.h>
-#ifdef CONFIG_RV_REACTORS
-
-#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static char REACT_MSG_##name[1024]; \
+/*
+ * Generic helpers for all types of deterministic automata monitors.
+ */
+#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
\
-static inline char *format_react_msg_##name(type curr_state, type event) \
+static void react_##name(type curr_state, type event) \
{ \
- snprintf(REACT_MSG_##name, 1024, \
+ rv_react(&rv_##name, \
"rv: monitor %s does not allow event %s on state %s\n", \
#name, \
model_get_event_name_##name(event), \
model_get_state_name_##name(curr_state)); \
- return REACT_MSG_##name; \
-} \
- \
-static void cond_react_##name(char *msg) \
-{ \
- if (rv_##name.react) \
- rv_##name.react(msg); \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return rv_reacting_on(); \
-}
-
-#else /* CONFIG_RV_REACTOR */
-
-#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- return NULL; \
-} \
- \
-static void cond_react_##name(char *msg) \
-{ \
- return; \
} \
\
-static bool rv_reacting_on_##name(void) \
-{ \
- return 0; \
-}
-#endif
-
-/*
- * Generic helpers for all types of deterministic automata monitors.
- */
-#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \
- \
-DECLARE_RV_REACTING_HELPERS(name, type) \
- \
/* \
 * da_monitor_reset_##name - reset a monitor and set it to the init state \
*/ \
@@ -77,23 +40,6 @@ static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
} \
\
/* \
- * da_monitor_curr_state_##name - return the current state \
- */ \
-static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
-{ \
- return da_mon->curr_state; \
-} \
- \
-/* \
- * da_monitor_set_state_##name - set the new current state \
- */ \
-static inline void \
-da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
-{ \
- da_mon->curr_state = state; \
-} \
- \
-/* \
* da_monitor_start_##name - start monitoring \
* \
* The monitor will ignore all events until monitoring is set to true. This \
@@ -149,65 +95,81 @@ static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon)
* Event handler for implicit monitors. Implicit monitor is the one which the
* handler does not need to specify which da_monitor to manipulate. Examples
* of implicit monitor are the per_cpu or the global ones.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle the racing events in any order.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
\
static inline bool \
da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ react_##name(curr_state, event); \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+		" retries reached for event %s, resetting monitor %s\n",	\
+ model_get_event_name_##name(event), #name); \
return false; \
} \
/*
* Event handler for per_task monitors.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle the racing events in any order.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
\
static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ react_##name(curr_state, event); \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+		" retries reached for event %s, resetting monitor %s\n",	\
+ model_get_event_name_##name(event), #name); \
return false; \
}
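The lock-free update both handlers rely on, reduced to a generic sketch: recompute the successor state and publish it with try_cmpxchg(), bounding the number of retries. On failure try_cmpxchg() refreshes the expected value, so every retry works on fresh state:

static bool demo_advance(unsigned int *state, unsigned int event,
			 unsigned int (*next)(unsigned int, unsigned int))
{
	unsigned int curr = READ_ONCE(*state);

	for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) {
		unsigned int next_state = next(curr, event);

		/* On failure "curr" is updated to the value that won the
		 * race, so the next iteration recomputes from it. */
		if (try_cmpxchg(state, &curr, next_state))
			return true;
	}
	return false;	/* too many racing updates */
}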
@@ -324,10 +286,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk)
static void da_monitor_reset_all_##name(void) \
{ \
struct task_struct *g, *p; \
+ int cpu; \
\
read_lock(&tasklist_lock); \
for_each_process_thread(g, p) \
da_monitor_reset_##name(da_get_monitor_##name(p)); \
+ for_each_present_cpu(cpu) \
+ da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \
read_unlock(&tasklist_lock); \
} \
\
@@ -508,6 +473,30 @@ da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event)
__da_handle_event_##name(da_mon, tsk, event); \
\
return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling events. \
+ */ \
+static inline bool \
+da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
}
/*
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
new file mode 100644
index 000000000000..eff60cd61106
--- /dev/null
+++ b/include/rv/ltl_monitor.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file must be combined with the $(MODEL_NAME).h file generated by
+ * tools/verification/rvgen.
+ */
+
+#include <linux/args.h>
+#include <linux/rv.h>
+#include <linux/stringify.h>
+#include <linux/seq_buf.h>
+#include <rv/instrumentation.h>
+#include <trace/events/task.h>
+#include <trace/events/sched.h>
+
+#ifndef MONITOR_NAME
+#error "Please include $(MODEL_NAME).h generated by rvgen"
+#endif
+
+#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
+static struct rv_monitor RV_MONITOR_NAME;
+
+static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+
+static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
+static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
+
+static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
+{
+ return &task->rv[ltl_monitor_slot].ltl_mon;
+}
+
+static void ltl_task_init(struct task_struct *task, bool task_creation)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ memset(&mon->states, 0, sizeof(mon->states));
+
+ for (int i = 0; i < LTL_NUM_ATOM; ++i)
+ __set_bit(i, mon->unknown_atoms);
+
+ ltl_atoms_init(task, mon, task_creation);
+ ltl_atoms_fetch(task, mon);
+}
+
+static void handle_task_newtask(void *data, struct task_struct *task, u64 flags)
+{
+ ltl_task_init(task, true);
+}
+
+static int ltl_monitor_init(void)
+{
+ struct task_struct *g, *p;
+ int ret, cpu;
+
+ ret = rv_get_task_monitor_slot();
+ if (ret < 0)
+ return ret;
+
+ ltl_monitor_slot = ret;
+
+ rv_attach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ read_lock(&tasklist_lock);
+
+ for_each_process_thread(g, p)
+ ltl_task_init(p, false);
+
+ for_each_present_cpu(cpu)
+ ltl_task_init(idle_task(cpu), false);
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+static void ltl_monitor_destroy(void)
+{
+ rv_detach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ rv_put_task_monitor_slot(ltl_monitor_slot);
+ ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+}
+
+static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(task);
+ rv_react(&RV_MONITOR_NAME, "rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
+ task->comm, task->pid);
+}
+
+static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
+{
+ if (rv_ltl_all_atoms_known(mon))
+ ltl_start(task, mon);
+}
+
+static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
+{
+ __clear_bit(atom, mon->unknown_atoms);
+ if (value)
+ __set_bit(atom, mon->atoms);
+ else
+ __clear_bit(atom, mon->atoms);
+}
+
+static void
+ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
+{
+ const char *format_str = "%s";
+ DECLARE_SEQ_BUF(atoms, 64);
+ char states[32], next[32];
+ int i;
+
+ if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
+ return;
+
+ snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
+ snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);
+
+ for (i = 0; i < LTL_NUM_ATOM; ++i) {
+ if (test_bit(i, mon->atoms)) {
+ seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
+ format_str = ",%s";
+ }
+ }
+
+ CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
+}
+
+static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
+{
+ DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};
+
+ if (!rv_ltl_valid_state(mon))
+ return;
+
+ for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
+ if (test_bit(i, mon->states))
+ ltl_possible_next_states(mon, i, next_states);
+ }
+
+ ltl_trace_event(task, mon, next_states);
+
+ memcpy(mon->states, next_states, sizeof(next_states));
+
+ if (!rv_ltl_valid_state(mon))
+ ltl_illegal_state(task, mon);
+}
+
+static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_set(mon, atom, value);
+ ltl_atoms_fetch(task, mon);
+
+ if (!rv_ltl_valid_state(mon)) {
+ ltl_attempt_start(task, mon);
+ return;
+ }
+
+ ltl_validate(task, mon);
+}
+
+static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_update(task, atom, value);
+
+ ltl_atom_set(mon, atom, !value);
+ ltl_validate(task, mon);
+}
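A sketch of how a generated monitor might feed this header from a tracepoint, assuming rvgen produced an atom named LTL_SLEEP for the model; a level-type observation goes through ltl_atom_update(), while a one-shot observation that must immediately revert would use ltl_atom_pulse():

static void handle_demo_set_state(void *data, struct task_struct *task,
				  int state)
{
	ltl_atom_update(task, LTL_SLEEP, state != TASK_RUNNING);
}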
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 4b1216de3f22..2b28a05e492b 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -50,9 +50,7 @@ struct fcoe_ctlr_device {
struct fcoe_sysfs_function_template *f;
struct list_head fcfs;
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
struct workqueue_struct *devloss_work_q;
struct mutex lock;
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 4a9b4169e081..183d9fd50d2d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -963,7 +963,7 @@ int fc_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
int fc_eh_abort(struct scsi_cmnd *);
int fc_eh_device_reset(struct scsi_cmnd *);
int fc_eh_host_reset(struct scsi_cmnd *);
-int fc_slave_alloc(struct scsi_device *);
+int fc_sdev_init(struct scsi_device *);
/*
* ELS/CT interface
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 3c5899290aed..6616348e59b9 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -15,7 +15,7 @@
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/local_lock.h>
-#include <linux/random.h>
+#include <linux/prandom.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fcoe_sysfs.h>
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index 7c8ba9d7378b..ef53d4bea28a 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -15,7 +15,6 @@
struct iscsi_tcp_conn;
struct iscsi_segment;
struct sk_buff;
-struct ahash_request;
typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
struct iscsi_segment *);
@@ -27,7 +26,7 @@ struct iscsi_segment {
unsigned int total_size;
unsigned int total_copied;
- struct ahash_request *hash;
+ u32 *crcp;
unsigned char padbuf[ISCSI_PAD_LEN];
unsigned char recv_digest[ISCSI_DIGEST_SIZE];
unsigned char digest[ISCSI_DIGEST_SIZE];
@@ -61,8 +60,8 @@ struct iscsi_tcp_conn {
* stop to terminate */
/* control data */
struct iscsi_tcp_recv in; /* TCP receive context */
- /* CRC32C (Rx) LLD should set this is they do not offload */
- struct ahash_request *rx_hash;
+	/* CRC32C (Rx). The LLD should set this if it does not offload. */
+ u32 *rx_crcp;
};
struct iscsi_tcp_task {
@@ -99,18 +98,15 @@ extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment);
extern void iscsi_segment_init_linear(struct iscsi_segment *segment,
void *data, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash);
+ iscsi_segment_done_fn_t *done, u32 *crcp);
extern int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash);
+ iscsi_segment_done_fn_t *done, u32 *crcp);
/* digest helpers */
-extern void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
- size_t hdrlen,
+extern void iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen,
unsigned char digest[ISCSI_DIGEST_SIZE]);
extern struct iscsi_cls_conn *
iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 1324068dd950..a0635b128d7a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -203,6 +203,14 @@ static inline bool dev_is_expander(enum sas_device_type type)
type == SAS_FANOUT_EXPANDER_DEVICE;
}
+static inline bool dev_parent_is_expander(struct domain_device *dev)
+{
+ if (!dev->parent)
+ return false;
+
+ return dev_is_expander(dev->parent->dev_type);
+}
+
static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
{
INIT_WORK(&sw->work, fn);
@@ -683,10 +691,9 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
-int sas_device_configure(struct scsi_device *dev,
- struct queue_limits *lim);
+int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
-extern int sas_bios_param(struct scsi_device *, struct block_device *,
+extern int sas_bios_param(struct scsi_device *, struct gendisk *,
sector_t capacity, int *hsc);
int sas_execute_internal_abort_single(struct domain_device *device,
u16 tag, unsigned int qid,
@@ -703,7 +710,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd);
extern void sas_target_destroy(struct scsi_target *);
-extern int sas_slave_alloc(struct scsi_device *);
+extern int sas_sdev_init(struct scsi_device *);
extern int sas_ioctl(struct scsi_device *sdev, unsigned int cmd,
void __user *arg);
extern int sas_drain_work(struct sas_ha_struct *ha);
@@ -750,8 +757,8 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
#endif
#define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \
- .device_configure = sas_device_configure, \
- .slave_alloc = sas_slave_alloc, \
+ .sdev_configure = sas_sdev_configure, \
+ .sdev_init = sas_sdev_init, \
#define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 92e27e7bf088..a161c0222931 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -15,89 +15,37 @@
#ifdef CONFIG_SCSI_SAS_ATA
-static inline int dev_is_sata(struct domain_device *dev)
+static inline bool dev_is_sata(struct domain_device *dev)
{
- return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
- dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
+ switch (dev->dev_type) {
+ case SAS_SATA_DEV:
+ case SAS_SATA_PENDING:
+ case SAS_SATA_PM:
+ case SAS_SATA_PM_PORT:
+ return true;
+ default:
+ return false;
+ }
}
-int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
-int sas_ata_init(struct domain_device *dev);
-void sas_ata_task_abort(struct sas_task *task);
-void sas_ata_strategy_handler(struct Scsi_Host *shost);
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q);
void sas_ata_schedule_reset(struct domain_device *dev);
-void sas_ata_wait_eh(struct domain_device *dev);
-void sas_probe_sata(struct asd_sas_port *port);
-void sas_suspend_sata(struct asd_sas_port *port);
-void sas_resume_sata(struct asd_sas_port *port);
-void sas_ata_end_eh(struct ata_port *ap);
void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset);
-int sas_execute_ata_cmd(struct domain_device *device, u8 *fis,
- int force_phy_id);
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id);
int smp_ata_check_ready_type(struct ata_link *link);
-int sas_discover_sata(struct domain_device *dev);
-int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
- struct domain_device *child, int phy_id);
extern const struct attribute_group sas_ata_sdev_attr_group;
#else
-static inline void sas_ata_disabled_notice(void)
-{
- pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n");
-}
-
-static inline int dev_is_sata(struct domain_device *dev)
-{
- return 0;
-}
-static inline int sas_ata_init(struct domain_device *dev)
-{
- return 0;
-}
-static inline void sas_ata_task_abort(struct sas_task *task)
-{
-}
-
-static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
-{
-}
-
-static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
+static inline bool dev_is_sata(struct domain_device *dev)
{
+ return false;
}
static inline void sas_ata_schedule_reset(struct domain_device *dev)
{
}
-static inline void sas_ata_wait_eh(struct domain_device *dev)
-{
-}
-
-static inline void sas_probe_sata(struct asd_sas_port *port)
-{
-}
-
-static inline void sas_suspend_sata(struct asd_sas_port *port)
-{
-}
-
-static inline void sas_resume_sata(struct asd_sas_port *port)
-{
-}
-
-static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
-{
- return 0;
-}
-
-static inline void sas_ata_end_eh(struct ata_port *ap)
-{
-}
-
static inline void sas_ata_device_link_abort(struct domain_device *dev,
bool force_reset)
{
@@ -114,19 +62,6 @@ static inline int smp_ata_check_ready_type(struct ata_link *link)
return 0;
}
-static inline int sas_discover_sata(struct domain_device *dev)
-{
- sas_ata_disabled_notice();
- return -ENXIO;
-}
-
-static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
- struct domain_device *child, int phy_id)
-{
- sas_ata_disabled_notice();
- return -ENODEV;
-}
-
#define sas_ata_sdev_attr_group ((struct attribute_group) {})
#endif
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
index 9b1f0f424a79..a569c35b258d 100644
--- a/include/scsi/scsi_bsg_iscsi.h
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -59,7 +59,7 @@ struct iscsi_bsg_host_vendor {
*/
struct iscsi_bsg_host_vendor_reply {
/* start of vendor response area */
- uint32_t vendor_rsp[0];
+ DECLARE_FLEX_ARRAY(uint32_t, vendor_rsp);
};
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 45c40d200154..8ecfb94049db 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -234,7 +234,7 @@ static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
static inline unsigned int scsi_logical_block_count(struct scsi_cmnd *scmd)
{
- unsigned int shift = ilog2(scmd->device->sector_size) - SECTOR_SHIFT;
+ unsigned int shift = ilog2(scmd->device->sector_size);
return blk_rq_bytes(scsi_cmd_to_rq(scmd)) >> shift;
}
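Worked example: with 4096-byte logical blocks, ilog2() yields a shift of 12, so a 32768-byte request now counts as 32768 >> 12 = 8 logical blocks; the removed "- SECTOR_SHIFT" term made the shift 3 and would have reported 4096.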
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index 7b196d234626..efcdc78530d5 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -11,11 +11,11 @@ extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
const struct scsi_sense_hdr *);
-extern void scsi_print_sense(const struct scsi_cmnd *);
+extern void scsi_print_sense(struct scsi_cmnd *);
extern void __scsi_print_sense(const struct scsi_device *, const char *name,
const unsigned char *sense_buffer,
int sense_len);
-extern void scsi_print_result(const struct scsi_cmnd *, const char *, int);
+extern void scsi_print_result(struct scsi_cmnd *, const char *, int);
#ifdef CONFIG_SCSI_CONSTANTS
extern bool scsi_opcode_sa_name(int, int, const char **, const char **);
@@ -24,7 +24,6 @@ extern const char *scsi_extd_sense_format(unsigned char, unsigned char,
const char **);
extern const char *scsi_mlreturn_string(int);
extern const char *scsi_hostbyte_string(int);
-extern const char *scsi_driverbyte_string(int);
#else
static inline bool
scsi_opcode_sa_name(int cmd, int sa,
@@ -76,12 +75,6 @@ scsi_hostbyte_string(int result)
return NULL;
}
-static inline const char *
-scsi_driverbyte_string(int result)
-{
- return NULL;
-}
-
#endif
#endif /* _SCSI_SCSI_DBG_H */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9c540f5468eb..d32f5841f4f8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -155,7 +155,7 @@ struct scsi_device {
blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
- * pass settings from slave_alloc to scsi
+ * pass settings from sdev_init to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
@@ -179,11 +179,22 @@ struct scsi_device {
unsigned manage_shutdown:1;
/*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system restart (reboot) operations.
+ */
+ unsigned manage_restart:1;
+
+ /*
* If set and if the device is runtime suspended, ask the high-level
* device driver (sd) to force a runtime resume of the device.
*/
unsigned force_runtime_start_on_system_start:1;
+ /*
+ * Set if the device is an ATA device.
+ */
+ unsigned is_ata:1;
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
@@ -247,6 +258,9 @@ struct scsi_device {
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
+ atomic_t ua_new_media_ctr; /* Counter for New Media UNIT ATTENTIONs */
+ atomic_t ua_por_ctr; /* Counter for Power On / Reset UAs */
+
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
@@ -305,8 +319,8 @@ sdev_prefix_printk(const char *, const struct scsi_device *, const char *,
#define sdev_printk(l, sdev, fmt, a...) \
sdev_prefix_printk(l, sdev, NULL, fmt, ##a)
-__printf(3, 4) void
-scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
+__printf(3, 4) void scmd_printk(const char *, struct scsi_cmnd *, const char *,
+ ...);
#define scmd_dbg(scmd, fmt, a...) \
do { \
@@ -357,7 +371,7 @@ struct scsi_target {
atomic_t target_blocked;
/*
- * LLDs should set this in the slave_alloc host template callout.
+ * LLDs should set this in the sdev_init host template callout.
	 * If set to zero then there is no limit.
*/
unsigned int can_queue;
@@ -550,6 +564,10 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
const struct scsi_exec_args *args);
void scsi_failures_reset_retries(struct scsi_failures *failures);
+struct scsi_cmnd *scsi_get_internal_cmd(struct scsi_device *sdev,
+ enum dma_data_direction data_direction,
+ blk_mq_req_flags_t flags);
+void scsi_put_internal_cmd(struct scsi_cmnd *scmd);
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
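A sketch of the lifecycle the new declarations imply; the NULL-on-failure convention is an assumption here, and CDB setup and submission are elided:

static int demo_internal_cmd(struct scsi_device *sdev)
{
	struct scsi_cmnd *scmd;

	scmd = scsi_get_internal_cmd(sdev, DMA_NONE, BLK_MQ_REQ_RESERVED);
	if (!scmd)	/* failure convention assumed */
		return -ENOMEM;

	/* ... fill in the CDB and submit; hosts that declare
	 * nr_reserved_cmds service these via queue_reserved_command() ... */

	scsi_put_internal_cmd(scmd);
	return 0;
}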
@@ -581,6 +599,22 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
+/**
+ * scsi_device_is_pseudo_dev() - Whether a device is a pseudo SCSI device.
+ * @sdev: SCSI device to examine
+ *
+ * A pseudo SCSI device can be used to allocate SCSI commands but does not show
+ * up in sysfs. Additionally, the logical unit information in *@sdev is made up.
+ *
+ * This function tests the LUN number instead of comparing @sdev with
+ * @sdev->host->pseudo_sdev because this function may be called before
+ * @sdev->host->pseudo_sdev has been initialized.
+ */
+static inline bool scsi_device_is_pseudo_dev(struct scsi_device *sdev)
+{
+ return sdev->lun == U64_MAX;
+}
+
/*
* checks for positions of the SCSI state machine
*/
@@ -684,6 +718,10 @@ static inline int scsi_device_busy(struct scsi_device *sdev)
return sbitmap_weight(&sdev->budget_map);
}
+/* Macros to access the UNIT ATTENTION counters */
+#define scsi_get_ua_new_media_ctr(sdev) atomic_read(&sdev->ua_new_media_ctr)
+#define scsi_get_ua_por_ctr(sdev) atomic_read(&sdev->ua_por_ctr)
+
#define MODULE_ALIAS_SCSI_DEVICE(type) \
MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
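Assuming the counters only ever increase, a caller can snapshot one around an operation to detect an intervening unit attention; a sketch:

static int demo_io_with_media_check(struct scsi_device *sdev)
{
	int media_ctr = scsi_get_ua_new_media_ctr(sdev);

	/* ... issue the I/O ... */

	if (scsi_get_ua_new_media_ctr(sdev) != media_ctr)
		return -EAGAIN;	/* a NEW MEDIA UA arrived in between */
	return 0;
}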
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 6b548dc2c496..1d79a3b536ce 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -69,8 +69,10 @@
#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
/* Always retry ABORTED_COMMAND with ASC 0xc1 */
#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+/* Do not query the IO Advice Hints Grouping mode page */
+#define BLIST_SKIP_IO_HINTS ((__force blist_flags_t)(1ULL << 34))
-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 19a1c5c48935..e87cf7eadd26 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -87,6 +87,12 @@ struct scsi_host_template {
int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
/*
+ * Queue a reserved command (BLK_MQ_REQ_RESERVED). The .queuecommand()
+ * documentation also applies to the .queue_reserved_command() callback.
+ */
+ int (*queue_reserved_command)(struct Scsi_Host *, struct scsi_cmnd *);
+
+ /*
* The commit_rqs function is used to trigger a hardware
* doorbell after some requests have been queued with
* queuecommand, when an error is encountered before sending
@@ -168,20 +174,20 @@ struct scsi_host_template {
* Return values: 0 on success, non-0 on failure
*
* Deallocation: If we didn't find any devices at this ID, you will
- * get an immediate call to slave_destroy(). If we find something
- * here then you will get a call to slave_configure(), then the
+ * get an immediate call to sdev_destroy(). If we find something
+ * here then you will get a call to sdev_configure(), then the
* device will be used for however long it is kept around, then when
 * the device is removed from the system (or possibly at reboot
- * time), you will then get a call to slave_destroy(). This is
- * assuming you implement slave_configure and slave_destroy.
+ * time), you will then get a call to sdev_destroy(). This is
+ * assuming you implement sdev_configure and sdev_destroy.
* However, if you allocate memory and hang it off the device struct,
- * then you must implement the slave_destroy() routine at a minimum
+ * then you must implement the sdev_destroy() routine at a minimum
* in order to avoid leaking memory
 * each time a device is torn down.
*
* Status: OPTIONAL
*/
- int (* slave_alloc)(struct scsi_device *);
+ int (* sdev_init)(struct scsi_device *);
/*
* Once the device has responded to an INQUIRY and we know the
@@ -206,28 +212,24 @@ struct scsi_host_template {
* specific setup basis...
* 6. Return 0 on success, non-0 on error. The device will be marked
* as offline on error so that no access will occur. If you return
- * non-0, your slave_destroy routine will never get called for this
+ * non-0, your sdev_destroy routine will never get called for this
* device, so don't leave any loose memory hanging around, clean
* up after yourself before returning non-0
*
* Status: OPTIONAL
- *
- * Note: slave_configure is the legacy version, use device_configure for
- * all new code. A driver must never define both.
*/
- int (* device_configure)(struct scsi_device *, struct queue_limits *lim);
- int (* slave_configure)(struct scsi_device *);
+ int (* sdev_configure)(struct scsi_device *, struct queue_limits *lim);
/*
* Immediately prior to deallocating the device and after all activity
* has ceased the mid layer calls this point so that the low level
* driver may completely detach itself from the scsi device and vice
* versa. The low level driver is responsible for freeing any memory
- * it allocated in the slave_alloc or slave_configure calls.
+ * it allocated in the sdev_init or sdev_configure calls.
*
* Status: OPTIONAL
*/
- void (* slave_destroy)(struct scsi_device *);
+ void (* sdev_destroy)(struct scsi_device *);
/*
* Before the mid layer attempts to scan for a new device attached
@@ -322,7 +324,7 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* bios_param)(struct scsi_device *, struct block_device *,
+ int (* bios_param)(struct scsi_device *, struct gendisk *,
sector_t, int []);
/*
@@ -379,11 +381,20 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
- * of simultaneous commands a single hw queue in HBA will accept.
+ * of simultaneous commands a single hw queue in HBA will accept
+ * excluding internal commands.
*/
int can_queue;
/*
+ * This determines how many commands the HBA will set aside
+ * for internal commands. This number will be added to
+ * @can_queue to calculate the maximum number of simultaneous
+ * commands sent to the host.
+ */
+ int nr_reserved_cmds;
+
+ /*
* In many instances, especially where disconnect / reconnect are
* supported, our host also has an ID on the SCSI bus. If this is
* the case, then it must be reserved. Please set this_id to -1 if
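A minimal sketch (hypothetical driver, all names invented) of a host template combining the new nr_reserved_cmds field with the new callback:

	static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

	/* Hypothetical path for BLK_MQ_REQ_RESERVED commands. */
	static int my_queue_reserved(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		return my_queuecommand(shost, cmd);
	}

	static const struct scsi_host_template my_sht = {
		.name			= "my_hba",
		.queuecommand		= my_queuecommand,
		.queue_reserved_command	= my_queue_reserved,
		.can_queue		= 64,	/* excludes internal commands */
		.nr_reserved_cmds	= 2,	/* added to can_queue by the core */
	};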
@@ -438,8 +449,10 @@ struct scsi_host_template {
*/
short cmd_per_lun;
- /* If use block layer to manage tags, this is tag allocation policy */
- int tag_alloc_policy;
+ /*
+	 * Allocate tags starting from the last allocated tag (round-robin).
+ */
+ bool tag_alloc_policy_rr : 1;
/*
* Track QUEUE_FULL events and reduce queue depth on demand.
@@ -599,7 +612,7 @@ struct Scsi_Host {
* have some way of identifying each detected host adapter properly
* and uniquely. For hosts that do not support more than one card
* in the system at one time, this does not need to be set. It is
- * initialized to 0 in scsi_register.
+ * initialized to 0 in scsi_host_alloc.
*/
unsigned int unique_id;
@@ -613,7 +626,17 @@ struct Scsi_Host {
unsigned short max_cmd_len;
int this_id;
+
+ /*
+ * Number of commands this host can handle at the same time.
+ * This excludes reserved commands as specified by nr_reserved_cmds.
+ */
int can_queue;
+ /*
+ * Number of reserved commands to allocate, if any.
+ */
+ unsigned int nr_reserved_cmds;
+
short cmd_per_lun;
short unsigned int sg_tablesize;
short unsigned int sg_prot_tablesize;
@@ -672,12 +695,9 @@ struct Scsi_Host {
/* The transport requires the LUN bits NOT to be stored in CDB[1] */
unsigned no_scsi2_lun_in_cdb:1;
- unsigned no_highmem:1;
-
/*
* Optional work queue to be utilized by the transport
*/
- char work_q_name[20];
struct workqueue_struct *work_q;
/*
@@ -708,6 +728,12 @@ struct Scsi_Host {
struct device shost_gendev, shost_dev;
/*
+ * A SCSI device structure used for sending internal commands to the
+ * HBA. There is no corresponding logical unit inside the SCSI device.
+ */
+ struct scsi_device *pseudo_sdev;
+
+ /*
* Points to the transport data (if any) which is allocated
* separately
*/
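Combined with scsi_device_is_pseudo_dev() earlier in this patch, a hedged fragment of how an LLD could tell internal commands apart (the my_handle_* helpers are hypothetical):

	static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		/* Internal commands arrive on the pseudo device, which has
		 * no backing logical unit (my_handle_* are hypothetical). */
		if (scsi_device_is_pseudo_dev(cmd->device))
			return my_handle_internal_cmd(shost, cmd);

		return my_handle_io_cmd(shost, cmd);
	}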
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 843106e1109f..f64385cde5b9 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -33,8 +33,8 @@
#define INQUIRY 0x12
#define RECOVER_BUFFERED_DATA 0x14
#define MODE_SELECT 0x15
-#define RESERVE 0x16
-#define RELEASE 0x17
+#define RESERVE_6 0x16
+#define RELEASE_6 0x17
#define COPY 0x18
#define ERASE 0x19
#define MODE_SENSE 0x1a
@@ -120,6 +120,7 @@
#define WRITE_SAME_16 0x93
#define ZBC_OUT 0x94
#define ZBC_IN 0x95
+#define WRITE_ATOMIC_16 0x9c
#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f
@@ -345,10 +346,9 @@ static_assert(sizeof(struct scsi_stream_status) == 8);
/* GET STREAM STATUS parameter data */
struct scsi_stream_status_header {
- __be32 len; /* length in bytes of stream_status[] array. */
+ __be32 len; /* length in bytes of following payload */
u16 reserved;
__be16 number_of_open_streams;
- DECLARE_FLEX_ARRAY(struct scsi_stream_status, stream_status);
};
static_assert(sizeof(struct scsi_stream_status_header) == 8);
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 4b884b8013e0..b908aacfef48 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -12,7 +12,7 @@
#include <linux/sched.h>
#include <linux/bsg-lib.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_host.h>
@@ -383,6 +383,8 @@ struct fc_rport { /* aka fc_starget_attrs */
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
struct request_queue *rqst_q; /* bsg support */
+
+ struct workqueue_struct *devloss_work_q;
} __attribute__((aligned(sizeof(unsigned long))));
/* bit field values for struct fc_rport "flags" field: */
@@ -575,10 +577,7 @@ struct fc_host_attrs {
u16 npiv_vports_inuse;
/* work queues for rport state manipulation */
- char work_q_name[20];
struct workqueue_struct *work_q;
- char devloss_work_q_name[20];
- struct workqueue_struct *devloss_work_q;
/* bsg support */
struct request_queue *rqst_q;
@@ -654,14 +653,8 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
#define fc_host_npiv_vports_inuse(x) \
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
-#define fc_host_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q_name(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name)
-#define fc_host_devloss_work_q(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
#define fc_host_max_ct_payload(x) \
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index fb3399e4cd29..76de2b662f4f 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -447,10 +447,6 @@ extern int iscsi_add_session(struct iscsi_cls_session *session,
unsigned int target_id);
extern int iscsi_session_event(struct iscsi_cls_session *session,
enum iscsi_uevent_e event);
-extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
- struct iscsi_transport *t,
- int dd_size,
- unsigned int target_id);
extern void iscsi_force_destroy_session(struct iscsi_cls_session *session);
extern void iscsi_remove_session(struct iscsi_cls_session *session);
extern void iscsi_free_session(struct iscsi_cls_session *session);
@@ -495,10 +491,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
extern int iscsi_flashnode_bus_match(struct device *dev,
- struct device_driver *drv);
+ const struct device_driver *drv);
extern struct device *
-iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
- int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, const void *data,
+ device_match_t fn);
extern struct device *
iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 0e75b9277c8c..e3b6ce3cbf88 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
void sas_disable_tlr(struct scsi_device *);
void sas_enable_tlr(struct scsi_device *);
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
+
extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
void sas_rphy_free(struct sas_rphy *);
diff --git a/include/scsi/scsicam.h b/include/scsi/scsicam.h
index 08edd603e521..1131f51ed2c8 100644
--- a/include/scsi/scsicam.h
+++ b/include/scsi/scsicam.h
@@ -13,7 +13,8 @@
#ifndef SCSICAM_H
#define SCSICAM_H
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip);
-bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]);
-unsigned char *scsi_bios_ptable(struct block_device *bdev);
+struct gendisk;
+int scsicam_bios_param(struct gendisk *disk, sector_t capacity, int *ip);
+bool scsi_partsize(struct gendisk *disk, sector_t capacity, int geom[3]);
+unsigned char *scsi_bios_ptable(struct gendisk *disk);
#endif /* def SCSICAM_H */
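A sketch of a bios_param implementation adapted to the gendisk-based prototypes above (the fallback geometry values are illustrative only):

	static int my_bios_param(struct scsi_device *sdev, struct gendisk *disk,
				 sector_t capacity, int geom[])
	{
		/* Prefer the geometry implied by the partition table. */
		if (scsi_partsize(disk, capacity, geom))
			return 0;

		geom[0] = 64;			/* heads */
		geom[1] = 32;			/* sectors per track */
		sector_div(capacity, 64 * 32);
		geom[2] = (int)capacity;	/* cylinders */
		return 0;
	}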
diff --git a/include/soc/arc/aux.h b/include/soc/arc/arc_aux.h
index 9c2eff6140b6..9c2eff6140b6 100644
--- a/include/soc/arc/aux.h
+++ b/include/soc/arc/arc_aux.h
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index d1a93c73f006..a78dacd149f1 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -8,7 +8,7 @@
#ifndef __SOC_ARC_MCIP_H
#define __SOC_ARC_MCIP_H
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
#define ARC_REG_MCIP_BCR 0x0d0
#define ARC_REG_MCIP_IDU_BCR 0x0D5
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
index ae99d3e855f1..51a74166296c 100644
--- a/include/soc/arc/timers.h
+++ b/include/soc/arc/timers.h
@@ -6,7 +6,7 @@
#ifndef __SOC_ARC_TIMERS_H
#define __SOC_ARC_TIMERS_H
-#include <soc/arc/aux.h>
+#include <soc/arc/arc_aux.h>
/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
diff --git a/include/soc/at91/sama7-sfrbu.h b/include/soc/at91/sama7-sfrbu.h
index 76b740810d34..8cee48d1ae2c 100644
--- a/include/soc/at91/sama7-sfrbu.h
+++ b/include/soc/at91/sama7-sfrbu.h
@@ -18,13 +18,6 @@
#define AT91_SFRBU_PSWBU_SOFTSWITCH (1 << 1) /* Power switch BU source selection */
#define AT91_SFRBU_PSWBU_CTRL (1 << 0) /* Power switch BU control */
-#define AT91_SFRBU_25LDOCR (0x0C) /* SFRBU 2.5V LDO Control Register */
-#define AT91_SFRBU_25LDOCR_LDOANAKEY (0x3B6E18 << 8) /* Specific value mandatory to allow writing of other register bits. */
-#define AT91_SFRBU_25LDOCR_STATE (1 << 3) /* LDOANA Switch On/Off Control */
-#define AT91_SFRBU_25LDOCR_LP (1 << 2) /* LDOANA Low-Power Mode Control */
-#define AT91_SFRBU_PD_VALUE_MSK (0x3)
-#define AT91_SFRBU_25LDOCR_PD_VALUE(v) ((v) & AT91_SFRBU_PD_VALUE_MSK) /* LDOANA Pull-down value */
-
#define AT91_FRBU_DDRPWR (0x10) /* SFRBU DDR Power Control Register */
#define AT91_FRBU_DDRPWR_STATE (1 << 0) /* DDR Power Mode State */
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 73cac8d0287e..e1f87fbfe554 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -152,6 +152,7 @@ enum rpi_firmware_clk_id {
RPI_FIRMWARE_M2MC_CLK_ID,
RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
RPI_FIRMWARE_VEC_CLK_ID,
+ RPI_FIRMWARE_DISP_CLK_ID,
RPI_FIRMWARE_NUM_CLK_ID,
};
diff --git a/include/soc/fsl/caam-blob.h b/include/soc/fsl/caam-blob.h
index 937cac52f36d..922f7ec3e231 100644
--- a/include/soc/fsl/caam-blob.h
+++ b/include/soc/fsl/caam-blob.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ * Copyright 2024-2025 NXP
*/
#ifndef __CAAM_BLOB_GEN
@@ -12,11 +13,34 @@
#define CAAM_BLOB_KEYMOD_LENGTH 16
#define CAAM_BLOB_OVERHEAD (32 + 16)
#define CAAM_BLOB_MAX_LEN 4096
+#define CAAM_ENC_ALGO_CCM 0x1
+#define CAAM_ENC_ALGO_ECB 0x2
+#define CAAM_NONCE_SIZE 6
+#define CAAM_ICV_SIZE 6
+#define CAAM_CCM_OVERHEAD (CAAM_NONCE_SIZE + CAAM_ICV_SIZE)
struct caam_blob_priv;
/**
+ * struct caam_pkey_info - information for CAAM protected key
+ * @is_pkey: flag identifying whether the key is protected
+ * @key_enc_algo: key encryption algorithm (CCM or ECB)
+ * @plain_key_sz: size of the plain key in bytes
+ * @key_buf: key data
+ */
+struct caam_pkey_info {
+ u8 is_pkey;
+ u8 key_enc_algo;
+ u16 plain_key_sz;
+ u8 key_buf[];
+} __packed;
+
+/* sizeof struct caam_pkey_info */
+#define CAAM_PKEY_HEADER 4
+
+/**
* struct caam_blob_info - information for CAAM blobbing
+ * @pkey_info: protected key information
* @input: pointer to input buffer (must be DMAable)
* @input_len: length of @input buffer in bytes.
* @output: pointer to output buffer (must be DMAable)
@@ -26,6 +50,8 @@ struct caam_blob_priv;
* May not exceed %CAAM_BLOB_KEYMOD_LENGTH
*/
struct caam_blob_info {
+ struct caam_pkey_info pkey_info;
+
void *input;
size_t input_len;
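A small consistency check that follows from the definitions above (a sketch; static_assert over offsetof is valid in kernel headers):

	/* CAAM_PKEY_HEADER must equal the fixed part of the struct,
	 * i.e. everything before the flexible key_buf[] array. */
	static_assert(CAAM_PKEY_HEADER == offsetof(struct caam_pkey_info, key_buf));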
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index af793f2a0ec4..8f967d15e479 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/types.h>
+struct device;
+
#define QE_NUM_OF_SNUM	256	/* There are 256 serial numbers in QE */
#define QE_NUM_OF_BRGS 16
#define QE_NUM_OF_PORTS 1024
@@ -93,8 +95,12 @@ int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
s32 cpm_muram_alloc(unsigned long size, unsigned long align);
+s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align);
void cpm_muram_free(s32 offset);
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+s32 devm_cpm_muram_alloc_fixed(struct device *dev, unsigned long offset,
+ unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(const void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
@@ -106,6 +112,12 @@ static inline s32 cpm_muram_alloc(unsigned long size,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc(struct device *dev, unsigned long size,
+ unsigned long align)
+{
+ return -ENOSYS;
+}
+
static inline void cpm_muram_free(s32 offset)
{
}
@@ -116,6 +128,13 @@ static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
return -ENOSYS;
}
+static inline s32 devm_cpm_muram_alloc_fixed(struct device *dev,
+ unsigned long offset,
+ unsigned long size)
+{
+ return -ENOSYS;
+}
+
static inline void __iomem *cpm_muram_addr(unsigned long offset)
{
return NULL;
@@ -172,7 +191,6 @@ static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
/*
* Pin multiplexing functions.
*/
-struct device;
struct qe_pin;
#ifdef CONFIG_QE_GPIO
extern struct qe_pin *qe_pin_request(struct device *dev, int index);
@@ -233,7 +251,9 @@ static inline int qe_alive_during_sleep(void)
/* we actually use cpm_muram implementation, define this for convenience */
#define qe_muram_init cpm_muram_init
#define qe_muram_alloc cpm_muram_alloc
+#define devm_qe_muram_alloc devm_cpm_muram_alloc
#define qe_muram_alloc_fixed cpm_muram_alloc_fixed
+#define devm_qe_muram_alloc_fixed devm_cpm_muram_alloc_fixed
#define qe_muram_free cpm_muram_free
#define qe_muram_addr cpm_muram_addr
#define qe_muram_offset cpm_muram_offset
@@ -449,6 +469,7 @@ enum comm_dir {
#define QE_QMC_STOP_TX 0x0000000c
#define QE_QMC_STOP_RX 0x0000000d
#define QE_SS7_SU_FIL_RESET 0x0000000e
+#define QE_PUSHSCHED 0x0000000f
/* jonathbr added from here down for 83xx */
#define QE_RESET_BCS 0x0000000a
#define QE_MCC_INIT_TX_RX_16 0x00000003
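A hedged usage sketch of the new device-managed allocator (size and alignment values are illustrative):

	/* Hypothetical: allocate 64 bytes of MURAM, 8-byte aligned; the
	 * allocation is released automatically when @dev unbinds. */
	static void __iomem *my_muram_buf(struct device *dev)
	{
		s32 off = devm_cpm_muram_alloc(dev, 64, 8);

		return off < 0 ? NULL : cpm_muram_addr(off);
	}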
diff --git a/include/soc/fsl/qe/qmc.h b/include/soc/fsl/qe/qmc.h
index 2a333fc1ea81..294e42ea8d4c 100644
--- a/include/soc/fsl/qe/qmc.h
+++ b/include/soc/fsl/qe/qmc.h
@@ -16,11 +16,32 @@ struct device_node;
struct device;
struct qmc_chan;
-struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name);
+int qmc_chan_count_phandles(struct device_node *np, const char *phandles_name);
+
+struct qmc_chan *qmc_chan_get_byphandles_index(struct device_node *np,
+ const char *phandles_name,
+ int index);
+struct qmc_chan *devm_qmc_chan_get_byphandles_index(struct device *dev,
+ struct device_node *np,
+ const char *phandles_name,
+ int index);
+
+static inline struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np,
+ const char *phandle_name)
+{
+ return qmc_chan_get_byphandles_index(np, phandle_name, 0);
+}
+
+static inline struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
+ struct device_node *np,
+ const char *phandle_name)
+{
+ return devm_qmc_chan_get_byphandles_index(dev, np, phandle_name, 0);
+}
+
struct qmc_chan *qmc_chan_get_bychild(struct device_node *np);
void qmc_chan_put(struct qmc_chan *chan);
-struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, struct device_node *np,
- const char *phandle_name);
+
struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, struct device_node *np);
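A usage sketch of the devres wrapper defined above (the phandle property name is an assumption for illustration):

	static int my_probe(struct device *dev)
	{
		struct qmc_chan *chan;

		/* "fsl,qmc-chan" is an assumed property name */
		chan = devm_qmc_chan_get_byphandle(dev, dev->of_node,
						   "fsl,qmc-chan");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		return 0;
	}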
enum qmc_mode {
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 0d3d6beb7fdb..7f7a4932d7f1 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -242,7 +242,7 @@ static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
- return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+ return be16_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
/* "Frame Dequeue Response" */
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
index 0bd67e10b704..ec04c98a8b63 100644
--- a/include/soc/microchip/mpfs.h
+++ b/include/soc/microchip/mpfs.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
struct mpfs_sys_controller;
@@ -44,7 +45,7 @@ struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_
#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
#if IS_ENABLED(CONFIG_RESET_POLARFIRE_SOC)
-int mpfs_reset_controller_register(struct device *clk_dev, void __iomem *base);
+int mpfs_reset_controller_register(struct device *clk_dev, struct regmap *map);
#else
static inline int mpfs_reset_controller_register(struct device *clk_dev, void __iomem *base) { return 0; }
#endif /* if IS_ENABLED(CONFIG_RESET_POLARFIRE_SOC) */
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 1e1b40f4e664..48d6deb3efd7 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -759,6 +759,14 @@ struct ocelot_mm_state {
u8 active_preemptible_tcs;
};
+struct ocelot_ts_stats {
+ u64 pkts;
+ u64 onestep_pkts_unconfirmed;
+ u64 lost;
+ u64 err;
+ struct u64_stats_sync syncp;
+};
+
struct ocelot_port;
struct ocelot_port {
@@ -778,7 +786,7 @@ struct ocelot_port {
phy_interface_t phy_mode;
- unsigned int ptp_skbs_in_flight;
+ struct ocelot_ts_stats *ts_stats;
struct sk_buff_head tx_skbs;
unsigned int trap_proto;
@@ -786,7 +794,6 @@ struct ocelot_port {
u16 mrp_ring_id;
u8 ptp_cmd;
- u8 ts_id;
u8 index;
@@ -813,6 +820,9 @@ struct ocelot {
const u32 *const *map;
struct list_head stats_regions;
+ spinlock_t inj_lock;
+ spinlock_t xtr_lock;
+
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
int packet_buffer_size;
int num_frame_refs;
@@ -966,10 +976,17 @@ void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset);
/* Packet I/O */
+void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp);
+void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp);
+void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp);
+void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp);
+void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp);
+void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp);
bool ocelot_can_inject(struct ocelot *ocelot, int grp);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb);
-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag);
+void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+ u32 rew_op, struct sk_buff *skb);
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp);
void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
@@ -1015,8 +1032,10 @@ void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_mac_stats *mac_stats);
void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_phy_stats *phy_stats);
+void ocelot_port_get_ts_stats(struct ocelot *ocelot, int port,
+ struct ethtool_ts_stats *ts_stats);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
struct netlink_ext_ack *extack);
@@ -1054,8 +1073,11 @@ int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
+void ocelot_hwstamp_get(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg);
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
struct sk_buff *skb,
struct sk_buff **clone);
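A hedged writer-side sketch for the new per-port timestamping stats, following the standard u64_stats_sync protocol for 64-bit counters:

	static void my_ts_stats_count_lost(struct ocelot_port *port)
	{
		struct ocelot_ts_stats *s = port->ts_stats;

		u64_stats_update_begin(&s->syncp);
		s->lost++;
		u64_stats_update_end(&s->syncp);
	}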
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index c601a4598b0d..eb19668a06db 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -13,6 +13,7 @@
*/
#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port) ((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
@@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan {
struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
enum ocelot_vcap_bit dei; /* DEI */
enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+ enum ocelot_vcap_bit tpid;
};
struct ocelot_vcap_key_etype {
diff --git a/include/soc/nuvoton/clock-npcm8xx.h b/include/soc/nuvoton/clock-npcm8xx.h
new file mode 100644
index 000000000000..1d974e89d8a8
--- /dev/null
+++ b/include/soc/nuvoton/clock-npcm8xx.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_NPCM8XX_CLOCK_H
+#define __SOC_NPCM8XX_CLOCK_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+
+struct npcm_clock_adev {
+ void __iomem *base;
+ struct auxiliary_device adev;
+};
+
+static inline struct npcm_clock_adev *to_npcm_clock_adev(struct auxiliary_device *_adev)
+{
+ return container_of(_adev, struct npcm_clock_adev, adev);
+}
+
+#endif
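A hedged sketch of the consumer side: an auxiliary-driver probe recovering the wrapper via the helper above (the driver function is invented):

	static int my_clk_probe(struct auxiliary_device *adev,
				const struct auxiliary_device_id *id)
	{
		struct npcm_clock_adev *clk_adev = to_npcm_clock_adev(adev);

		/* register clocks against clk_adev->base (driver-specific) */
		return clk_adev->base ? 0 : -ENODEV;
	}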
diff --git a/include/soc/qcom/ice.h b/include/soc/qcom/ice.h
index 5870a94599a2..4bee553f0a59 100644
--- a/include/soc/qcom/ice.h
+++ b/include/soc/qcom/ice.h
@@ -6,32 +6,29 @@
#ifndef __QCOM_ICE_H__
#define __QCOM_ICE_H__
+#include <linux/blk-crypto.h>
#include <linux/types.h>
struct qcom_ice;
-enum qcom_ice_crypto_key_size {
- QCOM_ICE_CRYPTO_KEY_SIZE_INVALID = 0x0,
- QCOM_ICE_CRYPTO_KEY_SIZE_128 = 0x1,
- QCOM_ICE_CRYPTO_KEY_SIZE_192 = 0x2,
- QCOM_ICE_CRYPTO_KEY_SIZE_256 = 0x3,
- QCOM_ICE_CRYPTO_KEY_SIZE_512 = 0x4,
-};
-
-enum qcom_ice_crypto_alg {
- QCOM_ICE_CRYPTO_ALG_AES_XTS = 0x0,
- QCOM_ICE_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
- QCOM_ICE_CRYPTO_ALG_AES_ECB = 0x2,
- QCOM_ICE_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
-};
-
int qcom_ice_enable(struct qcom_ice *ice);
int qcom_ice_resume(struct qcom_ice *ice);
int qcom_ice_suspend(struct qcom_ice *ice);
-int qcom_ice_program_key(struct qcom_ice *ice,
- u8 algorithm_id, u8 key_size,
- const u8 crypto_key[], u8 data_unit_size,
- int slot);
+int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
+ const struct blk_crypto_key *blk_key);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot);
-struct qcom_ice *of_qcom_ice_get(struct device *dev);
+enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice);
+int qcom_ice_derive_sw_secret(struct qcom_ice *ice,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+int qcom_ice_generate_key(struct qcom_ice *ice,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+int qcom_ice_prepare_key(struct qcom_ice *ice,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+int qcom_ice_import_key(struct qcom_ice *ice,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev);
+
#endif /* __QCOM_ICE_H__ */
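A hedged fragment of the consumer flow under the reworked API (error handling trimmed; the caller function is hypothetical):

	static int my_ice_setup(struct device *dev, unsigned int slot,
				const struct blk_crypto_key *key)
	{
		struct qcom_ice *ice = devm_of_qcom_ice_get(dev);

		if (IS_ERR(ice))
			return PTR_ERR(ice);

		return qcom_ice_program_key(ice, slot, key);
	}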
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
index a62d500a6fda..2cf9e2d8cd55 100644
--- a/include/soc/qcom/qcom-spmi-pmic.h
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022 Linaro. All rights reserved.
- * Author: Caleb Connolly <caleb.connolly@linaro.org>
+ * Author: Casey Connolly <casey.connolly@linaro.org>
*/
#ifndef __QCOM_SPMI_PMIC_H__
@@ -50,6 +50,8 @@
#define PMR735B_SUBTYPE 0x34
#define PM6350_SUBTYPE 0x36
#define PM4125_SUBTYPE 0x37
+#define PMM8650AU_SUBTYPE 0x4e
+#define PMM8650AU_PSAIL_SUBTYPE 0x4f
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
index 3acca067c72b..cff67ce25488 100644
--- a/include/soc/qcom/tcs.h
+++ b/include/soc/qcom/tcs.h
@@ -6,6 +6,9 @@
#ifndef __SOC_QCOM_TCS_H__
#define __SOC_QCOM_TCS_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
#define MAX_RPMH_PAYLOAD 16
/**
@@ -60,22 +63,17 @@ struct tcs_request {
struct tcs_cmd *cmds;
};
-#define BCM_TCS_CMD_COMMIT_SHFT 30
-#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
-#define BCM_TCS_CMD_VALID_SHFT 29
-#define BCM_TCS_CMD_VALID_MASK 0x20000000
-#define BCM_TCS_CMD_VOTE_X_SHFT 14
-#define BCM_TCS_CMD_VOTE_MASK 0x3fff
-#define BCM_TCS_CMD_VOTE_Y_SHFT 0
-#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
+#define BCM_TCS_CMD_COMMIT_MASK BIT(30)
+#define BCM_TCS_CMD_VALID_MASK BIT(29)
+#define BCM_TCS_CMD_VOTE_MASK GENMASK(13, 0)
+#define BCM_TCS_CMD_VOTE_Y_MASK GENMASK(13, 0)
+#define BCM_TCS_CMD_VOTE_X_MASK GENMASK(27, 14)
/* Construct a Bus Clock Manager (BCM) specific TCS command */
#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
- (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
- ((valid) << BCM_TCS_CMD_VALID_SHFT) | \
- ((cpu_to_le32(vote_x) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
- ((cpu_to_le32(vote_y) & \
- BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
+ (u32_encode_bits(commit, BCM_TCS_CMD_COMMIT_MASK) | \
+ u32_encode_bits(valid, BCM_TCS_CMD_VALID_MASK) | \
+ u32_encode_bits(vote_x, BCM_TCS_CMD_VOTE_X_MASK) | \
+ u32_encode_bits(vote_y, BCM_TCS_CMD_VOTE_Y_MASK))
#endif /* __SOC_QCOM_TCS_H__ */
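The reworked macro behaves like FIELD_PREP over the new masks; a minimal sketch:

	/* Build a committed, valid BCM vote; x and y land in bits 27:14
	 * and 13:0 respectively. */
	static u32 my_bcm_vote(u32 x, u32 y)
	{
		return BCM_TCS_CMD(1, 1, x, y);
	}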
diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h
index 630b35a55064..02a7b2432d99 100644
--- a/include/soc/rockchip/rk3588_grf.h
+++ b/include/soc/rockchip/rk3588_grf.h
@@ -12,7 +12,11 @@
#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
-#define RK3588_PMUGRF_OS_REG4 0x210
-#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG4 0x210
+#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG6 0x218
+#define RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE GENMASK(2, 1)
+/* Whether the LPDDR5 is in 2:1 (= 0) or 4:1 (= 1) CKR a.k.a. DQS mode */
+#define RK3588_PMUGRF_OS_REG6_LP5_CKR BIT(0)
#endif /* __SOC_RK3588_GRF_H */
diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h
index e46fd72aea8d..41c7bb26fd53 100644
--- a/include/soc/rockchip/rockchip_grf.h
+++ b/include/soc/rockchip/rockchip_grf.h
@@ -13,6 +13,7 @@ enum {
ROCKCHIP_DDRTYPE_LPDDR3 = 6,
ROCKCHIP_DDRTYPE_LPDDR4 = 7,
ROCKCHIP_DDRTYPE_LPDDR4X = 8,
+ ROCKCHIP_DDRTYPE_LPDDR5 = 9,
};
#endif /* __SOC_ROCKCHIP_GRF_H */
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
index c46a9ae2a2ab..501ad1fedb20 100644
--- a/include/soc/rockchip/rockchip_sip.h
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -6,6 +6,9 @@
#ifndef __SOC_ROCKCHIP_SIP_H
#define __SOC_ROCKCHIP_SIP_H
+#define ROCKCHIP_SIP_SUSPEND_MODE 0x82000003
+#define ROCKCHIP_SLEEP_PD_CONFIG 0xff
+
#define ROCKCHIP_SIP_DRAM_FREQ 0x82000008
#define ROCKCHIP_SIP_CONFIG_DRAM_INIT 0x00
#define ROCKCHIP_SIP_CONFIG_DRAM_SET_RATE 0x01
diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h
new file mode 100644
index 000000000000..354751562c55
--- /dev/null
+++ b/include/soc/spacemit/k1-syscon.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* SpacemiT clock and reset driver definitions for the K1 SoC */
+
+#ifndef __SOC_K1_SYSCON_H__
+#define __SOC_K1_SYSCON_H__
+
+/* Auxiliary device used to represent a CCU reset controller */
+struct spacemit_ccu_adev {
+ struct auxiliary_device adev;
+ struct regmap *regmap;
+};
+
+static inline struct spacemit_ccu_adev *
+to_spacemit_ccu_adev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct spacemit_ccu_adev, adev);
+}
+
+/* APBS register offset */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offset */
+#define MPMU_POSR 0x0010
+#define MPMU_FCCR 0x0008
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offset */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offset */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+/* RCPU register offsets */
+#define RCPU_SSP0_CLK_RST 0x0028
+#define RCPU_I2C0_CLK_RST 0x0030
+#define RCPU_UART1_CLK_RST 0x003c
+#define RCPU_CAN_CLK_RST 0x0048
+#define RCPU_IR_CLK_RST 0x004c
+#define RCPU_UART0_CLK_RST 0x00d8
+#define AUDIO_HDMI_CLK_CTRL 0x2044
+
+/* RCPU2 register offsets */
+#define RCPU2_PWM0_CLK_RST 0x0000
+#define RCPU2_PWM1_CLK_RST 0x0004
+#define RCPU2_PWM2_CLK_RST 0x0008
+#define RCPU2_PWM3_CLK_RST 0x000c
+#define RCPU2_PWM4_CLK_RST 0x0010
+#define RCPU2_PWM5_CLK_RST 0x0014
+#define RCPU2_PWM6_CLK_RST 0x0018
+#define RCPU2_PWM7_CLK_RST 0x001c
+#define RCPU2_PWM8_CLK_RST 0x0020
+#define RCPU2_PWM9_CLK_RST 0x0024
+
+/* APBC2 register offsets */
+#define APBC2_UART1_CLK_RST 0x0000
+#define APBC2_SSP2_CLK_RST 0x0004
+#define APBC2_TWSI3_CLK_RST 0x0008
+#define APBC2_RTC_CLK_RST 0x000c
+#define APBC2_TIMERS0_CLK_RST 0x0010
+#define APBC2_KPC_CLK_RST 0x0014
+#define APBC2_GPIO_CLK_RST 0x001c
+
+#endif /* __SOC_K1_SYSCON_H__ */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index 6b995a8f0f6d..dc0789c20333 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -3755,7 +3755,7 @@ enum mrq_pwr_limit_cmd {
* @defgroup bpmp_pwr_limit_type PWR_LIMIT TYPEs
* @{
*/
-/** @brief Limit value specifies traget cap */
+/** @brief Limit value specifies target cap */
#define PWR_LIMIT_TYPE_TARGET_CAP 0U
/** @brief Limit value specifies maximum possible target cap */
#define PWR_LIMIT_TYPE_BOUND_MAX 1U
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index 2fc641cb1982..882b849b9255 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -73,10 +73,7 @@ static inline struct ac97_codec_device *to_ac97_device(struct device *d)
return container_of(d, struct ac97_codec_device, dev);
}
-static inline struct ac97_codec_driver *to_ac97_driver(struct device_driver *d)
-{
- return container_of(d, struct ac97_codec_driver, driver);
-}
+#define to_ac97_driver(__drv) container_of_const(__drv, struct ac97_codec_driver, driver)
#if IS_ENABLED(CONFIG_AC97_BUS_NEW)
int snd_ac97_codec_driver_register(struct ac97_codec_driver *drv);
diff --git a/include/sound/aci.h b/include/sound/aci.h
index 6ebbd4223f12..36a761c9820d 100644
--- a/include/sound/aci.h
+++ b/include/sound/aci.h
@@ -72,6 +72,7 @@
#define ACI_SET_EQ7 0x46 /* ... to Treble */
struct snd_miro_aci {
+ struct snd_card *card;
unsigned long aci_port;
int aci_vendor;
int aci_product;
diff --git a/include/sound/adau1373.h b/include/sound/adau1373.h
deleted file mode 100644
index 4c32ba1328ed..000000000000
--- a/include/sound/adau1373.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Analog Devices ADAU1373 Audio Codec drive
- *
- * Copyright 2011 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __SOUND_ADAU1373_H__
-#define __SOUND_ADAU1373_H__
-
-enum adau1373_micbias_voltage {
- ADAU1373_MICBIAS_2_9V = 0,
- ADAU1373_MICBIAS_2_2V = 1,
- ADAU1373_MICBIAS_2_6V = 2,
- ADAU1373_MICBIAS_1_8V = 3,
-};
-
-#define ADAU1373_DRC_SIZE 13
-
-struct adau1373_platform_data {
- bool input_differential[4];
- bool lineout_differential;
- bool lineout_ground_sense;
-
- unsigned int num_drc;
- uint8_t drc_setting[3][ADAU1373_DRC_SIZE];
-
- enum adau1373_micbias_voltage micbias1;
- enum adau1373_micbias_voltage micbias2;
-};
-
-#endif
diff --git a/include/sound/asoundef.h b/include/sound/asoundef.h
index 9fdeac19dadb..c4a929d4fd51 100644
--- a/include/sound/asoundef.h
+++ b/include/sound/asoundef.h
@@ -12,6 +12,15 @@
* Digital audio interface *
* *
****************************************************************************/
+/* IEC958 subframe format */
+#define IEC958_SUBFRAME_PREAMBLE_MASK (0xfU)
+#define IEC958_SUBFRAME_AUXILIARY_MASK (0xfU << 4)
+#define IEC958_SUBFRAME_SAMPLE_24_MASK (0xffffffU << 4)
+#define IEC958_SUBFRAME_SAMPLE_20_MASK (0xfffffU << 8)
+#define IEC958_SUBFRAME_VALIDITY (0x1U << 28)
+#define IEC958_SUBFRAME_USER_DATA (0x1U << 29)
+#define IEC958_SUBFRAME_CHANNEL_STATUS (0x1U << 30)
+#define IEC958_SUBFRAME_PARITY (0x1U << 31)
/* AES/IEC958 channel status bits */
#define IEC958_AES0_PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
@@ -110,18 +119,22 @@
#define IEC958_AES2_CON_SOURCE_UNSPEC (0<<0) /* unspecified */
#define IEC958_AES2_CON_CHANNEL (15<<4) /* mask - channel number */
#define IEC958_AES2_CON_CHANNEL_UNSPEC (0<<4) /* unspecified */
-#define IEC958_AES3_CON_FS (15<<0) /* mask - sample frequency */
+#define IEC958_AES3_CON_FS ((1<<7) | (15<<0)) /* mask - sample frequency */
#define IEC958_AES3_CON_FS_44100 (0<<0) /* 44.1kHz */
#define IEC958_AES3_CON_FS_NOTID (1<<0) /* non indicated */
#define IEC958_AES3_CON_FS_48000 (2<<0) /* 48kHz */
#define IEC958_AES3_CON_FS_32000 (3<<0) /* 32kHz */
#define IEC958_AES3_CON_FS_22050 (4<<0) /* 22.05kHz */
+#define IEC958_AES3_CON_FS_384000 (5<<0) /* 384kHz */
#define IEC958_AES3_CON_FS_24000 (6<<0) /* 24kHz */
#define IEC958_AES3_CON_FS_88200 (8<<0) /* 88.2kHz */
#define IEC958_AES3_CON_FS_768000 (9<<0) /* 768kHz */
#define IEC958_AES3_CON_FS_96000 (10<<0) /* 96kHz */
#define IEC958_AES3_CON_FS_176400 (12<<0) /* 176.4kHz */
+#define IEC958_AES3_CON_FS_352400 (13<<0) /* 352.4kHz */
#define IEC958_AES3_CON_FS_192000 (14<<0) /* 192kHz */
+#define IEC958_AES3_CON_FS_128000 ((1<<7) | (11<<0)) /* 128kHz */
+#define IEC958_AES3_CON_FS_705600 ((1<<7) | (13<<0)) /* 705.6kHz */
#define IEC958_AES3_CON_CLOCK (3<<4) /* mask - clock accuracy */
#define IEC958_AES3_CON_CLOCK_1000PPM (0<<4) /* 1000 ppm */
#define IEC958_AES3_CON_CLOCK_50PPM (1<<4) /* 50 ppm */
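A sketch using the new subframe masks (a plain helper, not from this patch): pack a 24-bit sample into its subframe position.

	static u32 my_iec958_pack24(u32 sample24)
	{
		/* sample bits occupy subframe bits 27:4 */
		return (sample24 << 4) & IEC958_SUBFRAME_SAMPLE_24_MASK;
	}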
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index bcf872c17dd3..9e3d801e45ec 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -20,6 +20,30 @@
struct snd_compr_ops;
/**
+ * struct snd_compr_task_runtime: task runtime description
+ * @list: list of all managed tasks
+ * @input: input DMA buffer
+ * @output: output DMA buffer
+ * @seqno: sequence number
+ * @input_size: size of the valid data in the input buffer
+ * @output_size: size of the valid data in the output buffer
+ * @flags: see SND_COMPRESS_TFLG_*
+ * @state: actual task state
+ * @private_value: used by the lowlevel driver (opaque)
+ */
+struct snd_compr_task_runtime {
+ struct list_head list;
+ struct dma_buf *input;
+ struct dma_buf *output;
+ u64 seqno;
+ u64 input_size;
+ u64 output_size;
+ u32 flags;
+ u8 state;
+ void *private_value;
+};
+
+/**
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
@@ -37,6 +61,10 @@ struct snd_compr_ops;
* @dma_addr: physical buffer address (not accessible from main CPU)
* @dma_bytes: size of DMA area
* @dma_buffer_p: runtime dma buffer pointer
+ * @active_tasks: count of active tasks
+ * @total_tasks: count of all tasks
+ * @task_seqno: last task sequence number (!= 0)
+ * @tasks: list of all tasks
*/
struct snd_compr_runtime {
snd_pcm_state_t state;
@@ -54,6 +82,13 @@ struct snd_compr_runtime {
dma_addr_t dma_addr;
size_t dma_bytes;
struct snd_dma_buffer *dma_buffer_p;
+
+#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
+ u32 active_tasks;
+ u32 total_tasks;
+ u64 task_seqno;
+ struct list_head tasks;
+#endif
};
/**
@@ -108,6 +143,10 @@ struct snd_compr_stream {
* Not valid if copy is implemented
* @get_caps: Retrieve DSP capabilities, mandatory
* @get_codec_caps: Retrieve capabilities for a specific codec, mandatory
+ * @task_create: Create a set of input/output buffers for accel operations
+ * @task_start: Start (queue) a task for accel operations
+ * @task_stop: Stop (dequeue) a task for accel operations
+ * @task_free: Free a set of input/output buffers for accel operations
*/
struct snd_compr_ops {
int (*open)(struct snd_compr_stream *stream);
@@ -122,7 +161,7 @@ struct snd_compr_ops {
struct snd_compr_metadata *metadata);
int (*trigger)(struct snd_compr_stream *stream, int cmd);
int (*pointer)(struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int (*copy)(struct snd_compr_stream *stream, char __user *buf,
size_t count);
int (*mmap)(struct snd_compr_stream *stream,
@@ -132,6 +171,12 @@ struct snd_compr_ops {
struct snd_compr_caps *caps);
int (*get_codec_caps) (struct snd_compr_stream *stream,
struct snd_compr_codec_caps *codec);
+#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
+ int (*task_create) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
+ int (*task_start) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
+ int (*task_stop) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
+ int (*task_free) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task);
+#endif
};
/**
@@ -242,4 +287,9 @@ int snd_compr_free_pages(struct snd_compr_stream *stream);
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
+#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
+void snd_compr_task_finished(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task);
+#endif
+
#endif
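A hedged sketch of the completion side of the accel task flow (driver specifics omitted; the callback name is invented):

	/* Hypothetical: called when the DSP finishes a queued task. */
	static void my_task_done(struct snd_compr_stream *stream,
				 struct snd_compr_task_runtime *task,
				 u64 bytes_produced)
	{
		task->output_size = bytes_produced;
		snd_compr_task_finished(stream, task);
	}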
diff --git a/include/sound/control.h b/include/sound/control.h
index c1659036c4a7..e07f6b960641 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -81,7 +81,7 @@ struct snd_kcontrol {
unsigned long private_value;
void *private_data;
void (*private_free)(struct snd_kcontrol *kcontrol);
- struct snd_kcontrol_volatile vd[]; /* volatile data */
+ struct snd_kcontrol_volatile vd[] __counted_by(count); /* volatile data */
};
#define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list)
@@ -140,9 +140,7 @@ int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
-struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
/**
@@ -167,29 +165,6 @@ snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
return snd_ctl_find_id(card, &id);
}
-/**
- * snd_ctl_find_id_mixer_locked - find the control instance with the given name string
- * @card: the card instance
- * @name: the name string
- *
- * Finds the control instance with the given name and
- * @SNDRV_CTL_ELEM_IFACE_MIXER. Other fields are set to zero.
- *
- * This is merely a wrapper to snd_ctl_find_id_locked().
- * The caller must down card->controls_rwsem before calling this function.
- *
- * Return: The pointer of the instance if found, or %NULL if not.
- */
-static inline struct snd_kcontrol *
-snd_ctl_find_id_mixer_locked(struct snd_card *card, const char *name)
-{
- struct snd_ctl_elem_id id = {};
-
- id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
- strscpy(id.name, name, sizeof(id.name));
- return snd_ctl_find_id_locked(card, &id);
-}
-
int snd_ctl_create(struct snd_card *card);
int snd_ctl_register_ioctl(snd_kctl_ioctl_func_t fcn);
diff --git a/include/sound/core.h b/include/sound/core.h
index dfef0c9d4b9f..64327e971122 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -99,7 +99,7 @@ struct snd_card {
struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
- rwlock_t ctl_files_rwlock; /* ctl_files list lock */
+ rwlock_t controls_rwlock; /* lock for lookup and ctl_files list */
int controls_count; /* count of all controls */
size_t user_ctl_alloc_size; // current memory allocation by user controls.
struct list_head controls; /* all controls for this card */
@@ -326,7 +326,6 @@ void snd_device_disconnect(struct snd_card *card, void *device_data);
void snd_device_disconnect_all(struct snd_card *card);
void snd_device_free(struct snd_card *card, void *device_data);
void snd_device_free_all(struct snd_card *card);
-int snd_device_get_state(struct snd_card *card, void *device_data);
/* isadma.c */
@@ -345,46 +344,8 @@ void release_and_free_resource(struct resource *res);
/* --- */
-/* sound printk debug levels */
-enum {
- SND_PR_ALWAYS,
- SND_PR_DEBUG,
- SND_PR_VERBOSE,
-};
-
-#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
-__printf(4, 5)
-void __snd_printk(unsigned int level, const char *file, int line,
- const char *format, ...);
-#else
-#define __snd_printk(level, file, line, format, ...) \
- printk(format, ##__VA_ARGS__)
-#endif
-
-/**
- * snd_printk - printk wrapper
- * @fmt: format string
- *
- * Works like printk() but prints the file and the line of the caller
- * when configured with CONFIG_SND_VERBOSE_PRINTK.
- */
-#define snd_printk(fmt, ...) \
- __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
#ifdef CONFIG_SND_DEBUG
/**
- * snd_printd - debug printk
- * @fmt: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG is not set.
- */
-#define snd_printd(fmt, ...) \
- __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-#define _snd_printd(level, fmt, ...) \
- __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-
-/**
* snd_BUG - give a BUG warning message and stack trace
*
* Calls WARN() if CONFIG_SND_DEBUG is set.
@@ -393,12 +354,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
- * snd_printd_ratelimit - Suppress high rates of output when
- * CONFIG_SND_DEBUG is enabled.
- */
-#define snd_printd_ratelimit() printk_ratelimit()
-
-/**
* snd_BUG_ON - debugging check macro
* @cond: condition to evaluate
*
@@ -409,11 +364,6 @@ void __snd_printk(unsigned int level, const char *file, int line,
#else /* !CONFIG_SND_DEBUG */
-__printf(1, 2)
-static inline void snd_printd(const char *format, ...) {}
-__printf(2, 3)
-static inline void _snd_printd(int level, const char *format, ...) {}
-
#define snd_BUG() do { } while (0)
#define snd_BUG_ON(condition) ({ \
@@ -421,26 +371,8 @@ static inline void _snd_printd(int level, const char *format, ...) {}
unlikely(__ret_warn_on); \
})
-static inline bool snd_printd_ratelimit(void) { return false; }
-
#endif /* CONFIG_SND_DEBUG */
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-/**
- * snd_printdd - debug printk
- * @format: format string
- *
- * Works like snd_printk() for debugging purposes.
- * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
- */
-#define snd_printdd(format, ...) \
- __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
-#else
-__printf(1, 2)
-static inline void snd_printdd(const char *format, ...) {}
-#endif
-
-
#define SNDRV_OSS_VERSION ((3<<16)|(8<<8)|(1<<4)|(0)) /* 3.8.1a */
/* for easier backward-porting */
diff --git a/include/sound/cs-amp-lib.h b/include/sound/cs-amp-lib.h
index f481148735e1..61e00017c9aa 100644
--- a/include/sound/cs-amp-lib.h
+++ b/include/sound/cs-amp-lib.h
@@ -23,7 +23,7 @@ struct cirrus_amp_cal_data {
struct cirrus_amp_efi_data {
u32 size;
u32 count;
- struct cirrus_amp_cal_data data[];
+ struct cirrus_amp_cal_data data[] __counted_by(count);
} __packed;
/**
@@ -47,20 +47,44 @@ struct cirrus_amp_cal_controls {
int cs_amp_write_cal_coeffs(struct cs_dsp *dsp,
const struct cirrus_amp_cal_controls *controls,
const struct cirrus_amp_cal_data *data);
+int cs_amp_read_cal_coeffs(struct cs_dsp *dsp,
+ const struct cirrus_amp_cal_controls *controls,
+ struct cirrus_amp_cal_data *data);
+int cs_amp_write_ambient_temp(struct cs_dsp *dsp,
+ const struct cirrus_amp_cal_controls *controls,
+ u32 temp);
int cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, int amp_index,
struct cirrus_amp_cal_data *out_data);
+int cs_amp_set_efi_calibration_data(struct device *dev, int amp_index, int num_amps,
+ const struct cirrus_amp_cal_data *in_data);
+int cs_amp_get_vendor_spkid(struct device *dev);
+struct dentry *cs_amp_create_debugfs(struct device *dev);
+
+static inline u64 cs_amp_cal_target_u64(const struct cirrus_amp_cal_data *data)
+{
+ return ((u64)data->calTarget[1] << 32) | data->calTarget[0];
+}
struct cs_amp_test_hooks {
efi_status_t (*get_efi_variable)(efi_char16_t *name,
efi_guid_t *guid,
+ u32 *returned_attr,
unsigned long *size,
void *buf);
+ efi_status_t (*set_efi_variable)(efi_char16_t *name,
+ efi_guid_t *guid,
+ u32 attr,
+ unsigned long size,
+ void *buf);
int (*write_cal_coeff)(struct cs_dsp *dsp,
const struct cirrus_amp_cal_controls *controls,
const char *ctl_name, u32 val);
-};
+ int (*read_cal_coeff)(struct cs_dsp *dsp,
+ const struct cirrus_amp_cal_controls *controls,
+ const char *ctl_name, u32 *val);
+};
extern const struct cs_amp_test_hooks * const cs_amp_test_hooks;
#endif /* CS_AMP_LIB_H */
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index bb70782d15d0..7542cabfa726 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -609,6 +609,18 @@
#define CS35L41_DSP_NG_DELAY_MASK 0x0F00
#define CS35L41_DSP_NG_DELAY_SHIFT 8
+#define CS35L41_ASP_RX1_EN_MASK 0x00010000
+#define CS35L41_ASP_RX1_EN_SHIFT 16
+#define CS35L41_ASP_RX2_EN_MASK 0x00020000
+#define CS35L41_ASP_RX2_EN_SHIFT 17
+#define CS35L41_ASP_TX1_EN_MASK 0x00000001
+#define CS35L41_ASP_TX1_EN_SHIFT 0
+#define CS35L41_ASP_TX2_EN_MASK 0x00000002
+#define CS35L41_ASP_TX2_EN_SHIFT 1
+#define CS35L41_ASP_TX3_EN_MASK 0x00000004
+#define CS35L41_ASP_TX3_EN_SHIFT 2
+#define CS35L41_ASP_TX4_EN_MASK 0x00000008
+#define CS35L41_ASP_TX4_EN_SHIFT 3
#define CS35L41_ASP_FMT_MASK 0x0700
#define CS35L41_ASP_FMT_SHIFT 8
#define CS35L41_ASP_DOUT_HIZ_MASK 0x03
@@ -896,8 +908,8 @@ int cs35l41_test_key_lock(struct device *dev, struct regmap *regmap);
int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap);
int cs35l41_register_errata_patch(struct device *dev, struct regmap *reg, unsigned int reg_revid);
int cs35l41_set_channels(struct device *dev, struct regmap *reg,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot);
+ unsigned int tx_num, const unsigned int *tx_slot,
+ unsigned int rx_num, const unsigned int *rx_slot);
int cs35l41_gpio_config(struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg);
void cs35l41_configure_cs_dsp(struct device *dev, struct regmap *reg, struct cs_dsp *dsp);
int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap,
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 1a3c6f66f620..5928af539c46 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -9,11 +9,15 @@
#ifndef __CS35L56_H
#define __CS35L56_H
+#include <linux/debugfs.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/spi/spi.h>
#include <sound/cs-amp-lib.h>
+struct snd_ctl_elem_value;
+
#define CS35L56_DEVID 0x0000000
#define CS35L56_REVID 0x0000004
#define CS35L56_RELID 0x000000C
@@ -61,6 +65,9 @@
#define CS35L56_IRQ1_MASK_8 0x000E0AC
#define CS35L56_IRQ1_MASK_18 0x000E0D4
#define CS35L56_IRQ1_MASK_20 0x000E0DC
+#define CS35L56_MIXER_NGATE_CH1_CFG 0x0010004
+#define CS35L56_MIXER_NGATE_CH2_CFG 0x0010008
+#define CS35L56_DSP_MBOX_1_RAW 0x0011000
#define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020
#define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024
#define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028
@@ -69,6 +76,8 @@
#define CS35L56_DSP_VIRTUAL1_MBOX_6 0x0011034
#define CS35L56_DSP_VIRTUAL1_MBOX_7 0x0011038
#define CS35L56_DSP_VIRTUAL1_MBOX_8 0x001103C
+#define CS35L56_DIE_STS1 0x0017040
+#define CS35L56_DIE_STS2 0x0017044
#define CS35L56_DSP_RESTRICT_STS1 0x00190F0
#define CS35L56_DSP1_XMEM_PACKED_0 0x2000000
#define CS35L56_DSP1_XMEM_PACKED_6143 0x2005FFC
@@ -80,10 +89,10 @@
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044
#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
#define CS35L56_DSP1_FW_VER 0x2800010
-#define CS35L56_DSP1_HALO_STATE_A1 0x2801E58
#define CS35L56_DSP1_HALO_STATE 0x28021E0
-#define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000
+#define CS35L56_B2_DSP1_HALO_STATE 0x2803D20
#define CS35L56_DSP1_PM_CUR_STATE 0x2804308
+#define CS35L56_B2_DSP1_PM_CUR_STATE 0x2804678
#define CS35L56_DSP1_XMEM_UNPACKED24_8191 0x2807FFC
#define CS35L56_DSP1_CORE_BASE 0x2B80000
#define CS35L56_DSP1_SCRATCH1 0x2B805C0
@@ -104,6 +113,15 @@
#define CS35L56_DSP1_PMEM_0 0x3800000
#define CS35L56_DSP1_PMEM_5114 0x3804FE8
+#define CS35L63_DSP1_FW_VER CS35L56_DSP1_FW_VER
+#define CS35L63_DSP1_HALO_STATE 0x2803C04
+#define CS35L63_DSP1_PM_CUR_STATE 0x2804518
+#define CS35L63_PROTECTION_STATUS 0x340009C
+#define CS35L63_TRANSDUCER_ACTUAL_PS 0x34000F4
+#define CS35L63_MAIN_RENDER_USER_MUTE 0x3400020
+#define CS35L63_MAIN_RENDER_USER_VOLUME 0x3400028
+#define CS35L63_MAIN_POSTURE_NUMBER 0x3400068
+
/* DEVID */
#define CS35L56_DEVID_MASK 0x00FFFFFF
@@ -164,6 +182,9 @@
/* IRQ1_EINT_8 */
#define CS35L56_TEMP_ERR_EINT1_MASK 0x80000000
+/* MIXER_NGATE_CHn_CFG */
+#define CS35L56_AUX_NGATE_CHn_EN 0x00000001
+
/* Mixer input sources */
#define CS35L56_INPUT_SRC_NONE 0x00
#define CS35L56_INPUT_SRC_ASP1RX1 0x08
@@ -209,7 +230,7 @@
/* CS35L56_MAIN_RENDER_USER_VOLUME */
#define CS35L56_MAIN_RENDER_USER_VOLUME_MIN -400
-#define CS35L56_MAIN_RENDER_USER_VOLUME_MAX 400
+#define CS35L56_MAIN_RENDER_USER_VOLUME_MAX 48
#define CS35L56_MAIN_RENDER_USER_VOLUME_MASK 0x0000FFC0
#define CS35L56_MAIN_RENDER_USER_VOLUME_SHIFT 6
#define CS35L56_MAIN_RENDER_USER_VOLUME_SIGNBIT 9
@@ -226,9 +247,11 @@
#define CS35L56_HALO_STATE_SHUTDOWN 1
#define CS35L56_HALO_STATE_BOOT_DONE 2
+#define CS35L56_MBOX_CMD_PING 0x0A000000
#define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
#define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
#define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
+#define CS35L56_MBOX_CMD_AUDIO_CALIBRATION 0x0B000006
#define CS35L56_MBOX_CMD_HIBERNATE_NOW 0x02000001
#define CS35L56_MBOX_CMD_WAKEUP 0x02000002
#define CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE 0x02000003
@@ -244,21 +267,59 @@
#define CS35L56_PS3_POLL_US 500
#define CS35L56_PS3_TIMEOUT_US 300000
+#define CS35L56_CAL_STATUS_SUCCESS 1
+#define CS35L56_CAL_STATUS_OUT_OF_RANGE 3
+
+#define CS35L56_CAL_SET_STATUS_UNKNOWN 0
+#define CS35L56_CAL_SET_STATUS_DEFAULT 1
+#define CS35L56_CAL_SET_STATUS_SET 2
+
#define CS35L56_CONTROL_PORT_READY_US 2200
#define CS35L56_HALO_STATE_POLL_US 1000
#define CS35L56_HALO_STATE_TIMEOUT_US 250000
#define CS35L56_RESET_PULSE_MIN_US 1100
#define CS35L56_WAKE_HOLD_TIME_US 1000
+#define CS35L56_CALIBRATION_POLL_US (100 * USEC_PER_MSEC)
+#define CS35L56_CALIBRATION_TIMEOUT_US (5 * USEC_PER_SEC)
+
#define CS35L56_SDW1_PLAYBACK_PORT 1
#define CS35L56_SDW1_CAPTURE_PORT 3
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+/* Additional margin for SYSTEM_RESET to control port ready on SPI */
+#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500)
+
+struct cs35l56_spi_payload {
+ __be32 addr;
+ __be16 pad;
+ __be32 value;
+} __packed;
+static_assert(sizeof(struct cs35l56_spi_payload) == 10);
+
+struct cs35l56_fw_reg {
+ unsigned int fw_ver;
+ unsigned int halo_state;
+ unsigned int pm_cur_stat;
+ unsigned int prot_sts;
+ unsigned int transducer_actual_ps;
+ unsigned int user_mute;
+ unsigned int user_volume;
+ unsigned int posture_number;
+};
+
+struct cs35l56_cal_debugfs_fops {
+ const struct debugfs_short_fops calibrate;
+ const struct debugfs_short_fops cal_temperature;
+ const struct debugfs_short_fops cal_data;
+};
+
struct cs35l56_base {
struct device *dev;
struct regmap *regmap;
+ struct cs_dsp *dsp;
int irq;
struct mutex irq_lock;
u8 type;
@@ -267,25 +328,53 @@ struct cs35l56_base {
bool fw_patched;
bool secured;
bool can_hibernate;
- bool fw_owns_asp1;
bool cal_data_valid;
s8 cal_index;
+ u8 num_amps;
struct cirrus_amp_cal_data cal_data;
struct gpio_desc *reset_gpio;
+ struct cs35l56_spi_payload *spi_payload_buf;
+ const struct cs35l56_fw_reg *fw_reg;
+ const struct cirrus_amp_cal_controls *calibration_controls;
+ struct dentry *debugfs;
+ u64 silicon_uid;
};
-extern struct regmap_config cs35l56_regmap_i2c;
-extern struct regmap_config cs35l56_regmap_spi;
-extern struct regmap_config cs35l56_regmap_sdw;
+static inline bool cs35l56_is_otp_register(unsigned int reg)
+{
+ return (reg >> 16) == 3;
+}
+
+static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56,
+ struct spi_device *spi)
+{
+ cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev,
+ sizeof(*cs35l56->spi_payload_buf),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs35l56->spi_payload_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56)
+{
+ return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf;
+}
+
+extern const struct regmap_config cs35l56_regmap_i2c;
+extern const struct regmap_config cs35l56_regmap_spi;
+extern const struct regmap_config cs35l56_regmap_sdw;
+extern const struct regmap_config cs35l63_regmap_i2c;
+extern const struct regmap_config cs35l63_regmap_sdw;
extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
+extern const char * const cs35l56_cal_set_status_text[3];
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
-int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base);
-int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
@@ -299,8 +388,29 @@ int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base);
+int cs35l56_stash_calibration(struct cs35l56_base *cs35l56_base,
+ const struct cirrus_amp_cal_data *data);
+ssize_t cs35l56_calibrate_debugfs_write(struct cs35l56_base *cs35l56_base,
+ const char __user *from, size_t count,
+ loff_t *ppos);
+ssize_t cs35l56_cal_ambient_debugfs_write(struct cs35l56_base *cs35l56_base,
+ const char __user *from, size_t count,
+ loff_t *ppos);
+ssize_t cs35l56_cal_data_debugfs_read(struct cs35l56_base *cs35l56_base,
+ char __user *to, size_t count,
+ loff_t *ppos);
+ssize_t cs35l56_cal_data_debugfs_write(struct cs35l56_base *cs35l56_base,
+ const char __user *from, size_t count,
+ loff_t *ppos);
+void cs35l56_create_cal_debugfs(struct cs35l56_base *cs35l56_base,
+ const struct cs35l56_cal_debugfs_fops *fops);
+void cs35l56_remove_cal_debugfs(struct cs35l56_base *cs35l56_base);
+int cs35l56_cal_set_status_get(struct cs35l56_base *cs35l56_base,
+ struct snd_ctl_elem_value *uvalue);
int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
bool *fw_missing, unsigned int *fw_version);
+void cs35l56_warn_if_firmware_missing(struct cs35l56_base *cs35l56_base);
+void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
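
The new struct cs35l56_fw_reg lets code shared between CS35L56 and CS35L63 address firmware registers through one per-chip table instead of hard-coded constants. A minimal sketch, assuming a hypothetical cs35l63 table and helper (neither is part of this patch):

static const struct cs35l56_fw_reg cs35l63_fw_reg_example = {
	.fw_ver			= CS35L63_DSP1_FW_VER,
	.halo_state		= CS35L63_DSP1_HALO_STATE,
	.pm_cur_stat		= CS35L63_DSP1_PM_CUR_STATE,
	.prot_sts		= CS35L63_PROTECTION_STATUS,
	.transducer_actual_ps	= CS35L63_TRANSDUCER_ACTUAL_PS,
	.user_mute		= CS35L63_MAIN_RENDER_USER_MUTE,
	.user_volume		= CS35L63_MAIN_RENDER_USER_VOLUME,
	.posture_number		= CS35L63_MAIN_POSTURE_NUMBER,
};

/* Shared code reads through the table instead of a fixed address. */
static int example_read_fw_ver(struct cs35l56_base *cs35l56, unsigned int *ver)
{
	return regmap_read(cs35l56->regmap, cs35l56->fw_reg->fw_ver, ver);
}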
diff --git a/include/sound/cs42l52.h b/include/sound/cs42l52.h
deleted file mode 100644
index c20649666abe..000000000000
--- a/include/sound/cs42l52.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l52.h -- Platform data for CS42L52
- *
- * Copyright (c) 2012 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L52_H
-#define __CS42L52_H
-
-struct cs42l52_platform_data {
-
- /* MICBIAS Level. Check datasheet Pg48 */
- unsigned int micbias_lvl;
-
- /* MICA mode selection Differential or Single-ended */
- bool mica_diff_cfg;
-
- /* MICB mode selection Differential or Single-ended */
- bool micb_diff_cfg;
-
- /* Charge Pump Freq. Check datasheet Pg73 */
- unsigned int chgfreq;
-
- /* Reset GPIO */
- unsigned int reset_gpio;
-};
-
-#endif /* __CS42L52_H */
diff --git a/include/sound/cs42l56.h b/include/sound/cs42l56.h
deleted file mode 100644
index 62e9f7a3b414..000000000000
--- a/include/sound/cs42l56.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l56.h -- Platform data for CS42L56
- *
- * Copyright (c) 2014 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L56_H
-#define __CS42L56_H
-
-struct cs42l56_platform_data {
-
- /* GPIO for Reset */
- unsigned int gpio_nreset;
-
- /* MICBIAS Level. Check datasheet Pg48 */
- unsigned int micbias_lvl;
-
- /* Analog Input 1A Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain1a_ref_cfg;
-
- /* Analog Input 2A Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain2a_ref_cfg;
-
- /* Analog Input 1B Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain1b_ref_cfg;
-
- /* Analog Input 2B Reference 0=Single 1=Pseudo-Differential */
- unsigned int ain2b_ref_cfg;
-
- /* Charge Pump Freq. Check datasheet Pg62 */
- unsigned int chgfreq;
-
- /* HighPass Filter Right Channel Corner Frequency */
- unsigned int hpfb_freq;
-
- /* HighPass Filter Left Channel Corner Frequency */
- unsigned int hpfa_freq;
-
- /* Adaptive Power Control for LO/HP */
- unsigned int adaptive_pwr;
-
-};
-
-#endif /* __CS42L56_H */
diff --git a/include/sound/cs42l73.h b/include/sound/cs42l73.h
deleted file mode 100644
index 5a93393b6124..000000000000
--- a/include/sound/cs42l73.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/cs42l73.h -- Platform data for CS42L73
- *
- * Copyright (c) 2012 Cirrus Logic Inc.
- */
-
-#ifndef __CS42L73_H
-#define __CS42L73_H
-
-struct cs42l73_platform_data {
- /* RST GPIO */
- unsigned int reset_gpio;
- unsigned int chgfreq;
- int jack_detection;
- unsigned int mclk_freq;
-};
-
-#endif /* __CS42L73_H */
diff --git a/include/sound/cs48l32.h b/include/sound/cs48l32.h
new file mode 100644
index 000000000000..27b3e7cf999a
--- /dev/null
+++ b/include/sound/cs48l32.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register definitions for Cirrus Logic CS48L32
+ *
+ * Copyright (C) 2017-2018, 2020, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS48L32_H
+#define CS48L32_H
+
+/* pll_id for snd_soc_component_set_pll() */
+#define CS48L32_FLL1_REFCLK 1
+
+/* source for snd_soc_component_set_pll() */
+#define CS48L32_FLL_SRC_NONE -1
+#define CS48L32_FLL_SRC_MCLK1 0
+#define CS48L32_FLL_SRC_PDMCLK 5
+#define CS48L32_FLL_SRC_ASP1_BCLK 8
+#define CS48L32_FLL_SRC_ASP2_BCLK 9
+#define CS48L32_FLL_SRC_ASP1_FSYNC 12
+#define CS48L32_FLL_SRC_ASP2_FSYNC 13
+
+/* clk_id for snd_soc_component_set_sysclk() and snd_soc_dai_set_sysclk() */
+#define CS48L32_CLK_SYSCLK_1 1
+#define CS48L32_CLK_SYSCLK_2 2
+#define CS48L32_CLK_SYSCLK_3 3
+#define CS48L32_CLK_SYSCLK_4 4
+#define CS48L32_CLK_DSPCLK 7
+#define CS48L32_CLK_PDM_FLLCLK 13
+
+/* source for snd_soc_component_set_sysclk() */
+#define CS48L32_CLK_SRC_MCLK1 0x0
+#define CS48L32_CLK_SRC_FLL1 0x4
+#define CS48L32_CLK_SRC_ASP1_BCLK 0x8
+#define CS48L32_CLK_SRC_ASP2_BCLK 0x9
+
+struct cs48l32 {
+ struct regmap *regmap;
+ struct device *dev;
+ struct gpio_desc *reset_gpio;
+ struct clk *mclk1;
+ struct regulator_bulk_data core_supplies[2];
+ struct regulator *vdd_d;
+ int irq;
+};
+#endif
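
The pll_id, clk_id, and source values above are intended for the standard ASoC clocking calls. A hedged machine-driver sketch; the component pointer and clock rates are placeholders, not taken from this patch:

static int example_cs48l32_clocks(struct snd_soc_component *component)
{
	int ret;

	/* FLL1 referenced from MCLK1: 12.288 MHz in, 24.576 MHz out. */
	ret = snd_soc_component_set_pll(component, CS48L32_FLL1_REFCLK,
					CS48L32_FLL_SRC_MCLK1,
					12288000, 24576000);
	if (ret)
		return ret;

	/* SYSCLK_1 sourced from FLL1. */
	return snd_soc_component_set_sysclk(component, CS48L32_CLK_SYSCLK_1,
					    CS48L32_CLK_SRC_FLL1,
					    24576000, SND_SOC_CLOCK_IN);
}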
diff --git a/include/sound/cs48l32_registers.h b/include/sound/cs48l32_registers.h
new file mode 100644
index 000000000000..f29410fdf76f
--- /dev/null
+++ b/include/sound/cs48l32_registers.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Register definitions for Cirrus Logic CS48L32
+ *
+ * Copyright (C) 2017-2018, 2020, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS48L32_REGISTERS_H
+#define CS48L32_REGISTERS_H
+
+/* Register Addresses. */
+#define CS48L32_DEVID 0x0
+#define CS48L32_REVID 0x4
+#define CS48L32_OTPID 0x10
+#define CS48L32_SFT_RESET 0x20
+#define CS48L32_CTRL_IF_DEBUG3 0xA8
+#define CS48L32_MCU_CTRL1 0x804
+#define CS48L32_GPIO1_CTRL1 0xc08
+#define CS48L32_GPIO3_CTRL1 0xc10
+#define CS48L32_GPIO7_CTRL1 0xc20
+#define CS48L32_GPIO16_CTRL1 0xc44
+#define CS48L32_OUTPUT_SYS_CLK 0x1020
+#define CS48L32_AUXPDM_CTRL 0x1044
+#define CS48L32_AUXPDM_CTRL2 0x105c
+#define CS48L32_CLOCK32K 0x1400
+#define CS48L32_SYSTEM_CLOCK1 0x1404
+#define CS48L32_SYSTEM_CLOCK2 0x1408
+#define CS48L32_SAMPLE_RATE1 0x1420
+#define CS48L32_SAMPLE_RATE2 0x1424
+#define CS48L32_SAMPLE_RATE3 0x1428
+#define CS48L32_SAMPLE_RATE4 0x142c
+#define CS48L32_DSP_CLOCK1 0x1510
+#define CS48L32_FLL1_CONTROL1 0x1c00
+#define CS48L32_FLL1_CONTROL5 0x1c10
+#define CS48L32_FLL1_CONTROL6 0x1c14
+#define CS48L32_FLL1_GPIO_CLOCK 0x1ca0
+#define CS48L32_CHARGE_PUMP1 0x2000
+#define CS48L32_LDO2_CTRL1 0x2408
+#define CS48L32_MICBIAS_CTRL1 0x2410
+#define CS48L32_MICBIAS_CTRL5 0x2418
+#define CS48L32_IRQ1_CTRL_AOD 0x2710
+#define CS48L32_AOD_PAD_CTRL 0x2718
+#define CS48L32_INPUT_CONTROL 0x4000
+#define CS48L32_INPUT_STATUS 0x4004
+#define CS48L32_INPUT_RATE_CONTROL 0x4008
+#define CS48L32_INPUT_CONTROL2 0x400c
+#define CS48L32_INPUT_CONTROL3 0x4014
+#define CS48L32_INPUT1_CONTROL1 0x4020
+#define CS48L32_IN1L_CONTROL1 0x4024
+#define CS48L32_IN1L_CONTROL2 0x4028
+#define CS48L32_IN1R_CONTROL1 0x4044
+#define CS48L32_IN1R_CONTROL2 0x4048
+#define CS48L32_INPUT2_CONTROL1 0x4060
+#define CS48L32_IN2L_CONTROL1 0x4064
+#define CS48L32_IN2L_CONTROL2 0x4068
+#define CS48L32_IN2R_CONTROL1 0x4084
+#define CS48L32_IN2R_CONTROL2 0x4088
+#define CS48L32_INPUT_HPF_CONTROL 0x4244
+#define CS48L32_INPUT_VOL_CONTROL 0x4248
+#define CS48L32_AUXPDM_CONTROL1 0x4300
+#define CS48L32_AUXPDM_CONTROL2 0x4304
+#define CS48L32_AUXPDM1_CONTROL1 0x4308
+#define CS48L32_AUXPDM2_CONTROL1 0x4310
+#define CS48L32_ADC1L_ANA_CONTROL1 0x4688
+#define CS48L32_ADC1R_ANA_CONTROL1 0x468c
+#define CS48L32_ASP1_ENABLES1 0x6000
+#define CS48L32_ASP1_CONTROL3 0x600C
+#define CS48L32_ASP1_DATA_CONTROL5 0x6040
+#define CS48L32_ASP2_ENABLES1 0x6080
+#define CS48L32_ASP2_CONTROL3 0x608C
+#define CS48L32_ASP2_DATA_CONTROL5 0x60c0
+#define CS48L32_ASP1TX1_INPUT1 0x8200
+#define CS48L32_ASP1TX2_INPUT1 0x8210
+#define CS48L32_ASP1TX3_INPUT1 0x8220
+#define CS48L32_ASP1TX4_INPUT1 0x8230
+#define CS48L32_ASP1TX5_INPUT1 0x8240
+#define CS48L32_ASP1TX6_INPUT1 0x8250
+#define CS48L32_ASP1TX7_INPUT1 0x8260
+#define CS48L32_ASP1TX8_INPUT1 0x8270
+#define CS48L32_ASP1TX8_INPUT4 0x827c
+#define CS48L32_ASP2TX1_INPUT1 0x8300
+#define CS48L32_ASP2TX2_INPUT1 0x8310
+#define CS48L32_ASP2TX3_INPUT1 0x8320
+#define CS48L32_ASP2TX4_INPUT1 0x8330
+#define CS48L32_ASP2TX4_INPUT4 0x833c
+#define CS48L32_ISRC1INT1_INPUT1 0x8980
+#define CS48L32_ISRC1INT2_INPUT1 0x8990
+#define CS48L32_ISRC1INT3_INPUT1 0x89a0
+#define CS48L32_ISRC1INT4_INPUT1 0x89b0
+#define CS48L32_ISRC1DEC1_INPUT1 0x89c0
+#define CS48L32_ISRC1DEC2_INPUT1 0x89d0
+#define CS48L32_ISRC1DEC3_INPUT1 0x89e0
+#define CS48L32_ISRC1DEC4_INPUT1 0x89f0
+#define CS48L32_ISRC2INT1_INPUT1 0x8a00
+#define CS48L32_ISRC2INT2_INPUT1 0x8a10
+#define CS48L32_ISRC2DEC1_INPUT1 0x8a40
+#define CS48L32_ISRC2DEC2_INPUT1 0x8a50
+#define CS48L32_ISRC3INT1_INPUT1 0x8a80
+#define CS48L32_ISRC3INT2_INPUT1 0x8a90
+#define CS48L32_ISRC3DEC1_INPUT1 0x8ac0
+#define CS48L32_ISRC3DEC2_INPUT1 0x8ad0
+#define CS48L32_EQ1_INPUT1 0x8b80
+#define CS48L32_EQ2_INPUT1 0x8b90
+#define CS48L32_EQ3_INPUT1 0x8ba0
+#define CS48L32_EQ4_INPUT1 0x8bb0
+#define CS48L32_EQ4_INPUT4 0x8bbc
+#define CS48L32_DRC1L_INPUT1 0x8c00
+#define CS48L32_DRC1R_INPUT1 0x8c10
+#define CS48L32_DRC1R_INPUT4 0x8c1c
+#define CS48L32_DRC2L_INPUT1 0x8c20
+#define CS48L32_DRC2R_INPUT1 0x8c30
+#define CS48L32_DRC2R_INPUT4 0x8c3c
+#define CS48L32_LHPF1_INPUT1 0x8c80
+#define CS48L32_LHPF1_INPUT4 0x8c8c
+#define CS48L32_LHPF2_INPUT1 0x8c90
+#define CS48L32_LHPF2_INPUT4 0x8c9c
+#define CS48L32_LHPF3_INPUT1 0x8ca0
+#define CS48L32_LHPF3_INPUT4 0x8cac
+#define CS48L32_LHPF4_INPUT1 0x8cb0
+#define CS48L32_LHPF4_INPUT4 0x8cbc
+#define CS48L32_DSP1RX1_INPUT1 0x9000
+#define CS48L32_DSP1RX2_INPUT1 0x9010
+#define CS48L32_DSP1RX3_INPUT1 0x9020
+#define CS48L32_DSP1RX4_INPUT1 0x9030
+#define CS48L32_DSP1RX5_INPUT1 0x9040
+#define CS48L32_DSP1RX6_INPUT1 0x9050
+#define CS48L32_DSP1RX7_INPUT1 0x9060
+#define CS48L32_DSP1RX8_INPUT1 0x9070
+#define CS48L32_DSP1RX8_INPUT4 0x907c
+#define CS48L32_ISRC1_CONTROL1 0xa400
+#define CS48L32_ISRC1_CONTROL2 0xa404
+#define CS48L32_ISRC2_CONTROL1 0xa510
+#define CS48L32_ISRC2_CONTROL2 0xa514
+#define CS48L32_ISRC3_CONTROL1 0xa620
+#define CS48L32_ISRC3_CONTROL2 0xa624
+#define CS48L32_FX_SAMPLE_RATE 0xa800
+#define CS48L32_EQ_CONTROL1 0xa808
+#define CS48L32_EQ_CONTROL2 0xa80c
+#define CS48L32_EQ1_GAIN1 0xa810
+#define CS48L32_EQ1_GAIN2 0xa814
+#define CS48L32_EQ1_BAND1_COEFF1 0xa818
+#define CS48L32_EQ1_BAND1_COEFF2 0xa81c
+#define CS48L32_EQ1_BAND1_PG 0xa820
+#define CS48L32_EQ1_BAND2_COEFF1 0xa824
+#define CS48L32_EQ1_BAND2_COEFF2 0xa828
+#define CS48L32_EQ1_BAND2_PG 0xa82c
+#define CS48L32_EQ1_BAND3_COEFF1 0xa830
+#define CS48L32_EQ1_BAND3_COEFF2 0xa834
+#define CS48L32_EQ1_BAND3_PG 0xa838
+#define CS48L32_EQ1_BAND4_COEFF1 0xa83c
+#define CS48L32_EQ1_BAND4_COEFF2 0xa840
+#define CS48L32_EQ1_BAND4_PG 0xa844
+#define CS48L32_EQ1_BAND5_COEFF1 0xa848
+#define CS48L32_EQ1_BAND5_PG 0xa850
+#define CS48L32_EQ2_GAIN1 0xa854
+#define CS48L32_EQ2_GAIN2 0xa858
+#define CS48L32_EQ2_BAND1_COEFF1 0xa85c
+#define CS48L32_EQ2_BAND1_COEFF2 0xa860
+#define CS48L32_EQ2_BAND1_PG 0xa864
+#define CS48L32_EQ2_BAND2_COEFF1 0xa868
+#define CS48L32_EQ2_BAND2_COEFF2 0xa86c
+#define CS48L32_EQ2_BAND2_PG 0xa870
+#define CS48L32_EQ2_BAND3_COEFF1 0xa874
+#define CS48L32_EQ2_BAND3_COEFF2 0xa878
+#define CS48L32_EQ2_BAND3_PG 0xa87c
+#define CS48L32_EQ2_BAND4_COEFF1 0xa880
+#define CS48L32_EQ2_BAND4_COEFF2 0xa884
+#define CS48L32_EQ2_BAND4_PG 0xa888
+#define CS48L32_EQ2_BAND5_COEFF1 0xa88c
+#define CS48L32_EQ2_BAND5_PG 0xa894
+#define CS48L32_EQ3_GAIN1 0xa898
+#define CS48L32_EQ3_GAIN2 0xa89c
+#define CS48L32_EQ3_BAND1_COEFF1 0xa8a0
+#define CS48L32_EQ3_BAND1_COEFF2 0xa8a4
+#define CS48L32_EQ3_BAND1_PG 0xa8a8
+#define CS48L32_EQ3_BAND2_COEFF1 0xa8ac
+#define CS48L32_EQ3_BAND2_COEFF2 0xa8b0
+#define CS48L32_EQ3_BAND2_PG 0xa8b4
+#define CS48L32_EQ3_BAND3_COEFF1 0xa8b8
+#define CS48L32_EQ3_BAND3_COEFF2 0xa8bc
+#define CS48L32_EQ3_BAND3_PG 0xa8c0
+#define CS48L32_EQ3_BAND4_COEFF1 0xa8c4
+#define CS48L32_EQ3_BAND4_COEFF2 0xa8c8
+#define CS48L32_EQ3_BAND4_PG 0xa8cc
+#define CS48L32_EQ3_BAND5_COEFF1 0xa8d0
+#define CS48L32_EQ3_BAND5_PG 0xa8d8
+#define CS48L32_EQ4_GAIN1 0xa8dc
+#define CS48L32_EQ4_GAIN2 0xa8e0
+#define CS48L32_EQ4_BAND1_COEFF1 0xa8e4
+#define CS48L32_EQ4_BAND1_COEFF2 0xa8e8
+#define CS48L32_EQ4_BAND1_PG 0xa8ec
+#define CS48L32_EQ4_BAND2_COEFF1 0xa8f0
+#define CS48L32_EQ4_BAND2_COEFF2 0xa8f4
+#define CS48L32_EQ4_BAND2_PG 0xa8f8
+#define CS48L32_EQ4_BAND3_COEFF1 0xa8fc
+#define CS48L32_EQ4_BAND3_COEFF2 0xa900
+#define CS48L32_EQ4_BAND3_PG 0xa904
+#define CS48L32_EQ4_BAND4_COEFF1 0xa908
+#define CS48L32_EQ4_BAND4_COEFF2 0xa90c
+#define CS48L32_EQ4_BAND4_PG 0xa910
+#define CS48L32_EQ4_BAND5_COEFF1 0xa914
+#define CS48L32_EQ4_BAND5_PG 0xa91c
+#define CS48L32_LHPF_CONTROL1 0xaa30
+#define CS48L32_LHPF_CONTROL2 0xaa34
+#define CS48L32_LHPF1_COEFF 0xaa38
+#define CS48L32_LHPF2_COEFF 0xaa3c
+#define CS48L32_LHPF3_COEFF 0xaa40
+#define CS48L32_LHPF4_COEFF 0xaa44
+#define CS48L32_DRC1_CONTROL1 0xab00
+#define CS48L32_DRC1_CONTROL4 0xab0c
+#define CS48L32_DRC2_CONTROL1 0xab14
+#define CS48L32_DRC2_CONTROL4 0xab20
+#define CS48L32_TONE_GENERATOR1 0xb000
+#define CS48L32_TONE_GENERATOR2 0xb004
+#define CS48L32_COMFORT_NOISE_GENERATOR 0xb400
+#define CS48L32_US_CONTROL 0xb800
+#define CS48L32_US1_CONTROL 0xb804
+#define CS48L32_US1_DET_CONTROL 0xb808
+#define CS48L32_US2_CONTROL 0xb814
+#define CS48L32_US2_DET_CONTROL 0xb818
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_0 0x1700c
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_1 0x17010
+#define CS48L32_DSP1_XM_SRAM_IBUS_SETUP_24 0x1706c
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_0 0x17070
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_1 0x17074
+#define CS48L32_DSP1_YM_SRAM_IBUS_SETUP_8 0x17090
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_0 0x17094
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_1 0x17098
+#define CS48L32_DSP1_PM_SRAM_IBUS_SETUP_7 0x170b0
+#define CS48L32_IRQ1_STATUS 0x18004
+#define CS48L32_IRQ1_EINT_1 0x18010
+#define CS48L32_IRQ1_EINT_2 0x18014
+#define CS48L32_IRQ1_EINT_7 0x18028
+#define CS48L32_IRQ1_EINT_9 0x18030
+#define CS48L32_IRQ1_EINT_11 0x18038
+#define CS48L32_IRQ1_STS_1 0x18090
+#define CS48L32_IRQ1_STS_6 0x180a4
+#define CS48L32_IRQ1_STS_11 0x180b8
+#define CS48L32_IRQ1_MASK_1 0x18110
+#define CS48L32_IRQ1_MASK_2 0x18114
+#define CS48L32_IRQ1_MASK_7 0x18128
+#define CS48L32_IRQ1_MASK_9 0x18130
+#define CS48L32_IRQ1_MASK_11 0x18138
+#define CS48L32_DSP1_XMEM_PACKED_0 0x2000000
+#define CS48L32_DSP1_XMEM_PACKED_LAST 0x208fff0
+#define CS48L32_DSP1_SYS_INFO_ID 0x25e0000
+#define CS48L32_DSP1_AHBM_WINDOW_DEBUG_1 0x25e2044
+#define CS48L32_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS48L32_DSP1_XMEM_UNPACKED24_LAST 0x28bfff4
+#define CS48L32_DSP1_CLOCK_FREQ 0x2b80000
+#define CS48L32_DSP1_SAMPLE_RATE_TX8 0x2b802b8
+#define CS48L32_DSP1_SCRATCH1 0x2b805c0
+#define CS48L32_DSP1_SCRATCH4 0x2b805d8
+#define CS48L32_DSP1_CCM_CORE_CONTROL 0x2bc1000
+#define CS48L32_DSP1_STREAM_ARB_RESYNC_MSK1 0x2bc5a00
+#define CS48L32_DSP1_YMEM_PACKED_0 0x2c00000
+#define CS48L32_DSP1_YMEM_PACKED_LAST 0x2c2fff0
+#define CS48L32_DSP1_YMEM_UNPACKED24_0 0x3400000
+#define CS48L32_DSP1_YMEM_UNPACKED24_LAST 0x343fff4
+#define CS48L32_DSP1_PMEM_0 0x3800000
+#define CS48L32_DSP1_PMEM_LAST 0x3845fe8
+
+/* (0x0) DEVID */
+#define CS48L32_DEVID_MASK 0x00ffffff
+#define CS48L32_DEVID_SHIFT 0
+
+/* (0x4) REVID */
+#define CS48L32_AREVID_MASK 0x000000f0
+#define CS48L32_AREVID_SHIFT 4
+#define CS48L32_MTLREVID_MASK 0x0000000f
+#define CS48L32_MTLREVID_SHIFT 0
+
+/* (0x10) OTPID */
+#define CS48L32_OTPID_MASK 0x0000000f
+
+/* (0x0804) MCU_CTRL1 */
+#define CS48L32_MCU_STS_MASK 0x0000ff00
+#define CS48L32_MCU_STS_SHIFT 8
+
+/* (0xc08) GPIO1_CTRL1 */
+#define CS48L32_GPIOX_CTRL1_FN_MASK 0x000003ff
+
+/* (0x1020) OUTPUT_SYS_CLK */
+#define CS48L32_OPCLK_EN_SHIFT 15
+#define CS48L32_OPCLK_DIV_MASK 0x000000f8
+#define CS48L32_OPCLK_DIV_SHIFT 3
+#define CS48L32_OPCLK_SEL_MASK 0x00000007
+
+/* (0x105c) AUXPDM_CTRL2 */
+#define CS48L32_AUXPDMDAT2_SRC_SHIFT 4
+#define CS48L32_AUXPDMDAT1_SRC_SHIFT 0
+
+/* (0x1400) CLOCK32K */
+#define CS48L32_CLK_32K_EN_MASK 0x00000040
+#define CS48L32_CLK_32K_SRC_MASK 0x00000003
+
+/* (0x1404) SYSTEM_CLOCK1 */
+#define CS48L32_SYSCLK_FRAC_MASK 0x00008000
+#define CS48L32_SYSCLK_FREQ_MASK 0x00000700
+#define CS48L32_SYSCLK_FREQ_SHIFT 8
+#define CS48L32_SYSCLK_EN_SHIFT 6
+#define CS48L32_SYSCLK_SRC_MASK 0x0000001f
+#define CS48L32_SYSCLK_SRC_SHIFT 0
+
+/* (0x1408) SYSTEM_CLOCK2 */
+#define CS48L32_SYSCLK_FREQ_STS_MASK 0x00000700
+#define CS48L32_SYSCLK_FREQ_STS_SHIFT 8
+
+/* (0x1420) SAMPLE_RATE1 */
+#define CS48L32_SAMPLE_RATE_1_MASK 0x0000001f
+#define CS48L32_SAMPLE_RATE_1_SHIFT 0
+
+/* (0x1510) DSP_CLOCK1 */
+#define CS48L32_DSP_CLK_FREQ_MASK 0xffff0000
+#define CS48L32_DSP_CLK_FREQ_SHIFT 16
+
+/* (0x1c00) FLL_CONTROL1 */
+#define CS48L32_FLL_CTRL_UPD_MASK 0x00000004
+#define CS48L32_FLL_HOLD_MASK 0x00000002
+#define CS48L32_FLL_EN_MASK 0x00000001
+
+/* (0x1c04) FLL_CONTROL2 */
+#define CS48L32_FLL_LOCKDET_THR_MASK 0xf0000000
+#define CS48L32_FLL_LOCKDET_THR_SHIFT 28
+#define CS48L32_FLL_LOCKDET_MASK 0x08000000
+#define CS48L32_FLL_PHASEDET_MASK 0x00400000
+#define CS48L32_FLL_PHASEDET_SHIFT 22
+#define CS48L32_FLL_REFCLK_DIV_MASK 0x00030000
+#define CS48L32_FLL_REFCLK_DIV_SHIFT 16
+#define CS48L32_FLL_REFCLK_SRC_MASK 0x0000f000
+#define CS48L32_FLL_REFCLK_SRC_SHIFT 12
+#define CS48L32_FLL_N_MASK 0x000003ff
+#define CS48L32_FLL_N_SHIFT 0
+
+/* (0x1c08) FLL_CONTROL3 */
+#define CS48L32_FLL_LAMBDA_MASK 0xffff0000
+#define CS48L32_FLL_LAMBDA_SHIFT 16
+#define CS48L32_FLL_THETA_MASK 0x0000ffff
+#define CS48L32_FLL_THETA_SHIFT 0
+
+/* (0x1c0c) FLL_CONTROL4 */
+#define CS48L32_FLL_FD_GAIN_COARSE_SHIFT 16
+#define CS48L32_FLL_HP_MASK 0x00003000
+#define CS48L32_FLL_HP_SHIFT 12
+#define CS48L32_FLL_FB_DIV_MASK 0x000003ff
+#define CS48L32_FLL_FB_DIV_SHIFT 0
+
+/* (0x1c10) FLL_CONTROL5 */
+#define CS48L32_FLL_FRC_INTEG_UPD_MASK 0x00008000
+
+/* (0x2000) CHARGE_PUMP1 */
+#define CS48L32_CP2_BYPASS_SHIFT 1
+#define CS48L32_CP2_EN_SHIFT 0
+
+/* (0x2408) LDO2_CTRL1 */
+#define CS48L32_LDO2_VSEL_MASK 0x000007e0
+#define CS48L32_LDO2_VSEL_SHIFT 5
+
+/* (0x2410) MICBIAS_CTRL1 */
+#define CS48L32_MICB1_LVL_MASK 0x000001e0
+#define CS48L32_MICB1_LVL_SHIFT 5
+#define CS48L32_MICB1_EN_SHIFT 0
+
+/* (0x2418) MICBIAS_CTRL5 */
+#define CS48L32_MICB1C_EN_SHIFT 8
+#define CS48L32_MICB1B_EN_SHIFT 4
+#define CS48L32_MICB1A_EN_SHIFT 0
+
+/* (0x2710) IRQ1_CTRL_AOD */
+#define CS48L32_IRQ_POL_MASK 0x00000400
+
+/* (0x4000) INPUT_CONTROL */
+#define CS48L32_IN2L_EN_SHIFT 3
+#define CS48L32_IN2R_EN_SHIFT 2
+#define CS48L32_IN1L_EN_SHIFT 1
+#define CS48L32_IN1R_EN_SHIFT 0
+
+/* (0x400c) INPUT_CONTROL2 */
+#define CS48L32_PDM_FLLCLK_SRC_MASK 0x0000000f
+#define CS48L32_PDM_FLLCLK_SRC_SHIFT 0
+
+/* (0x4014) INPUT_CONTROL3 */
+#define CS48L32_IN_VU 0x20000000
+#define CS48L32_IN_VU_MASK 0x20000000
+#define CS48L32_IN_VU_SHIFT 29
+#define CS48L32_IN_VU_WIDTH 1
+
+/* (0x4020) INPUT1_CONTROL1 */
+#define CS48L32_IN1_OSR_SHIFT 16
+#define CS48L32_IN1_PDM_SUP_MASK 0x00000300
+#define CS48L32_IN1_PDM_SUP_SHIFT 8
+#define CS48L32_IN1_MODE_SHIFT 0
+
+/*
+ * (0x4024) IN1L_CONTROL1
+ * (0x4044) IN1R_CONTROL1
+ */
+#define CS48L32_INx_SRC_MASK 0x30000000
+#define CS48L32_INx_SRC_SHIFT 28
+#define CS48L32_INx_RATE_MASK 0x0000f800
+#define CS48L32_INx_RATE_SHIFT 11
+#define CS48L32_INx_HPF_SHIFT 2
+#define CS48L32_INx_LP_MODE_SHIFT 0
+
+/*
+ * (0x4028) IN1L_CONTROL2
+ * (0x4048) IN1R_CONTROL2
+ */
+#define CS48L32_INx_MUTE_MASK 0x10000000
+#define CS48L32_INx_VOL_SHIFT 16
+#define CS48L32_INx_PGA_VOL_SHIFT 1
+
+/* (0x4244) INPUT_HPF_CONTROL */
+#define CS48L32_IN_HPF_CUT_SHIFT 0
+
+/* (0x4248) INPUT_VOL_CONTROL */
+#define CS48L32_IN_VD_RAMP_SHIFT 4
+#define CS48L32_IN_VI_RAMP_SHIFT 0
+
+/* (0x4308) AUXPDM1_CONTROL1 */
+#define CS48L32_AUXPDM1_FREQ_SHIFT 16
+#define CS48L32_AUXPDM1_SRC_MASK 0x00000f00
+#define CS48L32_AUXPDM1_SRC_SHIFT 8
+
+/* (0x4688) ADC1L_ANA_CONTROL1 */
+/* (0x468c) ADC1R_ANA_CONTROL1 */
+#define CS48L32_ADC1x_INT_ENA_FRC_MASK 0x00000002
+
+/* (0x6004) ASPn_CONTROL1 */
+#define CS48L32_ASP_RATE_MASK 0x00001f00
+#define CS48L32_ASP_RATE_SHIFT 8
+#define CS48L32_ASP_BCLK_FREQ_MASK 0x0000003f
+
+/* (0x6008) ASPn_CONTROL2 */
+#define CS48L32_ASP_RX_WIDTH_MASK 0xff000000
+#define CS48L32_ASP_RX_WIDTH_SHIFT 24
+#define CS48L32_ASP_TX_WIDTH_MASK 0x00ff0000
+#define CS48L32_ASP_TX_WIDTH_SHIFT 16
+#define CS48L32_ASP_FMT_MASK 0x00000700
+#define CS48L32_ASP_FMT_SHIFT 8
+#define CS48L32_ASP_BCLK_INV_MASK 0x00000040
+#define CS48L32_ASP_BCLK_MSTR_MASK 0x00000010
+#define CS48L32_ASP_FSYNC_INV_MASK 0x00000004
+#define CS48L32_ASP_FSYNC_MSTR_MASK 0x00000001
+
+/* (0x6010) ASPn_CONTROL3 */
+#define CS48L32_ASP_DOUT_HIZ_MASK 0x00000003
+
+/* (0x6030) ASPn_DATA_CONTROL1 */
+#define CS48L32_ASP_TX_WL_MASK 0x0000003f
+
+/* (0x6040) ASPn_DATA_CONTROL5 */
+#define CS48L32_ASP_RX_WL_MASK 0x0000003f
+
+/* (0x82xx - 0x90xx) *_INPUT[1-4] */
+#define CS48L32_MIXER_VOL_MASK 0x00FE0000
+#define CS48L32_MIXER_VOL_SHIFT 17
+#define CS48L32_MIXER_VOL_WIDTH 7
+#define CS48L32_MIXER_SRC_MASK 0x000001ff
+#define CS48L32_MIXER_SRC_SHIFT 0
+#define CS48L32_MIXER_SRC_WIDTH 9
+
+/* (0xa400) ISRC1_CONTROL1 */
+#define CS48L32_ISRC1_FSL_MASK 0xf8000000
+#define CS48L32_ISRC1_FSL_SHIFT 27
+#define CS48L32_ISRC1_FSH_MASK 0x0000f800
+#define CS48L32_ISRC1_FSH_SHIFT 11
+
+/* (0xa404) ISRC1_CONTROL2 */
+#define CS48L32_ISRC1_INT4_EN_SHIFT 11
+#define CS48L32_ISRC1_INT3_EN_SHIFT 10
+#define CS48L32_ISRC1_INT2_EN_SHIFT 9
+#define CS48L32_ISRC1_INT1_EN_SHIFT 8
+#define CS48L32_ISRC1_DEC4_EN_SHIFT 3
+#define CS48L32_ISRC1_DEC3_EN_SHIFT 2
+#define CS48L32_ISRC1_DEC2_EN_SHIFT 1
+#define CS48L32_ISRC1_DEC1_EN_SHIFT 0
+
+/* (0xa800) FX_SAMPLE_RATE */
+#define CS48L32_FX_RATE_MASK 0x0000f800
+#define CS48L32_FX_RATE_SHIFT 11
+
+/* (0xab00) DRC1_CONTROL1 */
+#define CS48L32_DRC1L_EN_SHIFT 1
+#define CS48L32_DRC1R_EN_SHIFT 0
+
+/* (0xb400) Comfort_Noise_Generator */
+#define CS48L32_NOISE_GEN_RATE_MASK 0x0000f800
+#define CS48L32_NOISE_GEN_RATE_SHIFT 11
+#define CS48L32_NOISE_GEN_EN_SHIFT 5
+#define CS48L32_NOISE_GEN_GAIN_SHIFT 0
+
+/* (0xb800) US_CONTROL */
+#define CS48L32_US1_DET_EN_SHIFT 8
+
+/* (0xb804) US1_CONTROL */
+#define CS48L32_US1_RATE_MASK 0xf8000000
+#define CS48L32_US1_RATE_SHIFT 27
+#define CS48L32_US1_GAIN_SHIFT 12
+#define CS48L32_US1_SRC_MASK 0x00000f00
+#define CS48L32_US1_SRC_SHIFT 8
+#define CS48L32_US1_FREQ_MASK 0x00000070
+#define CS48L32_US1_FREQ_SHIFT 4
+
+/* (0xb808) US1_DET_CONTROL */
+#define CS48L32_US1_DET_DCY_SHIFT 28
+#define CS48L32_US1_DET_HOLD_SHIFT 24
+#define CS48L32_US1_DET_NUM_SHIFT 20
+#define CS48L32_US1_DET_THR_SHIFT 16
+#define CS48L32_US1_DET_LPF_CUT_SHIFT 5
+#define CS48L32_US1_DET_LPF_SHIFT 4
+
+/* (0x18004) IRQ1_STATUS */
+#define CS48L32_IRQ1_STS_MASK 0x00000001
+
+/* (0x18014) IRQ1_EINT_2 */
+#define CS48L32_BOOT_DONE_EINT1_MASK 0x00000008
+
+/* (0x18028) IRQ1_EINT_7 */
+#define CS48L32_DSP1_MPU_ERR_EINT1_MASK 0x00200000
+#define CS48L32_DSP1_WDT_EXPIRE_EINT1_MASK 0x00100000
+
+/* (0x18030) IRQ1_EINT_9 */
+#define CS48L32_DSP1_IRQ0_EINT1_MASK 0x00000001
+
+/* (0x180a4) IRQ1_STS_6 */
+#define CS48L32_FLL1_LOCK_STS1_MASK 0x00000001
+
+#endif
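
These mask/shift pairs follow the usual regmap field conventions. An illustrative write, assuming a valid regmap and placeholder field values:

static int example_program_sysclk(struct regmap *regmap,
				  unsigned int freq_sel, unsigned int src)
{
	return regmap_update_bits(regmap, CS48L32_SYSTEM_CLOCK1,
				  CS48L32_SYSCLK_FREQ_MASK |
				  CS48L32_SYSCLK_SRC_MASK,
				  (freq_sel << CS48L32_SYSCLK_FREQ_SHIFT) |
				  (src << CS48L32_SYSCLK_SRC_SHIFT));
}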
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c11aaf8079fb..9472f0a966a2 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -36,9 +36,8 @@ snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
struct dma_chan *chan);
int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream);
-int snd_dmaengine_pcm_open_request_chan(struct snd_pcm_substream *substream,
-	dma_filter_fn filter_fn, void *filter_data);
+int snd_dmaengine_pcm_sync_stop(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream);
struct dma_chan *snd_dmaengine_pcm_request_channel(dma_filter_fn filter_fn,
@@ -70,6 +69,10 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @peripheral_config: peripheral configuration for programming peripheral
* for dmaengine transfer
* @peripheral_size: peripheral configuration buffer size
+ * @port_window_size: The length, in words, of the register area through which
+ * the data is accessed on the device side. It is only used for devices that use
+ * an area instead of a single register to send/receive the data. Typically the
+ * DMA loops over this area in order to transfer the data.
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
@@ -81,6 +84,7 @@ struct snd_dmaengine_dai_dma_data {
unsigned int flags;
void *peripheral_config;
size_t peripheral_size;
+ u32 port_window_size;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
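
A hedged sketch of how a DAI driver might use the new port_window_size field for a device that exposes a multi-word FIFO window instead of a single data register (the address and sizes are placeholders):

static struct snd_dmaengine_dai_dma_data example_dma_data = {
	.addr		  = 0x4000a000,	/* placeholder FIFO base */
	.addr_width	  = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.maxburst	  = 4,
	.port_window_size = 4,		/* DMA loops over a 4-word area */
};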
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 38db50b280eb..4f94565c9d15 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1842,8 +1842,7 @@ unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg,
void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
-static inline void snd_emu1010_fpga_lock(struct snd_emu10k1 *emu) { mutex_lock(&emu->emu1010.lock); };
-static inline void snd_emu1010_fpga_unlock(struct snd_emu10k1 *emu) { mutex_unlock(&emu->emu1010.lock); };
+DEFINE_GUARD(snd_emu1010_fpga_lock, struct snd_emu10k1 *, mutex_lock(&(_T)->emu1010.lock), mutex_unlock(&(_T)->emu1010.lock))
void snd_emu1010_fpga_write_lock(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
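
The DEFINE_GUARD() above replaces the open-coded lock/unlock inlines with a <linux/cleanup.h> scope guard, so callers can no longer forget the unlock. A minimal sketch of a caller (the register write is illustrative only):

static void example_fpga_access(struct snd_emu10k1 *emu)
{
	guard(snd_emu1010_fpga_lock)(emu);	/* unlocked at scope exit */

	snd_emu1010_fpga_write(emu, 0x00, 0x01);	/* placeholder reg/value */
}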
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 099569c31fbb..425a3717d77a 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -17,6 +17,7 @@
#define ES1688_HW_UNDEF 0x0003
struct snd_es1688 {
+ struct snd_card *card;
unsigned long port; /* port of ESS chip */
struct resource *res_port;
unsigned long mpu_port; /* MPU-401 port of ESS chip */
diff --git a/include/sound/gus.h b/include/sound/gus.h
index cd8da68cab92..321ae93625eb 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -513,25 +513,8 @@ struct _SND_IW_LFO_PROGRAM {
unsigned short depth;
};
-#if 0
-extern irqreturn_t snd_gf1_lfo_effect_interrupt(struct snd_gus_card * gus, snd_gf1_voice_t * voice);
-#endif
-extern void snd_gf1_lfo_init(struct snd_gus_card * gus);
-extern void snd_gf1_lfo_done(struct snd_gus_card * gus);
-extern void snd_gf1_lfo_program(struct snd_gus_card * gus, int voice, int lfo_type, struct _SND_IW_LFO_PROGRAM *program);
-extern void snd_gf1_lfo_enable(struct snd_gus_card * gus, int voice, int lfo_type);
-extern void snd_gf1_lfo_disable(struct snd_gus_card * gus, int voice, int lfo_type);
-extern void snd_gf1_lfo_change_freq(struct snd_gus_card * gus, int voice, int lfo_type, int freq);
-extern void snd_gf1_lfo_change_depth(struct snd_gus_card * gus, int voice, int lfo_type, int depth);
-extern void snd_gf1_lfo_setup(struct snd_gus_card * gus, int voice, int lfo_type, int freq, int current_depth, int depth, int sweep, int shape);
-extern void snd_gf1_lfo_shutdown(struct snd_gus_card * gus, int voice, int lfo_type);
-#if 0
-extern void snd_gf1_lfo_command(struct snd_gus_card * gus, int voice, unsigned char *command);
-#endif
-
/* gus_mem.c */
-void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup);
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block);
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
char *name, int size, int w_16,
@@ -578,14 +561,8 @@ int snd_gf1_new_mixer(struct snd_gus_card * gus);
int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index);
-#ifdef CONFIG_SND_DEBUG
-extern void snd_gf1_print_voice_registers(struct snd_gus_card * gus);
-#endif
-
/* gus.c */
-int snd_gus_use_inc(struct snd_gus_card * gus);
-void snd_gus_use_dec(struct snd_gus_card * gus);
int snd_gus_create(struct snd_card *card,
unsigned long port,
int irq, int dma1, int dma2,
diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h
index 9ced94686ce3..4327317be6af 100644
--- a/include/sound/hda-mlink.h
+++ b/include/sound/hda-mlink.h
@@ -15,6 +15,7 @@ int hda_bus_ml_init(struct hdac_bus *bus);
void hda_bus_ml_free(struct hdac_bus *bus);
int hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid);
+void hdac_bus_eml_enable_interrupt_unlocked(struct hdac_bus *bus, bool alt, int elid, bool enable);
void hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable);
bool hdac_bus_eml_check_interrupt(struct hdac_bus *bus, bool alt, int elid);
@@ -61,6 +62,12 @@ struct mutex *hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid);
int hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable);
+/* microphone privacy specific functions supported by ACE3+ architecture */
+void hdac_bus_eml_set_mic_privacy_mask(struct hdac_bus *bus, bool alt, int elid,
+ unsigned long mask);
+bool hdac_bus_eml_is_mic_privacy_changed(struct hdac_bus *bus, bool alt, int elid);
+bool hdac_bus_eml_get_mic_privacy_state(struct hdac_bus *bus, bool alt, int elid);
+
#else
static inline int
@@ -72,6 +79,9 @@ static inline int
hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid) { return 0; }
static inline void
+hdac_bus_eml_enable_interrupt_unlocked(struct hdac_bus *bus, bool alt, int elid, bool enable) { }
+
+static inline void
hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable) { }
static inline bool
@@ -181,4 +191,23 @@ hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enabl
{
return 0;
}
+
+static inline void
+hdac_bus_eml_set_mic_privacy_mask(struct hdac_bus *bus, bool alt, int elid,
+ unsigned long mask)
+{
+}
+
+static inline bool
+hdac_bus_eml_is_mic_privacy_changed(struct hdac_bus *bus, bool alt, int elid)
+{
+ return false;
+}
+
+static inline bool
+hdac_bus_eml_get_mic_privacy_state(struct hdac_bus *bus, bool alt, int elid)
+{
+ return false;
+}
+
#endif /* CONFIG_SND_SOC_SOF_HDA_MLINK */
diff --git a/include/sound/hda-sdw-bpt.h b/include/sound/hda-sdw-bpt.h
new file mode 100644
index 000000000000..f649549b75d5
--- /dev/null
+++ b/include/sound/hda-sdw-bpt.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2025 Intel Corporation.
+ */
+
+#ifndef __HDA_SDW_BPT_H
+#define __HDA_SDW_BPT_H
+
+#include <linux/device.h>
+
+struct hdac_ext_stream;
+struct snd_dma_buffer;
+
+#if IS_ENABLED(CONFIG_SND_SOF_SOF_HDA_SDW_BPT)
+int hda_sdw_bpt_open(struct device *dev, int link_id, struct hdac_ext_stream **bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes,
+ u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl, u32 bpt_rx_num_bytes,
+ u32 rx_dma_bandwidth);
+
+int hda_sdw_bpt_send_async(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream);
+
+int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream);
+
+int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, struct hdac_ext_stream *bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl);
+#else
+static inline int hda_sdw_bpt_open(struct device *dev, int link_id,
+ struct hdac_ext_stream **bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl, u32 bpt_tx_num_bytes,
+ u32 tx_dma_bandwidth, struct hdac_ext_stream **bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl, u32 bpt_rx_num_bytes,
+ u32 rx_dma_bandwidth)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_send_async(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_wait(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct hdac_ext_stream *bpt_rx_stream)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+
+static inline int hda_sdw_bpt_close(struct device *dev, struct hdac_ext_stream *bpt_tx_stream,
+ struct snd_dma_buffer *dmab_tx_bdl,
+ struct hdac_ext_stream *bpt_rx_stream,
+ struct snd_dma_buffer *dmab_rx_bdl)
+{
+ WARN_ONCE(1, "SoundWire BPT is disabled");
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif /* __HDA_SDW_BPT_H */
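
The new BPT (bulk payload transfer) API is an open/send/wait/close sequence. A hedged sketch of the intended call flow; the zero DMA-bandwidth arguments are placeholders and error unwinding is trimmed:

static int example_bpt_transfer(struct device *dev, int link_id,
				struct snd_dma_buffer *tx_bdl, u32 tx_bytes,
				struct snd_dma_buffer *rx_bdl, u32 rx_bytes)
{
	struct hdac_ext_stream *tx, *rx;
	int ret;

	ret = hda_sdw_bpt_open(dev, link_id, &tx, tx_bdl, tx_bytes, 0,
			       &rx, rx_bdl, rx_bytes, 0);
	if (ret)
		return ret;

	ret = hda_sdw_bpt_send_async(dev, tx, rx);
	if (!ret)
		ret = hda_sdw_bpt_wait(dev, tx, rx);

	hda_sdw_bpt_close(dev, tx, tx_bdl, rx, rx_bdl);
	return ret;
}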
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 575e55aa08ca..5d9f0ef228af 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -27,6 +27,7 @@ struct hda_beep;
struct hda_codec;
struct hda_pcm;
struct hda_pcm_stream;
+struct hda_codec_ops;
/*
* codec bus
@@ -69,28 +70,31 @@ struct hda_bus {
/*
* codec preset
- *
- * Known codecs have the patch to build and set up the controls/PCMs
- * better than the generic parser.
*/
-typedef int (*hda_codec_patch_t)(struct hda_codec *);
-
+
#define HDA_CODEC_ID_SKIP_PROBE 0x00000001
#define HDA_CODEC_ID_GENERIC_HDMI 0x00000101
#define HDA_CODEC_ID_GENERIC 0x00000201
-#define HDA_CODEC_REV_ENTRY(_vid, _rev, _name, _patch) \
+#define HDA_CODEC_ID_REV_MODEL(_vid, _rev, _name, _model) \
{ .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
- .api_version = HDA_DEV_LEGACY, \
- .driver_data = (unsigned long)(_patch) }
-#define HDA_CODEC_ENTRY(_vid, _name, _patch) \
- HDA_CODEC_REV_ENTRY(_vid, 0, _name, _patch)
+ .api_version = HDA_DEV_LEGACY, .driver_data = (_model) }
+#define HDA_CODEC_ID_MODEL(_vid, _name, _model) \
+ HDA_CODEC_ID_REV_MODEL(_vid, 0, _name, _model)
+#define HDA_CODEC_ID_REV(_vid, _rev, _name) \
+ HDA_CODEC_ID_REV_MODEL(_vid, _rev, _name, 0)
+#define HDA_CODEC_ID(_vid, _name) \
+ HDA_CODEC_ID_REV(_vid, 0, _name)
struct hda_codec_driver {
struct hdac_driver core;
const struct hda_device_id *id;
+ const struct hda_codec_ops *ops;
};
+#define hda_codec_to_driver(codec) \
+ container_of((codec)->core.dev.driver, struct hda_codec_driver, core.driver)
+
int __hda_codec_driver_register(struct hda_codec_driver *drv, const char *name,
struct module *owner);
#define hda_codec_driver_register(drv) \
@@ -100,12 +104,13 @@ void hda_codec_driver_unregister(struct hda_codec_driver *drv);
module_driver(drv, hda_codec_driver_register, \
hda_codec_driver_unregister)
-/* ops set by the preset patch */
+/* ops for hda codec driver */
struct hda_codec_ops {
+ int (*probe)(struct hda_codec *codec, const struct hda_device_id *id);
+ void (*remove)(struct hda_codec *codec);
int (*build_controls)(struct hda_codec *codec);
int (*build_pcms)(struct hda_codec *codec);
int (*init)(struct hda_codec *codec);
- void (*free)(struct hda_codec *codec);
void (*unsol_event)(struct hda_codec *codec, unsigned int res);
void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state);
@@ -181,10 +186,7 @@ struct hda_codec {
const struct hda_device_id *preset;
const char *modelname; /* model name for preset */
- /* set by patch */
- struct hda_codec_ops patch_ops;
-
- /* PCM to create, set by patch_ops.build_pcms callback */
+ /* PCM to create, set by hda_codec_ops.build_pcms callback */
struct list_head pcm_list_head;
refcount_t pcm_ref;
wait_queue_head_t remove_sleep;
@@ -195,6 +197,7 @@ struct hda_codec {
/* beep device */
struct hda_beep *beep;
unsigned int beep_mode;
+ bool beep_just_power_on;
/* widget capabilities cache */
u32 *wcaps;
@@ -357,8 +360,8 @@ int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t nid, int recursive);
unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
- u8 *dev_list, int max_devices);
+unsigned int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
+ u8 *dev_list, unsigned int max_devices);
int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id);
@@ -477,8 +480,10 @@ extern const struct dev_pm_ops hda_codec_driver_pm;
static inline
int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid)
{
- if (codec->patch_ops.check_power_status)
- return codec->patch_ops.check_power_status(codec, nid);
+ struct hda_codec_driver *driver = hda_codec_to_driver(codec);
+
+ if (driver->ops && driver->ops->check_power_status)
+ return driver->ops->check_power_status(codec, nid);
return 0;
}
@@ -498,6 +503,36 @@ static inline bool hda_codec_need_resume(struct hda_codec *codec)
return !codec->relaxed_resume && codec->jacktbl.used;
}
+/*
+ * PM with auto-cleanup: call like CLASS(snd_hda_power, pm)(codec)
+ * If error handling is needed, check pm.err.
+ */
+struct __hda_power_obj {
+ struct hda_codec *codec;
+ int err;
+};
+
+static inline struct __hda_power_obj __snd_hda_power_up(struct hda_codec *codec)
+{
+ struct __hda_power_obj T = { .codec = codec };
+ T.err = snd_hda_power_up(codec);
+ return T;
+}
+
+static inline struct __hda_power_obj __snd_hda_power_up_pm(struct hda_codec *codec)
+{
+ struct __hda_power_obj T = { .codec = codec };
+ T.err = snd_hda_power_up_pm(codec);
+ return T;
+}
+
+DEFINE_CLASS(snd_hda_power, struct __hda_power_obj,
+ snd_hda_power_down((_T).codec), __snd_hda_power_up(codec),
+ struct hda_codec *codec)
+DEFINE_CLASS(snd_hda_power_pm, struct __hda_power_obj,
+ snd_hda_power_down_pm((_T).codec), __snd_hda_power_up_pm(codec),
+ struct hda_codec *codec)
+
#ifdef CONFIG_SND_HDA_PATCH_LOADER
/*
* patch firmware
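
A minimal sketch of the new scoped power helper in use (the function body is illustrative, not from this patch):

static int example_codec_op(struct hda_codec *codec)
{
	CLASS(snd_hda_power, pm)(codec);	/* powered down at scope exit */

	if (pm.err < 0)
		return pm.err;

	/* ... issue verbs while the codec is powered up ... */
	return 0;
}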
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 5ff31e6d41c1..db1cc0b897fd 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -180,7 +180,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define SD_STS_FIFO_READY 0x20 /* FIFO ready */
/* INTCTL and INTSTS */
-#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */
+#define AZX_INT_ALL_STREAM 0x3fffffff /* all stream interrupts */
#define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */
#define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 1d10939e40af..4e0c1d8af09f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -18,7 +18,7 @@
#include <sound/pcm.h>
#include <sound/memalloc.h>
#include <sound/hda_verbs.h>
-#include <drm/i915_component.h>
+#include <drm/intel/i915_component.h>
/* codec node id */
typedef u16 hda_nid_t;
@@ -223,7 +223,7 @@ struct hdac_driver {
struct device_driver driver;
int type;
const struct hda_device_id *id_table;
- int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
+ int (*match)(struct hdac_device *dev, const struct hdac_driver *drv);
void (*unsol_event)(struct hdac_device *dev, unsigned int event);
/* fields used by ext bus APIs */
@@ -235,7 +235,7 @@ struct hdac_driver {
#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
const struct hda_device_id *
-hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv);
+hdac_get_device_id(struct hdac_device *hdev, const struct hdac_driver *drv);
/*
* Bus verb operators
@@ -590,7 +590,7 @@ void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start,
unsigned int streams);
void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev,
- unsigned int streams);
+ unsigned int streams, bool start);
int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
struct snd_pcm_substream *substream);
@@ -598,8 +598,6 @@ void snd_hdac_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
int snd_hdac_stream_set_spib(struct hdac_bus *bus,
struct hdac_stream *azx_dev, u32 value);
-int snd_hdac_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_stream *azx_dev);
void snd_hdac_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
int snd_hdac_stream_wait_drsm(struct hdac_stream *azx_dev);
@@ -653,6 +651,7 @@ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value);
#define snd_hdac_dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
#define snd_hdac_dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
#define snd_hdac_stream_is_locked(dev) ((dev)->locked)
+DEFINE_GUARD(snd_hdac_dsp_lock, struct hdac_stream *, snd_hdac_dsp_lock(_T), snd_hdac_dsp_unlock(_T))
/* DSP loader helpers */
int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
unsigned int byte_size, struct snd_dma_buffer *bufp);
@@ -682,6 +681,30 @@ static inline void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
}
#endif /* CONFIG_SND_HDA_DSP_LOADER */
+/*
+ * Easy macros for widget capabilities
+ */
+#define snd_hdac_get_wcaps(codec, nid) \
+ snd_hdac_read_parm(codec, nid, AC_PAR_AUDIO_WIDGET_CAP)
+
+/* get the widget type from widget capability bits */
+static inline int snd_hdac_get_wcaps_type(unsigned int wcaps)
+{
+ if (!wcaps)
+ return -1; /* invalid type */
+ return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
+}
+
+/* get the number of supported channels */
+static inline unsigned int snd_hdac_get_wcaps_channels(u32 wcaps)
+{
+ unsigned int chans;
+
+ chans = (wcaps & AC_WCAP_CHAN_CNT_EXT) >> 13;
+ chans = (chans + 1) * 2;
+
+ return chans;
+}
/*
* generic array helpers
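
The widget-capability helpers moved here from the legacy HDA code. An illustrative use, assuming a valid codec device and widget NID:

static void example_widget_info(struct hdac_device *codec, hda_nid_t nid)
{
	unsigned int wcaps = snd_hdac_get_wcaps(codec, nid);

	if (snd_hdac_get_wcaps_type(wcaps) == AC_WID_AUD_OUT)
		dev_dbg(&codec->dev, "DAC 0x%x: %u channels\n", nid,
			snd_hdac_get_wcaps_channels(wcaps));
}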
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 957295364a5e..7de390022ac2 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -2,8 +2,6 @@
#ifndef __SOUND_HDAUDIO_EXT_H
#define __SOUND_HDAUDIO_EXT_H
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/iopoll.h>
#include <sound/hdaudio.h>
int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
@@ -24,6 +22,7 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_id(struct hdac_bus *bus, u32 id);
struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_addr(struct hdac_bus *bus, int addr);
struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_name(struct hdac_bus *bus,
const char *codec_name);
@@ -99,12 +98,17 @@ struct hdac_ext_link {
void __iomem *ml_addr; /* link output stream reg pointer */
u32 lcaps; /* link capablities */
u16 lsdiid; /* link sdi identifier */
+ u32 id;
+ u8 slcount;
int ref_count;
struct list_head list;
};
+#define hdac_ext_link_alt(link) ((link)->lcaps & AZX_ML_HDA_LCAP_ALT)
+#define hdac_ext_link_ofls(link) ((link)->lcaps & AZX_ML_HDA_LCAP_OFLS)
+
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *hlink);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *hlink);
int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
@@ -119,49 +123,6 @@ int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink)
void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable);
-#define snd_hdac_adsp_writeb(chip, reg, value) \
- snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readb(chip, reg) \
- snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writew(chip, reg, value) \
- snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readw(chip, reg) \
- snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writel(chip, reg, value) \
- snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readl(chip, reg) \
- snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg))
-#define snd_hdac_adsp_writeq(chip, reg, value) \
- snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value)
-#define snd_hdac_adsp_readq(chip, reg) \
- snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg))
-
-#define snd_hdac_adsp_updateb(chip, reg, mask, val) \
- snd_hdac_adsp_writeb(chip, reg, \
- (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updatew(chip, reg, mask, val) \
- snd_hdac_adsp_writew(chip, reg, \
- (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updatel(chip, reg, mask, val) \
- snd_hdac_adsp_writel(chip, reg, \
- (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val))
-#define snd_hdac_adsp_updateq(chip, reg, mask, val) \
- snd_hdac_adsp_writeq(chip, reg, \
- (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val))
-
-#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \
- readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \
- delay_us, timeout_us)
-
struct hdac_ext_device;
/* ops common to all codec drivers */
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 5e1a9eafd10f..273f4c36fad9 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -105,7 +105,8 @@ struct hdmi_codec_ops {
* Optional
*/
int (*get_dai_id)(struct snd_soc_component *comment,
- struct device_node *endpoint);
+ struct device_node *endpoint,
+ void *data);
/*
* Hook callback function to handle connector plug event.
@@ -114,20 +115,19 @@ struct hdmi_codec_ops {
int (*hook_plugged_cb)(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev);
-
- /* bit field */
- unsigned int no_capture_mute:1;
};
/* HDMI codec initialization data */
struct hdmi_codec_pdata {
const struct hdmi_codec_ops *ops;
+ u64 i2s_formats;
uint i2s:1;
uint no_i2s_playback:1;
uint no_i2s_capture:1;
uint spdif:1;
uint no_spdif_playback:1;
uint no_spdif_capture:1;
+ uint no_capture_mute:1;
int max_i2s_channels;
void *data;
};
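
With no_capture_mute moved from hdmi_codec_ops into the pdata flags, a codec user would now set it alongside the other bitfields. Illustrative only; the ops table is a placeholder:

static const struct hdmi_codec_ops example_hdmi_ops;	/* placeholder */

static const struct hdmi_codec_pdata example_hdmi_pdata = {
	.ops			= &example_hdmi_ops,
	.i2s			= 1,
	.max_i2s_channels	= 8,
	.no_capture_mute	= 1,
};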
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 1ed90e2109e9..715b95983188 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -22,6 +22,7 @@ struct input_dev;
* @SND_JACK_VIDEOOUT: Video out
* @SND_JACK_AVOUT: AV (Audio Video) out
* @SND_JACK_LINEIN: Line in
+ * @SND_JACK_USB: USB audio device
* @SND_JACK_BTN_0: Button 0
* @SND_JACK_BTN_1: Button 1
* @SND_JACK_BTN_2: Button 2
@@ -43,6 +44,7 @@ enum snd_jack_types {
SND_JACK_VIDEOOUT = 0x0010,
SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT,
SND_JACK_LINEIN = 0x0020,
+ SND_JACK_USB = 0x0040,
/* Kept separate from switches to facilitate implementation */
SND_JACK_BTN_0 = 0x4000,
@@ -54,7 +56,7 @@ enum snd_jack_types {
};
/* Keep in sync with definitions above */
-#define SND_JACK_SWITCH_TYPES 6
+#define SND_JACK_SWITCH_TYPES 7
struct snd_jack {
struct list_head kctl_list;
@@ -79,7 +81,6 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
struct snd_jack **jack, bool initial_kctl, bool phantom_jack);
int snd_jack_add_new_kctl(struct snd_jack *jack, const char * name, int mask);
#ifdef CONFIG_SND_JACK_INPUT_DEV
-void snd_jack_set_parent(struct snd_jack *jack, struct device *parent);
int snd_jack_set_key(struct snd_jack *jack, enum snd_jack_types type,
int keytype);
#endif
@@ -104,11 +105,6 @@ static inline void snd_jack_report(struct snd_jack *jack, int status)
#endif
#if !defined(CONFIG_SND_JACK) || !defined(CONFIG_SND_JACK_INPUT_DEV)
-static inline void snd_jack_set_parent(struct snd_jack *jack,
- struct device *parent)
-{
-}
-
static inline int snd_jack_set_key(struct snd_jack *jack,
enum snd_jack_types type,
int keytype)
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 43d524580bd2..9dd475cf4e8c 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -42,17 +42,12 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_SG 3 /* S/G pages */
#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif
-/* fallback types, don't use those directly */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
-#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
-#endif
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 61c6054618c8..58fd6e84f961 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -93,14 +93,15 @@ struct snd_pcm_ops {
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
/* 3 is absent slot. */
#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
+#define SNDRV_PCM_IOCTL1_SYNC_ID 5
#define SNDRV_PCM_TRIGGER_STOP 0
#define SNDRV_PCM_TRIGGER_START 1
#define SNDRV_PCM_TRIGGER_PAUSE_PUSH 3
#define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 4
#define SNDRV_PCM_TRIGGER_SUSPEND 5
#define SNDRV_PCM_TRIGGER_RESUME 6
#define SNDRV_PCM_TRIGGER_DRAIN 7
#define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1)
@@ -122,9 +123,13 @@ struct snd_pcm_ops {
#define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */
#define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */
#define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */
+/* extended rates since 6.12 */
+#define SNDRV_PCM_RATE_12000 (1U<<17) /* 12000Hz */
+#define SNDRV_PCM_RATE_24000 (1U<<18) /* 24000Hz */
+#define SNDRV_PCM_RATE_128000 (1U<<19) /* 128000Hz */
#define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */
-#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuos rates */
+#define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */
#define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\
SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\
@@ -401,7 +406,7 @@ struct snd_pcm_runtime {
snd_pcm_uframes_t silence_start; /* starting pointer to silence area */
snd_pcm_uframes_t silence_filled; /* already filled part of silence area */
- union snd_pcm_sync_id sync; /* hardware synchronization ID */
+ bool std_sync_id; /* hardware synchronization - standard per card ID */
/* -- mmap -- */
struct snd_pcm_mmap_status *status;
@@ -497,6 +502,9 @@ struct snd_pcm_substream {
/* misc flags */
unsigned int hw_opened: 1;
unsigned int managed_buffer_alloc:1;
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ unsigned int xrun_counter; /* number of times xrun happens */
+#endif /* CONFIG_SND_PCM_XRUN_DEBUG */
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -1155,7 +1163,18 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int
void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
const struct snd_pcm_ops *ops);
-void snd_pcm_set_sync(struct snd_pcm_substream *substream);
+void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params,
+ const unsigned char *id, unsigned int len);
+/**
+ * snd_pcm_set_sync - set the PCM sync id
+ * @substream: the pcm substream
+ *
+ * Use the default PCM sync identifier for the specific card.
+ */
+static inline void snd_pcm_set_sync(struct snd_pcm_substream *substream)
+{
+ substream->runtime->std_sync_id = true;
+}
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream);
@@ -1232,8 +1251,6 @@ unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
unsigned int rates_b);
-unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min,
- unsigned int rate_max);
/**
* snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
@@ -1343,48 +1360,6 @@ snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type,
return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0);
}
-int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
- size_t size, gfp_t gfp_flags);
-int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
-struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-/**
- * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * Allocates the PCM substream buffer using vmalloc(), i.e., the memory is
- * contiguous in kernel virtual space, but not in physical memory. Use this
- * if the buffer is accessed by kernel code but not by device DMA.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-}
-
-/**
- * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
- * @substream: the substream to allocate the buffer to
- * @size: the requested buffer size, in bytes
- *
- * This function works like snd_pcm_lib_alloc_vmalloc_buffer(), but uses
- * vmalloc_32(), i.e., the pages are allocated from 32-bit-addressable memory.
- *
- * Return: 1 if the buffer was changed, 0 if not changed, or a negative error
- * code.
- */
-static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
- (struct snd_pcm_substream *substream, size_t size)
-{
- return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size,
- GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
-}
-
#define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p)
/**
@@ -1416,30 +1391,6 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size);
}
-/**
- * snd_pcm_mmap_data_open - increase the mmap counter
- * @area: VMA
- *
- * PCM mmap callback should handle this counter properly
- */
-static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
-{
- struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
- atomic_inc(&substream->mmap_count);
-}
-
-/**
- * snd_pcm_mmap_data_close - decrease the mmap counter
- * @area: VMA
- *
- * PCM mmap callback should handle this counter properly
- */
-static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area)
-{
- struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data;
- atomic_dec(&substream->mmap_count);
-}
-
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area);
/* mmap for io-memory area */
@@ -1451,6 +1402,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
+void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime);
+
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
* @dma: DMA number
@@ -1579,9 +1532,10 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
dev_dbg((pcm)->card->dev, fmt, ##args)
/* helpers for copying between iov_iter and iomem */
-int copy_to_iter_fromio(struct iov_iter *itert, const void __iomem *src,
- size_t count);
-int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count);
+size_t copy_to_iter_fromio(const void __iomem *src, size_t bytes,
+ struct iov_iter *iter) __must_check;
+size_t copy_from_iter_toio(void __iomem *dst, size_t bytes,
+ struct iov_iter *iter) __must_check;
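+
+/*
+ * Note the reordered parameters and the new return convention: both helpers
+ * now return the number of bytes copied and are __must_check, so callers
+ * should compare against the requested size. A hedged sketch of a PCM .copy
+ * callback for an iomem-backed buffer (my_chip and its iobase field are
+ * hypothetical):
+ *
+ * static int my_pcm_copy(struct snd_pcm_substream *substream, int channel,
+ *			  unsigned long pos, struct iov_iter *iter,
+ *			  unsigned long bytes)
+ * {
+ *	struct my_chip *chip = snd_pcm_substream_chip(substream);
+ *
+ *	// a short copy indicates a fault in the user buffer
+ *	if (copy_to_iter_fromio(chip->iobase + pos, bytes, iter) != bytes)
+ *		return -EFAULT;
+ *	return 0;
+ * }
+ */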
struct snd_pcm_status64 {
snd_pcm_state_t state; /* stream state */
diff --git a/include/sound/pcm_drm_eld.h b/include/sound/pcm_drm_eld.h
index 28a55a8beb28..5a38413ada91 100644
--- a/include/sound/pcm_drm_eld.h
+++ b/include/sound/pcm_drm_eld.h
@@ -2,6 +2,97 @@
#ifndef __SOUND_PCM_DRM_ELD_H
#define __SOUND_PCM_DRM_ELD_H
+enum eld_versions {
+ ELD_VER_CEA_861D = 2,
+ ELD_VER_PARTIAL = 31,
+};
+
+enum cea_audio_coding_types {
+ AUDIO_CODING_TYPE_REF_STREAM_HEADER = 0,
+ AUDIO_CODING_TYPE_LPCM = 1,
+ AUDIO_CODING_TYPE_AC3 = 2,
+ AUDIO_CODING_TYPE_MPEG1 = 3,
+ AUDIO_CODING_TYPE_MP3 = 4,
+ AUDIO_CODING_TYPE_MPEG2 = 5,
+ AUDIO_CODING_TYPE_AACLC = 6,
+ AUDIO_CODING_TYPE_DTS = 7,
+ AUDIO_CODING_TYPE_ATRAC = 8,
+ AUDIO_CODING_TYPE_SACD = 9,
+ AUDIO_CODING_TYPE_EAC3 = 10,
+ AUDIO_CODING_TYPE_DTS_HD = 11,
+ AUDIO_CODING_TYPE_MLP = 12,
+ AUDIO_CODING_TYPE_DST = 13,
+ AUDIO_CODING_TYPE_WMAPRO = 14,
+ AUDIO_CODING_TYPE_REF_CXT = 15,
+ /* also include valid xtypes below */
+ AUDIO_CODING_TYPE_HE_AAC = 15,
+ AUDIO_CODING_TYPE_HE_AAC2 = 16,
+ AUDIO_CODING_TYPE_MPEG_SURROUND = 17,
+};
+
+enum cea_audio_coding_xtypes {
+ AUDIO_CODING_XTYPE_HE_REF_CT = 0,
+ AUDIO_CODING_XTYPE_HE_AAC = 1,
+ AUDIO_CODING_XTYPE_HE_AAC2 = 2,
+ AUDIO_CODING_XTYPE_MPEG_SURROUND = 3,
+ AUDIO_CODING_XTYPE_FIRST_RESERVED = 4,
+};
+
+/*
+ * CEA Short Audio Descriptor data
+ */
+struct snd_cea_sad {
+ int channels;
+ int format; /* (format == 0) indicates invalid SAD */
+ int rates;
+ int sample_bits; /* for LPCM */
+ int max_bitrate; /* for AC3...ATRAC */
+ int profile; /* for WMAPRO */
+};
+
+#define ELD_FIXED_BYTES 20
+#define ELD_MAX_SIZE 256
+#define ELD_MAX_MNL 16
+#define ELD_MAX_SAD 16
+
+#define ELD_PCM_BITS_8 BIT(0)
+#define ELD_PCM_BITS_16 BIT(1)
+#define ELD_PCM_BITS_20 BIT(2)
+#define ELD_PCM_BITS_24 BIT(3)
+#define ELD_PCM_BITS_32 BIT(4)
+
+/*
+ * ELD: EDID Like Data
+ */
+struct snd_parsed_hdmi_eld {
+ /*
+ * all fields will be cleared before updating ELD
+ */
+ int baseline_len;
+ int eld_ver;
+ int cea_edid_ver;
+ char monitor_name[ELD_MAX_MNL + 1];
+ int manufacture_id;
+ int product_id;
+ u64 port_id;
+ int support_hdcp;
+ int support_ai;
+ int conn_type;
+ int aud_synch_delay;
+ int spk_alloc;
+ int sad_count;
+ struct snd_cea_sad sad[ELD_MAX_SAD];
+};
+
int snd_pcm_hw_constraint_eld(struct snd_pcm_runtime *runtime, void *eld);
+int snd_parse_eld(struct device *dev, struct snd_parsed_hdmi_eld *e,
+ const unsigned char *buf, int size);
+void snd_show_eld(struct device *dev, struct snd_parsed_hdmi_eld *e);
+
+#ifdef CONFIG_SND_PROC_FS
+void snd_print_eld_info(struct snd_parsed_hdmi_eld *eld,
+ struct snd_info_buffer *buffer);
+#endif
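+
+/*
+ * A hedged sketch of how a display-audio driver might use the exported
+ * parser; the ELD buffer source and my_ names are hypothetical:
+ *
+ * static int my_update_eld(struct device *dev, struct snd_parsed_hdmi_eld *e,
+ *			    const unsigned char *eld_buf, int eld_size)
+ * {
+ *	int ret = snd_parse_eld(dev, e, eld_buf, eld_size);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ *	snd_show_eld(dev, e);	// log the parsed fields
+ *	return 0;
+ * }
+ */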
+
#endif
diff --git a/include/sound/q6usboffload.h b/include/sound/q6usboffload.h
new file mode 100644
index 000000000000..f7e2449fe1b3
--- /dev/null
+++ b/include/sound/q6usboffload.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * sound/q6usboffload.h -- QDSP6 USB offload
+ *
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/**
+ * struct q6usb_offload - USB backend DAI link offload parameters
+ * @dev: dev handle to usb be
+ * @domain: allocated iommu domain
+ * @intr_num: usb interrupter number
+ * @sid: streamID for iommu
+ **/
+struct q6usb_offload {
+ struct device *dev;
+ struct iommu_domain *domain;
+ u16 intr_num;
+ u8 sid;
+};
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index f31cabf0158c..6916f7133597 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -89,6 +89,7 @@ struct snd_rawmidi_substream {
unsigned int framing; /* whether to frame input data */
unsigned int clock_type; /* clock source to use for input framing */
int use_count; /* use counter (for output) */
+ bool inactive; /* inactive substream (for UMP legacy) */
size_t bytes;
spinlock_t lock;
struct snd_rawmidi *rmidi;
@@ -118,6 +119,7 @@ struct snd_rawmidi {
struct list_head list;
unsigned int device; /* device number */
unsigned int info_flags; /* SNDRV_RAWMIDI_INFO_XXXX */
+ unsigned int tied_device;
char id[64];
char name[80];
@@ -189,4 +191,13 @@ long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
long snd_rawmidi_kernel_write(struct snd_rawmidi_substream *substream,
const unsigned char *buf, long count);
+/* set up the tied devices */
+static inline void snd_rawmidi_tie_devices(struct snd_rawmidi *r1,
+ struct snd_rawmidi *r2)
+{
+	/* tied_device keeps the device number + 1 (so that 0 means untied) */
+ r1->tied_device = r2->device + 1;
+ r2->tied_device = r1->device + 1;
+}
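+
+/*
+ * The +1 encoding means a reader has to undo the offset; a minimal sketch of
+ * a hypothetical helper resolving the tied device number:
+ *
+ * static int my_tied_device_number(struct snd_rawmidi *rmidi)
+ * {
+ *	// tied_device stores device + 1, so 0 means "not tied"
+ *	if (!rmidi->tied_device)
+ *		return -1;
+ *	return rmidi->tied_device - 1;
+ * }
+ */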
+
#endif /* __SOUND_RAWMIDI_H */
diff --git a/include/sound/rt1318.h b/include/sound/rt1318.h
new file mode 100644
index 000000000000..fe6bff06036c
--- /dev/null
+++ b/include/sound/rt1318.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/sound/rt1318.h -- Platform data for RT1318
+ *
+ * Copyright 2024 Realtek Semiconductor Corp.
+ */
+
+#ifndef __LINUX_SND_RT1318_H
+#define __LINUX_SND_RT1318_H
+
+struct rt1318_platform_data {
+ unsigned int init_r0_l;
+ unsigned int init_r0_r;
+};
+
+#endif
diff --git a/include/sound/sdca.h b/include/sound/sdca.h
new file mode 100644
index 000000000000..67ff3c88705d
--- /dev/null
+++ b/include/sound/sdca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef __SDCA_H__
+#define __SDCA_H__
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+
+struct acpi_table_swft;
+struct fwnode_handle;
+struct sdw_slave;
+struct sdca_dev;
+
+#define SDCA_MAX_FUNCTION_COUNT 8
+
+/**
+ * struct sdca_function_desc - short descriptor for an SDCA Function
+ * @node: firmware node for the Function.
+ * @func_dev: pointer to SDCA function device.
+ * @name: Human-readable string.
+ * @type: Function topology type.
+ * @adr: ACPI address (used for SDCA register access).
+ */
+struct sdca_function_desc {
+ struct fwnode_handle *node;
+ struct sdca_dev *func_dev;
+ const char *name;
+ u32 type;
+ u8 adr;
+};
+
+/**
+ * struct sdca_device_data - structure containing all SDCA related information
+ * @interface_revision: Value read from _DSD property, mainly to check
+ * for changes between silicon versions.
+ * @num_functions: Total number of supported SDCA functions. Invalid/unsupported
+ * functions will be skipped.
+ * @function: Array of function descriptors.
+ * @swft: Pointer to the SWFT table, if available.
+ */
+struct sdca_device_data {
+ u32 interface_revision;
+ int num_functions;
+ struct sdca_function_desc function[SDCA_MAX_FUNCTION_COUNT];
+ struct acpi_table_swft *swft;
+};
+
+enum sdca_quirk {
+ SDCA_QUIRKS_RT712_VB,
+ SDCA_QUIRKS_SKIP_FUNC_TYPE_PATCHING,
+};
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SDCA)
+
+void sdca_lookup_functions(struct sdw_slave *slave);
+void sdca_lookup_swft(struct sdw_slave *slave);
+void sdca_lookup_interface_revision(struct sdw_slave *slave);
+bool sdca_device_quirk_match(struct sdw_slave *slave, enum sdca_quirk quirk);
+int sdca_dev_register_functions(struct sdw_slave *slave);
+void sdca_dev_unregister_functions(struct sdw_slave *slave);
+
+#else
+
+static inline void sdca_lookup_functions(struct sdw_slave *slave) {}
+static inline void sdca_lookup_swft(struct sdw_slave *slave) {}
+static inline void sdca_lookup_interface_revision(struct sdw_slave *slave) {}
+static inline bool sdca_device_quirk_match(struct sdw_slave *slave, enum sdca_quirk quirk)
+{
+ return false;
+}
+
+static inline int sdca_dev_register_functions(struct sdw_slave *slave)
+{
+ return 0;
+}
+
+static inline void sdca_dev_unregister_functions(struct sdw_slave *slave) {}
+
+#endif
+
+#endif
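+
+/*
+ * A hedged sketch of the expected call flow from a SoundWire peripheral
+ * driver's probe; my_sdw_probe is hypothetical and error handling is elided:
+ *
+ * static int my_sdw_probe(struct sdw_slave *slave)
+ * {
+ *	// discover SDCA metadata from firmware/ACPI
+ *	sdca_lookup_interface_revision(slave);
+ *	sdca_lookup_functions(slave);
+ *	sdca_lookup_swft(slave);
+ *
+ *	if (sdca_device_quirk_match(slave, SDCA_QUIRKS_RT712_VB))
+ *		dev_dbg(&slave->dev, "applying RT712 VB handling\n");
+ *
+ *	// create child devices for each discovered Function
+ *	return sdca_dev_register_functions(slave);
+ * }
+ */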
diff --git a/include/sound/sdca_asoc.h b/include/sound/sdca_asoc.h
new file mode 100644
index 000000000000..aa9124f93218
--- /dev/null
+++ b/include/sound/sdca_asoc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_ASOC_H__
+#define __SDCA_ASOC_H__
+
+struct device;
+struct regmap;
+struct sdca_function_data;
+struct snd_kcontrol_new;
+struct snd_pcm_hw_params;
+struct snd_pcm_substream;
+struct snd_soc_component_driver;
+struct snd_soc_dai;
+struct snd_soc_dai_driver;
+struct snd_soc_dai_ops;
+struct snd_soc_dapm_route;
+struct snd_soc_dapm_widget;
+
+int sdca_asoc_count_component(struct device *dev, struct sdca_function_data *function,
+ int *num_widgets, int *num_routes, int *num_controls,
+ int *num_dais);
+
+int sdca_asoc_populate_dapm(struct device *dev, struct sdca_function_data *function,
+ struct snd_soc_dapm_widget *widgets,
+ struct snd_soc_dapm_route *routes);
+int sdca_asoc_populate_controls(struct device *dev,
+ struct sdca_function_data *function,
+ struct snd_kcontrol_new *kctl);
+int sdca_asoc_populate_dais(struct device *dev, struct sdca_function_data *function,
+ struct snd_soc_dai_driver *dais,
+ const struct snd_soc_dai_ops *ops);
+
+int sdca_asoc_populate_component(struct device *dev,
+ struct sdca_function_data *function,
+ struct snd_soc_component_driver *component_drv,
+ struct snd_soc_dai_driver **dai_drv, int *num_dai_drv,
+ const struct snd_soc_dai_ops *ops);
+
+int sdca_asoc_set_constraints(struct device *dev, struct regmap *regmap,
+ struct sdca_function_data *function,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+void sdca_asoc_free_constraints(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+int sdca_asoc_get_port(struct device *dev, struct regmap *regmap,
+ struct sdca_function_data *function,
+ struct snd_soc_dai *dai);
+int sdca_asoc_hw_params(struct device *dev, struct regmap *regmap,
+ struct sdca_function_data *function,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai);
+
+#endif // __SDCA_ASOC_H__
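+
+/*
+ * These helpers follow a count-then-populate pattern: query the sizes first,
+ * allocate, then fill. A hedged sketch for the DAPM tables (my_build_dapm is
+ * hypothetical):
+ *
+ * static int my_build_dapm(struct device *dev,
+ *			    struct sdca_function_data *function)
+ * {
+ *	struct snd_soc_dapm_widget *widgets;
+ *	struct snd_soc_dapm_route *routes;
+ *	int num_widgets, num_routes, num_controls, num_dais;
+ *	int ret;
+ *
+ *	// first pass: ask how many entries each table needs
+ *	ret = sdca_asoc_count_component(dev, function, &num_widgets,
+ *					&num_routes, &num_controls,
+ *					&num_dais);
+ *	if (ret)
+ *		return ret;
+ *
+ *	widgets = devm_kcalloc(dev, num_widgets, sizeof(*widgets), GFP_KERNEL);
+ *	routes = devm_kcalloc(dev, num_routes, sizeof(*routes), GFP_KERNEL);
+ *	if (!widgets || !routes)
+ *		return -ENOMEM;
+ *
+ *	// second pass: fill in the allocated tables
+ *	return sdca_asoc_populate_dapm(dev, function, widgets, routes);
+ * }
+ */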
diff --git a/include/sound/sdca_fdl.h b/include/sound/sdca_fdl.h
new file mode 100644
index 000000000000..fbaf4b384c8a
--- /dev/null
+++ b/include/sound/sdca_fdl.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_FDL_H__
+#define __SDCA_FDL_H__
+
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct regmap;
+struct sdca_fdl_set;
+struct sdca_function_data;
+struct sdca_interrupt;
+struct sdca_interrupt_info;
+
+/**
+ * struct fdl_state - FDL state structure to keep data between interrupts
+ * @begin: Completion indicating the start of an FDL download cycle.
+ * @done: Completion indicating the end of an FDL download cycle.
+ * @timeout: Delayed work used for timing out UMP transactions.
+ * @lock: Mutex protecting against races between the timeout work and IRQ handlers.
+ * @interrupt: Pointer to the interrupt struct to which this FDL is attached.
+ * @set: Pointer to the FDL set currently being downloaded.
+ * @file_index: Index of the current file being processed.
+ */
+struct fdl_state {
+ struct completion begin;
+ struct completion done;
+ struct delayed_work timeout;
+ struct mutex lock;
+
+ struct sdca_interrupt *interrupt;
+ struct sdca_fdl_set *set;
+ int file_index;
+};
+
+#define SDCA_CTL_XU_FDLH_COMPLETE 0
+#define SDCA_CTL_XU_FDLH_MORE_FILES SDCA_CTL_XU_FDLH_SET_IN_PROGRESS
+#define SDCA_CTL_XU_FDLH_FILE_AVAILABLE (SDCA_CTL_XU_FDLH_TRANSFERRED_FILE | \
+ SDCA_CTL_XU_FDLH_SET_IN_PROGRESS)
+#define SDCA_CTL_XU_FDLH_MASK (SDCA_CTL_XU_FDLH_TRANSFERRED_CHUNK | \
+ SDCA_CTL_XU_FDLH_TRANSFERRED_FILE | \
+ SDCA_CTL_XU_FDLH_SET_IN_PROGRESS | \
+ SDCA_CTL_XU_FDLH_RESET_ACK | \
+ SDCA_CTL_XU_FDLH_REQ_ABORT)
+
+#define SDCA_CTL_XU_FDLD_COMPLETE 0
+#define SDCA_CTL_XU_FDLD_FILE_OK (SDCA_CTL_XU_FDLH_TRANSFERRED_FILE | \
+ SDCA_CTL_XU_FDLH_SET_IN_PROGRESS | \
+ SDCA_CTL_XU_FDLD_ACK_TRANSFER | \
+ SDCA_CTL_XU_FDLD_NEEDS_SET)
+#define SDCA_CTL_XU_FDLD_MORE_FILES_OK (SDCA_CTL_XU_FDLH_SET_IN_PROGRESS | \
+ SDCA_CTL_XU_FDLD_ACK_TRANSFER | \
+ SDCA_CTL_XU_FDLD_NEEDS_SET)
+#define SDCA_CTL_XU_FDLD_MASK (SDCA_CTL_XU_FDLD_REQ_RESET | \
+ SDCA_CTL_XU_FDLD_REQ_ABORT | \
+ SDCA_CTL_XU_FDLD_ACK_TRANSFER | \
+ SDCA_CTL_XU_FDLD_NEEDS_SET)
+
+#if IS_ENABLED(CONFIG_SND_SOC_SDCA_FDL)
+
+int sdca_fdl_alloc_state(struct sdca_interrupt *interrupt);
+int sdca_fdl_process(struct sdca_interrupt *interrupt);
+int sdca_fdl_sync(struct device *dev, struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
+
+int sdca_reset_function(struct device *dev, struct sdca_function_data *function,
+ struct regmap *regmap);
+
+#else
+
+static inline int sdca_fdl_alloc_state(struct sdca_interrupt *interrupt)
+{
+ return 0;
+}
+
+static inline int sdca_fdl_process(struct sdca_interrupt *interrupt)
+{
+ return 0;
+}
+
+static inline int sdca_fdl_sync(struct device *dev,
+ struct sdca_function_data *function,
+ struct sdca_interrupt_info *info)
+{
+ return 0;
+}
+
+static inline int sdca_reset_function(struct device *dev,
+ struct sdca_function_data *function,
+ struct regmap *regmap)
+{
+ return 0;
+}
+
+#endif // CONFIG_SND_SOC_SDCA_FDL
+
+#endif // __SDCA_FDL_H__
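+
+/*
+ * The composite FDLH values above encode the handshake states the host
+ * expects to observe while a file set downloads. A hedged sketch of a status
+ * check (my_fdl_file_available is hypothetical):
+ *
+ * static bool my_fdl_file_available(unsigned int status)
+ * {
+ *	// another file of the set is ready for transfer
+ *	return (status & SDCA_CTL_XU_FDLH_MASK) ==
+ *	       SDCA_CTL_XU_FDLH_FILE_AVAILABLE;
+ * }
+ */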
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
new file mode 100644
index 000000000000..6e9391b3816c
--- /dev/null
+++ b/include/sound/sdca_function.h
@@ -0,0 +1,1475 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright(c) 2024 Intel Corporation
+ */
+
+#ifndef __SDCA_FUNCTION_H__
+#define __SDCA_FUNCTION_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/hid.h>
+
+struct acpi_table_swft;
+struct device;
+struct sdca_entity;
+struct sdca_function_desc;
+
+#define SDCA_NO_INTERRUPT -1
+
+/*
+ * The addressing space for SDCA relies on 7 bits for Entities, so a
+ * maximum of 128 Entities per function can be represented.
+ */
+#define SDCA_MAX_ENTITY_COUNT 128
+
+/*
+ * Sanity check on number of initialization writes, can be expanded if needed.
+ */
+#define SDCA_MAX_INIT_COUNT 2048
+
+/*
+ * The Cluster IDs are 16-bit, so a maximum of 65535 Clusters per
+ * function can be represented, however limit this to a slightly
+ * more reasonable value. Can be expanded if needed.
+ */
+#define SDCA_MAX_CLUSTER_COUNT 256
+
+/*
+ * Sanity check on number of channels per Cluster, can be expanded if needed.
+ */
+#define SDCA_MAX_CHANNEL_COUNT 32
+
+/*
+ * Sanity check on number of PDE delays, can be expanded if needed.
+ */
+#define SDCA_MAX_DELAY_COUNT 256
+
+/*
+ * Sanity check on size of affected controls data, can be expanded if needed.
+ */
+#define SDCA_MAX_AFFECTED_COUNT 2048
+
+/**
+ * enum sdca_function_type - SDCA Function Type codes
+ * @SDCA_FUNCTION_TYPE_SMART_AMP: Amplifier with protection features.
+ * @SDCA_FUNCTION_TYPE_SIMPLE_AMP: Subset of SmartAmp.
+ * @SDCA_FUNCTION_TYPE_SMART_MIC: Smart microphone with acoustic triggers.
+ * @SDCA_FUNCTION_TYPE_SIMPLE_MIC: Subset of SmartMic.
+ * @SDCA_FUNCTION_TYPE_SPEAKER_MIC: Combination of SmartMic and SmartAmp.
+ * @SDCA_FUNCTION_TYPE_UAJ: 3.5mm Universal Audio jack.
+ * @SDCA_FUNCTION_TYPE_RJ: Retaskable jack.
+ * @SDCA_FUNCTION_TYPE_SIMPLE_JACK: Subset of UAJ.
+ * @SDCA_FUNCTION_TYPE_HID: Human Interface Device, for e.g. buttons.
+ * @SDCA_FUNCTION_TYPE_COMPANION_AMP: Sources audio from another amp.
+ * @SDCA_FUNCTION_TYPE_IMP_DEF: Implementation-defined function.
+ *
+ * SDCA Function Types from SDCA specification v1.0a Section 5.1.2
+ * all Function types not described are reserved.
+ *
+ * Note that SIMPLE_AMP, SIMPLE_MIC and SIMPLE_JACK Function Types
+ * are NOT defined in SDCA 1.0a, but they were defined in earlier
+ * drafts and are planned for 1.1.
+ */
+enum sdca_function_type {
+ SDCA_FUNCTION_TYPE_SMART_AMP = 0x01,
+ SDCA_FUNCTION_TYPE_SIMPLE_AMP = 0x02,
+ SDCA_FUNCTION_TYPE_SMART_MIC = 0x03,
+ SDCA_FUNCTION_TYPE_SIMPLE_MIC = 0x04,
+ SDCA_FUNCTION_TYPE_SPEAKER_MIC = 0x05,
+ SDCA_FUNCTION_TYPE_UAJ = 0x06,
+ SDCA_FUNCTION_TYPE_RJ = 0x07,
+ SDCA_FUNCTION_TYPE_SIMPLE_JACK = 0x08,
+ SDCA_FUNCTION_TYPE_HID = 0x0A,
+ SDCA_FUNCTION_TYPE_COMPANION_AMP = 0x0B,
+ SDCA_FUNCTION_TYPE_IMP_DEF = 0x1F,
+};
+
+/* Human-readable names used for kernel logs and Function device registration/bind */
+#define SDCA_FUNCTION_TYPE_SMART_AMP_NAME "SmartAmp"
+#define SDCA_FUNCTION_TYPE_SIMPLE_AMP_NAME "SimpleAmp"
+#define SDCA_FUNCTION_TYPE_SMART_MIC_NAME "SmartMic"
+#define SDCA_FUNCTION_TYPE_SIMPLE_MIC_NAME "SimpleMic"
+#define SDCA_FUNCTION_TYPE_SPEAKER_MIC_NAME "SpeakerMic"
+#define SDCA_FUNCTION_TYPE_UAJ_NAME "UAJ"
+#define SDCA_FUNCTION_TYPE_RJ_NAME "RJ"
+#define	SDCA_FUNCTION_TYPE_SIMPLE_JACK_NAME		"SimpleJack"
+#define SDCA_FUNCTION_TYPE_HID_NAME "HID"
+#define SDCA_FUNCTION_TYPE_COMPANION_AMP_NAME "CompanionAmp"
+#define SDCA_FUNCTION_TYPE_IMP_DEF_NAME "ImplementationDefined"
+
+/**
+ * struct sdca_init_write - a single initialization write
+ * @addr: Register address to be written
+ * @val: Single byte value to be written
+ */
+struct sdca_init_write {
+ u32 addr;
+ u8 val;
+};
+
+/**
+ * define SDCA_CTL_TYPE - create a unique identifier for an SDCA Control
+ * @ent: Entity Type code.
+ * @sel: Control Selector code.
+ *
+ * Sometimes there is a need to identify a type of Control, for example to
+ * determine what name the control should have. SDCA Selectors are reused
+ * across Entity types; as such, it is necessary to combine both the Entity
+ * Type and the Control Selector to obtain a unique identifier.
+ */
+#define SDCA_CTL_TYPE(ent, sel) ((ent) << 8 | (sel))
+
+/**
+ * define SDCA_CTL_TYPE_S - static version of SDCA_CTL_TYPE
+ * @ent: Entity name, for example IT, MFPU, etc.; this string can be read
+ * from the last characters of the SDCA_ENTITY_TYPE_* macros.
+ * @sel: Control Selector name, for example MIC_BIAS, MUTE, etc.; this
+ * string can be read from the last characters of the SDCA_CTL_*_*
+ * macros.
+ *
+ * Shorthand to specify a Control type statically, for example:
+ * SDCA_CTL_TYPE_S(IT, MIC_BIAS).
+ */
+#define SDCA_CTL_TYPE_S(ent, sel) SDCA_CTL_TYPE(SDCA_ENTITY_TYPE_##ent, \
+ SDCA_CTL_##ent##_##sel)
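+
+/*
+ * A hedged sketch of the intended use, mapping an (Entity Type, Selector)
+ * pair to a display name; my_ctl_name is hypothetical and uses the *_NAME
+ * strings defined later in this header:
+ *
+ * static const char *my_ctl_name(int ent, int sel)
+ * {
+ *	switch (SDCA_CTL_TYPE(ent, sel)) {
+ *	case SDCA_CTL_TYPE_S(IT, MIC_BIAS):
+ *		return SDCA_CTL_MIC_BIAS_NAME;
+ *	case SDCA_CTL_TYPE_S(FU, MUTE):
+ *		return SDCA_CTL_MUTE_NAME;
+ *	default:
+ *		return NULL;
+ *	}
+ * }
+ */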
+
+/**
+ * enum sdca_messageoffset_range - Column definitions for UMP MessageOffset
+ */
+enum sdca_messageoffset_range {
+ SDCA_MESSAGEOFFSET_BUFFER_START_ADDRESS = 0,
+ SDCA_MESSAGEOFFSET_BUFFER_LENGTH = 1,
+ SDCA_MESSAGEOFFSET_UMP_MODE = 2,
+ SDCA_MESSAGEOFFSET_NCOLS = 3,
+};
+
+/**
+ * enum sdca_ump_mode - SDCA UMP Mode
+ */
+enum sdca_ump_mode {
+ SDCA_UMP_MODE_DIRECT = 0x00,
+ SDCA_UMP_MODE_INDIRECT = 0x01,
+};
+
+/**
+ * enum sdca_ump_owner - SDCA UMP Owner
+ */
+enum sdca_ump_owner {
+ SDCA_UMP_OWNER_HOST = 0x00,
+ SDCA_UMP_OWNER_DEVICE = 0x01,
+};
+
+/**
+ * enum sdca_it_controls - SDCA Controls for Input Terminal
+ *
+ * Control Selectors for Input Terminal from SDCA specification v1.0
+ * section 6.2.1.3.
+ */
+enum sdca_it_controls {
+ SDCA_CTL_IT_MIC_BIAS = 0x03,
+ SDCA_CTL_IT_USAGE = 0x04,
+ SDCA_CTL_IT_LATENCY = 0x08,
+ SDCA_CTL_IT_CLUSTERINDEX = 0x10,
+ SDCA_CTL_IT_DATAPORT_SELECTOR = 0x11,
+ SDCA_CTL_IT_MATCHING_GUID = 0x12,
+ SDCA_CTL_IT_KEEP_ALIVE = 0x13,
+ SDCA_CTL_IT_NDAI_STREAM = 0x14,
+ SDCA_CTL_IT_NDAI_CATEGORY = 0x15,
+ SDCA_CTL_IT_NDAI_CODINGTYPE = 0x16,
+ SDCA_CTL_IT_NDAI_PACKETTYPE = 0x17,
+};
+
+/**
+ * enum sdca_ot_controls - SDCA Controls for Output Terminal
+ *
+ * Control Selectors for Output Terminal from SDCA specification v1.0
+ * section 6.2.2.3.
+ */
+enum sdca_ot_controls {
+ SDCA_CTL_OT_USAGE = 0x04,
+ SDCA_CTL_OT_LATENCY = 0x08,
+ SDCA_CTL_OT_DATAPORT_SELECTOR = 0x11,
+ SDCA_CTL_OT_MATCHING_GUID = 0x12,
+ SDCA_CTL_OT_KEEP_ALIVE = 0x13,
+ SDCA_CTL_OT_NDAI_STREAM = 0x14,
+ SDCA_CTL_OT_NDAI_CATEGORY = 0x15,
+ SDCA_CTL_OT_NDAI_CODINGTYPE = 0x16,
+ SDCA_CTL_OT_NDAI_PACKETTYPE = 0x17,
+};
+
+/**
+ * enum sdca_usage_range - Column definitions for Usage
+ */
+enum sdca_usage_range {
+ SDCA_USAGE_NUMBER = 0,
+ SDCA_USAGE_CBN = 1,
+ SDCA_USAGE_SAMPLE_RATE = 2,
+ SDCA_USAGE_SAMPLE_WIDTH = 3,
+ SDCA_USAGE_FULL_SCALE = 4,
+ SDCA_USAGE_NOISE_FLOOR = 5,
+ SDCA_USAGE_TAG = 6,
+ SDCA_USAGE_NCOLS = 7,
+};
+
+/**
+ * enum sdca_dataport_selector_range - Column definitions for DataPort_Selector
+ */
+enum sdca_dataport_selector_range {
+ SDCA_DATAPORT_SELECTOR_NCOLS = 16,
+ SDCA_DATAPORT_SELECTOR_NROWS = 4,
+};
+
+/**
+ * enum sdca_mu_controls - SDCA Controls for Mixer Unit
+ *
+ * Control Selectors for Mixer Unit from SDCA specification v1.0
+ * section 6.3.4.2.
+ */
+enum sdca_mu_controls {
+ SDCA_CTL_MU_MIXER = 0x01,
+ SDCA_CTL_MU_LATENCY = 0x06,
+};
+
+/**
+ * enum sdca_su_controls - SDCA Controls for Selector Unit
+ *
+ * Control Selectors for Selector Unit from SDCA specification v1.0
+ * section 6.3.8.3.
+ */
+enum sdca_su_controls {
+ SDCA_CTL_SU_SELECTOR = 0x01,
+ SDCA_CTL_SU_LATENCY = 0x02,
+};
+
+/**
+ * enum sdca_fu_controls - SDCA Controls for Feature Unit
+ *
+ * Control Selectors for Feature Unit from SDCA specification v1.0
+ * section 6.3.2.3.
+ */
+enum sdca_fu_controls {
+ SDCA_CTL_FU_MUTE = 0x01,
+ SDCA_CTL_FU_CHANNEL_VOLUME = 0x02,
+ SDCA_CTL_FU_AGC = 0x07,
+ SDCA_CTL_FU_BASS_BOOST = 0x09,
+ SDCA_CTL_FU_LOUDNESS = 0x0A,
+ SDCA_CTL_FU_GAIN = 0x0B,
+ SDCA_CTL_FU_LATENCY = 0x10,
+};
+
+/**
+ * enum sdca_volume_range - Column definitions for Q7.8dB volumes/gains
+ */
+enum sdca_volume_range {
+ SDCA_VOLUME_LINEAR_MIN = 0,
+ SDCA_VOLUME_LINEAR_MAX = 1,
+ SDCA_VOLUME_LINEAR_STEP = 2,
+ SDCA_VOLUME_LINEAR_NCOLS = 3,
+};
+
+/**
+ * enum sdca_xu_controls - SDCA Controls for Extension Unit
+ *
+ * Control Selectors for Extension Unit from SDCA specification v1.0
+ * section 6.3.10.3.
+ */
+enum sdca_xu_controls {
+ SDCA_CTL_XU_BYPASS = 0x01,
+ SDCA_CTL_XU_LATENCY = 0x06,
+ SDCA_CTL_XU_XU_ID = 0x07,
+ SDCA_CTL_XU_XU_VERSION = 0x08,
+ SDCA_CTL_XU_FDL_CURRENTOWNER = 0x10,
+ SDCA_CTL_XU_FDL_MESSAGEOFFSET = 0x12,
+ SDCA_CTL_XU_FDL_MESSAGELENGTH = 0x13,
+ SDCA_CTL_XU_FDL_STATUS = 0x14,
+ SDCA_CTL_XU_FDL_SET_INDEX = 0x15,
+ SDCA_CTL_XU_FDL_HOST_REQUEST = 0x16,
+
+ /* FDL Status Host->Device bit definitions */
+ SDCA_CTL_XU_FDLH_TRANSFERRED_CHUNK = BIT(0),
+ SDCA_CTL_XU_FDLH_TRANSFERRED_FILE = BIT(1),
+ SDCA_CTL_XU_FDLH_SET_IN_PROGRESS = BIT(2),
+ SDCA_CTL_XU_FDLH_RESET_ACK = BIT(4),
+ SDCA_CTL_XU_FDLH_REQ_ABORT = BIT(5),
+ /* FDL Status Device->Host bit definitions */
+ SDCA_CTL_XU_FDLD_REQ_RESET = BIT(4),
+ SDCA_CTL_XU_FDLD_REQ_ABORT = BIT(5),
+ SDCA_CTL_XU_FDLD_ACK_TRANSFER = BIT(6),
+ SDCA_CTL_XU_FDLD_NEEDS_SET = BIT(7),
+};
+
+/**
+ * enum sdca_fdl_set_index_range - Column definitions for UMP SetIndex
+ */
+enum sdca_fdl_set_index_range {
+ SDCA_FDL_SET_INDEX_SET_NUMBER = 0,
+ SDCA_FDL_SET_INDEX_FILE_SET_ID = 1,
+ SDCA_FDL_SET_INDEX_NCOLS = 2,
+};
+
+/**
+ * enum sdca_cs_controls - SDCA Controls for Clock Source
+ *
+ * Control Selectors for Clock Source from SDCA specification v1.0
+ * section 6.4.1.3.
+ */
+enum sdca_cs_controls {
+ SDCA_CTL_CS_CLOCK_VALID = 0x02,
+ SDCA_CTL_CS_SAMPLERATEINDEX = 0x10,
+};
+
+/**
+ * enum sdca_samplerateindex_range - Column definitions for SampleRateIndex
+ */
+enum sdca_samplerateindex_range {
+ SDCA_SAMPLERATEINDEX_INDEX = 0,
+ SDCA_SAMPLERATEINDEX_RATE = 1,
+ SDCA_SAMPLERATEINDEX_NCOLS = 2,
+};
+
+/**
+ * enum sdca_cx_controls - SDCA Controls for Clock Selector
+ *
+ * Control Selectors for Clock Selector from SDCA specification v1.0
+ * section 6.4.2.3.
+ */
+enum sdca_cx_controls {
+ SDCA_CTL_CX_CLOCK_SELECT = 0x01,
+};
+
+/**
+ * enum sdca_pde_controls - SDCA Controls for Power Domain Entity
+ *
+ * Control Selectors for Power Domain Entity from SDCA specification
+ * v1.0 section 6.5.2.2.
+ */
+enum sdca_pde_controls {
+ SDCA_CTL_PDE_REQUESTED_PS = 0x01,
+ SDCA_CTL_PDE_ACTUAL_PS = 0x10,
+};
+
+/**
+ * enum sdca_requested_ps_range - Column definitions for Requested PS
+ */
+enum sdca_requested_ps_range {
+ SDCA_REQUESTED_PS_STATE = 0,
+ SDCA_REQUESTED_PS_NCOLS = 1,
+};
+
+/**
+ * enum sdca_ge_controls - SDCA Controls for Group Unit
+ *
+ * Control Selectors for Group Unit from SDCA specification v1.0
+ * section 6.5.1.4.
+ */
+enum sdca_ge_controls {
+ SDCA_CTL_GE_SELECTED_MODE = 0x01,
+ SDCA_CTL_GE_DETECTED_MODE = 0x02,
+};
+
+/**
+ * enum sdca_selected_mode_range - Column definitions for Selected Mode
+ */
+enum sdca_selected_mode_range {
+ SDCA_SELECTED_MODE_INDEX = 0,
+ SDCA_SELECTED_MODE_TERM_TYPE = 1,
+ SDCA_SELECTED_MODE_NCOLS = 2,
+};
+
+/**
+ * enum sdca_detected_mode_values - Predefined GE Detected Mode values
+ */
+enum sdca_detected_mode_values {
+ SDCA_DETECTED_MODE_JACK_UNPLUGGED = 0,
+ SDCA_DETECTED_MODE_JACK_UNKNOWN = 1,
+ SDCA_DETECTED_MODE_DETECTION_IN_PROGRESS = 2,
+};
+
+/**
+ * enum sdca_spe_controls - SDCA Controls for Security & Privacy Unit
+ *
+ * Control Selectors for Security & Privacy Unit from SDCA
+ * specification v1.0 Section 6.5.3.2.
+ */
+enum sdca_spe_controls {
+ SDCA_CTL_SPE_PRIVATE = 0x01,
+ SDCA_CTL_SPE_PRIVACY_POLICY = 0x02,
+ SDCA_CTL_SPE_PRIVACY_LOCKSTATE = 0x03,
+ SDCA_CTL_SPE_PRIVACY_OWNER = 0x04,
+ SDCA_CTL_SPE_AUTHTX_CURRENTOWNER = 0x10,
+ SDCA_CTL_SPE_AUTHTX_MESSAGEOFFSET = 0x12,
+ SDCA_CTL_SPE_AUTHTX_MESSAGELENGTH = 0x13,
+ SDCA_CTL_SPE_AUTHRX_CURRENTOWNER = 0x14,
+ SDCA_CTL_SPE_AUTHRX_MESSAGEOFFSET = 0x16,
+ SDCA_CTL_SPE_AUTHRX_MESSAGELENGTH = 0x17,
+};
+
+/**
+ * enum sdca_cru_controls - SDCA Controls for Channel Remapping Unit
+ *
+ * Control Selectors for Channel Remapping Unit from SDCA
+ * specification v1.0 Section 6.3.1.3.
+ */
+enum sdca_cru_controls {
+ SDCA_CTL_CRU_LATENCY = 0x06,
+ SDCA_CTL_CRU_CLUSTERINDEX = 0x10,
+};
+
+/**
+ * enum sdca_udmpu_controls - SDCA Controls for Up-Down Mixer Processing Unit
+ *
+ * Control Selectors for Up-Down Mixer Processing Unit from SDCA
+ * specification v1.0 Section 6.3.9.3.
+ */
+enum sdca_udmpu_controls {
+ SDCA_CTL_UDMPU_LATENCY = 0x06,
+ SDCA_CTL_UDMPU_CLUSTERINDEX = 0x10,
+ SDCA_CTL_UDMPU_ACOUSTIC_ENERGY_LEVEL_MONITOR = 0x11,
+ SDCA_CTL_UDMPU_ULTRASOUND_LOOP_GAIN = 0x12,
+ SDCA_CTL_UDMPU_OPAQUESET_0 = 0x18,
+ SDCA_CTL_UDMPU_OPAQUESET_1 = 0x19,
+ SDCA_CTL_UDMPU_OPAQUESET_2 = 0x1A,
+ SDCA_CTL_UDMPU_OPAQUESET_3 = 0x1B,
+ SDCA_CTL_UDMPU_OPAQUESET_4 = 0x1C,
+ SDCA_CTL_UDMPU_OPAQUESET_5 = 0x1D,
+ SDCA_CTL_UDMPU_OPAQUESET_6 = 0x1E,
+ SDCA_CTL_UDMPU_OPAQUESET_7 = 0x1F,
+ SDCA_CTL_UDMPU_OPAQUESET_8 = 0x20,
+ SDCA_CTL_UDMPU_OPAQUESET_9 = 0x21,
+ SDCA_CTL_UDMPU_OPAQUESET_10 = 0x22,
+ SDCA_CTL_UDMPU_OPAQUESET_11 = 0x23,
+ SDCA_CTL_UDMPU_OPAQUESET_12 = 0x24,
+ SDCA_CTL_UDMPU_OPAQUESET_13 = 0x25,
+ SDCA_CTL_UDMPU_OPAQUESET_14 = 0x26,
+ SDCA_CTL_UDMPU_OPAQUESET_15 = 0x27,
+ SDCA_CTL_UDMPU_OPAQUESET_16 = 0x28,
+ SDCA_CTL_UDMPU_OPAQUESET_17 = 0x29,
+ SDCA_CTL_UDMPU_OPAQUESET_18 = 0x2A,
+ SDCA_CTL_UDMPU_OPAQUESET_19 = 0x2B,
+ SDCA_CTL_UDMPU_OPAQUESET_20 = 0x2C,
+ SDCA_CTL_UDMPU_OPAQUESET_21 = 0x2D,
+ SDCA_CTL_UDMPU_OPAQUESET_22 = 0x2E,
+ SDCA_CTL_UDMPU_OPAQUESET_23 = 0x2F,
+};
+
+/**
+ * enum sdca_mfpu_controls - SDCA Controls for Multi-Function Processing Unit
+ *
+ * Control Selectors for Multi-Function Processing Unit from SDCA
+ * specification v1.0 Section 6.3.3.4.
+ */
+enum sdca_mfpu_controls {
+ SDCA_CTL_MFPU_BYPASS = 0x01,
+ SDCA_CTL_MFPU_ALGORITHM_READY = 0x04,
+ SDCA_CTL_MFPU_ALGORITHM_ENABLE = 0x05,
+ SDCA_CTL_MFPU_LATENCY = 0x08,
+ SDCA_CTL_MFPU_ALGORITHM_PREPARE = 0x09,
+ SDCA_CTL_MFPU_CLUSTERINDEX = 0x10,
+ SDCA_CTL_MFPU_CENTER_FREQUENCY_INDEX = 0x11,
+ SDCA_CTL_MFPU_ULTRASOUND_LEVEL = 0x12,
+ SDCA_CTL_MFPU_AE_NUMBER = 0x13,
+ SDCA_CTL_MFPU_AE_CURRENTOWNER = 0x14,
+ SDCA_CTL_MFPU_AE_MESSAGEOFFSET = 0x16,
+ SDCA_CTL_MFPU_AE_MESSAGELENGTH = 0x17,
+};
+
+/**
+ * enum sdca_smpu_controls - SDCA Controls for Smart Mic Processing Unit
+ *
+ * Control Selectors for Smart Mic Processing Unit from SDCA
+ * specification v1.0 Section 6.3.7.3.
+ */
+enum sdca_smpu_controls {
+ SDCA_CTL_SMPU_LATENCY = 0x06,
+ SDCA_CTL_SMPU_TRIGGER_ENABLE = 0x10,
+ SDCA_CTL_SMPU_TRIGGER_STATUS = 0x11,
+ SDCA_CTL_SMPU_HIST_BUFFER_MODE = 0x12,
+ SDCA_CTL_SMPU_HIST_BUFFER_PREAMBLE = 0x13,
+ SDCA_CTL_SMPU_HIST_ERROR = 0x14,
+ SDCA_CTL_SMPU_TRIGGER_EXTENSION = 0x15,
+ SDCA_CTL_SMPU_TRIGGER_READY = 0x16,
+ SDCA_CTL_SMPU_HIST_CURRENTOWNER = 0x18,
+ SDCA_CTL_SMPU_HIST_MESSAGEOFFSET = 0x1A,
+ SDCA_CTL_SMPU_HIST_MESSAGELENGTH = 0x1B,
+ SDCA_CTL_SMPU_DTODTX_CURRENTOWNER = 0x1C,
+ SDCA_CTL_SMPU_DTODTX_MESSAGEOFFSET = 0x1E,
+ SDCA_CTL_SMPU_DTODTX_MESSAGELENGTH = 0x1F,
+ SDCA_CTL_SMPU_DTODRX_CURRENTOWNER = 0x20,
+ SDCA_CTL_SMPU_DTODRX_MESSAGEOFFSET = 0x22,
+ SDCA_CTL_SMPU_DTODRX_MESSAGELENGTH = 0x23,
+};
+
+/**
+ * enum sdca_sapu_controls - SDCA Controls for Smart Amp Processing Unit
+ *
+ * Control Selectors for Smart Amp Processing Unit from SDCA
+ * specification v1.0 Section 6.3.6.3.
+ */
+enum sdca_sapu_controls {
+ SDCA_CTL_SAPU_LATENCY = 0x05,
+ SDCA_CTL_SAPU_PROTECTION_MODE = 0x10,
+ SDCA_CTL_SAPU_PROTECTION_STATUS = 0x11,
+ SDCA_CTL_SAPU_OPAQUESETREQ_INDEX = 0x12,
+ SDCA_CTL_SAPU_DTODTX_CURRENTOWNER = 0x14,
+ SDCA_CTL_SAPU_DTODTX_MESSAGEOFFSET = 0x16,
+ SDCA_CTL_SAPU_DTODTX_MESSAGELENGTH = 0x17,
+ SDCA_CTL_SAPU_DTODRX_CURRENTOWNER = 0x18,
+ SDCA_CTL_SAPU_DTODRX_MESSAGEOFFSET = 0x1A,
+ SDCA_CTL_SAPU_DTODRX_MESSAGELENGTH = 0x1B,
+};
+
+/**
+ * enum sdca_ppu_controls - SDCA Controls for Post Processing Unit
+ *
+ * Control Selectors for Post Processing Unit from SDCA specification
+ * v1.0 Section 6.3.5.3.
+ */
+enum sdca_ppu_controls {
+ SDCA_CTL_PPU_LATENCY = 0x06,
+ SDCA_CTL_PPU_POSTURENUMBER = 0x10,
+ SDCA_CTL_PPU_POSTUREEXTENSION = 0x11,
+ SDCA_CTL_PPU_HORIZONTALBALANCE = 0x12,
+ SDCA_CTL_PPU_VERTICALBALANCE = 0x13,
+};
+
+/**
+ * enum sdca_tg_controls - SDCA Controls for Tone Generator Entity
+ *
+ * Control Selectors for Tone Generator from SDCA specification v1.0
+ * Section 6.5.4.4.
+ */
+enum sdca_tg_controls {
+ SDCA_CTL_TG_TONE_DIVIDER = 0x10,
+};
+
+/**
+ * enum sdca_hide_controls - SDCA Controls for HIDE Entity
+ *
+ * Control Selectors for HIDE from SDCA specification v1.0 Section
+ * 6.6.1.2.
+ */
+enum sdca_hide_controls {
+ SDCA_CTL_HIDE_HIDTX_CURRENTOWNER = 0x10,
+ SDCA_CTL_HIDE_HIDTX_MESSAGEOFFSET = 0x12,
+ SDCA_CTL_HIDE_HIDTX_MESSAGELENGTH = 0x13,
+ SDCA_CTL_HIDE_HIDRX_CURRENTOWNER = 0x14,
+ SDCA_CTL_HIDE_HIDRX_MESSAGEOFFSET = 0x16,
+ SDCA_CTL_HIDE_HIDRX_MESSAGELENGTH = 0x17,
+};
+
+/**
+ * enum sdca_entity0_controls - SDCA Controls for Entity 0
+ *
+ * Control Selectors for Entity 0 from SDCA specification v1.0 Section
+ * 6.7.1.1.
+ */
+enum sdca_entity0_controls {
+ SDCA_CTL_ENTITY_0_COMMIT_GROUP_MASK = 0x01,
+ SDCA_CTL_ENTITY_0_FUNCTION_SDCA_VERSION = 0x04,
+ SDCA_CTL_ENTITY_0_FUNCTION_TYPE = 0x05,
+ SDCA_CTL_ENTITY_0_FUNCTION_MANUFACTURER_ID = 0x06,
+ SDCA_CTL_ENTITY_0_FUNCTION_ID = 0x07,
+ SDCA_CTL_ENTITY_0_FUNCTION_VERSION = 0x08,
+ SDCA_CTL_ENTITY_0_FUNCTION_EXTENSION_ID = 0x09,
+ SDCA_CTL_ENTITY_0_FUNCTION_EXTENSION_VERSION = 0x0A,
+ SDCA_CTL_ENTITY_0_FUNCTION_STATUS = 0x10,
+ SDCA_CTL_ENTITY_0_FUNCTION_ACTION = 0x11,
+ SDCA_CTL_ENTITY_0_MATCHING_GUID = 0x12,
+ SDCA_CTL_ENTITY_0_DEVICE_MANUFACTURER_ID = 0x2C,
+ SDCA_CTL_ENTITY_0_DEVICE_PART_ID = 0x2D,
+ SDCA_CTL_ENTITY_0_DEVICE_VERSION = 0x2E,
+ SDCA_CTL_ENTITY_0_DEVICE_SDCA_VERSION = 0x2F,
+
+ /* Function Status Bits */
+ SDCA_CTL_ENTITY_0_DEVICE_NEWLY_ATTACHED = BIT(0),
+ SDCA_CTL_ENTITY_0_INTS_DISABLED_ABNORMALLY = BIT(1),
+ SDCA_CTL_ENTITY_0_STREAMING_STOPPED_ABNORMALLY = BIT(2),
+ SDCA_CTL_ENTITY_0_FUNCTION_FAULT = BIT(3),
+ SDCA_CTL_ENTITY_0_UMP_SEQUENCE_FAULT = BIT(4),
+ SDCA_CTL_ENTITY_0_FUNCTION_NEEDS_INITIALIZATION = BIT(5),
+ SDCA_CTL_ENTITY_0_FUNCTION_HAS_BEEN_RESET = BIT(6),
+ SDCA_CTL_ENTITY_0_FUNCTION_BUSY = BIT(7),
+
+ /* Function Action Bits */
+ SDCA_CTL_ENTITY_0_RESET_FUNCTION_NOW = BIT(0),
+};
+
+#define SDCA_CTL_MIC_BIAS_NAME "Mic Bias"
+#define SDCA_CTL_USAGE_NAME "Usage"
+#define SDCA_CTL_LATENCY_NAME "Latency"
+#define SDCA_CTL_CLUSTERINDEX_NAME "Cluster Index"
+#define SDCA_CTL_DATAPORT_SELECTOR_NAME "Dataport Selector"
+#define SDCA_CTL_MATCHING_GUID_NAME "Matching GUID"
+#define SDCA_CTL_KEEP_ALIVE_NAME "Keep Alive"
+#define SDCA_CTL_NDAI_STREAM_NAME "NDAI Stream"
+#define SDCA_CTL_NDAI_CATEGORY_NAME "NDAI Category"
+#define SDCA_CTL_NDAI_CODINGTYPE_NAME "NDAI Coding Type"
+#define SDCA_CTL_NDAI_PACKETTYPE_NAME "NDAI Packet Type"
+#define SDCA_CTL_MIXER_NAME "Mixer"
+#define SDCA_CTL_SELECTOR_NAME "Selector"
+#define SDCA_CTL_MUTE_NAME "Channel"
+#define SDCA_CTL_CHANNEL_VOLUME_NAME "Channel Volume"
+#define SDCA_CTL_AGC_NAME "AGC"
+#define SDCA_CTL_BASS_BOOST_NAME "Bass Boost"
+#define SDCA_CTL_LOUDNESS_NAME "Loudness"
+#define SDCA_CTL_GAIN_NAME "Gain"
+#define SDCA_CTL_BYPASS_NAME "Bypass"
+#define SDCA_CTL_XU_ID_NAME "XU ID"
+#define SDCA_CTL_XU_VERSION_NAME "XU Version"
+#define SDCA_CTL_FDL_CURRENTOWNER_NAME "FDL Current Owner"
+#define SDCA_CTL_FDL_MESSAGEOFFSET_NAME "FDL Message Offset"
+#define SDCA_CTL_FDL_MESSAGELENGTH_NAME "FDL Message Length"
+#define SDCA_CTL_FDL_STATUS_NAME "FDL Status"
+#define SDCA_CTL_FDL_SET_INDEX_NAME "FDL Set Index"
+#define SDCA_CTL_FDL_HOST_REQUEST_NAME "FDL Host Request"
+#define SDCA_CTL_CLOCK_VALID_NAME "Clock Valid"
+#define SDCA_CTL_SAMPLERATEINDEX_NAME "Sample Rate Index"
+#define SDCA_CTL_CLOCK_SELECT_NAME "Clock Select"
+#define SDCA_CTL_REQUESTED_PS_NAME "Requested PS"
+#define SDCA_CTL_ACTUAL_PS_NAME "Actual PS"
+#define SDCA_CTL_SELECTED_MODE_NAME "Selected Mode"
+#define SDCA_CTL_DETECTED_MODE_NAME "Detected Mode"
+#define SDCA_CTL_PRIVATE_NAME "Private"
+#define SDCA_CTL_PRIVACY_POLICY_NAME "Privacy Policy"
+#define SDCA_CTL_PRIVACY_LOCKSTATE_NAME "Privacy Lockstate"
+#define SDCA_CTL_PRIVACY_OWNER_NAME "Privacy Owner"
+#define SDCA_CTL_AUTHTX_CURRENTOWNER_NAME "AuthTX Current Owner"
+#define SDCA_CTL_AUTHTX_MESSAGEOFFSET_NAME "AuthTX Message Offset"
+#define SDCA_CTL_AUTHTX_MESSAGELENGTH_NAME "AuthTX Message Length"
+#define SDCA_CTL_AUTHRX_CURRENTOWNER_NAME "AuthRX Current Owner"
+#define SDCA_CTL_AUTHRX_MESSAGEOFFSET_NAME "AuthRX Message Offset"
+#define SDCA_CTL_AUTHRX_MESSAGELENGTH_NAME "AuthRX Message Length"
+#define SDCA_CTL_ACOUSTIC_ENERGY_LEVEL_MONITOR_NAME "Acoustic Energy Level Monitor"
+#define SDCA_CTL_ULTRASOUND_LOOP_GAIN_NAME "Ultrasound Loop Gain"
+#define SDCA_CTL_OPAQUESET_0_NAME "Opaqueset 0"
+#define SDCA_CTL_OPAQUESET_1_NAME "Opaqueset 1"
+#define SDCA_CTL_OPAQUESET_2_NAME "Opaqueset 2"
+#define SDCA_CTL_OPAQUESET_3_NAME "Opaqueset 3"
+#define SDCA_CTL_OPAQUESET_4_NAME "Opaqueset 4"
+#define SDCA_CTL_OPAQUESET_5_NAME "Opaqueset 5"
+#define SDCA_CTL_OPAQUESET_6_NAME "Opaqueset 6"
+#define SDCA_CTL_OPAQUESET_7_NAME "Opaqueset 7"
+#define SDCA_CTL_OPAQUESET_8_NAME "Opaqueset 8"
+#define SDCA_CTL_OPAQUESET_9_NAME "Opaqueset 9"
+#define SDCA_CTL_OPAQUESET_10_NAME "Opaqueset 10"
+#define SDCA_CTL_OPAQUESET_11_NAME "Opaqueset 11"
+#define SDCA_CTL_OPAQUESET_12_NAME "Opaqueset 12"
+#define SDCA_CTL_OPAQUESET_13_NAME "Opaqueset 13"
+#define SDCA_CTL_OPAQUESET_14_NAME "Opaqueset 14"
+#define SDCA_CTL_OPAQUESET_15_NAME "Opaqueset 15"
+#define SDCA_CTL_OPAQUESET_16_NAME "Opaqueset 16"
+#define SDCA_CTL_OPAQUESET_17_NAME "Opaqueset 17"
+#define SDCA_CTL_OPAQUESET_18_NAME "Opaqueset 18"
+#define SDCA_CTL_OPAQUESET_19_NAME "Opaqueset 19"
+#define SDCA_CTL_OPAQUESET_20_NAME "Opaqueset 20"
+#define SDCA_CTL_OPAQUESET_21_NAME "Opaqueset 21"
+#define SDCA_CTL_OPAQUESET_22_NAME "Opaqueset 22"
+#define SDCA_CTL_OPAQUESET_23_NAME "Opaqueset 23"
+#define SDCA_CTL_ALGORITHM_READY_NAME "Algorithm Ready"
+#define SDCA_CTL_ALGORITHM_ENABLE_NAME "Algorithm Enable"
+#define SDCA_CTL_ALGORITHM_PREPARE_NAME "Algorithm Prepare"
+#define SDCA_CTL_CENTER_FREQUENCY_INDEX_NAME "Center Frequency Index"
+#define SDCA_CTL_ULTRASOUND_LEVEL_NAME "Ultrasound Level"
+#define SDCA_CTL_AE_NUMBER_NAME "AE Number"
+#define SDCA_CTL_AE_CURRENTOWNER_NAME "AE Current Owner"
+#define SDCA_CTL_AE_MESSAGEOFFSET_NAME "AE Message Offset"
+#define SDCA_CTL_AE_MESSAGELENGTH_NAME "AE Message Length"
+#define SDCA_CTL_TRIGGER_ENABLE_NAME "Trigger Enable"
+#define SDCA_CTL_TRIGGER_STATUS_NAME "Trigger Status"
+#define SDCA_CTL_HIST_BUFFER_MODE_NAME "Hist Buffer Mode"
+#define SDCA_CTL_HIST_BUFFER_PREAMBLE_NAME "Hist Buffer Preamble"
+#define SDCA_CTL_HIST_ERROR_NAME "Hist Error"
+#define SDCA_CTL_TRIGGER_EXTENSION_NAME "Trigger Extension"
+#define SDCA_CTL_TRIGGER_READY_NAME "Trigger Ready"
+#define SDCA_CTL_HIST_CURRENTOWNER_NAME "Hist Current Owner"
+#define SDCA_CTL_HIST_MESSAGEOFFSET_NAME "Hist Message Offset"
+#define SDCA_CTL_HIST_MESSAGELENGTH_NAME "Hist Message Length"
+#define SDCA_CTL_DTODTX_CURRENTOWNER_NAME "DTODTX Current Owner"
+#define SDCA_CTL_DTODTX_MESSAGEOFFSET_NAME "DTODTX Message Offset"
+#define SDCA_CTL_DTODTX_MESSAGELENGTH_NAME "DTODTX Message Length"
+#define SDCA_CTL_DTODRX_CURRENTOWNER_NAME "DTODRX Current Owner"
+#define SDCA_CTL_DTODRX_MESSAGEOFFSET_NAME "DTODRX Message Offset"
+#define SDCA_CTL_DTODRX_MESSAGELENGTH_NAME "DTODRX Message Length"
+#define SDCA_CTL_PROTECTION_MODE_NAME "Protection Mode"
+#define SDCA_CTL_PROTECTION_STATUS_NAME "Protection Status"
+#define SDCA_CTL_OPAQUESETREQ_INDEX_NAME "Opaqueset Req Index"
+#define SDCA_CTL_POSTURENUMBER_NAME "Posture Number"
+#define SDCA_CTL_POSTUREEXTENSION_NAME "Posture Extension"
+#define SDCA_CTL_HORIZONTALBALANCE_NAME "Horizontal Balance"
+#define SDCA_CTL_VERTICALBALANCE_NAME "Vertical Balance"
+#define SDCA_CTL_TONE_DIVIDER_NAME "Tone Divider"
+#define SDCA_CTL_HIDTX_CURRENTOWNER_NAME "HIDTX Current Owner"
+#define SDCA_CTL_HIDTX_MESSAGEOFFSET_NAME "HIDTX Message Offset"
+#define SDCA_CTL_HIDTX_MESSAGELENGTH_NAME "HIDTX Message Length"
+#define SDCA_CTL_HIDRX_CURRENTOWNER_NAME "HIDRX Current Owner"
+#define SDCA_CTL_HIDRX_MESSAGEOFFSET_NAME "HIDRX Message Offset"
+#define SDCA_CTL_HIDRX_MESSAGELENGTH_NAME "HIDRX Message Length"
+#define SDCA_CTL_COMMIT_GROUP_MASK_NAME "Commit Group Mask"
+#define SDCA_CTL_FUNCTION_SDCA_VERSION_NAME "Function SDCA Version"
+#define SDCA_CTL_FUNCTION_TYPE_NAME "Function Type"
+#define SDCA_CTL_FUNCTION_MANUFACTURER_ID_NAME "Function Manufacturer ID"
+#define SDCA_CTL_FUNCTION_ID_NAME "Function ID"
+#define SDCA_CTL_FUNCTION_VERSION_NAME "Function Version"
+#define SDCA_CTL_FUNCTION_EXTENSION_ID_NAME "Function Extension ID"
+#define SDCA_CTL_FUNCTION_EXTENSION_VERSION_NAME "Function Extension Version"
+#define SDCA_CTL_FUNCTION_STATUS_NAME "Function Status"
+#define SDCA_CTL_FUNCTION_ACTION_NAME "Function Action"
+#define SDCA_CTL_DEVICE_MANUFACTURER_ID_NAME "Device Manufacturer ID"
+#define SDCA_CTL_DEVICE_PART_ID_NAME "Device Part ID"
+#define SDCA_CTL_DEVICE_VERSION_NAME "Device Version"
+#define SDCA_CTL_DEVICE_SDCA_VERSION_NAME "Device SDCA Version"
+
+/**
+ * enum sdca_control_datatype - SDCA Control Data Types
+ *
+ * Data Types as described in the SDCA specification v1.0 section
+ * 7.3.
+ */
+enum sdca_control_datatype {
+ SDCA_CTL_DATATYPE_ONEBIT,
+ SDCA_CTL_DATATYPE_INTEGER,
+ SDCA_CTL_DATATYPE_SPEC_ENCODED_VALUE,
+ SDCA_CTL_DATATYPE_BCD,
+ SDCA_CTL_DATATYPE_Q7P8DB,
+ SDCA_CTL_DATATYPE_BYTEINDEX,
+ SDCA_CTL_DATATYPE_POSTURENUMBER,
+ SDCA_CTL_DATATYPE_DP_INDEX,
+ SDCA_CTL_DATATYPE_BITINDEX,
+ SDCA_CTL_DATATYPE_BITMAP,
+ SDCA_CTL_DATATYPE_GUID,
+ SDCA_CTL_DATATYPE_IMPDEF,
+};
+
+/**
+ * enum sdca_access_mode - SDCA Control access mode
+ *
+ * Access modes as described in the SDCA specification v1.0 section
+ * 7.1.8.2.
+ */
+enum sdca_access_mode {
+ SDCA_ACCESS_MODE_RW = 0x0,
+ SDCA_ACCESS_MODE_DUAL = 0x1,
+ SDCA_ACCESS_MODE_RW1C = 0x2,
+ SDCA_ACCESS_MODE_RO = 0x3,
+ SDCA_ACCESS_MODE_RW1S = 0x4,
+ SDCA_ACCESS_MODE_DC = 0x5,
+};
+
+/**
+ * enum sdca_access_layer - SDCA Control access layer
+ *
+ * Access layers as described in the SDCA specification v1.0 section
+ * 7.1.9.
+ */
+enum sdca_access_layer {
+ SDCA_ACCESS_LAYER_USER = 1 << 0,
+ SDCA_ACCESS_LAYER_APPLICATION = 1 << 1,
+ SDCA_ACCESS_LAYER_CLASS = 1 << 2,
+ SDCA_ACCESS_LAYER_PLATFORM = 1 << 3,
+ SDCA_ACCESS_LAYER_DEVICE = 1 << 4,
+ SDCA_ACCESS_LAYER_EXTENSION = 1 << 5,
+};
+
+/**
+ * struct sdca_control_range - SDCA Control range table
+ * @cols: Number of columns in the range table.
+ * @rows: Number of rows in the range table.
+ * @data: Array of values contained in the range table.
+ */
+struct sdca_control_range {
+ unsigned int cols;
+ unsigned int rows;
+ u32 *data;
+};
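+
+/*
+ * A hedged sketch of a cell accessor, assuming the table data is stored
+ * row-major (my_range_val is hypothetical; no bounds checking shown):
+ *
+ * static u32 my_range_val(const struct sdca_control_range *range,
+ *			   unsigned int row, unsigned int col)
+ * {
+ *	// row-major layout: each row holds range->cols values
+ *	return range->data[row * range->cols + col];
+ * }
+ */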
+
+/**
+ * struct sdca_control - information for one SDCA Control
+ * @label: Name for the Control, from SDCA Specification v1.0, section 7.1.7.
+ * @sel: Identifier used for addressing.
+ * @nbits: Number of bits used in the Control.
+ * @values: Holds the Control value for constants and defaults.
+ * @cn_list: A bitmask showing the valid Control Numbers within this Control;
+ * Control Numbers typically represent channels.
+ * @interrupt_position: SDCA interrupt line that will alert to changes on this
+ * Control.
+ * @type: Format of the data in the Control.
+ * @range: Buffer describing valid range of values for the Control.
+ * @mode: Access mode of the Control.
+ * @layers: Bitmask of access layers of the Control.
+ * @deferrable: Indicates if the access to the Control can be deferred.
+ * @is_volatile: Indicates the value of the Control can change by itself,
+ * so it should not be cached.
+ * @has_default: Indicates the Control has a default value to be written.
+ * @has_fixed: Indicates the Control only supports a single value.
+ */
+struct sdca_control {
+ const char *label;
+ int sel;
+
+ int nbits;
+ int *values;
+ u64 cn_list;
+ int interrupt_position;
+
+ enum sdca_control_datatype type;
+ struct sdca_control_range range;
+ enum sdca_access_mode mode;
+ u8 layers;
+
+ bool deferrable;
+ bool is_volatile;
+ bool has_default;
+ bool has_fixed;
+};
+
+/**
+ * enum sdca_terminal_type - SDCA Terminal Types
+ *
+ * Indicate what a Terminal Entity is used for, see section 6.2.3
+ * of the SDCA v1.0 specification.
+ */
+enum sdca_terminal_type {
+	/* Table 77 - Data Port */
+ SDCA_TERM_TYPE_GENERIC = 0x101,
+ SDCA_TERM_TYPE_ULTRASOUND = 0x180,
+ SDCA_TERM_TYPE_CAPTURE_DIRECT_PCM_MIC = 0x181,
+ SDCA_TERM_TYPE_RAW_PDM_MIC = 0x182,
+ SDCA_TERM_TYPE_SPEECH = 0x183,
+ SDCA_TERM_TYPE_VOICE = 0x184,
+ SDCA_TERM_TYPE_SECONDARY_PCM_MIC = 0x185,
+ SDCA_TERM_TYPE_ACOUSTIC_CONTEXT_AWARENESS = 0x186,
+ SDCA_TERM_TYPE_DTOD_STREAM = 0x187,
+ SDCA_TERM_TYPE_REFERENCE_STREAM = 0x188,
+ SDCA_TERM_TYPE_SENSE_CAPTURE = 0x189,
+ SDCA_TERM_TYPE_STREAMING_MIC = 0x18A,
+ SDCA_TERM_TYPE_OPTIMIZATION_STREAM = 0x190,
+ SDCA_TERM_TYPE_PDM_RENDER_STREAM = 0x191,
+ SDCA_TERM_TYPE_COMPANION_DATA = 0x192,
+ /* Table 78 - Transducer */
+ SDCA_TERM_TYPE_MICROPHONE_TRANSDUCER = 0x201,
+ SDCA_TERM_TYPE_MICROPHONE_ARRAY_TRANSDUCER = 0x205,
+ SDCA_TERM_TYPE_PRIMARY_FULL_RANGE_SPEAKER = 0x380,
+ SDCA_TERM_TYPE_PRIMARY_LFE_SPEAKER = 0x381,
+ SDCA_TERM_TYPE_PRIMARY_TWEETER_SPEAKER = 0x382,
+ SDCA_TERM_TYPE_PRIMARY_ULTRASOUND_SPEAKER = 0x383,
+ SDCA_TERM_TYPE_SECONDARY_FULL_RANGE_SPEAKER = 0x390,
+ SDCA_TERM_TYPE_SECONDARY_LFE_SPEAKER = 0x391,
+ SDCA_TERM_TYPE_SECONDARY_TWEETER_SPEAKER = 0x392,
+ SDCA_TERM_TYPE_SECONDARY_ULTRASOUND_SPEAKER = 0x393,
+ SDCA_TERM_TYPE_TERTIARY_FULL_RANGE_SPEAKER = 0x3A0,
+ SDCA_TERM_TYPE_TERTIARY_LFE_SPEAKER = 0x3A1,
+ SDCA_TERM_TYPE_TERTIARY_TWEETER_SPEAKER = 0x3A2,
+ SDCA_TERM_TYPE_TERTIARY_ULTRASOUND_SPEAKER = 0x3A3,
+ SDCA_TERM_TYPE_SPDIF = 0x605,
+ SDCA_TERM_TYPE_NDAI_DISPLAY_AUDIO = 0x610,
+ SDCA_TERM_TYPE_NDAI_USB = 0x612,
+ SDCA_TERM_TYPE_NDAI_BLUETOOTH_MAIN = 0x614,
+ SDCA_TERM_TYPE_NDAI_BLUETOOTH_ALTERNATE = 0x615,
+ SDCA_TERM_TYPE_NDAI_BLUETOOTH_BOTH = 0x616,
+ SDCA_TERM_TYPE_LINEIN_STEREO = 0x680,
+ SDCA_TERM_TYPE_LINEIN_FRONT_LR = 0x681,
+ SDCA_TERM_TYPE_LINEIN_CENTER_LFE = 0x682,
+ SDCA_TERM_TYPE_LINEIN_SURROUND_LR = 0x683,
+ SDCA_TERM_TYPE_LINEIN_REAR_LR = 0x684,
+ SDCA_TERM_TYPE_LINEOUT_STEREO = 0x690,
+ SDCA_TERM_TYPE_LINEOUT_FRONT_LR = 0x691,
+ SDCA_TERM_TYPE_LINEOUT_CENTER_LFE = 0x692,
+ SDCA_TERM_TYPE_LINEOUT_SURROUND_LR = 0x693,
+ SDCA_TERM_TYPE_LINEOUT_REAR_LR = 0x694,
+ SDCA_TERM_TYPE_MIC_JACK = 0x6A0,
+ SDCA_TERM_TYPE_STEREO_JACK = 0x6B0,
+ SDCA_TERM_TYPE_FRONT_LR_JACK = 0x6B1,
+ SDCA_TERM_TYPE_CENTER_LFE_JACK = 0x6B2,
+ SDCA_TERM_TYPE_SURROUND_LR_JACK = 0x6B3,
+ SDCA_TERM_TYPE_REAR_LR_JACK = 0x6B4,
+ SDCA_TERM_TYPE_HEADPHONE_JACK = 0x6C0,
+ SDCA_TERM_TYPE_HEADSET_JACK = 0x6D0,
+ /* Table 79 - System */
+ SDCA_TERM_TYPE_SENSE_DATA = 0x280,
+ SDCA_TERM_TYPE_PRIVACY_SIGNALING = 0x741,
+ SDCA_TERM_TYPE_PRIVACY_INDICATORS = 0x747,
+};
+
+#define SDCA_TERM_TYPE_LINEIN_STEREO_NAME "LineIn Stereo"
+#define SDCA_TERM_TYPE_LINEIN_FRONT_LR_NAME "LineIn Front-LR"
+#define SDCA_TERM_TYPE_LINEIN_CENTER_LFE_NAME "LineIn Center-LFE"
+#define SDCA_TERM_TYPE_LINEIN_SURROUND_LR_NAME "LineIn Surround-LR"
+#define SDCA_TERM_TYPE_LINEIN_REAR_LR_NAME "LineIn Rear-LR"
+#define SDCA_TERM_TYPE_LINEOUT_STEREO_NAME "LineOut Stereo"
+#define SDCA_TERM_TYPE_LINEOUT_FRONT_LR_NAME "LineOut Front-LR"
+#define SDCA_TERM_TYPE_LINEOUT_CENTER_LFE_NAME "LineOut Center-LFE"
+#define SDCA_TERM_TYPE_LINEOUT_SURROUND_LR_NAME "LineOut Surround-LR"
+#define SDCA_TERM_TYPE_LINEOUT_REAR_LR_NAME "LineOut Rear-LR"
+#define SDCA_TERM_TYPE_MIC_JACK_NAME "Microphone"
+#define SDCA_TERM_TYPE_STEREO_JACK_NAME "Speaker Stereo"
+#define SDCA_TERM_TYPE_FRONT_LR_JACK_NAME "Speaker Front-LR"
+#define SDCA_TERM_TYPE_CENTER_LFE_JACK_NAME "Speaker Center-LFE"
+#define SDCA_TERM_TYPE_SURROUND_LR_JACK_NAME "Speaker Surround-LR"
+#define SDCA_TERM_TYPE_REAR_LR_JACK_NAME "Speaker Rear-LR"
+#define SDCA_TERM_TYPE_HEADPHONE_JACK_NAME "Headphone"
+#define SDCA_TERM_TYPE_HEADSET_JACK_NAME "Headset"
+
+/**
+ * enum sdca_connector_type - SDCA Connector Types
+ *
+ * Indicate the type of Connector that a Terminal Entity represents,
+ * see section 6.2.4 of the SDCA v1.0 specification.
+ */
+enum sdca_connector_type {
+ SDCA_CONN_TYPE_UNKNOWN = 0x00,
+ SDCA_CONN_TYPE_2P5MM_JACK = 0x01,
+ SDCA_CONN_TYPE_3P5MM_JACK = 0x02,
+ SDCA_CONN_TYPE_QUARTER_INCH_JACK = 0x03,
+ SDCA_CONN_TYPE_XLR = 0x05,
+ SDCA_CONN_TYPE_SPDIF_OPTICAL = 0x06,
+ SDCA_CONN_TYPE_RCA = 0x07,
+ SDCA_CONN_TYPE_DIN = 0x0E,
+ SDCA_CONN_TYPE_MINI_DIN = 0x0F,
+ SDCA_CONN_TYPE_EIAJ_OPTICAL = 0x13,
+ SDCA_CONN_TYPE_HDMI = 0x14,
+ SDCA_CONN_TYPE_DISPLAYPORT = 0x17,
+ SDCA_CONN_TYPE_LIGHTNING = 0x1B,
+ SDCA_CONN_TYPE_USB_C = 0x1E,
+ SDCA_CONN_TYPE_OTHER = 0xFF,
+};
+
+/**
+ * struct sdca_entity_iot - information specific to Input/Output Entities
+ * @clock: Pointer to the Entity providing this Terminal's clock.
+ * @type: Usage of the Terminal Entity.
+ * @connector: Physical Connector of the Terminal Entity.
+ * @reference: Physical Jack number of the Terminal Entity.
+ * @num_transducer: Number of transducers attached to the Terminal Entity.
+ * @is_dataport: Boolean indicating if this Terminal represents a Dataport.
+ */
+struct sdca_entity_iot {
+ struct sdca_entity *clock;
+
+ enum sdca_terminal_type type;
+ enum sdca_connector_type connector;
+ int reference;
+ int num_transducer;
+
+ bool is_dataport;
+};
+
+/**
+ * enum sdca_clock_type - SDCA Clock Types
+ *
+ * Indicate the synchronicity of a Clock Entity, see section 6.4.1.3
+ * of the SDCA v1.0 specification.
+ */
+enum sdca_clock_type {
+ SDCA_CLOCK_TYPE_EXTERNAL = 0x00,
+ SDCA_CLOCK_TYPE_INTERNAL_ASYNC = 0x01,
+ SDCA_CLOCK_TYPE_INTERNAL_SYNC = 0x02,
+ SDCA_CLOCK_TYPE_INTERNAL_SOURCE_SYNC = 0x03,
+};
+
+/**
+ * struct sdca_entity_cs - information specific to Clock Source Entities
+ * @type: Synchronicity of the Clock Source.
+ * @max_delay: The maximum delay in microseconds before the clock is stable.
+ */
+struct sdca_entity_cs {
+ enum sdca_clock_type type;
+ unsigned int max_delay;
+};
+
+/**
+ * enum sdca_pde_power_state - SDCA Power States
+ *
+ * SDCA Power State values from SDCA specification v1.0 Section 7.12.4.
+ */
+enum sdca_pde_power_state {
+ SDCA_PDE_PS0 = 0x0,
+ SDCA_PDE_PS1 = 0x1,
+ SDCA_PDE_PS2 = 0x2,
+ SDCA_PDE_PS3 = 0x3,
+ SDCA_PDE_PS4 = 0x4,
+};
+
+/**
+ * struct sdca_pde_delay - describes the delay changing between 2 power states
+ * @from_ps: The power state being exited.
+ * @to_ps: The power state being entered.
+ * @us: The delay in microseconds switching between the two states.
+ */
+struct sdca_pde_delay {
+ int from_ps;
+ int to_ps;
+ unsigned int us;
+};
+
+/**
+ * struct sdca_entity_pde - information specific to Power Domain Entities
+ * @managed: Dynamically allocated array pointing to each Entity
+ * controlled by this PDE.
+ * @max_delay: Dynamically allocated array of delays for switching
+ * between power states.
+ * @num_managed: Number of Entities controlled by this PDE.
+ * @num_max_delay: Number of delays specified for state changes.
+ */
+struct sdca_entity_pde {
+ struct sdca_entity **managed;
+ struct sdca_pde_delay *max_delay;
+ int num_managed;
+ int num_max_delay;
+};
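+
+/*
+ * A hedged sketch of looking up the worst-case delay for a given power-state
+ * transition (my_pde_delay is hypothetical):
+ *
+ * static unsigned int my_pde_delay(const struct sdca_entity_pde *pde,
+ *				    int from_ps, int to_ps)
+ * {
+ *	int i;
+ *
+ *	for (i = 0; i < pde->num_max_delay; i++)
+ *		if (pde->max_delay[i].from_ps == from_ps &&
+ *		    pde->max_delay[i].to_ps == to_ps)
+ *			return pde->max_delay[i].us;
+ *
+ *	return 0;	// no delay specified for this transition
+ * }
+ */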
+
+/**
+ * enum sdca_entity_type - SDCA Entity Type codes
+ * @SDCA_ENTITY_TYPE_ENTITY_0: Entity 0, not actually a Type from the
+ *	  specification, but useful internally since an Entity structure
+ *	  is allocated for Entity 0 to hold the Entity 0 Controls.
+ * @SDCA_ENTITY_TYPE_IT: Input Terminal.
+ * @SDCA_ENTITY_TYPE_OT: Output Terminal.
+ * @SDCA_ENTITY_TYPE_MU: Mixer Unit.
+ * @SDCA_ENTITY_TYPE_SU: Selector Unit.
+ * @SDCA_ENTITY_TYPE_FU: Feature Unit.
+ * @SDCA_ENTITY_TYPE_XU: Extension Unit.
+ * @SDCA_ENTITY_TYPE_CS: Clock Source.
+ * @SDCA_ENTITY_TYPE_CX: Clock selector.
+ * @SDCA_ENTITY_TYPE_PDE: Power-Domain Entity.
+ * @SDCA_ENTITY_TYPE_GE: Group Entity.
+ * @SDCA_ENTITY_TYPE_SPE: Security & Privacy Entity.
+ * @SDCA_ENTITY_TYPE_CRU: Channel Remapping Unit.
+ * @SDCA_ENTITY_TYPE_UDMPU: Up-Down Mixer Processing Unit.
+ * @SDCA_ENTITY_TYPE_MFPU: Multi-Function Processing Unit.
+ * @SDCA_ENTITY_TYPE_SMPU: Smart Microphone Processing Unit.
+ * @SDCA_ENTITY_TYPE_SAPU: Smart Amp Processing Unit.
+ * @SDCA_ENTITY_TYPE_PPU: Posture Processing Unit.
+ * @SDCA_ENTITY_TYPE_TG: Tone Generator.
+ * @SDCA_ENTITY_TYPE_HIDE: Human Interface Device Entity.
+ *
+ * SDCA Entity Types from SDCA specification v1.0 Section 6.1.2
+ * all Entity Types not described are reserved.
+ */
+enum sdca_entity_type {
+ SDCA_ENTITY_TYPE_ENTITY_0 = 0x00,
+ SDCA_ENTITY_TYPE_IT = 0x02,
+ SDCA_ENTITY_TYPE_OT = 0x03,
+ SDCA_ENTITY_TYPE_MU = 0x05,
+ SDCA_ENTITY_TYPE_SU = 0x06,
+ SDCA_ENTITY_TYPE_FU = 0x07,
+ SDCA_ENTITY_TYPE_XU = 0x0A,
+ SDCA_ENTITY_TYPE_CS = 0x0B,
+ SDCA_ENTITY_TYPE_CX = 0x0C,
+ SDCA_ENTITY_TYPE_PDE = 0x11,
+ SDCA_ENTITY_TYPE_GE = 0x12,
+ SDCA_ENTITY_TYPE_SPE = 0x13,
+ SDCA_ENTITY_TYPE_CRU = 0x20,
+ SDCA_ENTITY_TYPE_UDMPU = 0x21,
+ SDCA_ENTITY_TYPE_MFPU = 0x22,
+ SDCA_ENTITY_TYPE_SMPU = 0x23,
+ SDCA_ENTITY_TYPE_SAPU = 0x24,
+ SDCA_ENTITY_TYPE_PPU = 0x25,
+ SDCA_ENTITY_TYPE_TG = 0x30,
+ SDCA_ENTITY_TYPE_HIDE = 0x31,
+};
+
+/**
+ * struct sdca_ge_control - control entry in the affected controls list
+ * @id: Entity ID of the Control affected.
+ * @sel: Control Selector of the Control affected.
+ * @cn: Control Number of the Control affected.
+ * @val: Value written to Control for this Mode.
+ */
+struct sdca_ge_control {
+ int id;
+ int sel;
+ int cn;
+ int val;
+};
+
+/**
+ * struct sdca_ge_mode - mode entry in the affected controls list
+ * @controls: Dynamically allocated array of controls written for this Mode.
+ * @num_controls: Number of controls written in this Mode.
+ * @val: GE Selector Mode value.
+ */
+struct sdca_ge_mode {
+ struct sdca_ge_control *controls;
+ int num_controls;
+ int val;
+};
+
+/**
+ * struct sdca_entity_ge - information specific to Group Entities
+ * @kctl: ALSA control pointer that can be used by linked Entities.
+ * @modes: Dynamically allocated array of Modes and the Controls written
+ * in each mode.
+ * @num_modes: Number of Modes.
+ */
+struct sdca_entity_ge {
+ struct snd_kcontrol_new *kctl;
+ struct sdca_ge_mode *modes;
+ int num_modes;
+};
+
+/**
+ * struct sdca_entity_hide - information specific to HIDE Entities
+ * @hid: HID device structure
+ * @num_hidtx_ids: number of HIDTx Report IDs
+ * @num_hidrx_ids: number of HIDRx Report IDs
+ * @hidtx_ids: array of HIDTx Report IDs
+ * @hidrx_ids: array of HIDRx Report IDs
+ * @af_number_list: which Audio Function Numbers within this Device are
+ * sending/receiving the messages in this HIDE
+ * @hide_reside_function_num: the Audio Function Number within this Device
+ * in which this HIDE resides
+ * @max_delay: the maximum time in microseconds allowed for the Device
+ * to change the ownership from Device to Host
+ * @hid_report_desc: HID Report Descriptor for the HIDE Entity
+ * @hid_desc: HID descriptor for the HIDE Entity
+ */
+struct sdca_entity_hide {
+ struct hid_device *hid;
+ unsigned int *hidtx_ids;
+ unsigned int *hidrx_ids;
+ int num_hidtx_ids;
+ int num_hidrx_ids;
+ unsigned int af_number_list[SDCA_MAX_FUNCTION_COUNT];
+ unsigned int hide_reside_function_num;
+ unsigned int max_delay;
+ unsigned char *hid_report_desc;
+ struct hid_descriptor hid_desc;
+};
+
+/**
+ * enum sdca_xu_reset_mechanism - SDCA FDL Reset mechanisms
+ */
+enum sdca_xu_reset_mechanism {
+ SDCA_XU_RESET_FUNCTION = 0x0,
+ SDCA_XU_RESET_DEVICE = 0x1,
+ SDCA_XU_RESET_BUS = 0x2,
+};
+
+/**
+ * struct sdca_entity_xu - information specific to XU Entities
+ * @max_delay: the maximum time in microseconds allowed for the Device
+ * to change the ownership from Device to Host
+ * @reset_mechanism: indicates the type of reset that can be requested at
+ * the end of an FDL.
+ */
+struct sdca_entity_xu {
+ unsigned int max_delay;
+ enum sdca_xu_reset_mechanism reset_mechanism;
+};
+
+/**
+ * struct sdca_entity - information for one SDCA Entity
+ * @label: String such as "OT 12".
+ * @id: Identifier used for addressing.
+ * @type: Type code for the Entity.
+ * @group: Pointer to Group Entity controlling this one, NULL if N/A.
+ * @sources: Dynamically allocated array pointing to each input Entity
+ * connected to this Entity.
+ * @controls: Dynamically allocated array of Controls.
+ * @num_sources: Number of sources for the Entity.
+ * @num_controls: Number of Controls for the Entity.
+ * @iot: Input/Output Terminal specific Entity properties.
+ * @cs: Clock Source specific Entity properties.
+ * @pde: Power Domain Entity specific Entity properties.
+ * @ge: Group Entity specific Entity properties.
+ * @hide: HIDE Entity specific Entity properties.
+ * @xu: XU Entity specific Entity properties.
+ */
+struct sdca_entity {
+ const char *label;
+ int id;
+ enum sdca_entity_type type;
+
+ struct sdca_entity *group;
+ struct sdca_entity **sources;
+ struct sdca_control *controls;
+ int num_sources;
+ int num_controls;
+ union {
+ struct sdca_entity_iot iot;
+ struct sdca_entity_cs cs;
+ struct sdca_entity_pde pde;
+ struct sdca_entity_ge ge;
+ struct sdca_entity_hide hide;
+ struct sdca_entity_xu xu;
+ };
+};
+
+/**
+ * enum sdca_channel_purpose - SDCA Channel Purpose code
+ *
+ * Channel Purpose codes as described in the SDCA specification v1.0
+ * section 11.4.3.
+ */
+enum sdca_channel_purpose {
+ /* Table 210 - Purpose */
+ SDCA_CHAN_PURPOSE_GENERIC_AUDIO = 0x01,
+ SDCA_CHAN_PURPOSE_VOICE = 0x02,
+ SDCA_CHAN_PURPOSE_SPEECH = 0x03,
+ SDCA_CHAN_PURPOSE_AMBIENT = 0x04,
+ SDCA_CHAN_PURPOSE_REFERENCE = 0x05,
+ SDCA_CHAN_PURPOSE_ULTRASOUND = 0x06,
+ SDCA_CHAN_PURPOSE_SENSE = 0x08,
+ SDCA_CHAN_PURPOSE_SILENCE = 0xFE,
+ SDCA_CHAN_PURPOSE_NON_AUDIO = 0xFF,
+ /* Table 211 - Amp Sense */
+ SDCA_CHAN_PURPOSE_SENSE_V1 = 0x09,
+ SDCA_CHAN_PURPOSE_SENSE_V2 = 0x0A,
+ SDCA_CHAN_PURPOSE_SENSE_V12_INTERLEAVED = 0x10,
+ SDCA_CHAN_PURPOSE_SENSE_V21_INTERLEAVED = 0x11,
+ SDCA_CHAN_PURPOSE_SENSE_V12_PACKED = 0x12,
+ SDCA_CHAN_PURPOSE_SENSE_V21_PACKED = 0x13,
+ SDCA_CHAN_PURPOSE_SENSE_V1212_INTERLEAVED = 0x14,
+ SDCA_CHAN_PURPOSE_SENSE_V2121_INTERLEAVED = 0x15,
+ SDCA_CHAN_PURPOSE_SENSE_V1122_INTERLEAVED = 0x16,
+ SDCA_CHAN_PURPOSE_SENSE_V2211_INTERLEAVED = 0x17,
+ SDCA_CHAN_PURPOSE_SENSE_V1212_PACKED = 0x18,
+ SDCA_CHAN_PURPOSE_SENSE_V2121_PACKED = 0x19,
+ SDCA_CHAN_PURPOSE_SENSE_V1122_PACKED = 0x1A,
+ SDCA_CHAN_PURPOSE_SENSE_V2211_PACKED = 0x1B,
+};
+
+/**
+ * enum sdca_channel_relationship - SDCA Channel Relationship code
+ *
+ * Channel Relationship codes as described in the SDCA specification
+ * v1.0 section 11.4.2.
+ */
+enum sdca_channel_relationship {
+ /* Table 206 - Streaming */
+ SDCA_CHAN_REL_UNDEFINED = 0x00,
+ SDCA_CHAN_REL_GENERIC_MONO = 0x01,
+ SDCA_CHAN_REL_GENERIC_LEFT = 0x02,
+ SDCA_CHAN_REL_GENERIC_RIGHT = 0x03,
+ SDCA_CHAN_REL_GENERIC_TOP = 0x48,
+ SDCA_CHAN_REL_GENERIC_BOTTOM = 0x49,
+ SDCA_CHAN_REL_CAPTURE_DIRECT = 0x4E,
+ SDCA_CHAN_REL_RENDER_DIRECT = 0x4F,
+ SDCA_CHAN_REL_FRONT_LEFT = 0x0B,
+ SDCA_CHAN_REL_FRONT_RIGHT = 0x0C,
+ SDCA_CHAN_REL_FRONT_CENTER = 0x0D,
+ SDCA_CHAN_REL_SIDE_LEFT = 0x12,
+ SDCA_CHAN_REL_SIDE_RIGHT = 0x13,
+ SDCA_CHAN_REL_BACK_LEFT = 0x16,
+ SDCA_CHAN_REL_BACK_RIGHT = 0x17,
+ SDCA_CHAN_REL_LOW_FREQUENCY_EFFECTS = 0x43,
+ SDCA_CHAN_REL_SOUNDWIRE_MIC = 0x55,
+ SDCA_CHAN_REL_SENSE_TRANSDUCER_1 = 0x58,
+ SDCA_CHAN_REL_SENSE_TRANSDUCER_2 = 0x59,
+ SDCA_CHAN_REL_SENSE_TRANSDUCER_12 = 0x5A,
+ SDCA_CHAN_REL_SENSE_TRANSDUCER_21 = 0x5B,
+ SDCA_CHAN_REL_ECHOREF_NONE = 0x70,
+ SDCA_CHAN_REL_ECHOREF_1 = 0x71,
+ SDCA_CHAN_REL_ECHOREF_2 = 0x72,
+ SDCA_CHAN_REL_ECHOREF_3 = 0x73,
+ SDCA_CHAN_REL_ECHOREF_4 = 0x74,
+ SDCA_CHAN_REL_ECHOREF_ALL = 0x75,
+ SDCA_CHAN_REL_ECHOREF_LFE_ALL = 0x76,
+ /* Table 207 - Speaker */
+ SDCA_CHAN_REL_PRIMARY_TRANSDUCER = 0x50,
+ SDCA_CHAN_REL_SECONDARY_TRANSDUCER = 0x51,
+ SDCA_CHAN_REL_TERTIARY_TRANSDUCER = 0x52,
+ SDCA_CHAN_REL_LOWER_LEFT_ALLTRANSDUCER = 0x60,
+ SDCA_CHAN_REL_LOWER_RIGHT_ALLTRANSDUCER = 0x61,
+ SDCA_CHAN_REL_UPPER_LEFT_ALLTRANSDUCER = 0x62,
+ SDCA_CHAN_REL_UPPER_RIGHT_ALLTRANSDUCER = 0x63,
+ SDCA_CHAN_REL_LOWER_LEFT_PRIMARY = 0x64,
+ SDCA_CHAN_REL_LOWER_RIGHT_PRIMARY = 0x65,
+ SDCA_CHAN_REL_UPPER_LEFT_PRIMARY = 0x66,
+ SDCA_CHAN_REL_UPPER_RIGHT_PRIMARY = 0x67,
+ SDCA_CHAN_REL_LOWER_LEFT_SECONDARY = 0x68,
+ SDCA_CHAN_REL_LOWER_RIGHT_SECONDARY = 0x69,
+ SDCA_CHAN_REL_UPPER_LEFT_SECONDARY = 0x6A,
+ SDCA_CHAN_REL_UPPER_RIGHT_SECONDARY = 0x6B,
+ SDCA_CHAN_REL_LOWER_LEFT_TERTIARY = 0x6C,
+ SDCA_CHAN_REL_LOWER_RIGHT_TERTIARY = 0x6D,
+ SDCA_CHAN_REL_UPPER_LEFT_TERTIARY = 0x6E,
+ SDCA_CHAN_REL_UPPER_RIGHT_TERTIARY = 0x6F,
+ SDCA_CHAN_REL_DERIVED_LOWER_LEFT_PRIMARY = 0x94,
+ SDCA_CHAN_REL_DERIVED_LOWER_RIGHT_PRIMARY = 0x95,
+ SDCA_CHAN_REL_DERIVED_UPPER_LEFT_PRIMARY = 0x96,
+ SDCA_CHAN_REL_DERIVED_UPPER_RIGHT_PRIMARY = 0x97,
+ SDCA_CHAN_REL_DERIVED_LOWER_LEFT_SECONDARY = 0x98,
+ SDCA_CHAN_REL_DERIVED_LOWER_RIGHT_SECONDARY = 0x99,
+ SDCA_CHAN_REL_DERIVED_UPPER_LEFT_SECONDARY = 0x9A,
+ SDCA_CHAN_REL_DERIVED_UPPER_RIGHT_SECONDARY = 0x9B,
+ SDCA_CHAN_REL_DERIVED_LOWER_LEFT_TERTIARY = 0x9C,
+ SDCA_CHAN_REL_DERIVED_LOWER_RIGHT_TERTIARY = 0x9D,
+ SDCA_CHAN_REL_DERIVED_UPPER_LEFT_TERTIARY = 0x9E,
+ SDCA_CHAN_REL_DERIVED_UPPER_RIGHT_TERTIARY = 0x9F,
+ SDCA_CHAN_REL_DERIVED_MONO_PRIMARY = 0xA0,
+ SDCA_CHAN_REL_DERIVED_MONO_SECONDARY = 0xAB,
+ SDCA_CHAN_REL_DERIVED_MONO_TERTIARY = 0xAC,
+ /* Table 208 - Equipment */
+ SDCA_CHAN_REL_EQUIPMENT_LEFT = 0x02,
+ SDCA_CHAN_REL_EQUIPMENT_RIGHT = 0x03,
+ SDCA_CHAN_REL_EQUIPMENT_COMBINED = 0x47,
+ SDCA_CHAN_REL_EQUIPMENT_TOP = 0x48,
+ SDCA_CHAN_REL_EQUIPMENT_BOTTOM = 0x49,
+ SDCA_CHAN_REL_EQUIPMENT_TOP_LEFT = 0x4A,
+ SDCA_CHAN_REL_EQUIPMENT_BOTTOM_LEFT = 0x4B,
+ SDCA_CHAN_REL_EQUIPMENT_TOP_RIGHT = 0x4C,
+ SDCA_CHAN_REL_EQUIPMENT_BOTTOM_RIGHT = 0x4D,
+ SDCA_CHAN_REL_EQUIPMENT_SILENCED_OUTPUT = 0x57,
+ /* Table 209 - Other */
+ SDCA_CHAN_REL_ARRAY = 0x04,
+ SDCA_CHAN_REL_MIC = 0x53,
+ SDCA_CHAN_REL_RAW = 0x54,
+ SDCA_CHAN_REL_SILENCED_MIC = 0x56,
+ SDCA_CHAN_REL_MULTI_SOURCE_1 = 0x78,
+ SDCA_CHAN_REL_MULTI_SOURCE_2 = 0x79,
+ SDCA_CHAN_REL_MULTI_SOURCE_3 = 0x7A,
+ SDCA_CHAN_REL_MULTI_SOURCE_4 = 0x7B,
+};
+
+/**
+ * struct sdca_channel - a single Channel within a Cluster
+ * @id: Identifier used for addressing.
+ * @purpose: Indicates the purpose of the Channel, usually to give
+ * semantic meaning to the audio, e.g. voice, ultrasound.
+ * @relationship: Indicates the relationship of this Channel to others
+ * in the Cluster, often used to identify the physical position of the
+ * Channel, e.g. left.
+ */
+struct sdca_channel {
+ int id;
+ enum sdca_channel_purpose purpose;
+ enum sdca_channel_relationship relationship;
+};
+
+/**
+ * struct sdca_cluster - information about an SDCA Channel Cluster
+ * @id: Identifier used for addressing.
+ * @num_channels: Number of Channels within this Cluster.
+ * @channels: Dynamically allocated array of Channels.
+ */
+struct sdca_cluster {
+ int id;
+ int num_channels;
+ struct sdca_channel *channels;
+};
+
+/**
+ * enum sdca_cluster_range - SDCA Range column definitions for ClusterIndex
+ * @SDCA_CLUSTER_BYTEINDEX: Column containing the byte index.
+ * @SDCA_CLUSTER_CLUSTERID: Column containing the Cluster ID.
+ * @SDCA_CLUSTER_NCOLS: Number of columns in the Range.
+ */
+enum sdca_cluster_range {
+ SDCA_CLUSTER_BYTEINDEX = 0,
+ SDCA_CLUSTER_CLUSTERID = 1,
+ SDCA_CLUSTER_NCOLS = 2,
+};
+
+/**
+ * struct sdca_fdl_file - information about a file from a fileset used in FDL
+ * @vendor_id: Vendor ID of the file.
+ * @file_id: File ID of the file.
+ * @fdl_offset: Offset information for FDL.
+ */
+struct sdca_fdl_file {
+ u16 vendor_id;
+ u32 file_id;
+ u32 fdl_offset;
+};
+
+/**
+ * struct sdca_fdl_set - information about a set of files used in FDL
+ * @files: Array of files in this FDL set.
+ * @num_files: Number of files in this FDL set.
+ * @id: ID of the FDL set.
+ */
+struct sdca_fdl_set {
+ struct sdca_fdl_file *files;
+ int num_files;
+ u32 id;
+};
+
+/**
+ * struct sdca_fdl_data - information about a function's FDL data
+ * @swft: Pointer to the SoundWire File Table.
+ * @sets: Array of FDL sets used by this function.
+ * @num_sets: Number of FDL sets used by this function.
+ */
+struct sdca_fdl_data {
+ struct acpi_table_swft *swft;
+ struct sdca_fdl_set *sets;
+ int num_sets;
+};
+
+/**
+ * struct sdca_function_data - top-level information for one SDCA function
+ * @desc: Pointer to short descriptor from initial parsing.
+ * @init_table: Pointer to a table of initialization writes.
+ * @entities: Dynamically allocated array of Entities.
+ * @clusters: Dynamically allocated array of Channel Clusters.
+ * @num_init_table: Number of initialization writes.
+ * @num_entities: Number of Entities reported in this Function.
+ * @num_clusters: Number of Channel Clusters reported in this Function.
+ * @busy_max_delay: Maximum Function busy delay in microseconds, before an
+ * error should be reported.
+ * @reset_max_delay: Maximum Function reset delay in microseconds, before an
+ * error should be reported.
+ * @fdl_data: FDL data for this Function, if available.
+ */
+struct sdca_function_data {
+ struct sdca_function_desc *desc;
+
+ struct sdca_init_write *init_table;
+ struct sdca_entity *entities;
+ struct sdca_cluster *clusters;
+ int num_init_table;
+ int num_entities;
+ int num_clusters;
+
+ unsigned int busy_max_delay;
+ unsigned int reset_max_delay;
+
+ struct sdca_fdl_data fdl_data;
+};
+
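+/**
+ * sdca_range - return a single value from a Range block
+ * @range: Pointer to the Range block.
+ * @col: Column index of the value.
+ * @row: Row index of the value.
+ *
+ * Return: The value at the given column and row.
+ */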
+static inline u32 sdca_range(struct sdca_control_range *range,
+ unsigned int col, unsigned int row)
+{
+ return range->data[(row * range->cols) + col];
+}
+
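+/**
+ * sdca_range_search - search a Range block for a row matching a value
+ * @range: Pointer to the Range block.
+ * @search_col: Column to search for @value.
+ * @value: Value to search for.
+ * @result_col: Column from which the result is returned.
+ *
+ * Return: The @result_col value of the first row where @search_col
+ * matches @value, or zero if no row matches.
+ */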
+static inline u32 sdca_range_search(struct sdca_control_range *range,
+ int search_col, int value, int result_col)
+{
+ int i;
+
+ for (i = 0; i < range->rows; i++) {
+ if (sdca_range(range, search_col, i) == value)
+ return sdca_range(range, result_col, i);
+ }
+
+ return 0;
+}
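+
+/*
+ * Example sketch: look up the Cluster ID for a given byte index in a
+ * ClusterIndex Range, assuming "range" was obtained with
+ * SDCA_CLUSTER_NCOLS columns, for instance via sdca_selector_find_range():
+ *
+ *   u32 id = sdca_range_search(range, SDCA_CLUSTER_BYTEINDEX,
+ *                              byte_index, SDCA_CLUSTER_CLUSTERID);
+ */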
+
+int sdca_parse_function(struct device *dev, struct sdw_slave *sdw,
+ struct sdca_function_desc *desc,
+ struct sdca_function_data *function);
+
+const char *sdca_find_terminal_name(enum sdca_terminal_type type);
+
+struct sdca_control *sdca_selector_find_control(struct device *dev,
+ struct sdca_entity *entity,
+ const int sel);
+struct sdca_control_range *sdca_control_find_range(struct device *dev,
+ struct sdca_entity *entity,
+ struct sdca_control *control,
+ int cols, int rows);
+struct sdca_control_range *sdca_selector_find_range(struct device *dev,
+ struct sdca_entity *entity,
+ int sel, int cols, int rows);
+struct sdca_cluster *sdca_id_find_cluster(struct device *dev,
+ struct sdca_function_data *function,
+ const int id);
+
+#endif
diff --git a/include/sound/sdca_hid.h b/include/sound/sdca_hid.h
new file mode 100644
index 000000000000..18bebbe428c9
--- /dev/null
+++ b/include/sound/sdca_hid.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ */
+
+#ifndef __SDCA_HID_H__
+#define __SDCA_HID_H__
+
+struct device;
+struct sdw_slave;
+
+struct sdca_entity;
+struct sdca_interrupt;
+
+#if IS_ENABLED(CONFIG_SND_SOC_SDCA_HID)
+
+int sdca_add_hid_device(struct device *dev, struct sdw_slave *sdw,
+ struct sdca_entity *entity);
+int sdca_hid_process_report(struct sdca_interrupt *interrupt);
+
+#else
+
+static inline int sdca_add_hid_device(struct device *dev, struct sdw_slave *sdw,
+ struct sdca_entity *entity)
+{
+ return 0;
+}
+
+static inline int sdca_hid_process_report(struct sdca_interrupt *interrupt)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __SDCA_HID_H__ */
diff --git a/include/sound/sdca_interrupts.h b/include/sound/sdca_interrupts.h
new file mode 100644
index 000000000000..8f13417d129a
--- /dev/null
+++ b/include/sound/sdca_interrupts.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_INTERRUPTS_H__
+#define __SDCA_INTERRUPTS_H__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+struct device;
+struct snd_soc_component;
+struct sdca_function_data;
+
+#define SDCA_MAX_INTERRUPTS 31 /* the last bit is reserved for future extensions */
+
+/**
+ * struct sdca_interrupt - contains information about a single SDCA interrupt
+ * @name: The name of the interrupt.
+ * @dev: Pointer to the Function device.
+ * @device_regmap: Pointer to the IRQ regmap.
+ * @function_regmap: Pointer to the SDCA Function regmap.
+ * @component: Pointer to the ASoC component that owns the interrupt.
+ * @function: Pointer to the Function that the interrupt is associated with.
+ * @entity: Pointer to the Entity that the interrupt is associated with.
+ * @control: Pointer to the Control that the interrupt is associated with.
+ * @priv: Pointer to private data for use by the handler.
+ * @irq: IRQ number allocated to this interrupt, also used internally to track
+ * the IRQ being assigned.
+ */
+struct sdca_interrupt {
+ const char *name;
+
+ struct device *dev;
+ struct regmap *device_regmap;
+ struct regmap *function_regmap;
+ struct snd_soc_component *component;
+ struct sdca_function_data *function;
+ struct sdca_entity *entity;
+ struct sdca_control *control;
+
+ void *priv;
+
+ int irq;
+};
+
+/**
+ * struct sdca_interrupt_info - contains top-level SDCA interrupt information
+ * @irq_chip: regmap irq chip structure.
+ * @irq_data: regmap irq chip data structure.
+ * @irqs: Array of data for each individual IRQ.
+ * @irq_lock: Protects access to the list of sdca_interrupt structures.
+ */
+struct sdca_interrupt_info {
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct sdca_interrupt irqs[SDCA_MAX_INTERRUPTS];
+
+ struct mutex irq_lock; /* Protect irqs list across functions */
+};
+
+int sdca_irq_request(struct device *dev, struct sdca_interrupt_info *interrupt_info,
+ int sdca_irq, const char *name, irq_handler_t handler,
+ void *data);
+int sdca_irq_data_populate(struct device *dev, struct regmap *function_regmap,
+ struct snd_soc_component *component,
+ struct sdca_function_data *function,
+ struct sdca_entity *entity,
+ struct sdca_control *control,
+ struct sdca_interrupt *interrupt);
+int sdca_irq_populate_early(struct device *dev, struct regmap *function_regmap,
+ struct sdca_function_data *function,
+ struct sdca_interrupt_info *info);
+int sdca_irq_populate(struct sdca_function_data *function,
+ struct snd_soc_component *component,
+ struct sdca_interrupt_info *info);
+struct sdca_interrupt_info *sdca_irq_allocate(struct device *dev,
+ struct regmap *regmap, int irq);
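+
+/*
+ * Bring-up sketch (an assumed ordering, not mandated by these headers):
+ *
+ *   struct sdca_interrupt_info *info = sdca_irq_allocate(dev, device_regmap, irq);
+ *
+ *   sdca_irq_populate_early(dev, function_regmap, function, info);
+ *   ...then, once the ASoC component has probed...
+ *   sdca_irq_populate(function, component, info);
+ */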
+
+#endif
diff --git a/include/sound/sdca_regmap.h b/include/sound/sdca_regmap.h
new file mode 100644
index 000000000000..792540a530fc
--- /dev/null
+++ b/include/sound/sdca_regmap.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_REGMAP_H__
+#define __SDCA_REGMAP_H__
+
+struct device;
+struct sdca_function_data;
+struct regmap;
+struct reg_default;
+
+bool sdca_regmap_readable(struct sdca_function_data *function, unsigned int reg);
+bool sdca_regmap_writeable(struct sdca_function_data *function, unsigned int reg);
+bool sdca_regmap_volatile(struct sdca_function_data *function, unsigned int reg);
+bool sdca_regmap_deferrable(struct sdca_function_data *function, unsigned int reg);
+int sdca_regmap_mbq_size(struct sdca_function_data *function, unsigned int reg);
+
+int sdca_regmap_count_constants(struct device *dev, struct sdca_function_data *function);
+int sdca_regmap_populate_constants(struct device *dev, struct sdca_function_data *function,
+ struct reg_default *consts);
+
+int sdca_regmap_write_defaults(struct device *dev, struct regmap *regmap,
+ struct sdca_function_data *function);
+int sdca_regmap_write_init(struct device *dev, struct regmap *regmap,
+ struct sdca_function_data *function);
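+
+/*
+ * Setup sketch (assumes "function" was populated by sdca_parse_function()):
+ *
+ *   int n = sdca_regmap_count_constants(dev, function);
+ *   struct reg_default *consts = devm_kcalloc(dev, n, sizeof(*consts), GFP_KERNEL);
+ *
+ *   sdca_regmap_populate_constants(dev, function, consts);
+ *   sdca_regmap_write_defaults(dev, regmap, function);
+ *   sdca_regmap_write_init(dev, regmap, function);
+ */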
+
+#endif // __SDCA_REGMAP_H__
diff --git a/include/sound/sdca_ump.h b/include/sound/sdca_ump.h
new file mode 100644
index 000000000000..f54f9d48c64c
--- /dev/null
+++ b/include/sound/sdca_ump.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The MIPI SDCA specification is available for public downloads at
+ * https://www.mipi.org/mipi-sdca-v1-0-download
+ *
+ * Copyright (C) 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDCA_UMP_H__
+#define __SDCA_UMP_H__
+
+struct regmap;
+struct sdca_control;
+struct sdca_entity;
+struct sdca_function_data;
+struct snd_soc_component;
+struct delayed_work;
+
+int sdca_ump_get_owner_host(struct device *dev,
+ struct regmap *function_regmap,
+ struct sdca_function_data *function,
+ struct sdca_entity *entity,
+ struct sdca_control *control);
+int sdca_ump_set_owner_device(struct device *dev,
+ struct regmap *function_regmap,
+ struct sdca_function_data *function,
+ struct sdca_entity *entity,
+ struct sdca_control *control);
+int sdca_ump_read_message(struct device *dev,
+ struct regmap *device_regmap,
+ struct regmap *function_regmap,
+ struct sdca_function_data *function,
+ struct sdca_entity *entity,
+ unsigned int offset_sel, unsigned int length_sel,
+ void **msg);
+int sdca_ump_write_message(struct device *dev,
+ struct regmap *device_regmap,
+ struct regmap *function_regmap,
+ struct sdca_function_data *function,
+ struct sdca_entity *entity,
+ unsigned int offset_sel, unsigned int msg_offset,
+ unsigned int length_sel,
+ void *msg, int msg_len);
+
+void sdca_ump_cancel_timeout(struct delayed_work *work);
+void sdca_ump_schedule_timeout(struct delayed_work *work,
+ unsigned int timeout_us);
+
+#endif // __SDCA_UMP_H__
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index c8621671fa70..00c32eed2124 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -86,10 +86,6 @@ static inline size_t snd_seq_event_packet_size(struct snd_seq_event *ev)
/* interface for OSS emulation */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo);
-/* port callback routines */
-void snd_port_init_callback(struct snd_seq_port_callback *p);
-struct snd_seq_port_callback *snd_port_alloc_callback(void);
-
/* port attach/detach */
int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp,
int cap, int type, int midi_channels, int midi_voices, char *portname);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index ad67957b7b48..69a9c9c4d0e9 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -89,6 +89,13 @@ struct simple_util_priv {
#define simple_props_to_dai_codec(props, i) ((props)->codec_dai + i)
#define simple_props_to_codec_conf(props, i) ((props)->codec_conf + i)
+/*
+ * Has the same effect as simple_priv_to_props(). Preferred over
+ * simple_priv_to_props() when dealing with PCM runtime data, as
+ * the ID stored in rtd->id may not be a valid array index.
+ */
+#define runtime_simple_priv_to_props(priv, rtd) \
+ ((priv)->dai_props + ((rtd)->dai_link - (priv)->dai_link))
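+
+/*
+ * Usage sketch, assuming "priv" is the driver's simple_util_priv and "rtd"
+ * is the runtime handed to a PCM callback:
+ *
+ *   struct simple_dai_props *props = runtime_simple_priv_to_props(priv, rtd);
+ */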
+
#define for_each_prop_dlc_cpus(props, i, cpu) \
for ((i) = 0; \
((i) < (props)->num.cpus) && \
@@ -135,14 +142,14 @@ int simple_util_parse_daifmt(struct device *dev,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
-int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np,
+int simple_util_parse_tdm_width_map(struct simple_util_priv *priv, struct device_node *np,
struct simple_util_dai *dai);
__printf(3, 4)
-int simple_util_set_dailink_name(struct device *dev,
+int simple_util_set_dailink_name(struct simple_util_priv *priv,
struct snd_soc_dai_link *dai_link,
const char *fmt, ...);
-int simple_util_parse_card_name(struct snd_soc_card *card,
+int simple_util_parse_card_name(struct simple_util_priv *priv,
char *prefix);
int simple_util_parse_clk(struct device *dev,
@@ -174,6 +181,8 @@ void simple_util_parse_convert(struct device_node *np, char *prefix,
struct simple_util_data *data);
bool simple_util_is_convert_required(const struct simple_util_data *data);
+int simple_util_get_sample_fmt(struct simple_util_data *data);
+
int simple_util_parse_routing(struct snd_soc_card *card,
char *prefix);
int simple_util_parse_widgets(struct snd_soc_card *card,
@@ -192,11 +201,15 @@ void simple_util_remove(struct platform_device *pdev);
int graph_util_card_probe(struct snd_soc_card *card);
int graph_util_is_ports0(struct device_node *port);
-int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+int graph_util_parse_dai(struct simple_util_priv *priv, struct device_node *ep,
struct snd_soc_dai_link_component *dlc, int *is_single_link);
-int graph_util_parse_link_direction(struct device_node *np,
+void graph_util_parse_link_direction(struct device_node *np,
bool *is_playback_only, bool *is_capture_only);
+void graph_util_parse_trigger_order(struct simple_util_priv *priv,
+ struct device_node *np,
+ enum snd_soc_trigger_order *trigger_start,
+ enum snd_soc_trigger_order *trigger_stop);
#ifdef DEBUG
static inline void simple_util_debug_dai(struct simple_util_priv *priv,
@@ -258,9 +271,13 @@ static inline void simple_util_debug_info(struct simple_util_priv *priv)
simple_util_debug_dai(priv, "codec", dai);
if (link->name)
- dev_dbg(dev, "dai name = %s\n", link->name);
+ dev_dbg(dev, "link name = %s\n", link->name);
if (link->dai_fmt)
- dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
+ dev_dbg(dev, "link format = %04x\n", link->dai_fmt);
+ if (link->playback_only)
+ dev_dbg(dev, "link has playback_only");
+ if (link->capture_only)
+ dev_dbg(dev, "link has capture_only");
if (props->adata.convert_rate)
dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate);
if (props->adata.convert_channels)
diff --git a/include/sound/snd_wavefront.h b/include/sound/snd_wavefront.h
index 55053557c898..30f508a56766 100644
--- a/include/sound/snd_wavefront.h
+++ b/include/sound/snd_wavefront.h
@@ -110,12 +110,8 @@ struct _snd_wavefront_card {
};
extern void snd_wavefront_internal_interrupt (snd_wavefront_card_t *card);
-extern int snd_wavefront_detect_irq (snd_wavefront_t *dev) ;
-extern int snd_wavefront_check_irq (snd_wavefront_t *dev, int irq);
-extern int snd_wavefront_restart (snd_wavefront_t *dev);
extern int snd_wavefront_start (snd_wavefront_t *dev);
extern int snd_wavefront_detect (snd_wavefront_card_t *card);
-extern int snd_wavefront_config_midi (snd_wavefront_t *dev) ;
extern int snd_wavefront_cmd (snd_wavefront_t *, int, unsigned char *,
unsigned char *);
@@ -137,8 +133,4 @@ extern int snd_wavefront_fx_ioctl (struct snd_hwdep *,
extern int snd_wavefront_fx_open (struct snd_hwdep *, struct file *);
extern int snd_wavefront_fx_release (struct snd_hwdep *, struct file *);
-/* prefix in all snd_printk() delivered messages */
-
-#define LOGNAME "WaveFront: "
-
#endif /* __SOUND_SND_WAVEFRONT_H__ */
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 4843b57798f6..382029724e85 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -33,6 +33,8 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_nvl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
@@ -44,6 +46,8 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_nvl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 38ccec4e3fcd..90d73b9bddab 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>
+#include <sound/soc.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -62,7 +63,6 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @platform: string used for HDAudio codec support
* @codec_mask: used for HDAudio support
* @dmic_num: number of SoC- or chipset-attached PDM digital microphones
- * @common_hdmi_codec_drv: use commom HDAudio HDMI codec driver
* @link_mask: SoundWire links enabled on the board
* @links: array of SoundWire link _ADR descriptors, null terminated
* @i2s_link_mask: I2S/TDM links enabled on the board
@@ -70,15 +70,16 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
* @subsystem_vendor: optional PCI SSID vendor value
* @subsystem_device: optional PCI SSID device value
+ * @subsystem_rev: optional PCI SSID revision value
* @subsystem_id_set: true if a value has been written to
* subsystem_vendor and subsystem_device.
+ * @bt_link_mask: BT offload link enabled on the board
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
- bool common_hdmi_codec_drv;
u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
u32 i2s_link_mask;
@@ -86,7 +87,9 @@ struct snd_soc_acpi_mach_params {
struct snd_soc_dai_driver *dai_drivers;
unsigned short subsystem_vendor;
unsigned short subsystem_device;
+ unsigned short subsystem_rev;
bool subsystem_id_set;
+ u32 bt_link_mask;
};
/**
@@ -111,8 +114,8 @@ struct snd_soc_acpi_endpoint {
* @name_prefix: string used for codec controls
*/
struct snd_soc_acpi_adr_device {
- const u64 adr;
- const u8 num_endpoints;
+ u64 adr;
+ u8 num_endpoints;
const struct snd_soc_acpi_endpoint *endpoints;
const char *name_prefix;
};
@@ -128,8 +131,8 @@ struct snd_soc_acpi_adr_device {
*/
struct snd_soc_acpi_link_adr {
- const u32 mask;
- const u32 num_adr;
+ u32 mask;
+ u32 num_adr;
const struct snd_soc_acpi_adr_device *adr_d;
};
@@ -183,10 +186,23 @@ struct snd_soc_acpi_link_adr {
* ACPI ID alone is not sufficient, wrong or misleading
* @quirk_data: data used to uniquely identify a machine, usually a list of
* audio codecs whose presence is checked with ACPI
+ * @machine_check: pointer to quirk function. The functionality is similar to
+ * the use of @machine_quirk, except that the return value is a boolean: the intent
+ * is to skip a machine if the additional hardware/firmware verification invalidates
+ * the initial selection in the snd_soc_acpi_mach table.
* @pdata: intended for platform data or machine specific-ops. This structure
* is not constant since this field may be updated at run-time
* @sof_tplg_filename: Sound Open Firmware topology file name, if enabled
* @tplg_quirk_mask: quirks to select different topology files dynamically
+ * @get_function_tplg_files: Optional callback; if specified, then instead of
+ * the single sof_tplg_filename the callback returns the list of function topology
+ * files to be loaded.
+ * Return value: the number of files, or a negative errno. 0 means that the single
+ * topology file should be used and no function topology split is possible on the machine.
+ * @card: pointer to the card
+ * @mach: pointer to the machine driver descriptor
+ * @prefix: prefix of the topology file name, typically the path
+ * @tplg_files: pointer to the array of topology file names
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
@@ -201,10 +217,14 @@ struct snd_soc_acpi_mach {
const char *board;
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
+ bool (*machine_check)(void *arg);
void *pdata;
struct snd_soc_acpi_mach_params mach_params;
const char *sof_tplg_filename;
const u32 tplg_quirk_mask;
+ int (*get_function_tplg_files)(struct snd_soc_card *card,
+ const struct snd_soc_acpi_mach *mach,
+ const char *prefix, const char ***tplg_files);
};
#define SND_SOC_ACPI_MAX_CODECS 3
@@ -231,7 +251,6 @@ static inline bool snd_soc_acpi_sof_parent(struct device *dev)
bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
const struct snd_soc_acpi_link_adr *link,
- struct sdw_extended_slave_id *ids,
- int num_slaves);
+ struct sdw_peripherals *peripherals);
#endif
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index 1f4c39922d82..ecc02e955279 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -30,8 +30,6 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
-struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
- const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index ceca69b46a82..d78cda866888 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -47,7 +47,7 @@ struct snd_compress_ops {
struct snd_compr_stream *stream, int cmd);
int (*pointer)(struct snd_soc_component *component,
struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int (*copy)(struct snd_soc_component *component,
struct snd_compr_stream *stream, char __user *buf,
size_t count);
@@ -206,7 +206,6 @@ struct snd_soc_component_driver {
struct snd_soc_component {
const char *name;
- int id;
const char *name_prefix;
struct device *dev;
struct snd_soc_card *card;
@@ -262,89 +261,18 @@ struct snd_soc_component {
list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list)
/**
- * snd_soc_dapm_to_component() - Casts a DAPM context to the component it is
- * embedded in
- * @dapm: The DAPM context to cast to the component
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a component (e.g. in a component driver). Otherwise the behavior is
- * undefined.
- */
-static inline struct snd_soc_component *snd_soc_dapm_to_component(
- struct snd_soc_dapm_context *dapm)
-{
- return container_of(dapm, struct snd_soc_component, dapm);
-}
-
-/**
- * snd_soc_component_get_dapm() - Returns the DAPM context associated with a
+ * snd_soc_component_to_dapm() - Returns the DAPM context associated with a
* component
* @component: The component for which to get the DAPM context
*/
-static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
+static inline struct snd_soc_dapm_context *snd_soc_component_to_dapm(
struct snd_soc_component *component)
{
return &component->dapm;
}
-/**
- * snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level
- * @component: The COMPONENT for which to initialize the DAPM bias level
- * @level: The DAPM level to initialize to
- *
- * Initializes the COMPONENT DAPM bias level. See snd_soc_dapm_init_bias_level()
- */
-static inline void
-snd_soc_component_init_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
-{
- snd_soc_dapm_init_bias_level(
- snd_soc_component_get_dapm(component), level);
-}
-
-/**
- * snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level
- * @component: The COMPONENT for which to get the DAPM bias level
- *
- * Returns: The current DAPM bias level of the COMPONENT.
- */
-static inline enum snd_soc_bias_level
-snd_soc_component_get_bias_level(struct snd_soc_component *component)
-{
- return snd_soc_dapm_get_bias_level(
- snd_soc_component_get_dapm(component));
-}
-
-/**
- * snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level
- * @component: The COMPONENT for which to set the level
- * @level: The level to set to
- *
- * Forces the COMPONENT bias level to a specific state. See
- * snd_soc_dapm_force_bias_level().
- */
-static inline int
-snd_soc_component_force_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
-{
- return snd_soc_dapm_force_bias_level(
- snd_soc_component_get_dapm(component),
- level);
-}
-
-/**
- * snd_soc_dapm_kcontrol_component() - Returns the component associated to a
- * kcontrol
- * @kcontrol: The kcontrol
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a COMPONENT (e.g. in a COMPONENT driver). Otherwise the behavior is undefined
- */
-static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component(
- struct snd_kcontrol *kcontrol)
-{
- return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol));
-}
+/* FIXME: transitional alias; remove once all users call snd_soc_component_to_dapm() */
+#define snd_soc_component_get_dapm snd_soc_component_to_dapm
/**
* snd_soc_component_cache_sync() - Sync the register cache with the hardware
@@ -462,6 +390,8 @@ int snd_soc_component_force_enable_pin_unlocked(
const char *pin);
/* component controls */
+struct snd_kcontrol *snd_soc_component_get_kcontrol(struct snd_soc_component *component,
+ const char * const ctl);
int snd_soc_component_notify_control(struct snd_soc_component *component,
const char * const ctl);
@@ -497,7 +427,7 @@ int snd_soc_component_compr_get_codec_caps(struct snd_compr_stream *cstream,
struct snd_compr_codec_caps *codec);
int snd_soc_component_compr_ack(struct snd_compr_stream *cstream, size_t bytes);
int snd_soc_component_compr_pointer(struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int snd_soc_component_compr_copy(struct snd_compr_stream *cstream,
char __user *buf, size_t count);
int snd_soc_component_compr_set_metadata(struct snd_compr_stream *cstream,
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index adcd8719d343..224396927aef 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -118,12 +118,6 @@ struct snd_compr_stream;
#define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */
#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */
-/* previous definitions kept for backwards-compatibility, do not use in new contributions */
-#define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP
-#define SND_SOC_DAIFMT_CBS_CFM SND_SOC_DAIFMT_CBC_CFP
-#define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC
-#define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC
-
/* when passed to set_fmt directly indicate if the device is provider or consumer */
#define SND_SOC_DAIFMT_BP_FP SND_SOC_DAIFMT_CBP_CFP
#define SND_SOC_DAIFMT_BC_FP SND_SOC_DAIFMT_CBC_CFP
@@ -180,29 +174,32 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
/* Digital Audio interface formatting */
-int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd);
-u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority);
+int snd_soc_dai_get_fmt_max_priority(const struct snd_soc_pcm_runtime *rtd);
+u64 snd_soc_dai_get_fmt(const struct snd_soc_dai *dai, int priority);
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width);
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot);
+ unsigned int tx_num, const unsigned int *tx_slot,
+ unsigned int rx_num, const unsigned int *rx_slot);
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
+int snd_soc_dai_prepare(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream);
+
/* Digital Audio Interface mute */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
+int snd_soc_dai_mute_is_ctrled_at_trigger(struct snd_soc_dai *dai);
-
-int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+int snd_soc_dai_get_channel_map(const struct snd_soc_dai *dai,
unsigned int *tx_num, unsigned int *tx_slot,
unsigned int *rx_num, unsigned int *rx_slot);
-int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
+int snd_soc_dai_is_dummy(const struct snd_soc_dai *dai);
int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream,
@@ -216,10 +213,8 @@ void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream, int rollback);
void snd_soc_dai_suspend(struct snd_soc_dai *dai);
void snd_soc_dai_resume(struct snd_soc_dai *dai);
-int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
- struct snd_soc_pcm_runtime *rtd, int num);
-bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream);
-void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link);
+int snd_soc_dai_compress_new(struct snd_soc_dai *dai, struct snd_soc_pcm_runtime *rtd);
+bool snd_soc_dai_stream_valid(const struct snd_soc_dai *dai, int stream);
void snd_soc_dai_action(struct snd_soc_dai *dai,
int stream, int action);
static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
@@ -232,7 +227,7 @@ static inline void snd_soc_dai_deactivate(struct snd_soc_dai *dai,
{
snd_soc_dai_action(dai, stream, -1);
}
-int snd_soc_dai_active(struct snd_soc_dai *dai);
+int snd_soc_dai_active(const struct snd_soc_dai *dai);
int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order);
int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order);
@@ -240,8 +235,6 @@ int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int rollback);
-int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
- int cmd);
void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
@@ -263,7 +256,7 @@ int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
size_t bytes);
int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
@@ -271,14 +264,14 @@ int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
-const char *snd_soc_dai_name_get(struct snd_soc_dai *dai);
+const char *snd_soc_dai_name_get(const struct snd_soc_dai *dai);
struct snd_soc_dai_ops {
/* DAI driver callbacks */
int (*probe)(struct snd_soc_dai *dai);
int (*remove)(struct snd_soc_dai *dai);
/* compress dai */
- int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
+ int (*compress_new)(struct snd_soc_pcm_runtime *rtd);
/* Optional Callback used at pcm creation*/
int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai);
@@ -305,9 +298,9 @@ struct snd_soc_dai_ops {
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width);
int (*set_channel_map)(struct snd_soc_dai *dai,
- unsigned int tx_num, unsigned int *tx_slot,
- unsigned int rx_num, unsigned int *rx_slot);
- int (*get_channel_map)(struct snd_soc_dai *dai,
+ unsigned int tx_num, const unsigned int *tx_slot,
+ unsigned int rx_num, const unsigned int *rx_slot);
+ int (*get_channel_map)(const struct snd_soc_dai *dai,
unsigned int *tx_num, unsigned int *tx_slot,
unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
@@ -345,8 +338,7 @@ struct snd_soc_dai_ops {
*/
int (*trigger)(struct snd_pcm_substream *, int,
struct snd_soc_dai *);
- int (*bespoke_trigger)(struct snd_pcm_substream *, int,
- struct snd_soc_dai *);
+
/*
* For hardware based FIFO caused delay reporting.
* Optional.
@@ -361,7 +353,7 @@ struct snd_soc_dai_ops {
* see
* snd_soc_dai_get_fmt()
*/
- u64 *auto_selectable_formats;
+ const u64 *auto_selectable_formats;
int num_auto_selectable_formats;
/* probe ordering - for components with runtime dependencies */
@@ -391,8 +383,9 @@ struct snd_soc_cdai_ops {
struct snd_compr_metadata *, struct snd_soc_dai *);
int (*trigger)(struct snd_compr_stream *, int,
struct snd_soc_dai *);
- int (*pointer)(struct snd_compr_stream *,
- struct snd_compr_tstamp *, struct snd_soc_dai *);
+ int (*pointer)(struct snd_compr_stream *stream,
+ struct snd_compr_tstamp64 *tstamp,
+ struct snd_soc_dai *dai);
int (*ack)(struct snd_compr_stream *, size_t,
struct snd_soc_dai *);
};
@@ -413,7 +406,7 @@ struct snd_soc_dai_driver {
unsigned int id;
unsigned int base;
struct snd_soc_dobj dobj;
- struct of_phandle_args *dai_args;
+ const struct of_phandle_args *dai_args;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -454,9 +447,9 @@ struct snd_soc_dai {
struct snd_soc_dai_stream stream[SNDRV_PCM_STREAM_LAST + 1];
/* Symmetry data - only valid if symmetry is being enforced */
- unsigned int rate;
- unsigned int channels;
- unsigned int sample_bits;
+ unsigned int symmetric_rate;
+ unsigned int symmetric_channels;
+ unsigned int symmetric_sample_bits;
/* parent platform/codec */
struct snd_soc_component *component;
@@ -471,9 +464,12 @@ struct snd_soc_dai {
/* bit field */
unsigned int probed:1;
+
+ /* DAI private data */
+ void *priv;
};
-static inline struct snd_soc_pcm_stream *
+static inline const struct snd_soc_pcm_stream *
snd_soc_dai_get_pcm_stream(const struct snd_soc_dai *dai, int stream)
{
return (stream == SNDRV_PCM_STREAM_PLAYBACK) ?
@@ -518,7 +514,8 @@ static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai, void *play
snd_soc_dai_dma_data_set_capture(dai, capture);
}
-static inline unsigned int snd_soc_dai_tdm_mask_get(struct snd_soc_dai *dai, int stream)
+static inline unsigned int snd_soc_dai_tdm_mask_get(const struct snd_soc_dai *dai,
+ int stream)
{
return dai->stream[stream].tdm_mask;
}
@@ -529,7 +526,8 @@ static inline void snd_soc_dai_tdm_mask_set(struct snd_soc_dai *dai, int stream,
dai->stream[stream].tdm_mask = tdm_mask;
}
-static inline unsigned int snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
+static inline unsigned int snd_soc_dai_stream_active(const struct snd_soc_dai *dai,
+ int stream)
{
/* see snd_soc_dai_action() for setup */
return dai->stream[stream].active;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 667ecd4daa68..75941324886b 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,9 +16,10 @@
#include <sound/asoc.h>
struct device;
+struct regulator;
+struct soc_enum;
struct snd_pcm_substream;
struct snd_soc_pcm_runtime;
-struct soc_enum;
/* widget has no PM register bit */
#define SND_SOC_NOPM -1
@@ -294,7 +295,7 @@ struct soc_enum;
#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
(struct snd_soc_dapm_widget) { \
.id = snd_soc_dapm_clock_supply, .name = wname, \
- .reg = SND_SOC_NOPM, .event = dapm_clock_event, \
+ .reg = SND_SOC_NOPM, .event = snd_soc_dapm_clock_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* generic widgets */
@@ -311,7 +312,7 @@ struct soc_enum;
#define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags) \
(struct snd_soc_dapm_widget) { \
.id = snd_soc_dapm_regulator_supply, .name = wname, \
- .reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
+ .reg = SND_SOC_NOPM, .shift = wdelay, .event = snd_soc_dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
@@ -319,62 +320,51 @@ struct soc_enum;
.id = snd_soc_dapm_pinctrl, .name = wname, \
.priv = (&(struct snd_soc_dapm_pinctrl_priv) \
{ .active_state = active, .sleep_state = sleep,}), \
- .reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
+ .reg = SND_SOC_NOPM, .event = snd_soc_dapm_pinctrl_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* dapm kcontrol types */
#define SOC_DAPM_DOUBLE(xname, reg, lshift, rshift, max, invert) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_volsw, \
- .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_DOUBLE_VALUE(reg, lshift, rshift, max, invert, 0) }
+ SOC_DOUBLE_EXT(xname, reg, lshift, rshift, max, invert, \
+ snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw)
#define SOC_DAPM_DOUBLE_R(xname, lreg, rreg, shift, max, invert) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_volsw, \
- .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_DOUBLE_R_VALUE(lreg, rreg, shift, max, invert) }
+ SOC_DOUBLE_R_EXT(xname, lreg, rreg, shift, max, invert, \
+ snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw)
#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_volsw, \
- .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
-#define SOC_DAPM_SINGLE_AUTODISABLE(xname, reg, shift, max, invert) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_volsw, \
- .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
+ SOC_SINGLE_EXT(xname, reg, shift, max, invert, \
+ snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw)
#define SOC_DAPM_SINGLE_VIRT(xname, max) \
SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0)
+#define SOC_DAPM_DOUBLE_R_TLV(xname, lreg, rreg, shift, max, invert, tlv_array) \
+ SOC_DOUBLE_R_EXT_TLV(xname, lreg, rreg, shift, max, invert, \
+ snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw, \
+ tlv_array)
#define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
+ SOC_SINGLE_EXT_TLV(xname, reg, shift, max, invert, \
+ snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw, \
+ tlv_array)
+#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
+ SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
+#define SOC_DAPM_ENUM(xname, xenum) \
+ SOC_ENUM_EXT(xname, xenum, snd_soc_dapm_get_enum_double, \
+ snd_soc_dapm_put_enum_double)
+#define SOC_DAPM_ENUM_EXT(xname, xenum, xget, xput) \
+ SOC_ENUM_EXT(xname, xenum, xget, xput)
+
+#define SOC_DAPM_SINGLE_AUTODISABLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
- .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
- .tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, 0, max, invert, 1) }
#define SOC_DAPM_SINGLE_TLV_AUTODISABLE(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
-#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
- SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
-#define SOC_DAPM_ENUM(xname, xenum) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_enum_double, \
- .get = snd_soc_dapm_get_enum_double, \
- .put = snd_soc_dapm_put_enum_double, \
- .private_value = (unsigned long)&xenum }
-#define SOC_DAPM_ENUM_EXT(xname, xenum, xget, xput) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_enum_double, \
- .get = xget, \
- .put = xput, \
- .private_value = (unsigned long)&xenum }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, 0, max, invert, 1) }
#define SOC_DAPM_PIN_SWITCH(xname) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname " Switch", \
.info = snd_soc_dapm_info_pin_switch, \
@@ -410,17 +400,6 @@ struct soc_enum;
/* regulator widget flags */
#define SND_SOC_DAPM_REGULATOR_BYPASS 0x1 /* bypass when disabled */
-struct snd_soc_dapm_widget;
-enum snd_soc_dapm_type;
-struct snd_soc_dapm_path;
-struct snd_soc_dapm_pin;
-struct snd_soc_dapm_route;
-struct snd_soc_dapm_context;
-struct regulator;
-struct snd_soc_dapm_widget_list;
-struct snd_soc_dapm_update;
-enum snd_soc_dapm_direction;
-
/*
* Bias levels
*
@@ -439,96 +418,6 @@ enum snd_soc_bias_level {
SND_SOC_BIAS_ON = 3,
};
-int dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
-int dapm_clock_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
-int dapm_pinctrl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
-
-/* dapm controls */
-int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_info_pin_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uncontrol);
-int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uncontrol);
-int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget, int num);
-struct snd_soc_dapm_widget *snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget);
-struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget);
-int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai);
-void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
-int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
-void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
-
-int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
-int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
-
-/* dapm path setup */
-int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
-void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
-void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm,
- struct snd_soc_card *card, struct snd_soc_component *component);
-int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_route *route, int num);
-int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_route *route, int num);
-int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_route *route, int num);
-void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
-
-/* dapm events */
-void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event);
-void snd_soc_dapm_stream_stop(struct snd_soc_pcm_runtime *rtd, int stream);
-void snd_soc_dapm_shutdown(struct snd_soc_card *card);
-
-/* external DAPM widget events */
-int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
- struct snd_kcontrol *kcontrol, int connect, struct snd_soc_dapm_update *update);
-int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
- struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
- struct snd_soc_dapm_update *update);
-
-/* dapm sys fs - used by the core */
-extern struct attribute *soc_dapm_dev_attrs[];
-void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm, struct dentry *parent);
-
-/* dapm audio pin control and status */
-int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
-int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm);
-int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
-int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin);
-unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
-
-/* Mostly internal - should not normally be used */
-void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
-
-/* dapm path query */
-int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
- struct snd_soc_dapm_widget_list **list,
- bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction));
-void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
-
-struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(struct snd_kcontrol *kcontrol);
-struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(struct snd_kcontrol *kcontrol);
-
-int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
-
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
@@ -612,7 +501,6 @@ struct snd_soc_dapm_path {
/* status */
u32 connect:1; /* source and sink widgets are connected */
u32 walking:1; /* path is in the process of being walked */
- u32 weak:1; /* path ignored for power management */
u32 is_supply:1; /* At least one of the connected widgets is a supply */
int (*connected)(struct snd_soc_dapm_widget *source,
@@ -695,11 +583,9 @@ struct snd_soc_dapm_update {
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- /* bit field */
- unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
- unsigned int suspend_bias_off:1; /* Use BIAS_OFF in suspend if the DAPM is idle */
+ bool idle_bias; /* Use BIAS_OFF instead of STANDBY when false */
- struct device *dev; /* from parent - for debug */
+ struct device *dev; /* from parent - for debug */ /* REMOVE ME */
struct snd_soc_component *component; /* parent component */
struct snd_soc_card *card; /* parent card */
@@ -721,11 +607,6 @@ struct snd_soc_dapm_widget_list {
struct snd_soc_dapm_widget *widgets[] __counted_by(num_widgets);
};
-#define for_each_dapm_widgets(list, i, widget) \
- for ((i) = 0; \
- (i) < list->num_widgets && (widget = list->widgets[i]); \
- (i)++)
-
struct snd_soc_dapm_stats {
int power_checks;
int path_checks;
@@ -737,37 +618,6 @@ struct snd_soc_dapm_pinctrl_priv {
const char *sleep_state;
};
-/**
- * snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
- * @dapm: The DAPM context to initialize
- * @level: The DAPM level to initialize to
- *
- * This function only sets the driver internal state of the DAPM level and will
- * not modify the state of the device. Hence it should not be used during normal
- * operation, but only to synchronize the internal state to the device state.
- * E.g. during driver probe to set the DAPM level to the one corresponding with
- * the power-on reset state of the device.
- *
- * To change the DAPM state of the device use snd_soc_dapm_set_bias_level().
- */
-static inline void snd_soc_dapm_init_bias_level(
- struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
-{
- dapm->bias_level = level;
-}
-
-/**
- * snd_soc_dapm_get_bias_level() - Get current DAPM bias level
- * @dapm: The context for which to get the bias level
- *
- * Returns: The current bias level of the passed DAPM context.
- */
-static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
- struct snd_soc_dapm_context *dapm)
-{
- return dapm->bias_level;
-}
-
enum snd_soc_dapm_direction {
SND_SOC_DAPM_DIR_IN,
SND_SOC_DAPM_DIR_OUT
@@ -778,6 +628,122 @@ enum snd_soc_dapm_direction {
#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+int snd_soc_dapm_regulator_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
+int snd_soc_dapm_clock_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
+int snd_soc_dapm_pinctrl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event);
+
+/* dapm controls */
+int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_info_pin_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo);
+int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_get_component_pin_switch(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_put_component_pin_switch(struct snd_kcontrol *kcontrol,
+					   struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget, unsigned int num);
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget);
+struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget);
+int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
+int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
+void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
+
+int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
+struct device *snd_soc_dapm_to_dev(struct snd_soc_dapm_context *dapm);
+struct snd_soc_card *snd_soc_dapm_to_card(struct snd_soc_dapm_context *dapm);
+struct snd_soc_component *snd_soc_dapm_to_component(struct snd_soc_dapm_context *dapm);
+
+bool snd_soc_dapm_get_idle_bias(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_set_idle_bias(struct snd_soc_dapm_context *dapm, bool on);
+
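+/*
+ * Editorial usage sketch (not part of this patch): with the old
+ * idle_bias_off bit field replaced by the bool above, a driver that
+ * previously set idle_bias_off = 1 would now call
+ *
+ *	snd_soc_dapm_set_idle_bias(dapm, false);
+ *
+ * following the "Use BIAS_OFF instead of STANDBY when false" semantics.
+ */
+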
+/* dapm path setup */
+int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_card *card, struct snd_soc_component *component);
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route, int num);
+int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
+
+/* dapm events */
+void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event);
+void snd_soc_dapm_stream_stop(struct snd_soc_pcm_runtime *rtd, int stream);
+void snd_soc_dapm_shutdown(struct snd_soc_card *card);
+
+/* external DAPM widget events */
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int connect, struct snd_soc_dapm_update *update);
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
+ struct snd_soc_dapm_update *update);
+
+/* dapm sys fs - used by the core */
+extern struct attribute *snd_soc_dapm_dev_attrs[];
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm, struct dentry *parent);
+
+/* dapm audio pin control and status */
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin);
+void snd_soc_dapm_mark_endpoints_dirty(struct snd_soc_card *card);
+
+/*
+ * Marks the specified pin as being not connected, disabling it along
+ * with any parent or child widgets. At present this is identical to
+ * snd_soc_dapm_disable_pin[_unlocked]() but in future it will be extended to do
+ * additional things such as disabling controls which only affect
+ * paths through the pin.
+ */
+#define snd_soc_dapm_nc_pin snd_soc_dapm_disable_pin
+#define snd_soc_dapm_nc_pin_unlocked snd_soc_dapm_disable_pin_unlocked
+
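+/*
+ * Editorial usage sketch (typical machine-driver code, not part of this
+ * patch; "MONOOUT" and "Headphone Jack" are hypothetical widget names):
+ *
+ *	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
+ *	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+ *	snd_soc_dapm_sync(dapm);
+ */
+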
+/* dapm path query */
+int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
+ struct snd_soc_dapm_widget_list **list,
+ bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction));
+void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
+
+struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_to_dapm(struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_to_widget(struct snd_kcontrol *kcontrol);
+struct snd_soc_component *snd_soc_dapm_kcontrol_to_component(struct snd_kcontrol *kcontrol);
+unsigned int snd_soc_dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
+
+int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
+enum snd_soc_bias_level snd_soc_dapm_get_bias_level(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_init_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
+
+/* Deprecated compatibility wrappers - remove once all users are converted to the new names */
+#define snd_soc_component_force_bias_level(c, l) snd_soc_dapm_force_bias_level(&(c)->dapm, l)
+#define snd_soc_component_get_bias_level(c) snd_soc_dapm_get_bias_level(&(c)->dapm)
+#define snd_soc_component_init_bias_level(c, l) snd_soc_dapm_init_bias_level(&(c)->dapm, l)
+#define snd_soc_dapm_kcontrol_widget snd_soc_dapm_kcontrol_to_widget
+#define snd_soc_dapm_kcontrol_dapm snd_soc_dapm_kcontrol_to_dapm
+#define dapm_kcontrol_get_value snd_soc_dapm_kcontrol_get_value
+#define snd_soc_dapm_kcontrol_component snd_soc_dapm_kcontrol_to_component
+
+#define for_each_dapm_widgets(list, i, widget) \
+ for ((i) = 0; \
+ (i) < list->num_widgets && (widget = list->widgets[i]); \
+ (i)++)
+
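+/*
+ * Editorial usage sketch for the iterator above (assumes "list" was
+ * obtained from snd_soc_dapm_dai_get_connected_widgets()):
+ *
+ *	struct snd_soc_dapm_widget *w;
+ *	int i;
+ *
+ *	for_each_dapm_widgets(list, i, w)
+ *		pr_debug("connected widget: %s\n", w->name);
+ */
+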
/**
* snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
* specified direction of a widget
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index ebd24753dd00..af24665e37e8 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -58,7 +58,6 @@ enum snd_soc_dpcm_state {
enum snd_soc_dpcm_trigger {
SND_SOC_DPCM_TRIGGER_PRE = 0,
SND_SOC_DPCM_TRIGGER_POST,
- SND_SOC_DPCM_TRIGGER_BESPOKE,
};
/*
@@ -114,24 +113,6 @@ struct snd_soc_dpcm_runtime {
#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-/* can this BE stop and free */
-int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform a hw_params() */
-int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* can this BE perform prepare */
-int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
-
-/* is the current PCM operation for this FE ? */
-int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
-
-/* is the current PCM operation for this BE ? */
-int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
- struct snd_soc_pcm_runtime *be, int stream);
/* get the substream for this BE */
struct snd_pcm_substream *
@@ -151,8 +132,8 @@ static inline void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list_);
void dpcm_path_put(struct snd_soc_dapm_widget_list **list);
-int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
- int stream, struct snd_soc_dapm_widget_list **list, int new);
+int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
+ struct snd_soc_dapm_widget_list **list_);
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream);
void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
int do_hw_free, struct snd_soc_dpcm *last);
@@ -162,8 +143,8 @@ void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream);
int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd);
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream);
-int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
- int event);
+void dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, int event);
+
bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir);
int widget_in_list(struct snd_soc_dapm_widget_list *list,
struct snd_soc_dapm_widget *widget);
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f055c6917f6c..1eedd203ac29 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -178,7 +178,7 @@ static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
/* Dynamic Object loading and removal for component drivers */
int snd_soc_tplg_component_load(struct snd_soc_component *comp,
- struct snd_soc_tplg_ops *ops, const struct firmware *fw);
+ const struct snd_soc_tplg_ops *ops, const struct firmware *fw);
int snd_soc_tplg_component_remove(struct snd_soc_component *comp);
/* Binds event handlers to dynamic widgets */
diff --git a/include/sound/soc-usb.h b/include/sound/soc-usb.h
new file mode 100644
index 000000000000..124acb1939e5
--- /dev/null
+++ b/include/sound/soc-usb.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __LINUX_SND_SOC_USB_H
+#define __LINUX_SND_SOC_USB_H
+
+#include <sound/soc.h>
+
+enum snd_soc_usb_kctl {
+ SND_SOC_USB_KCTL_CARD_ROUTE,
+ SND_SOC_USB_KCTL_PCM_ROUTE,
+};
+
+/**
+ * struct snd_soc_usb_device - SoC USB representation of a USB sound device
+ * @card_idx: sound card index associated with USB device
+ * @chip_idx: USB sound chip array index
+ * @cpcm_idx: capture PCM index array associated with USB device
+ * @ppcm_idx: playback PCM index array associated with USB device
+ * @num_capture: number of capture streams
+ * @num_playback: number of playback streams
+ * @list: list head for SoC USB devices
+ **/
+struct snd_soc_usb_device {
+ int card_idx;
+ int chip_idx;
+
+ /* PCM index arrays */
+ unsigned int *cpcm_idx; /* TODO: capture path is not tested yet */
+ unsigned int *ppcm_idx;
+ int num_capture; /* TODO: capture path is not tested yet */
+ int num_playback;
+
+ struct list_head list;
+};
+
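+/*
+ * Editorial sketch (hypothetical values, not part of this patch): a USB
+ * offload driver exposing one playback PCM on USB sound card 1, chip 0,
+ * might describe the device as
+ *
+ *	sdev->card_idx = 1;
+ *	sdev->chip_idx = 0;
+ *	sdev->ppcm_idx = ppcm_indexes;	(hypothetical index array)
+ *	sdev->num_playback = 1;
+ */
+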
+/**
+ * struct snd_soc_usb - representation of a SoC USB backend entity
+ * @list: list head for SND SOC struct list
+ * @component: reference to ASoC component
+ * @connection_status_cb: callback to notify connection events
+ * @update_offload_route_info: callback to fetch mapped ASoC card and pcm
+ * device pair. This is unrelated to the concept
+ * of DAPM route. The "route" argument carries
+ * an array used for a kcontrol output for either
+ *				the card or pcm index. "path" selects
+ *				which entry to look for (i.e. mapped card or pcm).
+ * @priv_data: driver data
+ **/
+struct snd_soc_usb {
+ struct list_head list;
+ struct snd_soc_component *component;
+ int (*connection_status_cb)(struct snd_soc_usb *usb,
+ struct snd_soc_usb_device *sdev,
+ bool connected);
+ int (*update_offload_route_info)(struct snd_soc_component *component,
+ int card, int pcm, int direction,
+ enum snd_soc_usb_kctl path,
+ long *route);
+ void *priv_data;
+};
+
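+/*
+ * Editorial usage sketch for the API below (my_connection_status_cb and
+ * priv are hypothetical):
+ *
+ *	struct snd_soc_usb *usb;
+ *
+ *	usb = snd_soc_usb_allocate_port(component, priv);
+ *	if (IS_ERR(usb))
+ *		return PTR_ERR(usb);
+ *	usb->connection_status_cb = my_connection_status_cb;
+ *	snd_soc_usb_add_port(usb);
+ */
+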
+#if IS_ENABLED(CONFIG_SND_SOC_USB)
+int snd_soc_usb_find_supported_format(int card_idx,
+ struct snd_pcm_hw_params *params,
+ int direction);
+
+int snd_soc_usb_connect(struct device *usbdev, struct snd_soc_usb_device *sdev);
+int snd_soc_usb_disconnect(struct device *usbdev, struct snd_soc_usb_device *sdev);
+void *snd_soc_usb_find_priv_data(struct device *usbdev);
+
+int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack);
+int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm,
+ int direction, enum snd_soc_usb_kctl path,
+ long *route);
+
+struct snd_soc_usb *snd_soc_usb_allocate_port(struct snd_soc_component *component,
+ void *data);
+void snd_soc_usb_free_port(struct snd_soc_usb *usb);
+void snd_soc_usb_add_port(struct snd_soc_usb *usb);
+void snd_soc_usb_remove_port(struct snd_soc_usb *usb);
+#else
+static inline int
+snd_soc_usb_find_supported_format(int card_idx, struct snd_pcm_hw_params *params,
+ int direction)
+{
+ return -EINVAL;
+}
+
+static inline int snd_soc_usb_connect(struct device *usbdev,
+ struct snd_soc_usb_device *sdev)
+{
+ return -ENODEV;
+}
+
+static inline int snd_soc_usb_disconnect(struct device *usbdev,
+ struct snd_soc_usb_device *sdev)
+{
+ return -EINVAL;
+}
+
+static inline void *snd_soc_usb_find_priv_data(struct device *usbdev)
+{
+ return NULL;
+}
+
+static inline int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component,
+ struct snd_soc_jack *jack)
+{
+ return 0;
+}
+
+static inline int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm,
+ int direction, enum snd_soc_usb_kctl path,
+ long *route)
+{
+ return -ENODEV;
+}
+
+static inline struct snd_soc_usb *
+snd_soc_usb_allocate_port(struct snd_soc_component *component, void *data)
+{
+ return ERR_PTR(-ENOMEM);
+}
+
+static inline void snd_soc_usb_free_port(struct snd_soc_usb *usb)
+{ }
+
+static inline void snd_soc_usb_add_port(struct snd_soc_usb *usb)
+{ }
+
+static inline void snd_soc_usb_remove_port(struct snd_soc_usb *usb)
+{ }
+#endif /* IS_ENABLED(CONFIG_SND_SOC_USB) */
+#endif /* __LINUX_SND_SOC_USB_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 33671437ee89..aa0fe6b80293 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -39,47 +39,35 @@ struct platform_device;
/*
* Convenience kcontrol builders
*/
-#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
- ((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, .shift = shift_left, \
- .rshift = shift_right, .max = xmax, \
- .invert = xinvert, .autodisable = xautodisable})
-#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
+#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, \
+ xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
.rshift = shift_right, .min = xmin, .max = xmax, \
.sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
-#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
- SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
-#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
- ((unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .max = xmax, .invert = xinvert})
-#define SOC_DOUBLE_R_VALUE(xlreg, xrreg, xshift, xmax, xinvert) \
- ((unsigned long)&(struct soc_mixer_control) \
- {.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .max = xmax, .invert = xinvert})
+#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmin, xmax, xinvert, xautodisable) \
+ SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, 0, xinvert, \
+ xautodisable)
+#define SOC_SINGLE_VALUE(xreg, xshift, xmin, xmax, xinvert, xautodisable) \
+ SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmin, xmax, xinvert, xautodisable)
#define SOC_DOUBLE_R_S_VALUE(xlreg, xrreg, xshift, xmin, xmax, xsign_bit, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
.max = xmax, .min = xmin, .sign_bit = xsign_bit, \
.invert = xinvert})
-#define SOC_DOUBLE_R_RANGE_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
- ((unsigned long)&(struct soc_mixer_control) \
- {.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
- .min = xmin, .max = xmax, .invert = xinvert})
+#define SOC_DOUBLE_R_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
+ SOC_DOUBLE_R_S_VALUE(xlreg, xrreg, xshift, xmin, xmax, 0, xinvert)
+
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
.put = snd_soc_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, 0, max, invert, 0) }
#define SOC_SINGLE_RANGE(xname, xreg, xshift, xmin, xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
- .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
- .put = snd_soc_put_volsw_range, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, .shift = xshift, \
- .rshift = xshift, .min = xmin, .max = xmax, \
- .invert = xinvert} }
+ .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
+ .put = snd_soc_put_volsw, \
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmin, xmax, xinvert, 0) }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -87,7 +75,7 @@ struct platform_device;
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
.put = snd_soc_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, 0, max, invert, 0) }
#define SOC_SINGLE_SX_TLV(xname, xreg, xshift, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -96,27 +84,21 @@ struct platform_device;
.info = snd_soc_info_volsw_sx, \
.get = snd_soc_get_volsw_sx,\
.put = snd_soc_put_volsw_sx, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, \
- .shift = xshift, .rshift = xshift, \
- .max = xmax, .min = xmin} }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmin, xmax, 0, 0) }
#define SOC_SINGLE_RANGE_TLV(xname, xreg, xshift, xmin, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_range, \
- .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, .shift = xshift, \
- .rshift = xshift, .min = xmin, .max = xmax, \
- .invert = xinvert} }
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmin, xmax, xinvert, 0) }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
- max, invert, 0) }
+ 0, max, invert, 0) }
#define SOC_DOUBLE_STS(xname, reg, shift_left, shift_right, max, invert) \
{ \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -124,19 +106,19 @@ struct platform_device;
.access = SNDRV_CTL_ELEM_ACCESS_READ | \
SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
- max, invert, 0) }
+ 0, max, invert, 0) }
#define SOC_DOUBLE_R(xname, reg_left, reg_right, xshift, xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.info = snd_soc_info_volsw, \
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
- xmax, xinvert) }
+ 0, xmax, xinvert) }
#define SOC_DOUBLE_R_RANGE(xname, reg_left, reg_right, xshift, xmin, \
xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
- .info = snd_soc_info_volsw_range, \
- .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
- .private_value = SOC_DOUBLE_R_RANGE_VALUE(reg_left, reg_right, \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
+ .private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, \
xshift, xmin, xmax, xinvert) }
#define SOC_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
@@ -146,7 +128,7 @@ struct platform_device;
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
- max, invert, 0) }
+ 0, max, invert, 0) }
#define SOC_DOUBLE_SX_TLV(xname, xreg, shift_left, shift_right, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -155,10 +137,8 @@ struct platform_device;
.info = snd_soc_info_volsw_sx, \
.get = snd_soc_get_volsw_sx, \
.put = snd_soc_put_volsw_sx, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, \
- .shift = shift_left, .rshift = shift_right, \
- .max = xmax, .min = xmin} }
+ .private_value = SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, \
+ xmin, xmax, 0, 0) }
#define SOC_DOUBLE_RANGE_TLV(xname, xreg, xshift_left, xshift_right, xmin, xmax, \
xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
@@ -167,10 +147,8 @@ struct platform_device;
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw, \
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, \
- .shift = xshift_left, .rshift = xshift_right, \
- .min = xmin, .max = xmax, .invert = xinvert} }
+ .private_value = SOC_DOUBLE_VALUE(xreg, xshift_left, xshift_right, \
+ xmin, xmax, xinvert, 0) }
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -179,16 +157,16 @@ struct platform_device;
.info = snd_soc_info_volsw, \
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
- xmax, xinvert) }
+ 0, xmax, xinvert) }
#define SOC_DOUBLE_R_RANGE_TLV(xname, reg_left, reg_right, xshift, xmin, \
xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_range, \
- .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
- .private_value = SOC_DOUBLE_R_RANGE_VALUE(reg_left, reg_right, \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
+ .private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, \
xshift, xmin, xmax, xinvert) }
#define SOC_DOUBLE_R_SX_TLV(xname, xreg, xrreg, xshift, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -198,10 +176,7 @@ struct platform_device;
.info = snd_soc_info_volsw_sx, \
.get = snd_soc_get_volsw_sx, \
.put = snd_soc_put_volsw_sx, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xrreg, \
- .shift = xshift, .rshift = xshift, \
- .max = xmax, .min = xmin} }
+ .private_value = SOC_DOUBLE_R_VALUE(xreg, xrreg, xshift, xmin, xmax, 0) }
#define SOC_DOUBLE_R_S_TLV(xname, reg_left, reg_right, xshift, xmin, xmax, xsign_bit, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -261,21 +236,21 @@ struct platform_device;
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
- .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, 0, xmax, xinvert, 0) }
#define SOC_DOUBLE_EXT(xname, reg, shift_left, shift_right, max, invert,\
xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = \
- SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert, 0) }
+ SOC_DOUBLE_VALUE(reg, shift_left, shift_right, 0, max, invert, 0) }
#define SOC_DOUBLE_R_EXT(xname, reg_left, reg_right, xshift, xmax, xinvert,\
xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
- xmax, xinvert) }
+ 0, xmax, xinvert) }
#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -284,19 +259,16 @@ struct platform_device;
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
- .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, 0, xmax, xinvert, 0) }
#define SOC_SINGLE_RANGE_EXT_TLV(xname, xreg, xshift, xmin, xmax, xinvert, \
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
- .info = snd_soc_info_volsw_range, \
+ .info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
- .private_value = (unsigned long)&(struct soc_mixer_control) \
- {.reg = xreg, .rreg = xreg, .shift = xshift, \
- .rshift = xshift, .min = xmin, .max = xmax, \
- .invert = xinvert} }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmin, xmax, xinvert, 0) }
#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -306,7 +278,7 @@ struct platform_device;
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, \
- xmax, xinvert, 0) }
+ 0, xmax, xinvert, 0) }
#define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -316,7 +288,7 @@ struct platform_device;
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
- xmax, xinvert) }
+ 0, xmax, xinvert) }
#define SOC_DOUBLE_R_S_EXT_TLV(xname, reg_left, reg_right, xshift, xmin, xmax, \
xsign_bit, xinvert, xhandler_get, xhandler_put, \
tlv_array) \
@@ -347,6 +319,13 @@ struct platform_device;
#define SOC_VALUE_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put) \
SOC_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put)
+#define SOC_ENUM_EXT_ACC(xname, xenum, xhandler_get, xhandler_put, xaccess) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = xaccess, \
+ .info = snd_soc_info_enum_double, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&xenum }
+
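+/*
+ * Editorial usage sketch for SOC_ENUM_EXT_ACC (hypothetical names): a
+ * read-only, volatile enum control could be declared as
+ *
+ *	SOC_ENUM_EXT_ACC("My Mode", my_enum, my_get, NULL,
+ *			 SNDRV_CTL_ELEM_ACCESS_READ |
+ *			 SNDRV_CTL_ELEM_ACCESS_VOLATILE)
+ */
+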
#define SND_SOC_BYTES(xname, xbase, xregs) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
@@ -359,6 +338,13 @@ struct platform_device;
.put = xhandler_put, .private_value = \
((unsigned long)&(struct soc_bytes) \
{.base = xbase, .num_regs = xregs }) }
+#define SND_SOC_BYTES_E_ACC(xname, xbase, xregs, xhandler_get, xhandler_put, xaccess) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .access = xaccess, \
+ .info = snd_soc_bytes_info, .get = xhandler_get, \
+ .put = xhandler_put, .private_value = \
+ ((unsigned long)&(struct soc_bytes) \
+ {.base = xbase, .num_regs = xregs }) }
#define SND_SOC_BYTES_MASK(xname, xbase, xregs, xmask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -422,27 +408,20 @@ struct platform_device;
#define SOC_ENUM_SINGLE_VIRT_DECL(name, xtexts) \
const struct soc_enum name = SOC_ENUM_SINGLE_VIRT(ARRAY_SIZE(xtexts), xtexts)
-struct snd_jack;
struct snd_soc_card;
-struct snd_soc_pcm_stream;
-struct snd_soc_ops;
struct snd_soc_pcm_runtime;
struct snd_soc_dai;
struct snd_soc_dai_driver;
struct snd_soc_dai_link;
struct snd_soc_component;
struct snd_soc_component_driver;
-struct soc_enum;
struct snd_soc_jack;
-struct snd_soc_jack_zone;
struct snd_soc_jack_pin;
#include <sound/soc-dapm.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-topology.h>
-struct snd_soc_jack_gpio;
-
enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_PCM = 0,
SND_SOC_PCM_CLASS_BE = 1,
@@ -451,6 +430,7 @@ enum snd_soc_pcm_subclass {
int snd_soc_register_card(struct snd_soc_card *card);
void snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
+int devm_snd_soc_register_deferrable_card(struct device *dev, struct snd_soc_card *card);
#ifdef CONFIG_PM_SLEEP
int snd_soc_suspend(struct device *dev);
int snd_soc_resume(struct device *dev);
@@ -478,7 +458,7 @@ int snd_soc_register_component(struct device *dev,
int devm_snd_soc_register_component(struct device *dev,
const struct snd_soc_component_driver *component_driver,
struct snd_soc_dai_driver *dai_drv, int num_dai);
-void snd_soc_unregister_component(struct device *dev);
+#define snd_soc_unregister_component(dev) snd_soc_unregister_component_by_driver(dev, NULL)
void snd_soc_unregister_component_by_driver(struct device *dev,
const struct snd_soc_component_driver *component_driver);
struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
@@ -486,18 +466,16 @@ struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
const char *driver_name);
-int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
+int soc_new_pcm(struct snd_soc_pcm_runtime *rtd);
#ifdef CONFIG_SND_SOC_COMPRESS
-int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
+int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd);
#else
-static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd)
{
return 0;
}
#endif
-void snd_soc_disconnect_sync(struct device *dev);
-
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
@@ -522,27 +500,23 @@ int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt);
-#ifdef CONFIG_DMI
-int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour);
-#else
-static inline int snd_soc_set_dmi_name(struct snd_soc_card *card,
- const char *flavour)
-{
- return 0;
-}
-#endif
-
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
-int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params);
+int snd_soc_params_to_frame_size(const struct snd_pcm_hw_params *params);
int snd_soc_calc_bclk(int fs, int sample_size, int channels, int tdm_slots);
-int snd_soc_params_to_bclk(struct snd_pcm_hw_params *parms);
-int snd_soc_tdm_params_to_bclk(struct snd_pcm_hw_params *params,
+int snd_soc_params_to_bclk(const struct snd_pcm_hw_params *parms);
+int snd_soc_tdm_params_to_bclk(const struct snd_pcm_hw_params *params,
int tdm_width, int tdm_slots, int slot_multiple);
+int snd_soc_ret(const struct device *dev, int ret, const char *fmt, ...);
/* set runtime hw params */
-int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
- const struct snd_pcm_hardware *hw);
+static inline int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
+ const struct snd_pcm_hardware *hw)
+{
+ substream->runtime->hw = *hw;
+
+ return 0;
+}
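+
+/*
+ * Editorial usage sketch: this is typically called from a DAI or component
+ * startup callback (my_pcm_hardware is a hypothetical static
+ * struct snd_pcm_hardware describing the stream capabilities):
+ *
+ *	static int my_startup(struct snd_pcm_substream *substream)
+ *	{
+ *		return snd_soc_set_runtime_hwparams(substream, &my_pcm_hardware);
+ *	}
+ */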
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
@@ -601,12 +575,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
int snd_soc_limit_volume(struct snd_soc_card *card,
const char *name, int max);
int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
@@ -675,7 +643,18 @@ struct snd_soc_dai_link_component {
const char *name;
struct device_node *of_node;
const char *dai_name;
- struct of_phandle_args *dai_args;
+ const struct of_phandle_args *dai_args;
+
+ /*
+ * Extra format = SND_SOC_DAIFMT_Bx_Fx
+ *
+ * [Note] it is Bx_Fx base, not CBx_CFx
+ *
+ * It will be used with dai_link->dai_fmt
+ * see
+ * snd_soc_runtime_set_dai_fmt()
+ */
+ unsigned int ext_fmt;
};
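+
+/*
+ * Editorial sketch for ext_fmt above (assumes the Bx_Fx format aliases in
+ * soc-dai.h, e.g. SND_SOC_DAIFMT_BP_FP; hypothetical usage):
+ *
+ *	dlc->ext_fmt = SND_SOC_DAIFMT_BP_FP;
+ */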
/*
@@ -815,10 +794,6 @@ struct snd_soc_dai_link {
/* This DAI link can route to other DAI links at runtime (Frontend)*/
unsigned int dynamic:1;
- /* DPCM capture and Playback support */
- unsigned int dpcm_capture:1;
- unsigned int dpcm_playback:1;
-
/* DPCM used FE & BE merged format */
unsigned int dpcm_merged_format:1;
/* DPCM used FE & BE merged channel */
@@ -837,7 +812,8 @@ struct snd_soc_dai_link {
#endif
};
-static inline int snd_soc_link_num_ch_map(struct snd_soc_dai_link *link) {
+static inline int snd_soc_link_num_ch_map(const struct snd_soc_dai_link *link)
+{
return max(link->num_cpus, link->num_codecs);
}
@@ -965,7 +941,7 @@ snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
extern struct snd_soc_dai_link_component null_dailink_component[0];
extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
-
+int snd_soc_dlc_is_dummy(struct snd_soc_dai_link_component *dlc);
struct snd_soc_codec_conf {
/*
@@ -1102,7 +1078,6 @@ struct snd_soc_card {
/* Generic DAPM context for the card */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_stats dapm_stats;
- struct snd_soc_dapm_update *update;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_card_root;
@@ -1116,9 +1091,9 @@ struct snd_soc_card {
unsigned int instantiated:1;
unsigned int topology_shortname_created:1;
unsigned int fully_routed:1;
- unsigned int disable_route_checks:1;
unsigned int probed:1;
unsigned int component_chaining:1;
+ struct device *devres_dev;
void *drvdata;
};
@@ -1159,6 +1134,11 @@ static inline int snd_soc_card_is_instantiated(struct snd_soc_card *card)
return card && card->instantiated;
}
+static inline struct snd_soc_dapm_context *snd_soc_card_to_dapm(struct snd_soc_card *card)
+{
+ return &card->dapm;
+}
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
@@ -1193,7 +1173,7 @@ struct snd_soc_pcm_runtime {
struct dentry *debugfs_dpcm_root;
#endif
- unsigned int num; /* 0-based and monotonic increasing */
+	unsigned int id; /* 0-based and monotonically increasing */
struct list_head list; /* rtd list of the soc card */
/* function mark */
@@ -1205,11 +1185,11 @@ struct snd_soc_pcm_runtime {
/* bit field */
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ unsigned int initialized:1;
- bool initialized;
-
+ /* CPU/Codec/Platform */
int num_components;
- struct snd_soc_component *components[]; /* CPU/Codec/Platform */
+ struct snd_soc_component *components[] __counted_by(num_components);
};
/* see soc_new_pcm_runtime() */
@@ -1249,12 +1229,17 @@ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
/* mixer control */
struct soc_mixer_control {
- int min, max, platform_max;
+ /* Minimum and maximum specified as written to the hardware */
+ int min, max;
+ /* Limited maximum value specified as presented through the control */
+ int platform_max;
int reg, rreg;
unsigned int shift, rshift;
+ u32 num_channels;
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
+ unsigned int sdca_q78:1;
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
#endif
@@ -1299,7 +1284,7 @@ struct soc_enum {
#endif
};
-static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
+static inline bool snd_soc_volsw_is_stereo(const struct soc_mixer_control *mc)
{
if (mc->reg == mc->rreg && mc->shift == mc->rshift)
return false;
@@ -1311,7 +1296,7 @@ static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
return true;
}
-static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e,
+static inline unsigned int snd_soc_enum_val_to_item(const struct soc_enum *e,
unsigned int val)
{
unsigned int i;
@@ -1326,7 +1311,7 @@ static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e,
return 0;
}
-static inline unsigned int snd_soc_enum_item_to_val(struct soc_enum *e,
+static inline unsigned int snd_soc_enum_item_to_val(const struct soc_enum *e,
unsigned int item)
{
if (!e->values)
@@ -1335,22 +1320,6 @@ static inline unsigned int snd_soc_enum_item_to_val(struct soc_enum *e,
return e->values[item];
}
-/**
- * snd_soc_kcontrol_component() - Returns the component that registered the
- * control
- * @kcontrol: The control for which to get the component
- *
- * Note: This function will work correctly if the control has been registered
- * for a component. With snd_soc_add_codec_controls() or via table based
- * setup for either a CODEC or component driver. Otherwise the behavior is
- * undefined.
- */
-static inline struct snd_soc_component *snd_soc_kcontrol_component(
- struct snd_kcontrol *kcontrol)
-{
- return snd_kcontrol_chip(kcontrol);
-}
-
int snd_soc_util_init(void);
void snd_soc_util_exit(void);
@@ -1401,7 +1370,7 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
snd_soc_daifmt_clock_provider_from_bitmap( \
snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix))
-int snd_soc_get_stream_cpu(struct snd_soc_dai_link *dai_link, int stream);
+int snd_soc_get_stream_cpu(const struct snd_soc_dai_link *dai_link, int stream);
int snd_soc_get_dlc(const struct of_phandle_args *args,
struct snd_soc_dai_link_component *dlc);
int snd_soc_of_get_dlc(struct device_node *of_node,
@@ -1436,10 +1405,6 @@ struct snd_soc_dai *snd_soc_get_dai_via_args(const struct of_phandle_args *dai_a
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
-struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
- struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv,
- bool legacy_dai_naming);
void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
@@ -1516,22 +1481,22 @@ static inline void _snd_soc_dapm_mutex_assert_held_c(struct snd_soc_card *card)
static inline void _snd_soc_dapm_mutex_lock_root_d(struct snd_soc_dapm_context *dapm)
{
- _snd_soc_dapm_mutex_lock_root_c(dapm->card);
+ _snd_soc_dapm_mutex_lock_root_c(snd_soc_dapm_to_card(dapm));
}
static inline void _snd_soc_dapm_mutex_lock_d(struct snd_soc_dapm_context *dapm)
{
- _snd_soc_dapm_mutex_lock_c(dapm->card);
+ _snd_soc_dapm_mutex_lock_c(snd_soc_dapm_to_card(dapm));
}
static inline void _snd_soc_dapm_mutex_unlock_d(struct snd_soc_dapm_context *dapm)
{
- _snd_soc_dapm_mutex_unlock_c(dapm->card);
+ _snd_soc_dapm_mutex_unlock_c(snd_soc_dapm_to_card(dapm));
}
static inline void _snd_soc_dapm_mutex_assert_held_d(struct snd_soc_dapm_context *dapm)
{
- _snd_soc_dapm_mutex_assert_held_c(dapm->card);
+ _snd_soc_dapm_mutex_assert_held_c(snd_soc_dapm_to_card(dapm));
}
#define snd_soc_dapm_mutex_lock_root(x) _Generic((x), \
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
new file mode 100644
index 000000000000..227347c8f0b3
--- /dev/null
+++ b/include/sound/soc_sdw_utils.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This file incorporates work covered by the following copyright notice:
+ * Copyright (c) 2020 Intel Corporation
+ * Copyright(c) 2024 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef SOC_SDW_UTILS_H
+#define SOC_SDW_UTILS_H
+
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+#define SOC_SDW_MAX_DAI_NUM 8
+#define SOC_SDW_MAX_AUX_NUM 2
+#define SOC_SDW_MAX_NO_PROPS 2
+#define SOC_SDW_JACK_JDSRC(quirk) ((quirk) & GENMASK(3, 0))
+
+/* If a CODEC has an optional speaker output, this quirk will enable it */
+#define SOC_SDW_CODEC_SPKR BIT(15)
+/*
+ * Set if the CODEC has additional devices attached directly to it.
+ *
+ * For the cs42l43:
+ * - 0 - No speaker output
+ * - SOC_SDW_CODEC_SPKR - CODEC internal speaker
+ * - SOC_SDW_SIDECAR_AMPS - 2x Sidecar amplifiers + CODEC internal speaker
+ * - SOC_SDW_CODEC_SPKR | SOC_SDW_SIDECAR_AMPS - Not currently supported
+ */
+#define SOC_SDW_SIDECAR_AMPS BIT(16)
+#define SOC_SDW_CODEC_MIC BIT(17)
+
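+/*
+ * Editorial example (hypothetical board quirk, not from this patch): a
+ * cs42l43 design with sidecar amplifiers, the CODEC microphone and
+ * jack-detect source 1 could use
+ *
+ *	SOC_SDW_SIDECAR_AMPS | SOC_SDW_CODEC_MIC | 0x1
+ *
+ * for which SOC_SDW_JACK_JDSRC() evaluates to 1.
+ */
+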
+#define SOC_SDW_UNUSED_DAI_ID -1
+#define SOC_SDW_JACK_OUT_DAI_ID 0
+#define SOC_SDW_JACK_IN_DAI_ID 1
+#define SOC_SDW_AMP_OUT_DAI_ID 2
+#define SOC_SDW_AMP_IN_DAI_ID 3
+#define SOC_SDW_DMIC_DAI_ID 4
+
+#define SOC_SDW_DAI_TYPE_JACK 0
+#define SOC_SDW_DAI_TYPE_AMP 1
+#define SOC_SDW_DAI_TYPE_MIC 2
+
+struct asoc_sdw_codec_info;
+
+struct asoc_sdw_dai_info {
+ const bool direction[2]; /* playback & capture support */
+ const char *codec_name;
+ const char *dai_name;
+ const char *component_name;
+ const int dai_type;
+ const int dailink[2]; /* dailink id for each direction */
+ const struct snd_kcontrol_new *controls;
+ const int num_controls;
+ const struct snd_soc_dapm_widget *widgets;
+ const int num_widgets;
+ int (*init)(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+ int (*exit)(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+ int (*rtd_init)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+	bool rtd_init_done; /* Indicates that the rtd_init callback has run */
+ unsigned long quirk;
+ bool quirk_exclude;
+};
+
+struct asoc_sdw_aux_info {
+ const char *codec_name;
+};
+
+struct asoc_sdw_codec_info {
+ const int part_id;
+ const int version_id;
+ const char *name_prefix;
+ int amp_num;
+ const u8 acpi_id[ACPI_ID_LEN];
+ const bool ignore_internal_dmic;
+ const struct snd_soc_ops *ops;
+ struct asoc_sdw_dai_info dais[SOC_SDW_MAX_DAI_NUM];
+ const int dai_num;
+ struct asoc_sdw_aux_info auxs[SOC_SDW_MAX_AUX_NUM];
+ const int aux_num;
+
+ int (*codec_card_late_probe)(struct snd_soc_card *card);
+
+ int (*count_sidecar)(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+ int (*add_sidecar)(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+};
+
+struct asoc_sdw_mc_private {
+ struct snd_soc_card card;
+ struct snd_soc_jack sdw_headset;
+ struct device *headset_codec_dev; /* only one headset per card */
+ struct device *amp_dev1, *amp_dev2;
+ bool append_dai_type;
+ bool ignore_internal_dmic;
+ void *private;
+ unsigned long mc_quirk;
+ int codec_info_list_count;
+};
+
+struct asoc_sdw_endpoint {
+ struct list_head list;
+
+ u32 link_mask;
+ const char *codec_name;
+ const char *name_prefix;
+ bool include_sidecar;
+
+ struct asoc_sdw_codec_info *codec_info;
+ const struct asoc_sdw_dai_info *dai_info;
+};
+
+struct asoc_sdw_dailink {
+ bool initialised;
+
+ u8 group_id;
+ u32 link_mask[SNDRV_PCM_STREAM_LAST + 1];
+ int num_devs[SNDRV_PCM_STREAM_LAST + 1];
+ struct list_head endpoints;
+};
+
+extern struct asoc_sdw_codec_info codec_info_list[];
+int asoc_sdw_get_codec_info_list_count(void);
+
+int asoc_sdw_startup(struct snd_pcm_substream *substream);
+int asoc_sdw_prepare(struct snd_pcm_substream *substream);
+int asoc_sdw_trigger(struct snd_pcm_substream *substream, int cmd);
+int asoc_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+int asoc_sdw_hw_free(struct snd_pcm_substream *substream);
+void asoc_sdw_shutdown(struct snd_pcm_substream *substream);
+
+const char *asoc_sdw_get_codec_name(struct device *dev,
+ const struct asoc_sdw_dai_info *dai_info,
+ const struct snd_soc_acpi_link_adr *adr_link,
+ int adr_index);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_part(const u64 adr);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_acpi(const u8 *acpi_id);
+
+struct asoc_sdw_codec_info *asoc_sdw_find_codec_info_dai(const char *dai_name,
+ int *dai_index);
+
+struct snd_soc_dai_link *asoc_sdw_mc_find_codec_dai_used(struct snd_soc_card *card,
+ const char *dai_name);
+
+void asoc_sdw_mc_dailink_exit_loop(struct snd_soc_card *card);
+
+int asoc_sdw_card_late_probe(struct snd_soc_card *card);
+
+void asoc_sdw_init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ struct snd_soc_dai_link_component *cpus, int cpus_num,
+ struct snd_soc_dai_link_component *platform_component,
+ int num_platforms, struct snd_soc_dai_link_component *codecs,
+ int codecs_num, int no_pcm,
+ int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_init_simple_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links,
+ int *be_id, char *name, int playback, int capture,
+ const char *cpu_dai_name, const char *platform_comp_name,
+ const char *codec_name, const char *codec_dai_name,
+ int no_pcm, int (*init)(struct snd_soc_pcm_runtime *rtd),
+ const struct snd_soc_ops *ops);
+
+int asoc_sdw_count_sdw_endpoints(struct snd_soc_card *card,
+ int *num_devs, int *num_ends, int *num_aux);
+
+struct asoc_sdw_dailink *asoc_sdw_find_dailink(struct asoc_sdw_dailink *dailinks,
+ const struct snd_soc_acpi_endpoint *new);
+int asoc_sdw_get_dai_type(u32 type);
+
+int asoc_sdw_parse_sdw_endpoints(struct snd_soc_card *card,
+ struct snd_soc_aux_dev *soc_aux,
+ struct asoc_sdw_dailink *soc_dais,
+ struct asoc_sdw_endpoint *soc_ends,
+ int *num_devs);
+
+int asoc_sdw_rtd_init(struct snd_soc_pcm_runtime *rtd);
+
+/* DMIC support */
+int asoc_sdw_dmic_init(struct snd_soc_pcm_runtime *rtd);
+
+/* RT711 support */
+int asoc_sdw_rt711_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt711_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT711-SDCA support */
+int asoc_sdw_rt_sdca_jack_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_sdca_jack_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* RT1308 I2S support */
+extern const struct snd_soc_ops soc_sdw_rt1308_i2s_ops;
+
+/* generic amp support */
+int asoc_sdw_rt_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_rt_amp_exit(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link);
+
+/* CS42L43 support */
+int asoc_sdw_cs42l43_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* CS AMP support */
+int asoc_sdw_bridge_cs35l56_count_sidecar(struct snd_soc_card *card,
+ int *num_dais, int *num_devs);
+int asoc_sdw_bridge_cs35l56_add_sidecar(struct snd_soc_card *card,
+ struct snd_soc_dai_link **dai_links,
+ struct snd_soc_codec_conf **codec_conf);
+int asoc_sdw_bridge_cs35l56_spk_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
+int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix);
+
+/* MAXIM codec support */
+int asoc_sdw_maxim_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+
+/* dai_link init callbacks */
+int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt700_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt711_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt_mf_sdca_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l42_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l45_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs42l45_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+/* TI */
+int asoc_sdw_ti_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_ti_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card,
+ const char *name_prefix);
+
+#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index ec6c30d54592..eddea82c7b5a 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -106,6 +106,7 @@ struct snd_sof_pdata {
const char *fw_filename;
const char *tplg_filename_prefix;
const char *tplg_filename;
+ bool disable_function_topology;
/* loadable external libraries available under this directory */
const char *fw_lib_prefix;
@@ -173,5 +174,6 @@ struct sof_dev_desc {
int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd);
int sof_dai_get_bclk(struct snd_soc_pcm_runtime *rtd);
+int sof_dai_get_tdm_slots(struct snd_soc_pcm_runtime *rtd);
#endif
diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h
index fc0231d04a94..7dfe3ccf1fe4 100644
--- a/include/sound/sof/ext_manifest.h
+++ b/include/sound/sof/ext_manifest.h
@@ -60,6 +60,7 @@ enum sof_ext_man_elem_type {
SOF_EXT_MAN_ELEM_FW_VERSION = 0,
SOF_EXT_MAN_ELEM_WINDOW = 1,
SOF_EXT_MAN_ELEM_CC_VERSION = 2,
+ SOF_EXT_MAN_ELEM_PROBE_INFO = 3,
SOF_EXT_MAN_ELEM_DBG_ABI = 4,
SOF_EXT_MAN_ELEM_CONFIG_DATA = 5, /**< ABI3.17 */
SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA = 6,
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index 0c0cf47946b1..15fac532688e 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -326,10 +326,14 @@ struct sof_ipc4_base_module_cfg {
#define SOF_IPC4_MOD_INSTANCE_SHIFT 16
#define SOF_IPC4_MOD_INSTANCE_MASK GENMASK(23, 16)
#define SOF_IPC4_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_INSTANCE_SHIFT)
+#define SOF_IPC4_MOD_INSTANCE_GET(x) (((x) & SOF_IPC4_MOD_INSTANCE_MASK) \
+ >> SOF_IPC4_MOD_INSTANCE_SHIFT)
#define SOF_IPC4_MOD_ID_SHIFT 0
#define SOF_IPC4_MOD_ID_MASK GENMASK(15, 0)
#define SOF_IPC4_MOD_ID(x) ((x) << SOF_IPC4_MOD_ID_SHIFT)
+#define SOF_IPC4_MOD_ID_GET(x) (((x) & SOF_IPC4_MOD_ID_MASK) \
+ >> SOF_IPC4_MOD_ID_SHIFT)
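+
+/*
+ * Editor's worked example for the pack/unpack macros above: for module id 5,
+ * instance 2, SOF_IPC4_MOD_ID(5) | SOF_IPC4_MOD_INSTANCE(2) == 0x00020005,
+ * and SOF_IPC4_MOD_ID_GET() / SOF_IPC4_MOD_INSTANCE_GET() recover 5 and 2.
+ */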
/* init module ipc msg */
#define SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT 0
@@ -396,6 +400,7 @@ enum sof_ipc4_base_fw_params {
SOF_IPC4_FW_PARAM_MODULES_INFO_GET,
SOF_IPC4_FW_PARAM_LIBRARIES_INFO_GET = 16,
SOF_IPC4_FW_PARAM_SYSTEM_TIME = 20,
+ SOF_IPC4_FW_PARAM_MIC_PRIVACY_STATE_CHANGE = 35,
};
enum sof_ipc4_fw_config_params {
@@ -446,6 +451,18 @@ struct sof_ipc4_dx_state_info {
uint32_t dx_mask;
} __packed __aligned(4);
+enum sof_ipc4_hw_config_params {
+ SOF_IPC4_HW_CFG_INTEL_MIC_PRIVACY_CAPS = 11,
+};
+
+#define SOF_IPC_INTEL_MIC_PRIVACY_VERSION_PTL 1
+
+struct sof_ipc4_intel_mic_privacy_cap {
+ uint32_t version;
+ uint32_t capabilities_length;
+ uint32_t capabilities[];
+} __packed;
+
/* Reply messages */
/*
@@ -485,6 +502,8 @@ struct sof_ipc4_dx_state_info {
#define SOF_IPC4_LOG_CORE_GET(x) (((x) & SOF_IPC4_LOG_CORE_MASK) >> \
SOF_IPC4_LOG_CORE_SHIFT)
+#define SOF_IPC4_FW_READY_LIB_RESTORED BIT(15)
+
/* Value of notification type field - must fit into 8 bits */
enum sof_ipc4_notification_type {
/* Phrase detected (notification from WoV module) */
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 3ba086f61983..449e93c25184 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -54,7 +54,7 @@ enum sof_comp_type {
struct sof_ipc_comp {
struct sof_ipc_cmd_hdr hdr;
uint32_t id;
- enum sof_comp_type type;
+ uint32_t type;
uint32_t pipeline_id;
uint32_t core;
diff --git a/include/sound/soundfont.h b/include/sound/soundfont.h
index 98ed98d89d6d..48f8cf6de3ac 100644
--- a/include/sound/soundfont.h
+++ b/include/sound/soundfont.h
@@ -86,9 +86,11 @@ struct snd_sf_list {
};
/* Prototypes for soundfont.c */
-int snd_soundfont_load(struct snd_sf_list *sflist, const void __user *data,
+int snd_soundfont_load(struct snd_card *card,
+ struct snd_sf_list *sflist, const void __user *data,
long count, int client);
-int snd_soundfont_load_guspatch(struct snd_sf_list *sflist, const char __user *data,
+int snd_soundfont_load_guspatch(struct snd_card *card,
+ struct snd_sf_list *sflist, const char __user *data,
long count);
int snd_soundfont_close_check(struct snd_sf_list *sflist, int client);
@@ -112,5 +114,23 @@ int snd_sf_calc_parm_decay(int msec);
extern int snd_sf_vol_table[128];
int snd_sf_linear_to_log(unsigned int amount, int offset, int ratio);
+/* lock access to sflist */
+static inline void snd_soundfont_lock_preset(struct snd_sf_list *sflist)
+{
+ mutex_lock(&sflist->presets_mutex);
+ guard(spinlock_irqsave)(&sflist->lock);
+ sflist->presets_locked = 1;
+}
+
+/* remove lock */
+static inline void snd_soundfont_unlock_preset(struct snd_sf_list *sflist)
+{
+ guard(spinlock_irqsave)(&sflist->lock);
+ sflist->presets_locked = 0;
+ mutex_unlock(&sflist->presets_mutex);
+}
+
+DEFINE_GUARD(snd_soundfont_lock_preset, struct snd_sf_list *,
+ snd_soundfont_lock_preset(_T), snd_soundfont_unlock_preset(_T))
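+
+/*
+ * Editorial usage sketch: with the guard defined above, callers can write
+ *
+ *	scoped_guard(snd_soundfont_lock_preset, sflist) {
+ *		(presets are locked within this scope)
+ *	}
+ *
+ * instead of pairing the lock/unlock helpers by hand.
+ */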
#endif /* __SOUND_SOUNDFONT_H */
diff --git a/include/sound/tas2563-tlv.h b/include/sound/tas2563-tlv.h
new file mode 100644
index 000000000000..bb269b21f460
--- /dev/null
+++ b/include/sound/tas2563-tlv.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563 driver implements flexible and configurable setting of
+// algorithm coefficients for one, two, or more TAS2563 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2563_TLV_H__
+#define __TAS2563_TLV_H__
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2563_dvc_tlv, -12150, 50, 1);
+
+/* pow(10, db/20) * pow(2,30) */
+static const __maybe_unused unsigned char tas2563_dvc_table[][4] = {
+ { 0X00, 0X00, 0X00, 0X00 }, /* -121.5db */
+ { 0X00, 0X00, 0X03, 0XBC }, /* -121.0db */
+ { 0X00, 0X00, 0X03, 0XF5 }, /* -120.5db */
+ { 0X00, 0X00, 0X04, 0X31 }, /* -120.0db */
+ { 0X00, 0X00, 0X04, 0X71 }, /* -119.5db */
+ { 0X00, 0X00, 0X04, 0XB4 }, /* -119.0db */
+ { 0X00, 0X00, 0X04, 0XFC }, /* -118.5db */
+ { 0X00, 0X00, 0X05, 0X47 }, /* -118.0db */
+ { 0X00, 0X00, 0X05, 0X97 }, /* -117.5db */
+ { 0X00, 0X00, 0X05, 0XEC }, /* -117.0db */
+ { 0X00, 0X00, 0X06, 0X46 }, /* -116.5db */
+ { 0X00, 0X00, 0X06, 0XA5 }, /* -116.0db */
+ { 0X00, 0X00, 0X07, 0X0A }, /* -115.5db */
+ { 0X00, 0X00, 0X07, 0X75 }, /* -115.0db */
+ { 0X00, 0X00, 0X07, 0XE6 }, /* -114.5db */
+ { 0X00, 0X00, 0X08, 0X5E }, /* -114.0db */
+ { 0X00, 0X00, 0X08, 0XDD }, /* -113.5db */
+ { 0X00, 0X00, 0X09, 0X63 }, /* -113.0db */
+ { 0X00, 0X00, 0X09, 0XF2 }, /* -112.5db */
+ { 0X00, 0X00, 0X0A, 0X89 }, /* -112.0db */
+ { 0X00, 0X00, 0X0B, 0X28 }, /* -111.5db */
+ { 0X00, 0X00, 0X0B, 0XD2 }, /* -111.0db */
+ { 0X00, 0X00, 0X0C, 0X85 }, /* -110.5db */
+ { 0X00, 0X00, 0X0D, 0X43 }, /* -110.0db */
+ { 0X00, 0X00, 0X0E, 0X0C }, /* -109.5db */
+ { 0X00, 0X00, 0X0E, 0XE1 }, /* -109.0db */
+ { 0X00, 0X00, 0X0F, 0XC3 }, /* -108.5db */
+ { 0X00, 0X00, 0X10, 0XB2 }, /* -108.0db */
+ { 0X00, 0X00, 0X11, 0XAF }, /* -107.5db */
+ { 0X00, 0X00, 0X12, 0XBC }, /* -107.0db */
+ { 0X00, 0X00, 0X13, 0XD8 }, /* -106.5db */
+ { 0X00, 0X00, 0X15, 0X05 }, /* -106.0db */
+ { 0X00, 0X00, 0X16, 0X44 }, /* -105.5db */
+ { 0X00, 0X00, 0X17, 0X96 }, /* -105.0db */
+ { 0X00, 0X00, 0X18, 0XFB }, /* -104.5db */
+ { 0X00, 0X00, 0X1A, 0X76 }, /* -104.0db */
+ { 0X00, 0X00, 0X1C, 0X08 }, /* -103.5db */
+ { 0X00, 0X00, 0X1D, 0XB1 }, /* -103.0db */
+ { 0X00, 0X00, 0X1F, 0X73 }, /* -102.5db */
+ { 0X00, 0X00, 0X21, 0X51 }, /* -102.0db */
+ { 0X00, 0X00, 0X23, 0X4A }, /* -101.5db */
+ { 0X00, 0X00, 0X25, 0X61 }, /* -101.0db */
+ { 0X00, 0X00, 0X27, 0X98 }, /* -100.5db */
+ { 0X00, 0X00, 0X29, 0XF1 }, /* -100.0db */
+ { 0X00, 0X00, 0X2C, 0X6D }, /* -99.5db */
+ { 0X00, 0X00, 0X2F, 0X0F }, /* -99.0db */
+ { 0X00, 0X00, 0X31, 0XD9 }, /* -98.5db */
+ { 0X00, 0X00, 0X34, 0XCD }, /* -98.0db */
+ { 0X00, 0X00, 0X37, 0XEE }, /* -97.5db */
+ { 0X00, 0X00, 0X3B, 0X3F }, /* -97.0db */
+ { 0X00, 0X00, 0X3E, 0XC1 }, /* -96.5db */
+ { 0X00, 0X00, 0X42, 0X79 }, /* -96.0db */
+ { 0X00, 0X00, 0X46, 0X6A }, /* -95.5db */
+ { 0X00, 0X00, 0X4A, 0X96 }, /* -95.0db */
+ { 0X00, 0X00, 0X4F, 0X01 }, /* -94.5db */
+ { 0X00, 0X00, 0X53, 0XAF }, /* -94.0db */
+ { 0X00, 0X00, 0X58, 0XA5 }, /* -93.5db */
+ { 0X00, 0X00, 0X5D, 0XE6 }, /* -93.0db */
+ { 0X00, 0X00, 0X63, 0X76 }, /* -92.5db */
+ { 0X00, 0X00, 0X69, 0X5B }, /* -92.0db */
+ { 0X00, 0X00, 0X6F, 0X99 }, /* -91.5db */
+ { 0X00, 0X00, 0X76, 0X36 }, /* -91.0db */
+ { 0X00, 0X00, 0X7D, 0X37 }, /* -90.5db */
+ { 0X00, 0X00, 0X84, 0XA2 }, /* -90.0db */
+ { 0X00, 0X00, 0X8C, 0X7E }, /* -89.5db */
+ { 0X00, 0X00, 0X94, 0XD1 }, /* -89.0db */
+ { 0X00, 0X00, 0X9D, 0XA3 }, /* -88.5db */
+ { 0X00, 0X00, 0XA6, 0XFA }, /* -88.0db */
+ { 0X00, 0X00, 0XB0, 0XDF }, /* -87.5db */
+ { 0X00, 0X00, 0XBB, 0X5A }, /* -87.0db */
+ { 0X00, 0X00, 0XC6, 0X74 }, /* -86.5db */
+ { 0X00, 0X00, 0XD2, 0X36 }, /* -86.0db */
+ { 0X00, 0X00, 0XDE, 0XAB }, /* -85.5db */
+ { 0X00, 0X00, 0XEB, 0XDC }, /* -85.0db */
+ { 0X00, 0X00, 0XF9, 0XD6 }, /* -84.5db */
+ { 0X00, 0X01, 0X08, 0XA4 }, /* -84.0db */
+ { 0X00, 0X01, 0X18, 0X52 }, /* -83.5db */
+ { 0X00, 0X01, 0X28, 0XEF }, /* -83.0db */
+ { 0X00, 0X01, 0X3A, 0X87 }, /* -82.5db */
+ { 0X00, 0X01, 0X4D, 0X2A }, /* -82.0db */
+ { 0X00, 0X01, 0X60, 0XE8 }, /* -81.5db */
+ { 0X00, 0X01, 0X75, 0XD1 }, /* -81.0db */
+ { 0X00, 0X01, 0X8B, 0XF7 }, /* -80.5db */
+ { 0X00, 0X01, 0XA3, 0X6E }, /* -80.0db */
+ { 0X00, 0X01, 0XBC, 0X48 }, /* -79.5db */
+ { 0X00, 0X01, 0XD6, 0X9B }, /* -79.0db */
+ { 0X00, 0X01, 0XF2, 0X7E }, /* -78.5db */
+ { 0X00, 0X02, 0X10, 0X08 }, /* -78.0db */
+ { 0X00, 0X02, 0X2F, 0X51 }, /* -77.5db */
+ { 0X00, 0X02, 0X50, 0X76 }, /* -77.0db */
+ { 0X00, 0X02, 0X73, 0X91 }, /* -76.5db */
+ { 0X00, 0X02, 0X98, 0XC0 }, /* -76.0db */
+ { 0X00, 0X02, 0XC0, 0X24 }, /* -75.5db */
+ { 0X00, 0X02, 0XE9, 0XDD }, /* -75.0db */
+ { 0X00, 0X03, 0X16, 0X0F }, /* -74.5db */
+ { 0X00, 0X03, 0X44, 0XDF }, /* -74.0db */
+ { 0X00, 0X03, 0X76, 0X76 }, /* -73.5db */
+ { 0X00, 0X03, 0XAA, 0XFC }, /* -73.0db */
+ { 0X00, 0X03, 0XE2, 0XA0 }, /* -72.5db */
+ { 0X00, 0X04, 0X1D, 0X8F }, /* -72.0db */
+ { 0X00, 0X04, 0X5B, 0XFD }, /* -71.5db */
+ { 0X00, 0X04, 0X9E, 0X1D }, /* -71.0db */
+ { 0X00, 0X04, 0XE4, 0X29 }, /* -70.5db */
+ { 0X00, 0X05, 0X2E, 0X5A }, /* -70.0db */
+ { 0X00, 0X05, 0X7C, 0XF2 }, /* -69.5db */
+ { 0X00, 0X05, 0XD0, 0X31 }, /* -69.0db */
+ { 0X00, 0X06, 0X28, 0X60 }, /* -68.5db */
+ { 0X00, 0X06, 0X85, 0XC8 }, /* -68.0db */
+ { 0X00, 0X06, 0XE8, 0XB9 }, /* -67.5db */
+ { 0X00, 0X07, 0X51, 0X86 }, /* -67.0db */
+ { 0X00, 0X07, 0XC0, 0X8A }, /* -66.5db */
+ { 0X00, 0X08, 0X36, 0X21 }, /* -66.0db */
+ { 0X00, 0X08, 0XB2, 0XB0 }, /* -65.5db */
+ { 0X00, 0X09, 0X36, 0XA1 }, /* -65.0db */
+ { 0X00, 0X09, 0XC2, 0X63 }, /* -64.5db */
+ { 0X00, 0X0A, 0X56, 0X6D }, /* -64.0db */
+ { 0X00, 0X0A, 0XF3, 0X3C }, /* -63.5db */
+ { 0X00, 0X0B, 0X99, 0X56 }, /* -63.0db */
+ { 0X00, 0X0C, 0X49, 0X48 }, /* -62.5db */
+ { 0X00, 0X0D, 0X03, 0XA7 }, /* -62.0db */
+ { 0X00, 0X0D, 0XC9, 0X11 }, /* -61.5db */
+ { 0X00, 0X0E, 0X9A, 0X2D }, /* -61.0db */
+ { 0X00, 0X0F, 0X77, 0XAD }, /* -60.5db */
+ { 0X00, 0X10, 0X62, 0X4D }, /* -60.0db */
+ { 0X00, 0X11, 0X5A, 0XD5 }, /* -59.5db */
+ { 0X00, 0X12, 0X62, 0X16 }, /* -59.0db */
+ { 0X00, 0X13, 0X78, 0XF0 }, /* -58.5db */
+ { 0X00, 0X14, 0XA0, 0X50 }, /* -58.0db */
+ { 0X00, 0X15, 0XD9, 0X31 }, /* -57.5db */
+ { 0X00, 0X17, 0X24, 0X9C }, /* -57.0db */
+ { 0X00, 0X18, 0X83, 0XAA }, /* -56.5db */
+ { 0X00, 0X19, 0XF7, 0X86 }, /* -56.0db */
+ { 0X00, 0X1B, 0X81, 0X6A }, /* -55.5db */
+ { 0X00, 0X1D, 0X22, 0XA4 }, /* -55.0db */
+ { 0X00, 0X1E, 0XDC, 0X98 }, /* -54.5db */
+ { 0X00, 0X20, 0XB0, 0XBC }, /* -54.0db */
+ { 0X00, 0X22, 0XA0, 0X9D }, /* -53.5db */
+ { 0X00, 0X24, 0XAD, 0XE0 }, /* -53.0db */
+ { 0X00, 0X26, 0XDA, 0X43 }, /* -52.5db */
+ { 0X00, 0X29, 0X27, 0X9D }, /* -52.0db */
+ { 0X00, 0X2B, 0X97, 0XE3 }, /* -51.5db */
+ { 0X00, 0X2E, 0X2D, 0X27 }, /* -51.0db */
+ { 0X00, 0X30, 0XE9, 0X9A }, /* -50.5db */
+ { 0X00, 0X33, 0XCF, 0X8D }, /* -50.0db */
+ { 0X00, 0X36, 0XE1, 0X78 }, /* -49.5db */
+ { 0X00, 0X3A, 0X21, 0XF3 }, /* -49.0db */
+ { 0X00, 0X3D, 0X93, 0XC3 }, /* -48.5db */
+ { 0X00, 0X41, 0X39, 0XD3 }, /* -48.0db */
+ { 0X00, 0X45, 0X17, 0X3B }, /* -47.5db */
+ { 0X00, 0X49, 0X2F, 0X44 }, /* -47.0db */
+ { 0X00, 0X4D, 0X85, 0X66 }, /* -46.5db */
+ { 0X00, 0X52, 0X1D, 0X50 }, /* -46.0db */
+ { 0X00, 0X56, 0XFA, 0XE8 }, /* -45.5db */
+ { 0X00, 0X5C, 0X22, 0X4E }, /* -45.0db */
+ { 0X00, 0X61, 0X97, 0XE1 }, /* -44.5db */
+ { 0X00, 0X67, 0X60, 0X44 }, /* -44.0db */
+ { 0X00, 0X6D, 0X80, 0X60 }, /* -43.5db */
+ { 0X00, 0X73, 0XFD, 0X65 }, /* -43.0db */
+ { 0X00, 0X7A, 0XDC, 0XD7 }, /* -42.5db */
+ { 0X00, 0X82, 0X24, 0X8A }, /* -42.0db */
+ { 0X00, 0X89, 0XDA, 0XAB }, /* -41.5db */
+ { 0X00, 0X92, 0X05, 0XC6 }, /* -41.0db */
+ { 0X00, 0X9A, 0XAC, 0XC8 }, /* -40.5db */
+ { 0X00, 0XA3, 0XD7, 0X0A }, /* -40.0db */
+ { 0X00, 0XAD, 0X8C, 0X52 }, /* -39.5db */
+ { 0X00, 0XB7, 0XD4, 0XDD }, /* -39.0db */
+ { 0X00, 0XC2, 0XB9, 0X65 }, /* -38.5db */
+ { 0X00, 0XCE, 0X43, 0X28 }, /* -38.0db */
+ { 0X00, 0XDA, 0X7B, 0XF1 }, /* -37.5db */
+ { 0X00, 0XE7, 0X6E, 0X1E }, /* -37.0db */
+ { 0X00, 0XF5, 0X24, 0XAC }, /* -36.5db */
+ { 0X01, 0X03, 0XAB, 0X3D }, /* -36.0db */
+ { 0X01, 0X13, 0X0E, 0X24 }, /* -35.5db */
+ { 0X01, 0X23, 0X5A, 0X71 }, /* -35.0db */
+ { 0X01, 0X34, 0X9D, 0XF8 }, /* -34.5db */
+ { 0X01, 0X46, 0XE7, 0X5D }, /* -34.0db */
+ { 0X01, 0X5A, 0X46, 0X27 }, /* -33.5db */
+ { 0X01, 0X6E, 0XCA, 0XC5 }, /* -33.0db */
+ { 0X01, 0X84, 0X86, 0X9F }, /* -32.5db */
+ { 0X01, 0X9B, 0X8C, 0X27 }, /* -32.0db */
+ { 0X01, 0XB3, 0XEE, 0XE5 }, /* -31.5db */
+ { 0X01, 0XCD, 0XC3, 0X8C }, /* -31.0db */
+ { 0X01, 0XE9, 0X20, 0X05 }, /* -30.5db */
+ { 0X02, 0X06, 0X1B, 0X89 }, /* -30.0db */
+ { 0X02, 0X24, 0XCE, 0XB0 }, /* -29.5db */
+ { 0X02, 0X45, 0X53, 0X85 }, /* -29.0db */
+ { 0X02, 0X67, 0XC5, 0XA2 }, /* -28.5db */
+ { 0X02, 0X8C, 0X42, 0X3F }, /* -28.0db */
+ { 0X02, 0XB2, 0XE8, 0X55 }, /* -27.5db */
+ { 0X02, 0XDB, 0XD8, 0XAD }, /* -27.0db */
+ { 0X03, 0X07, 0X36, 0X05 }, /* -26.5db */
+ { 0X03, 0X35, 0X25, 0X29 }, /* -26.0db */
+ { 0X03, 0X65, 0XCD, 0X13 }, /* -25.5db */
+ { 0X03, 0X99, 0X57, 0X0C }, /* -25.0db */
+ { 0X03, 0XCF, 0XEE, 0XCF }, /* -24.5db */
+ { 0X04, 0X09, 0XC2, 0XB0 }, /* -24.0db */
+ { 0X04, 0X47, 0X03, 0XC1 }, /* -23.5db */
+ { 0X04, 0X87, 0XE5, 0XFB }, /* -23.0db */
+ { 0X04, 0XCC, 0XA0, 0X6D }, /* -22.5db */
+ { 0X05, 0X15, 0X6D, 0X68 }, /* -22.0db */
+ { 0X05, 0X62, 0X8A, 0XB3 }, /* -21.5db */
+ { 0X05, 0XB4, 0X39, 0XBC }, /* -21.0db */
+ { 0X06, 0X0A, 0XBF, 0XD4 }, /* -20.5db */
+ { 0X06, 0X66, 0X66, 0X66 }, /* -20.0db */
+ { 0X06, 0XC7, 0X7B, 0X36 }, /* -19.5db */
+ { 0X07, 0X2E, 0X50, 0XA6 }, /* -19.0db */
+ { 0X07, 0X9B, 0X3D, 0XF6 }, /* -18.5db */
+ { 0X08, 0X0E, 0X9F, 0X96 }, /* -18.0db */
+ { 0X08, 0X88, 0XD7, 0X6D }, /* -17.5db */
+ { 0X09, 0X0A, 0X4D, 0X2F }, /* -17.0db */
+ { 0X09, 0X93, 0X6E, 0XB8 }, /* -16.5db */
+ { 0X0A, 0X24, 0XB0, 0X62 }, /* -16.0db */
+ { 0X0A, 0XBE, 0X8D, 0X70 }, /* -15.5db */
+ { 0X0B, 0X61, 0X88, 0X71 }, /* -15.0db */
+ { 0X0C, 0X0E, 0X2B, 0XB0 }, /* -14.5db */
+ { 0X0C, 0XC5, 0X09, 0XAB }, /* -14.0db */
+ { 0X0D, 0X86, 0XBD, 0X8D }, /* -13.5db */
+ { 0X0E, 0X53, 0XEB, 0XB3 }, /* -13.0db */
+ { 0X0F, 0X2D, 0X42, 0X38 }, /* -12.5db */
+ { 0X10, 0X13, 0X79, 0X87 }, /* -12.0db */
+ { 0X11, 0X07, 0X54, 0XF9 }, /* -11.5db */
+ { 0X12, 0X09, 0XA3, 0X7A }, /* -11.0db */
+ { 0X13, 0X1B, 0X40, 0X39 }, /* -10.5db */
+ { 0X14, 0X3D, 0X13, 0X62 }, /* -10.0db */
+ { 0X15, 0X70, 0X12, 0XE1 }, /* -9.5db */
+ { 0X16, 0XB5, 0X43, 0X37 }, /* -9.0db */
+ { 0X18, 0X0D, 0XB8, 0X54 }, /* -8.5db */
+ { 0X19, 0X7A, 0X96, 0X7F }, /* -8.0db */
+ { 0X1A, 0XFD, 0X13, 0X54 }, /* -7.5db */
+ { 0X1C, 0X96, 0X76, 0XC6 }, /* -7.0db */
+ { 0X1E, 0X48, 0X1C, 0X37 }, /* -6.5db */
+ { 0X20, 0X13, 0X73, 0X9E }, /* -6.0db */
+ { 0X21, 0XFA, 0X02, 0XBF }, /* -5.5db */
+ { 0X23, 0XFD, 0X66, 0X78 }, /* -5.0db */
+ { 0X26, 0X1F, 0X54, 0X1C }, /* -4.5db */
+ { 0X28, 0X61, 0X9A, 0XE9 }, /* -4.0db */
+ { 0X2A, 0XC6, 0X25, 0X91 }, /* -3.5db */
+ { 0X2D, 0X4E, 0XFB, 0XD5 }, /* -3.0db */
+ { 0X2F, 0XFE, 0X44, 0X48 }, /* -2.5db */
+ { 0X32, 0XD6, 0X46, 0X17 }, /* -2.0db */
+ { 0X35, 0XD9, 0X6B, 0X02 }, /* -1.5db */
+ { 0X39, 0X0A, 0X41, 0X5F }, /* -1.0db */
+ { 0X3C, 0X6B, 0X7E, 0X4F }, /* -0.5db */
+ { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
+ { 0X43, 0XCA, 0XD0, 0X22 }, /* 0.5db */
+ { 0X47, 0XCF, 0X26, 0X7D }, /* 1.0db */
+ { 0X4C, 0X10, 0X6B, 0XA5 }, /* 1.5db */
+ { 0X50, 0X92, 0X3B, 0XE3 }, /* 2.0db */
+ { 0X55, 0X58, 0X6A, 0X46 }, /* 2.5db */
+ { 0X5A, 0X67, 0X03, 0XDF }, /* 3.0db */
+ { 0X5F, 0XC2, 0X53, 0X32 }, /* 3.5db */
+ { 0X65, 0X6E, 0XE3, 0XDB }, /* 4.0db */
+ { 0X6B, 0X71, 0X86, 0X68 }, /* 4.5db */
+ { 0X71, 0XCF, 0X54, 0X71 }, /* 5.0db */
+ { 0X78, 0X8D, 0XB4, 0XE9 }, /* 5.5db */
+ { 0X7F, 0XFF, 0XFF, 0XFF }, /* 6.0db */
+};
+#endif
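As the table's header comment says, each tas2563_dvc_table entry is pow(10, db / 20) * pow(2, 30), stored as four big-endian bytes. A stand-alone userspace check (illustration only, not part of the patch) reproduces a row:

/* build: cc dvc_check.c -o dvc_check -lm */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double db = -20.0;	/* pick any step from the table */
	uint32_t v = (uint32_t)lround(pow(10.0, db / 20.0) * (1 << 30));

	/* prints 0x06666666, i.e. the { 0X06, 0X66, 0X66, 0X66 } row */
	printf("0x%08X\n", v);
	return 0;
}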
diff --git a/include/sound/tas2770-tlv.h b/include/sound/tas2770-tlv.h
new file mode 100644
index 000000000000..c7380925417a
--- /dev/null
+++ b/include/sound/tas2770-tlv.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2770 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2770 HDA driver supports one, two, or even multiple
+// TAS2770 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS2770_TLV_H__
+#define __TAS2770_TLV_H__
+
+#define TAS2770_DVC_LEVEL TASDEVICE_REG(0x0, 0x0, 0x05)
+#define TAS2770_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2770_dvc_tlv, -10000, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2770_amp_tlv, 1100, 50, 0);
+
+#endif
diff --git a/include/sound/tas2781-comlib-i2c.h b/include/sound/tas2781-comlib-i2c.h
new file mode 100644
index 000000000000..a1afa5c444ba
--- /dev/null
+++ b/include/sound/tas2781-comlib-i2c.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563/TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2563/TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2781_COMLIB_I2C_H__
+#define __TAS2781_COMLIB_I2C_H__
+
+void tasdevice_reset(struct tasdevice_priv *tas_dev);
+int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context));
+struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
+int tasdevice_init(struct tasdevice_priv *tas_priv);
+int tasdev_chn_switch(struct tasdevice_priv *tas_priv,
+ unsigned short chn);
+int tasdevice_dev_update_bits(
+ struct tasdevice_priv *tasdevice, unsigned short chn,
+ unsigned int reg, unsigned int mask, unsigned int value);
+int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+#endif /* __TAS2781_COMLIB_I2C_H__ */
diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
index 7fba7ea26a4b..dd6ee45ad096 100644
--- a/include/sound/tas2781-dsp.h
+++ b/include/sound/tas2781-dsp.h
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2025 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2781 driver implements a flexible and configurable
@@ -30,8 +30,11 @@
#define PRE_DEVICE_C 0x12
#define PRE_DEVICE_D 0x16
-#define PPC3_VERSION 0x4100
-#define PPC3_VERSION_TAS2781 0x14600
+#define PPC3_VERSION_BASE 0x4100
+#define PPC3_VERSION_TAS2781_BASIC_MIN 0x14600
+#define PPC3_VERSION_TAS2781_ALPHA_MIN 0x4a00
+#define PPC3_VERSION_TAS2781_BETA_MIN 0x19400
+#define PPC3_VERSION_TAS5825_BASE 0x114200
#define TASDEVICE_DEVICE_SUM 8
#define TASDEVICE_CONFIG_SUM 64
@@ -51,6 +54,8 @@ enum tasdevice_dsp_dev_idx {
TASDEVICE_DSP_TAS_2781_DUAL_MONO,
TASDEVICE_DSP_TAS_2781_21,
TASDEVICE_DSP_TAS_2781_QUAD,
+ TASDEVICE_DSP_TAS_5825_MONO,
+ TASDEVICE_DSP_TAS_5825_DUAL,
TASDEVICE_DSP_TAS_MAX_DEVICE
};
@@ -106,6 +111,27 @@ struct tasdevice_calibration {
struct tasdevice_data dev_data;
};
+struct fct_param_address {
+ /* Thermal data for PG 1.0 device */
+ unsigned char thr[3];
+ /* Thermal data for PG 2.0 device */
+ unsigned char thr2[3];
+ /* Pilot tone enable flag, usually the sine wave */
+ unsigned char plt_flg[3];
+ /* Pilot tone gain for calibration */
+ unsigned char sin_gn[3];
+ /* Pilot tone gain for calibration */
+ unsigned char sin_gn2[3];
+ /* high 32-bit of real-time spk impedance */
+ unsigned char r0_reg[3];
+ /* check spk connection */
+ unsigned char tf_reg[3];
+ /* check spk resonant frequency */
+ unsigned char a1_reg[3];
+ /* check spk resonant frequency */
+ unsigned char a2_reg[3];
+};
+
struct tasdevice_fw {
struct tasdevice_dspfw_hdr fw_hdr;
unsigned short nr_programs;
@@ -114,13 +140,21 @@ struct tasdevice_fw {
struct tasdevice_config *configs;
unsigned short nr_calibrations;
struct tasdevice_calibration *calibrations;
+ struct fct_param_address fct_par_addr;
struct device *dev;
};
-enum tasdevice_dsp_fw_state {
- TASDEVICE_DSP_FW_NONE = 0,
+enum tasdevice_fw_state {
+	/* Driver in startup mode; no firmware has been loaded yet. */
TASDEVICE_DSP_FW_PENDING,
+	/* DSP firmware is present in the system, but failed to parse. */
TASDEVICE_DSP_FW_FAIL,
+ /*
+	 * Only the RCA (Reconfigurable Architecture) firmware loaded
+	 * successfully.
+ */
+ TASDEVICE_RCA_FW_OK,
+	/* Both RCA and DSP firmware loaded successfully. */
TASDEVICE_DSP_FW_ALL_OK,
};
@@ -167,6 +201,14 @@ struct tasdevice_rca {
int ncfgs;
struct tasdevice_config_info **cfg_info;
int profile_cfg_id;
+ /*
+	 * Since version 0x105, the keyword 'init' can be present in a
+	 * profile; such a profile is used for chip initialization,
+	 * particularly to store common settings shared by the other,
+	 * non-initialization profiles.  If init_profile_id < 0, there
+	 * is no init profile inside the RCA firmware.
+ */
+ int init_profile_id;
};
void tasdevice_select_cfg_blk(void *context, int conf_no,
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
index 1dc59005d241..273224df9282 100644
--- a/include/sound/tas2781-tlv.h
+++ b/include/sound/tas2781-tlv.h
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2025 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2781 driver implements a flexible and configurable
@@ -15,7 +15,7 @@
#ifndef __TAS2781_TLV_H__
#define __TAS2781_TLV_H__
-static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
-static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2781_dvc_tlv, -10000, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2781_amp_tlv, 1100, 50, 0);
#endif
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 99ca3e401fd1..9d3c54cb8223 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2025 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2563/TAS2781 driver implements a flexible and configurable
@@ -11,11 +11,16 @@
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
// Author: Kevin Lu <kevin-lu@ti.com>
+// Author: Baojun Xu <baojun.xu@ti.com>
//
#ifndef __TAS2781_H__
#define __TAS2781_H__
+#ifdef CONFIG_SND_SOC_TAS2781_ACOUST_I2C
+#include <linux/debugfs.h>
+#endif
+
#include "tas2781-dsp.h"
/* version number */
@@ -31,7 +36,9 @@
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
-/*PAGE Control Register (available in page0 of each book) */
+#define TASDEVICE_CRC8_POLYNOMIAL 0x4d
+
+/* PAGE Control Register (available in page0 of each book) */
#define TASDEVICE_PAGE_SELECT 0x00
#define TASDEVICE_BOOKCTL_PAGE 0x00
#define TASDEVICE_BOOKCTL_REG 127
@@ -42,34 +49,107 @@
#define TASDEVICE_REG(book, page, reg) (((book * 256 * 128) + \
(page * 128)) + reg)
-/*Software Reset */
-#define TAS2781_REG_SWRESET TASDEVICE_REG(0x0, 0X0, 0x01)
-#define TAS2781_REG_SWRESET_RESET BIT(0)
+/* Software Reset, compatible with the new device (TAS5825). */
+#define TASDEVICE_REG_SWRESET TASDEVICE_REG(0x0, 0x0, 0x01)
+#define TASDEVICE_REG_SWRESET_RESET BIT(0)
+
+#define TAS5825_REG_SWRESET_RESET (BIT(0) | BIT(4))
+
+/* Checksum */
+#define TASDEVICE_CHECKSUM_REG TASDEVICE_REG(0x0, 0x0, 0x7e)
-/*I2C Checksum */
-#define TASDEVICE_I2CChecksum TASDEVICE_REG(0x0, 0x0, 0x7E)
+/* XM_340 */
+#define TASDEVICE_XM_A1_REG TASDEVICE_REG(0x64, 0x63, 0x3c)
+/* XM_341 */
+#define TASDEVICE_XM_A2_REG TASDEVICE_REG(0x64, 0x63, 0x38)
/* Volume control */
-#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1A)
+#define TAS2563_DVC_LVL TASDEVICE_REG(0x00, 0x02, 0x0c)
+#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1a)
#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
-#define TASDEVICE_CMD_SING_W 0x1
-#define TASDEVICE_CMD_BURST 0x2
-#define TASDEVICE_CMD_DELAY 0x3
-#define TASDEVICE_CMD_FIELD_W 0x4
+#define TAS2563_IDLE TASDEVICE_REG(0x00, 0x00, 0x3e)
+#define TAS2563_PRM_R0_REG TASDEVICE_REG(0x00, 0x0f, 0x34)
+
+#define TAS2563_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x02, 0x70)
+#define TAS2563_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x02, 0x48)
+
+#define TAS2563_PRM_ENFF_REG TASDEVICE_REG(0x00, 0x0d, 0x54)
+#define TAS2563_PRM_DISTCK_REG TASDEVICE_REG(0x00, 0x0d, 0x58)
+#define TAS2563_PRM_TE_SCTHR_REG TASDEVICE_REG(0x00, 0x0f, 0x60)
+#define TAS2563_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x0d, 0x74)
+#define TAS2563_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x0d, 0x7c)
+/* prm_Int_B0 */
+#define TAS2563_TE_TA1_REG TASDEVICE_REG(0x00, 0x10, 0x0c)
+/* prm_Int_A1 */
+#define TAS2563_TE_TA1_AT_REG TASDEVICE_REG(0x00, 0x10, 0x10)
+/* prm_TE_Beta */
+#define TAS2563_TE_TA2_REG TASDEVICE_REG(0x00, 0x0f, 0x64)
+/* prm_TE_Beta1 */
+#define TAS2563_TE_AT_REG TASDEVICE_REG(0x00, 0x0f, 0x68)
+/* prm_TE_1_Beta1 */
+#define TAS2563_TE_DT_REG TASDEVICE_REG(0x00, 0x0f, 0x70)
+
+#define TAS2781_PRM_INT_MASK_REG TASDEVICE_REG(0x00, 0x00, 0x3b)
+#define TAS2781_PRM_CLK_CFG_REG TASDEVICE_REG(0x00, 0x00, 0x5c)
+#define TAS2781_PRM_RSVD_REG TASDEVICE_REG(0x00, 0x01, 0x19)
+#define TAS2781_PRM_TEST_57_REG TASDEVICE_REG(0x00, 0xfd, 0x39)
+#define TAS2781_PRM_TEST_62_REG TASDEVICE_REG(0x00, 0xfd, 0x3e)
+#define TAS2781_PRM_PVDD_UVLO_REG TASDEVICE_REG(0x00, 0x00, 0x71)
+#define TAS2781_PRM_CHNL_0_REG TASDEVICE_REG(0x00, 0x00, 0x03)
+#define TAS2781_PRM_NG_CFG0_REG TASDEVICE_REG(0x00, 0x00, 0x35)
+#define TAS2781_PRM_IDLE_CH_DET_REG TASDEVICE_REG(0x00, 0x00, 0x66)
+#define TAS2781_PRM_PLT_FLAG_REG TASDEVICE_REG(0x00, 0x14, 0x38)
+#define TAS2781_PRM_SINEGAIN_REG TASDEVICE_REG(0x00, 0x14, 0x40)
+#define TAS2781_PRM_SINEGAIN2_REG TASDEVICE_REG(0x00, 0x14, 0x44)
+
+#define TAS2781_TEST_UNLOCK_REG TASDEVICE_REG(0x00, 0xfd, 0x0d)
+#define TAS2781_TEST_PAGE_UNLOCK 0x0d
+
+#define TAS2781_RUNTIME_LATCH_RE_REG TASDEVICE_REG(0x00, 0x00, 0x49)
+#define TAS2781_RUNTIME_RE_REG_TF TASDEVICE_REG(0x64, 0x62, 0x48)
+#define TAS2781_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x63, 0x44)
enum audio_device {
+ TAS2020,
+ TAS2118,
+ TAS2120,
+ TAS2320,
TAS2563,
+ TAS2568,
+ TAS2570,
+ TAS2572,
+ TAS2574,
TAS2781,
+ TAS5802,
+ TAS5806M,
+ TAS5806MD,
+ TAS5815,
+ TAS5822,
+ TAS5825,
+ TAS5827,
+ TAS5828,
+ TAS5830,
+ TAS_OTHERS,
+};
+
+enum dspbin_type {
+ TASDEV_BASIC,
+ TASDEV_ALPHA,
+ TASDEV_BETA,
};
-enum device_catlog_id {
- LENOVO = 0,
- OTHERS
+struct bulk_reg_val {
+ int reg;
+ unsigned char val[4];
+ unsigned char val_len;
+ bool is_locked;
};
struct tasdevice {
+ struct bulk_reg_val *cali_data_backup;
+ struct bulk_reg_val alp_cali_bckp;
struct tasdevice_fw *cali_data_fmw;
unsigned int dev_addr;
unsigned int err_code;
@@ -80,39 +160,69 @@ struct tasdevice {
bool is_loaderr;
};
-struct tasdevice_irqinfo {
- int irq_gpio;
- int irq;
+struct cali_reg {
+ unsigned int r0_reg;
+ unsigned int r0_low_reg;
+ unsigned int invr0_reg;
+ unsigned int pow_reg;
+ unsigned int tlimit_reg;
};
struct calidata {
unsigned char *data;
unsigned long total_sz;
+ struct cali_reg cali_reg_array;
+ unsigned int cali_dat_sz_per_dev;
+};
+
+/*
+ * Enabling CONFIG_SND_SOC_TAS2781_ACOUST_I2C creates a bridge to the
+ * acoustic tuning tool, which tunes the chips' acoustic effect. Because
+ * it exposes the registers directly, it carries some potential risk, so
+ * the option is kept invisible in Kconfig; anyone who wants to use the
+ * acoustic tool has to edit the source manually.
+ */
+#ifdef CONFIG_SND_SOC_TAS2781_ACOUST_I2C
+#define TASDEV_DATA_PAYLOAD_SIZE 128
+struct acoustic_data {
+ unsigned char len;
+ unsigned char id;
+ unsigned char addr;
+ unsigned char book;
+ unsigned char page;
+ unsigned char reg;
+ unsigned char data[TASDEV_DATA_PAYLOAD_SIZE];
};
+#endif
struct tasdevice_priv {
struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
- struct tasdevice_irqinfo irq_info;
struct tasdevice_rca rcabin;
struct calidata cali_data;
+#ifdef CONFIG_SND_SOC_TAS2781_ACOUST_I2C
+ struct acoustic_data acou_data;
+#endif
struct tasdevice_fw *fmw;
struct gpio_desc *reset;
struct mutex codec_lock;
struct regmap *regmap;
struct device *dev;
- struct tm tm;
- enum device_catlog_id catlog_id;
unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64];
unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
unsigned char coef_binaryname[64];
unsigned char rca_binaryname[64];
unsigned char dev_name[32];
+ const unsigned char (*dvc_tlv_table)[4];
+ const char *name_prefix;
unsigned char ndev;
+ unsigned int dspbin_typ;
unsigned int magic_num;
unsigned int chip_id;
unsigned int sysclk;
+ int speaker_id;
+ int irq;
int cur_prog;
int cur_conf;
int fw_state;
@@ -122,6 +232,8 @@ struct tasdevice_priv {
bool force_fwload_status;
bool playback_started;
bool isacpi;
+ bool isspi;
+ bool is_user_space_calidata;
unsigned int global_addr;
int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
@@ -132,42 +244,33 @@ struct tasdevice_priv {
int (*fw_parse_configuration_data)(struct tasdevice_priv *tas_priv,
struct tasdevice_fw *tas_fmw,
const struct firmware *fmw, int offset);
+ int (*fw_parse_fct_param_address)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
struct tasdev_blk *block);
- int (*save_calibration)(struct tasdevice_priv *tas_priv);
- void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+ int (*change_chn_book)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, int book);
+ int (*update_bits)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int mask,
+ unsigned int value);
+ int (*dev_read)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int *value);
+ int (*dev_bulk_read)(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
};
-void tas2781_reset(struct tasdevice_priv *tas_dev);
-int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
- struct module *module,
- void (*cont)(const struct firmware *fw, void *context));
-struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
-int tasdevice_init(struct tasdevice_priv *tas_priv);
-void tasdevice_remove(struct tasdevice_priv *tas_priv);
-int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
-void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int *value);
+int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int value);
int tasdevice_dev_bulk_write(
struct tasdevice_priv *tas_priv, unsigned short chn,
unsigned int reg, unsigned char *p_data, unsigned int n_length);
-int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
- unsigned short chn, unsigned int reg, unsigned char *p_data,
- unsigned int n_length);
-int tasdevice_dev_update_bits(
- struct tasdevice_priv *tasdevice, unsigned short chn,
- unsigned int reg, unsigned int mask, unsigned int value);
-int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
- struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
-
+void tasdevice_remove(struct tasdevice_priv *tas_priv);
#endif /* __TAS2781_H__ */
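All of the register defines in this header go through TASDEVICE_REG(), which flattens a book/page/register triple into one index: 128 registers per page and 256 pages per book. A small stand-alone illustration (the macro is copied from the header; the program around it is not part of the patch):

#include <stdio.h>

#define TASDEVICE_REG(book, page, reg)	(((book * 256 * 128) + \
					(page * 128)) + reg)

int main(void)
{
	/* book 0, page 0, reg 0x1a: index 0x1a (TAS2781_DVC_LVL) */
	printf("0x%x\n", TASDEVICE_REG(0x0, 0x0, 0x1a));

	/* book 0x64, page 0x63, reg 0x44 (TAS2781_RUNTIME_RE_REG) */
	printf("0x%x\n", TASDEVICE_REG(0x64, 0x63, 0x44));
	return 0;
}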
diff --git a/include/sound/tas2x20-tlv.h b/include/sound/tas2x20-tlv.h
new file mode 100644
index 000000000000..6e6bcec4a0a1
--- /dev/null
+++ b/include/sound/tas2x20-tlv.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2x20/TAS2118 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2x20/TAS2118 HDA driver supports one, two, or even multiple
+// TAS2x20/TAS2118 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS2X20_TLV_H__
+#define __TAS2X20_TLV_H__
+
+#define TAS2X20_DVC_LEVEL TASDEVICE_REG(0x0, 0x2, 0x0c)
+#define TAS2X20_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x07)
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2x20_dvc_tlv, 1650, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2x20_amp_tlv, 2100, 50, 0);
+
+/* pow(10, db/20) * pow(2,22) */
+static const __maybe_unused unsigned char tas2x20_dvc_table[][4] = {
+ { 0X00, 0X00, 0X0D, 0X00 }, /* -110.0db */
+ { 0X00, 0X00, 0X0E, 0X00 }, /* -109.5db */
+ { 0X00, 0X00, 0X0E, 0X00 }, /* -109.0db */
+ { 0X00, 0X00, 0X0F, 0X00 }, /* -108.5db */
+ { 0X00, 0X00, 0X10, 0X00 }, /* -108.0db */
+ { 0X00, 0X00, 0X11, 0X00 }, /* -107.5db */
+ { 0X00, 0X00, 0X12, 0X00 }, /* -107.0db */
+ { 0X00, 0X00, 0X13, 0X00 }, /* -106.5db */
+ { 0X00, 0X00, 0X15, 0X00 }, /* -106.0db */
+ { 0X00, 0X00, 0X16, 0X00 }, /* -105.5db */
+ { 0X00, 0X00, 0X17, 0X00 }, /* -105.0db */
+ { 0X00, 0X00, 0X18, 0X00 }, /* -104.5db */
+ { 0X00, 0X00, 0X1A, 0X00 }, /* -104.0db */
+ { 0X00, 0X00, 0X1C, 0X00 }, /* -103.5db */
+ { 0X00, 0X00, 0X1D, 0X00 }, /* -103.0db */
+ { 0X00, 0X00, 0X1F, 0X00 }, /* -102.5db */
+ { 0X00, 0X00, 0X21, 0X00 }, /* -102.0db */
+ { 0X00, 0X00, 0X23, 0X00 }, /* -101.5db */
+ { 0X00, 0X00, 0X25, 0X00 }, /* -101.0db */
+ { 0X00, 0X00, 0X27, 0X00 }, /* -100.5db */
+ { 0X00, 0X00, 0X29, 0X00 }, /* -100.0db */
+ { 0X00, 0X00, 0X2C, 0X00 }, /* -99.5db */
+ { 0X00, 0X00, 0X2F, 0X00 }, /* -99.0db */
+ { 0X00, 0X00, 0X31, 0X00 }, /* -98.5db */
+ { 0X00, 0X00, 0X34, 0X00 }, /* -98.0db */
+ { 0X00, 0X00, 0X37, 0X00 }, /* -97.5db */
+ { 0X00, 0X00, 0X3B, 0X00 }, /* -97.0db */
+ { 0X00, 0X00, 0X3E, 0X00 }, /* -96.5db */
+ { 0X00, 0X00, 0X42, 0X00 }, /* -96.0db */
+ { 0X00, 0X00, 0X46, 0X00 }, /* -95.5db */
+ { 0X00, 0X00, 0X4A, 0X00 }, /* -95.0db */
+ { 0X00, 0X00, 0X4F, 0X00 }, /* -94.5db */
+ { 0X00, 0X00, 0X53, 0X00 }, /* -94.0db */
+ { 0X00, 0X00, 0X58, 0X00 }, /* -93.5db */
+ { 0X00, 0X00, 0X5D, 0X00 }, /* -93.0db */
+ { 0X00, 0X00, 0X63, 0X00 }, /* -92.5db */
+ { 0X00, 0X00, 0X69, 0X00 }, /* -92.0db */
+ { 0X00, 0X00, 0X6F, 0X00 }, /* -91.5db */
+ { 0X00, 0X00, 0X76, 0X00 }, /* -91.0db */
+ { 0X00, 0X00, 0X7D, 0X00 }, /* -90.5db */
+ { 0X00, 0X00, 0X84, 0X00 }, /* -90.0db */
+ { 0X00, 0X00, 0X8C, 0X00 }, /* -89.5db */
+ { 0X00, 0X00, 0X94, 0X00 }, /* -89.0db */
+ { 0X00, 0X00, 0X9D, 0X00 }, /* -88.5db */
+ { 0X00, 0X00, 0XA6, 0X00 }, /* -88.0db */
+ { 0X00, 0X00, 0XB0, 0X00 }, /* -87.5db */
+ { 0X00, 0X00, 0XBB, 0X00 }, /* -87.0db */
+ { 0X00, 0X00, 0XC6, 0X00 }, /* -86.5db */
+ { 0X00, 0X00, 0XD2, 0X00 }, /* -86.0db */
+ { 0X00, 0X00, 0XDE, 0X00 }, /* -85.5db */
+ { 0X00, 0X00, 0XEB, 0X00 }, /* -85.0db */
+ { 0X00, 0X00, 0XF9, 0X00 }, /* -84.5db */
+ { 0X00, 0X01, 0X08, 0X00 }, /* -84.0db */
+ { 0X00, 0X01, 0X18, 0X00 }, /* -83.5db */
+ { 0X00, 0X01, 0X28, 0X00 }, /* -83.0db */
+ { 0X00, 0X01, 0X3A, 0X00 }, /* -82.5db */
+ { 0X00, 0X01, 0X4D, 0X00 }, /* -82.0db */
+ { 0X00, 0X01, 0X60, 0X00 }, /* -81.5db */
+ { 0X00, 0X01, 0X75, 0X00 }, /* -81.0db */
+ { 0X00, 0X01, 0X8B, 0X00 }, /* -80.5db */
+ { 0X00, 0X01, 0XA3, 0X00 }, /* -80.0db */
+ { 0X00, 0X01, 0XBC, 0X00 }, /* -79.5db */
+ { 0X00, 0X01, 0XD6, 0X00 }, /* -79.0db */
+ { 0X00, 0X01, 0XF2, 0X00 }, /* -78.5db */
+ { 0X00, 0X02, 0X10, 0X00 }, /* -78.0db */
+ { 0X00, 0X02, 0X2F, 0X00 }, /* -77.5db */
+ { 0X00, 0X02, 0X50, 0X00 }, /* -77.0db */
+ { 0X00, 0X02, 0X73, 0X00 }, /* -76.5db */
+ { 0X00, 0X02, 0X98, 0X00 }, /* -76.0db */
+ { 0X00, 0X02, 0XC0, 0X00 }, /* -75.5db */
+ { 0X00, 0X02, 0XE9, 0X00 }, /* -75.0db */
+ { 0X00, 0X03, 0X16, 0X00 }, /* -74.5db */
+ { 0X00, 0X03, 0X44, 0X00 }, /* -74.0db */
+ { 0X00, 0X03, 0X76, 0X00 }, /* -73.5db */
+ { 0X00, 0X03, 0XAA, 0X00 }, /* -73.0db */
+ { 0X00, 0X03, 0XE2, 0X00 }, /* -72.5db */
+ { 0X00, 0X04, 0X1D, 0X00 }, /* -72.0db */
+ { 0X00, 0X04, 0X5B, 0X00 }, /* -71.5db */
+ { 0X00, 0X04, 0X9E, 0X00 }, /* -71.0db */
+ { 0X00, 0X04, 0XE4, 0X00 }, /* -70.5db */
+ { 0X00, 0X05, 0X2E, 0X00 }, /* -70.0db */
+ { 0X00, 0X05, 0X7C, 0X00 }, /* -69.5db */
+ { 0X00, 0X05, 0XD0, 0X00 }, /* -69.0db */
+ { 0X00, 0X06, 0X28, 0X00 }, /* -68.5db */
+ { 0X00, 0X06, 0X85, 0X00 }, /* -68.0db */
+ { 0X00, 0X06, 0XE8, 0X00 }, /* -67.5db */
+ { 0X00, 0X07, 0X51, 0X00 }, /* -67.0db */
+ { 0X00, 0X07, 0XC0, 0X00 }, /* -66.5db */
+ { 0X00, 0X08, 0X36, 0X00 }, /* -66.0db */
+ { 0X00, 0X08, 0XB2, 0X00 }, /* -65.5db */
+ { 0X00, 0X09, 0X36, 0X00 }, /* -65.0db */
+ { 0X00, 0X09, 0XC2, 0X00 }, /* -64.5db */
+ { 0X00, 0X0A, 0X56, 0X00 }, /* -64.0db */
+ { 0X00, 0X0A, 0XF3, 0X00 }, /* -63.5db */
+ { 0X00, 0X0B, 0X99, 0X00 }, /* -63.0db */
+ { 0X00, 0X0C, 0X49, 0X00 }, /* -62.5db */
+ { 0X00, 0X0D, 0X03, 0X00 }, /* -62.0db */
+ { 0X00, 0X0D, 0XC9, 0X00 }, /* -61.5db */
+ { 0X00, 0X0E, 0X9A, 0X00 }, /* -61.0db */
+ { 0X00, 0X0F, 0X77, 0X00 }, /* -60.5db */
+ { 0X00, 0X10, 0X62, 0X00 }, /* -60.0db */
+ { 0X00, 0X11, 0X5A, 0X00 }, /* -59.5db */
+ { 0X00, 0X12, 0X62, 0X00 }, /* -59.0db */
+ { 0X00, 0X13, 0X78, 0X00 }, /* -58.5db */
+ { 0X00, 0X14, 0XA0, 0X00 }, /* -58.0db */
+ { 0X00, 0X15, 0XD9, 0X00 }, /* -57.5db */
+ { 0X00, 0X17, 0X24, 0X00 }, /* -57.0db */
+ { 0X00, 0X18, 0X83, 0X00 }, /* -56.5db */
+ { 0X00, 0X19, 0XF7, 0X00 }, /* -56.0db */
+ { 0X00, 0X1B, 0X81, 0X00 }, /* -55.5db */
+ { 0X00, 0X1D, 0X22, 0X00 }, /* -55.0db */
+ { 0X00, 0X1E, 0XDC, 0X00 }, /* -54.5db */
+ { 0X00, 0X20, 0XB0, 0X00 }, /* -54.0db */
+ { 0X00, 0X22, 0XA0, 0X00 }, /* -53.5db */
+ { 0X00, 0X24, 0XAD, 0X00 }, /* -53.0db */
+ { 0X00, 0X26, 0XDA, 0X00 }, /* -52.5db */
+ { 0X00, 0X29, 0X27, 0X00 }, /* -52.0db */
+ { 0X00, 0X2B, 0X97, 0X00 }, /* -51.5db */
+ { 0X00, 0X2E, 0X2D, 0X00 }, /* -51.0db */
+ { 0X00, 0X30, 0XE9, 0X00 }, /* -50.5db */
+ { 0X00, 0X33, 0XCF, 0X00 }, /* -50.0db */
+ { 0X00, 0X36, 0XE1, 0X00 }, /* -49.5db */
+ { 0X00, 0X3A, 0X21, 0X00 }, /* -49.0db */
+ { 0X00, 0X3D, 0X93, 0X00 }, /* -48.5db */
+ { 0X00, 0X41, 0X39, 0X00 }, /* -48.0db */
+ { 0X00, 0X45, 0X17, 0X00 }, /* -47.5db */
+ { 0X00, 0X49, 0X2F, 0X00 }, /* -47.0db */
+ { 0X00, 0X4D, 0X85, 0X00 }, /* -46.5db */
+ { 0X00, 0X52, 0X1D, 0X00 }, /* -46.0db */
+ { 0X00, 0X56, 0XFA, 0X00 }, /* -45.5db */
+ { 0X00, 0X5C, 0X22, 0X00 }, /* -45.0db */
+ { 0X00, 0X61, 0X97, 0X00 }, /* -44.5db */
+ { 0X00, 0X67, 0X60, 0X00 }, /* -44.0db */
+ { 0X00, 0X6D, 0X80, 0X00 }, /* -43.5db */
+ { 0X00, 0X73, 0XFD, 0X00 }, /* -43.0db */
+ { 0X00, 0X7A, 0XDC, 0X00 }, /* -42.5db */
+ { 0X00, 0X82, 0X24, 0X00 }, /* -42.0db */
+ { 0X00, 0X89, 0XDA, 0X00 }, /* -41.5db */
+ { 0X00, 0X92, 0X05, 0X00 }, /* -41.0db */
+ { 0X00, 0X9A, 0XAC, 0X00 }, /* -40.5db */
+ { 0X00, 0XA3, 0XD7, 0X00 }, /* -40.0db */
+ { 0X00, 0XAD, 0X8C, 0X00 }, /* -39.5db */
+ { 0X00, 0XB7, 0XD4, 0X00 }, /* -39.0db */
+ { 0X00, 0XC2, 0XB9, 0X00 }, /* -38.5db */
+ { 0X00, 0XCE, 0X43, 0X00 }, /* -38.0db */
+ { 0X00, 0XDA, 0X7B, 0X00 }, /* -37.5db */
+ { 0X00, 0XE7, 0X6E, 0X00 }, /* -37.0db */
+ { 0X00, 0XF5, 0X24, 0X00 }, /* -36.5db */
+ { 0X01, 0X03, 0XAB, 0X00 }, /* -36.0db */
+ { 0X01, 0X13, 0X0E, 0X00 }, /* -35.5db */
+ { 0X01, 0X23, 0X5A, 0X00 }, /* -35.0db */
+ { 0X01, 0X34, 0X9D, 0X00 }, /* -34.5db */
+ { 0X01, 0X46, 0XE7, 0X00 }, /* -34.0db */
+ { 0X01, 0X5A, 0X46, 0X00 }, /* -33.5db */
+ { 0X01, 0X6E, 0XCA, 0X00 }, /* -33.0db */
+ { 0X01, 0X84, 0X86, 0X00 }, /* -32.5db */
+ { 0X01, 0X9B, 0X8C, 0X00 }, /* -32.0db */
+ { 0X01, 0XB3, 0XEE, 0X00 }, /* -31.5db */
+ { 0X01, 0XCD, 0XC3, 0X00 }, /* -31.0db */
+ { 0X01, 0XE9, 0X20, 0X00 }, /* -30.5db */
+ { 0X02, 0X06, 0X1B, 0X00 }, /* -30.0db */
+ { 0X02, 0X24, 0XCE, 0X00 }, /* -29.5db */
+ { 0X02, 0X45, 0X53, 0X00 }, /* -29.0db */
+ { 0X02, 0X67, 0XC5, 0X00 }, /* -28.5db */
+ { 0X02, 0X8C, 0X42, 0X00 }, /* -28.0db */
+ { 0X02, 0XB2, 0XE8, 0X00 }, /* -27.5db */
+ { 0X02, 0XDB, 0XD8, 0X00 }, /* -27.0db */
+ { 0X03, 0X07, 0X36, 0X00 }, /* -26.5db */
+ { 0X03, 0X35, 0X25, 0X00 }, /* -26.0db */
+ { 0X03, 0X65, 0XCD, 0X00 }, /* -25.5db */
+ { 0X03, 0X99, 0X57, 0X00 }, /* -25.0db */
+ { 0X03, 0XCF, 0XEE, 0X00 }, /* -24.5db */
+ { 0X04, 0X09, 0XC2, 0X00 }, /* -24.0db */
+ { 0X04, 0X47, 0X03, 0X00 }, /* -23.5db */
+ { 0X04, 0X87, 0XE5, 0X00 }, /* -23.0db */
+ { 0X04, 0XCC, 0XA0, 0X00 }, /* -22.5db */
+ { 0X05, 0X15, 0X6D, 0X00 }, /* -22.0db */
+ { 0X05, 0X62, 0X8A, 0X00 }, /* -21.5db */
+ { 0X05, 0XB4, 0X39, 0X00 }, /* -21.0db */
+ { 0X06, 0X0A, 0XBF, 0X00 }, /* -20.5db */
+ { 0X06, 0X66, 0X66, 0X00 }, /* -20.0db */
+ { 0X06, 0XC7, 0X7B, 0X00 }, /* -19.5db */
+ { 0X07, 0X2E, 0X50, 0X00 }, /* -19.0db */
+ { 0X07, 0X9B, 0X3D, 0X00 }, /* -18.5db */
+ { 0X08, 0X0E, 0X9F, 0X00 }, /* -18.0db */
+ { 0X08, 0X88, 0XD7, 0X00 }, /* -17.5db */
+ { 0X09, 0X0A, 0X4D, 0X00 }, /* -17.0db */
+ { 0X09, 0X93, 0X6E, 0X00 }, /* -16.5db */
+ { 0X0A, 0X24, 0XB0, 0X00 }, /* -16.0db */
+ { 0X0A, 0XBE, 0X8D, 0X00 }, /* -15.5db */
+ { 0X0B, 0X61, 0X88, 0X00 }, /* -15.0db */
+ { 0X0C, 0X0E, 0X2B, 0X00 }, /* -14.5db */
+ { 0X0C, 0XC5, 0X09, 0X00 }, /* -14.0db */
+ { 0X0D, 0X86, 0XBD, 0X00 }, /* -13.5db */
+ { 0X0E, 0X53, 0XEB, 0X00 }, /* -13.0db */
+ { 0X0F, 0X2D, 0X42, 0X00 }, /* -12.5db */
+ { 0X10, 0X13, 0X79, 0X00 }, /* -12.0db */
+ { 0X11, 0X07, 0X54, 0X00 }, /* -11.5db */
+ { 0X12, 0X09, 0XA3, 0X00 }, /* -11.0db */
+ { 0X13, 0X1B, 0X40, 0X00 }, /* -10.5db */
+ { 0X14, 0X3D, 0X13, 0X00 }, /* -10.0db */
+ { 0X15, 0X70, 0X12, 0X00 }, /* -9.5db */
+ { 0X16, 0XB5, 0X43, 0X00 }, /* -9.0db */
+ { 0X18, 0X0D, 0XB8, 0X00 }, /* -8.5db */
+ { 0X19, 0X7A, 0X96, 0X00 }, /* -8.0db */
+ { 0X1A, 0XFD, 0X13, 0X00 }, /* -7.5db */
+ { 0X1C, 0X96, 0X76, 0X00 }, /* -7.0db */
+ { 0X1E, 0X48, 0X1C, 0X00 }, /* -6.5db */
+ { 0X20, 0X13, 0X73, 0X00 }, /* -6.0db */
+ { 0X21, 0XFA, 0X02, 0X00 }, /* -5.5db */
+ { 0X23, 0XFD, 0X66, 0X00 }, /* -5.0db */
+ { 0X26, 0X1F, 0X54, 0X00 }, /* -4.5db */
+ { 0X28, 0X61, 0X9A, 0X00 }, /* -4.0db */
+ { 0X2A, 0XC6, 0X25, 0X00 }, /* -3.5db */
+ { 0X2D, 0X4E, 0XFB, 0X00 }, /* -3.0db */
+ { 0X2F, 0XFE, 0X44, 0X00 }, /* -2.5db */
+ { 0X32, 0XD6, 0X46, 0X00 }, /* -2.0db */
+ { 0X35, 0XD9, 0X6B, 0X00 }, /* -1.5db */
+ { 0X39, 0X0A, 0X41, 0X00 }, /* -1.0db */
+ { 0X3C, 0X6B, 0X7E, 0X00 }, /* -0.5db */
+ { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
+ { 0X43, 0XCA, 0XD0, 0X00 }, /* 0.5db */
+ { 0X47, 0XCF, 0X26, 0X00 }, /* 1.0db */
+ { 0X4C, 0X10, 0X6B, 0X00 }, /* 1.5db */
+ { 0X50, 0X92, 0X3B, 0X00 }, /* 2.0db */
+ { 0X55, 0X58, 0X6A, 0X00 }, /* 2.5db */
+ { 0X5A, 0X67, 0X03, 0X00 }, /* 3.0db */
+ { 0X5F, 0XC2, 0X53, 0X00 }, /* 3.5db */
+ { 0X65, 0X6E, 0XE3, 0X00 }, /* 4.0db */
+ { 0X6B, 0X71, 0X86, 0X00 }, /* 4.5db */
+ { 0X71, 0XCF, 0X54, 0X00 }, /* 5.0db */
+ { 0X78, 0X8D, 0XB4, 0X00 }, /* 5.5db */
+ { 0X7F, 0XB2, 0X61, 0X00 }, /* 6.0db */
+};
+#endif
diff --git a/include/sound/tas5825-tlv.h b/include/sound/tas5825-tlv.h
new file mode 100644
index 000000000000..95f2d3fad120
--- /dev/null
+++ b/include/sound/tas5825-tlv.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS5825 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS5825 HDA driver supports one or two TAS5825 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS5825_TLV_H__
+#define __TAS5825_TLV_H__
+
+#define TAS5825_DVC_LEVEL TASDEVICE_REG(0x0, 0x0, 0x4c)
+#define TAS5825_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x54)
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(
+ tas5825_dvc_tlv, -10300, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(
+ tas5825_amp_tlv, -1550, 50, 0);
+
+#endif
diff --git a/include/sound/tlv320aic32x4.h b/include/sound/tlv320aic32x4.h
index 0abf74d7edbd..b779d671a995 100644
--- a/include/sound/tlv320aic32x4.h
+++ b/include/sound/tlv320aic32x4.h
@@ -40,13 +40,4 @@
struct aic32x4_setup_data {
unsigned int gpio_func[5];
};
-
-struct aic32x4_pdata {
- struct aic32x4_setup_data *setup;
- u32 power_cfg;
- u32 micpga_routing;
- bool swapdacs;
- int rstn_gpio;
-};
-
#endif
diff --git a/include/sound/tlv320dac33-plat.h b/include/sound/tlv320dac33-plat.h
deleted file mode 100644
index 7a7249a896e3..000000000000
--- a/include/sound/tlv320dac33-plat.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform header for Texas Instruments TLV320DAC33 codec driver
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * Copyright: (C) 2009 Nokia Corporation
- */
-
-#ifndef __TLV320DAC33_PLAT_H
-#define __TLV320DAC33_PLAT_H
-
-struct tlv320dac33_platform_data {
- int power_gpio;
- int mode1_latency; /* latency caused by the i2c writes in us */
- int auto_fifo_config; /* FIFO config based on the period size */
- int keep_bclk; /* Keep the BCLK running in FIFO modes */
- u8 burst_bclkdiv;
-};
-
-#endif /* __TLV320DAC33_PLAT_H */
diff --git a/include/sound/tpa6130a2-plat.h b/include/sound/tpa6130a2-plat.h
deleted file mode 100644
index a60930e36e93..000000000000
--- a/include/sound/tpa6130a2-plat.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TPA6130A2 driver platform header
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- */
-
-#ifndef TPA6130A2_PLAT_H
-#define TPA6130A2_PLAT_H
-
-struct tpa6130a2_platform_data {
- int power_gpio;
-};
-
-#endif
diff --git a/include/sound/ump.h b/include/sound/ump.h
index 91238dabe307..73f97f88e2ed 100644
--- a/include/sound/ump.h
+++ b/include/sound/ump.h
@@ -13,6 +13,15 @@ struct snd_ump_ops;
struct ump_cvt_to_ump;
struct snd_seq_ump_ops;
+struct snd_ump_group {
+ int group; /* group index (0-based) */
+ unsigned int dir_bits; /* directions */
+ bool active; /* activeness */
+ bool valid; /* valid group (referred by blocks) */
+ bool is_midi1; /* belongs to a MIDI1 FB */
+ char name[64]; /* group name */
+};
+
struct snd_ump_endpoint {
struct snd_rawmidi core; /* raw UMP access */
@@ -41,6 +50,8 @@ struct snd_ump_endpoint {
struct mutex open_mutex;
+ struct snd_ump_group groups[SNDRV_UMP_MAX_GROUPS]; /* table of groups */
+
#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
spinlock_t legacy_locks[2];
struct snd_rawmidi *legacy_rmidi;
@@ -72,6 +83,7 @@ struct snd_ump_ops {
struct snd_seq_ump_ops {
void (*input_receive)(struct snd_ump_endpoint *ump,
const u32 *data, int words);
+ int (*notify_ep_change)(struct snd_ump_endpoint *ump);
int (*notify_fb_change)(struct snd_ump_endpoint *ump,
struct snd_ump_block *fb);
int (*switch_protocol)(struct snd_ump_endpoint *ump);
@@ -112,6 +124,7 @@ static inline int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
int snd_ump_receive_ump_val(struct snd_ump_endpoint *ump, u32 val);
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol);
+void snd_ump_update_group_attrs(struct snd_ump_endpoint *ump);
/*
* Some definitions for UMP
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
index 28c364c63245..682499b871ea 100644
--- a/include/sound/ump_convert.h
+++ b/include/sound/ump_convert.h
@@ -13,12 +13,13 @@ struct ump_cvt_to_ump_bank {
unsigned char cc_nrpn_msb, cc_nrpn_lsb;
unsigned char cc_data_msb, cc_data_lsb;
unsigned char cc_bank_msb, cc_bank_lsb;
+ bool cc_data_msb_set, cc_data_lsb_set;
};
/* context for converting from MIDI1 byte stream to UMP packet */
struct ump_cvt_to_ump {
/* MIDI1 intermediate buffer */
- unsigned char buf[4];
+ unsigned char buf[6]; /* up to 6 bytes for SysEx */
int len;
int cmd_bytes;
diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h
index 72f60ddfea75..9556b4755a1e 100644
--- a/include/sound/ump_msg.h
+++ b/include/sound/ump_msg.h
@@ -604,7 +604,7 @@ struct snd_ump_stream_msg_ep_info {
} __packed;
/* UMP Stream Message: Device Info Notification (128bit) */
-struct snd_ump_stream_msg_devince_info {
+struct snd_ump_stream_msg_device_info {
#ifdef __BIG_ENDIAN_BITFIELD
/* 0 */
u32 type:4;
@@ -754,7 +754,7 @@ struct snd_ump_stream_msg_fb_name {
union snd_ump_stream_msg {
struct snd_ump_stream_msg_ep_discovery ep_discovery;
struct snd_ump_stream_msg_ep_info ep_info;
- struct snd_ump_stream_msg_devince_info device_info;
+ struct snd_ump_stream_msg_device_info device_info;
struct snd_ump_stream_msg_stream_cfg stream_cfg;
struct snd_ump_stream_msg_fb_discovery fb_discovery;
struct snd_ump_stream_msg_fb_info fb_info;
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 1ddd3036bdfc..ca87fa6a8135 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -155,7 +155,6 @@ struct vx_core {
unsigned int chip_status;
unsigned int pcm_running;
- struct device *dev;
struct snd_hwdep *hwdep;
struct vx_rmh irq_rmh; /* RMH used in interrupts */
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 88ac1870510e..8b2c16b524f7 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -151,6 +151,9 @@ struct wm8904_pdata {
int num_retune_mobile_cfgs;
struct wm8904_retune_mobile_cfg *retune_mobile_cfgs;
+ bool in1l_as_dmicdat1;
+ bool in1r_as_dmicdat2;
+
u32 gpio_cfg[WM8904_GPIO_REGS];
u32 mic_cfg[WM8904_MIC_REGS];
};
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 60af7c63b34e..51ca80abacf7 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -576,9 +576,6 @@ struct iscsit_conn {
spinlock_t state_lock;
spinlock_t login_timer_lock;
spinlock_t login_worker_lock;
- /* libcrypto RX and TX contexts for crc32c */
- struct ahash_request *conn_rx_hash;
- struct ahash_request *conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
cpumask_var_t allowed_cpumask;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 739df993aa5e..e32de80854b6 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -3,7 +3,7 @@
#define TARGET_CORE_BACKEND_H
#include <linux/types.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#define TRANSPORT_FLAG_PASSTHROUGH 0x1
@@ -121,8 +121,10 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct block_device *bdev);
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev);
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 97099a5e3f6c..7016d93fa383 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -157,6 +157,8 @@ enum se_cmd_flags_table {
SCF_USE_CPUID = (1 << 16),
SCF_TASK_ATTR_SET = (1 << 17),
SCF_TREAT_READ_AS_NORMAL = (1 << 18),
+ SCF_TASK_ORDERED_SYNC = (1 << 19),
+ SCF_ATOMIC = (1 << 20),
};
/*
@@ -669,15 +671,19 @@ struct se_lun_acl {
struct se_ml_stat_grps ml_stat_grps;
};
+struct se_dev_entry_io_stats {
+ u64 total_cmds;
+ u64 read_bytes;
+ u64 write_bytes;
+};
+
struct se_dev_entry {
u64 mapped_lun;
u64 pr_res_key;
u64 creation_time;
bool lun_access_ro;
u32 attach_count;
- atomic_long_t total_cmds;
- atomic_long_t read_bytes;
- atomic_long_t write_bytes;
+ struct se_dev_entry_io_stats __percpu *stats;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
@@ -726,6 +732,11 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u32 atomic_max_len;
+ u32 atomic_alignment;
+ u32 atomic_granularity;
+ u32 atomic_max_with_boundary;
+ u32 atomic_max_boundary;
u8 submit_type;
struct se_device *da_dev;
struct config_group da_group;
@@ -739,9 +750,9 @@ struct se_port_stat_grps {
};
struct scsi_port_stats {
- atomic_long_t cmd_pdus;
- atomic_long_t tx_data_octets;
- atomic_long_t rx_data_octets;
+ u64 cmd_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
};
struct se_lun {
@@ -768,7 +779,7 @@ struct se_lun {
spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg;
- struct scsi_port_stats lun_stats;
+ struct scsi_port_stats __percpu *lun_stats;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
struct completion lun_shutdown_comp;
@@ -800,6 +811,12 @@ struct se_device_queue {
struct se_cmd_queue sq;
};
+struct se_dev_io_stats {
+ u64 total_cmds;
+ u64 read_bytes;
+ u64 write_bytes;
+};
+
struct se_device {
/* Used for SAM Task Attribute ordering */
u32 dev_cur_ordered_id;
@@ -821,13 +838,10 @@ struct se_device {
atomic_long_t num_resets;
atomic_long_t aborts_complete;
atomic_long_t aborts_no_task;
- atomic_long_t num_cmds;
- atomic_long_t read_bytes;
- atomic_long_t write_bytes;
+ struct se_dev_io_stats __percpu *stats;
/* Active commands on this virtual SE device */
- atomic_t non_ordered;
+ struct percpu_ref non_ordered;
bool ordered_sync_in_progress;
- atomic_t delayed_cmd_count;
atomic_t dev_qf_count;
u32 export_count;
spinlock_t delayed_cmd_lock;
@@ -890,7 +904,7 @@ struct target_opcode_descriptor {
u8 specific_timeout;
u16 nominal_timeout;
u16 recommended_timeout;
- bool (*enabled)(struct target_opcode_descriptor *descr,
+ bool (*enabled)(const struct target_opcode_descriptor *descr,
struct se_cmd *cmd);
void (*update_usage_bits)(u8 *usage_bits,
struct se_device *dev);
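The statistics changes above replace global atomic_long_t counters with __percpu structures: the I/O hot path now bumps a CPU-local copy with no cross-CPU cacheline contention, and readers fold all CPUs' copies on demand. A minimal sketch of the pattern, with hypothetical my_dev/my_io_stats names and the standard kernel percpu API:

#include <linux/percpu.h>

struct my_io_stats {
	u64 total_cmds;
	u64 read_bytes;
};

struct my_dev {
	struct my_io_stats __percpu *stats;	/* from alloc_percpu() */
};

static void my_account_read(struct my_dev *dev, u64 bytes)
{
	struct my_io_stats *s;

	/* pin this CPU and update its local copy; no atomics needed */
	s = get_cpu_ptr(dev->stats);
	s->total_cmds++;
	s->read_bytes += bytes;
	put_cpu_ptr(dev->stats);
}

static u64 my_read_bytes_sum(struct my_dev *dev)
{
	u64 sum = 0;
	int cpu;

	/* readers pay the cost: fold every CPU's counter on demand */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(dev->stats, cpu)->read_bytes;

	return sum;
}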
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index a2ea11cc912e..9391d54d3f12 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -53,6 +53,20 @@ __bpf_trace_##call(void *__data, proto) \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
__BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
+#define __BPF_DECLARE_TRACE_SYSCALL(call, proto, args) \
+static notrace void \
+__bpf_trace_##call(void *__data, proto) \
+{ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \
+ preempt_enable_notrace(); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+ __BPF_DECLARE_TRACE_SYSCALL(call, PARAMS(proto), PARAMS(args))
+
/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
@@ -105,14 +119,14 @@ static inline void bpf_test_buffer_##call(void) \
#undef DECLARE_TRACE
#define DECLARE_TRACE(call, proto, args) \
- __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
- __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0)
+ __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), 0)
#undef DECLARE_TRACE_WRITABLE
#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \
__CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \
- __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \
- __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size)
+ __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \
+ __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), size)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
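For reference, a rough hand-expansion of the new syscall class (a hypothetical two-argument tracepoint foo; COUNT_ARGS() selects the bpf_trace_run2() flavour, and CAST_TO_U64() is shown here as a plain cast): syscall tracepoints are allowed to fault, so might_fault() is asserted before the BPF program is run inside a non-preemptible section.

static notrace void
__bpf_trace_foo(void *__data, struct pt_regs *regs, long id)
{
	might_fault();			/* this context may sleep ... */
	preempt_disable_notrace();	/* ... but the run itself must not */
	bpf_trace_run2(__data, (u64)regs, (u64)id);
	preempt_enable_notrace();
}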
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index 00723935dcc7..b2ba5a80583f 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -46,6 +46,10 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
+#undef TRACE_EVENT_SYSCALL
+#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, print, reg, unreg) \
+ DEFINE_TRACE_SYSCALL(name, reg, unreg, PARAMS(proto), PARAMS(args))
+
#undef TRACE_EVENT_NOP
#define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@@ -70,8 +74,27 @@
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
+ DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_CONDITION
+#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
+ DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_EVENT
+#define DECLARE_TRACE_EVENT(name, proto, args) \
+ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
+
+#undef DECLARE_TRACE_EVENT_CONDITION
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \
DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
+/* If requested, create helpers for calling these tracepoints from Rust. */
+#ifdef CREATE_RUST_TRACE_POINTS
+#undef DEFINE_RUST_DO_TRACE
+#define DEFINE_RUST_DO_TRACE(name, proto, args) \
+ __DEFINE_RUST_DO_TRACE(name, PARAMS(proto), PARAMS(args))
+#endif
+
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
@@ -97,6 +120,13 @@
/* Make all open coded DECLARE_TRACE nops */
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
+#undef DECLARE_TRACE_CONDITION
+#define DECLARE_TRACE_CONDITION(name, proto, args, cond)
+
+#undef DECLARE_TRACE_EVENT
+#define DECLARE_TRACE_EVENT(name, proto, args)
+#undef DECLARE_TRACE_EVENT_CONDITION
+#define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond)
#ifdef TRACEPOINTS_ENABLED
#include <trace/trace_events.h>
@@ -107,6 +137,7 @@
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_FN_COND
+#undef TRACE_EVENT_SYSCALL
#undef TRACE_EVENT_CONDITION
#undef TRACE_EVENT_NOP
#undef DEFINE_EVENT_NOP
@@ -117,6 +148,9 @@
#undef DEFINE_EVENT_CONDITION
#undef TRACE_HEADER_MULTI_READ
#undef DECLARE_TRACE
+#undef DECLARE_TRACE_CONDITION
+#undef DECLARE_TRACE_EVENT
+#undef DECLARE_TRACE_EVENT_CONDITION
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
@@ -129,6 +163,11 @@
# undef UNDEF_TRACE_INCLUDE_PATH
#endif
+#ifdef CREATE_RUST_TRACE_POINTS
+# undef DEFINE_RUST_DO_TRACE
+# define DEFINE_RUST_DO_TRACE(name, proto, args)
+#endif
+
/* We may be processing more files */
#define CREATE_TRACE_POINTS
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 450c44c83a5d..1b3c48b5591d 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -69,6 +69,9 @@ enum afs_fs_operation {
yfs_FS_RemoveACL = 64171,
yfs_FS_RemoveFile2 = 64173,
yfs_FS_StoreOpaqueACL2 = 64174,
+ yfs_FS_Rename_Replace = 64176,
+ yfs_FS_Rename_NoReplace = 64177,
+ yfs_FS_Rename_Exchange = 64187,
yfs_FS_InlineBulkStatus = 64536, /* YFS Fetch multiple file statuses with errors */
yfs_FS_FetchData64 = 64537, /* YFS Fetch file data */
yfs_FS_StoreData64 = 64538, /* YFS Store file data */
@@ -118,6 +121,8 @@ enum yfs_cm_operation {
*/
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
+ EM(afs_call_trace_async_abort, "ASYAB") \
+ EM(afs_call_trace_async_kill, "ASYKL") \
EM(afs_call_trace_free, "FREE ") \
EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
@@ -125,27 +130,29 @@ enum yfs_cm_operation {
E_(afs_call_trace_work, "QUEUE")
#define afs_server_traces \
- EM(afs_server_trace_alloc, "ALLOC ") \
EM(afs_server_trace_callback, "CALLBACK ") \
EM(afs_server_trace_destroy, "DESTROY ") \
EM(afs_server_trace_free, "FREE ") \
EM(afs_server_trace_gc, "GC ") \
- EM(afs_server_trace_get_by_addr, "GET addr ") \
- EM(afs_server_trace_get_by_uuid, "GET uuid ") \
- EM(afs_server_trace_get_caps, "GET caps ") \
- EM(afs_server_trace_get_install, "GET inst ") \
- EM(afs_server_trace_get_new_cbi, "GET cbi ") \
EM(afs_server_trace_get_probe, "GET probe") \
- EM(afs_server_trace_give_up_cb, "giveup-cb") \
EM(afs_server_trace_purging, "PURGE ") \
- EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
- EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
EM(afs_server_trace_put_probe, "PUT probe") \
- EM(afs_server_trace_put_slist, "PUT slist") \
- EM(afs_server_trace_put_slist_isort, "PUT isort") \
- EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \
- E_(afs_server_trace_update, "UPDATE")
+ EM(afs_server_trace_see_destroyer, "SEE destr") \
+ EM(afs_server_trace_see_expired, "SEE expd ") \
+ EM(afs_server_trace_see_purge, "SEE purge") \
+ EM(afs_server_trace_see_timer, "SEE timer") \
+ EM(afs_server_trace_unuse_call, "UNU call ") \
+ EM(afs_server_trace_unuse_create_fail, "UNU cfail") \
+ EM(afs_server_trace_unuse_slist, "UNU slist") \
+ EM(afs_server_trace_unuse_slist_isort, "UNU isort") \
+ EM(afs_server_trace_update, "UPDATE ") \
+ EM(afs_server_trace_use_by_uuid, "USE uuid ") \
+ EM(afs_server_trace_use_cm_call, "USE cm-cl") \
+ EM(afs_server_trace_use_get_caps, "USE gcaps") \
+ EM(afs_server_trace_use_give_up_cb, "USE gvupc") \
+ EM(afs_server_trace_use_install, "USE inst ") \
+ E_(afs_server_trace_wait_create, "WAIT crt ")
#define afs_volume_traces \
EM(afs_volume_trace_alloc, "ALLOC ") \
@@ -167,37 +174,48 @@ enum yfs_cm_operation {
#define afs_cell_traces \
EM(afs_cell_trace_alloc, "ALLOC ") \
+ EM(afs_cell_trace_destroy, "DESTROY ") \
EM(afs_cell_trace_free, "FREE ") \
- EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
- EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
- EM(afs_cell_trace_get_queue_new, "GET q-new ") \
+ EM(afs_cell_trace_get_atcell, "GET atcell") \
+ EM(afs_cell_trace_get_server, "GET server") \
EM(afs_cell_trace_get_vol, "GET vol ") \
- EM(afs_cell_trace_insert, "INSERT ") \
- EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_purge, "PURGE ") \
+ EM(afs_cell_trace_put_atcell, "PUT atcell") \
EM(afs_cell_trace_put_candidate, "PUT candid") \
- EM(afs_cell_trace_put_destroy, "PUT destry") \
- EM(afs_cell_trace_put_queue_work, "PUT q-work") \
- EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \
+ EM(afs_cell_trace_put_final, "PUT final ") \
+ EM(afs_cell_trace_put_server, "PUT server") \
EM(afs_cell_trace_put_vol, "PUT vol ") \
+ EM(afs_cell_trace_queue_again, "QUE again ") \
+ EM(afs_cell_trace_queue_dns, "QUE dns ") \
+ EM(afs_cell_trace_queue_new, "QUE new ") \
+ EM(afs_cell_trace_queue_purge, "QUE purge ") \
+ EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_managed, "MANAGED ") \
EM(afs_cell_trace_see_source, "SEE source") \
- EM(afs_cell_trace_see_ws, "SEE ws ") \
+ EM(afs_cell_trace_see_mgmt_timer, "SEE mtimer") \
EM(afs_cell_trace_unuse_alias, "UNU alias ") \
EM(afs_cell_trace_unuse_check_alias, "UNU chk-al") \
EM(afs_cell_trace_unuse_delete, "UNU delete") \
+ EM(afs_cell_trace_unuse_dynroot_mntpt, "UNU dyn-mp") \
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
- EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
+ EM(afs_cell_trace_unuse_lookup_dynroot, "UNU lu-dyn") \
+ EM(afs_cell_trace_unuse_lookup_error, "UNU lu-err") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
- EM(afs_cell_trace_unuse_probe, "UNU probe ") \
EM(afs_cell_trace_unuse_sbi, "UNU sbi ") \
EM(afs_cell_trace_unuse_ws, "UNU ws ") \
EM(afs_cell_trace_use_alias, "USE alias ") \
EM(afs_cell_trace_use_check_alias, "USE chk-al") \
EM(afs_cell_trace_use_fc, "USE fc ") \
EM(afs_cell_trace_use_fc_alias, "USE fc-al ") \
- EM(afs_cell_trace_use_lookup, "USE lookup") \
+ EM(afs_cell_trace_use_lookup_add, "USE lu-add") \
+ EM(afs_cell_trace_use_lookup_canonical, "USE lu-can") \
+ EM(afs_cell_trace_use_lookup_dynroot, "USE lu-dyn") \
+ EM(afs_cell_trace_use_lookup_mntpt, "USE lu-mpt") \
+ EM(afs_cell_trace_use_lookup_mount, "USE lu-mnt") \
+ EM(afs_cell_trace_use_lookup_ws, "USE lu-ws ") \
EM(afs_cell_trace_use_mntpt, "USE mntpt ") \
EM(afs_cell_trace_use_pin, "USE pin ") \
EM(afs_cell_trace_use_probe, "USE probe ") \
@@ -214,7 +232,7 @@ enum yfs_cm_operation {
EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
- EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_create, "PUT sv-crt") \
EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
@@ -285,6 +303,9 @@ enum yfs_cm_operation {
EM(yfs_FS_RemoveACL, "YFS.RemoveACL") \
EM(yfs_FS_RemoveFile2, "YFS.RemoveFile2") \
EM(yfs_FS_StoreOpaqueACL2, "YFS.StoreOpaqueACL2") \
+ EM(yfs_FS_Rename_Replace, "YFS.Rename_Replace") \
+ EM(yfs_FS_Rename_NoReplace, "YFS.Rename_NoReplace") \
+ EM(yfs_FS_Rename_Exchange, "YFS.Rename_Exchange") \
EM(yfs_FS_InlineBulkStatus, "YFS.InlineBulkStatus") \
EM(yfs_FS_FetchData64, "YFS.FetchData64") \
EM(yfs_FS_StoreData64, "YFS.StoreData64") \
@@ -323,6 +344,44 @@ enum yfs_cm_operation {
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
E_(yfs_CB_CallBack, "YFSCB.CallBack")
+#define afs_cb_promise_traces \
+ EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \
+ EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \
+ EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \
+ EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \
+ EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \
+ EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \
+ EM(afs_cb_promise_set_new_inode, "SET new-inode") \
+ E_(afs_cb_promise_set_new_symlink, "SET new-symlink")
+
+#define afs_vnode_invalid_traces \
+ EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \
+ EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \
+ EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \
+ EM(afs_vnode_invalid_trace_expired, "expired") \
+ EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \
+ EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \
+ EM(afs_vnode_invalid_trace_zap_data, "zap-data") \
+ E_(afs_vnode_valid_trace, "valid")
+
+#define afs_dir_invalid_traces \
+ EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \
+ EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \
+ EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \
+ EM(afs_dir_invalid_edit_get_block, "edit-get-block") \
+ EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \
+ EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \
+ EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \
+ EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \
+ EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \
+ EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \
+ EM(afs_dir_invalid_inval_folio, "inv-folio") \
+ EM(afs_dir_invalid_iter_stale, "iter-stale") \
+ EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \
+ EM(afs_dir_invalid_release_folio, "rel-folio") \
+ EM(afs_dir_invalid_remote, "remote") \
+ E_(afs_dir_invalid_subdir_removed, "subdir-removed")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -331,7 +390,12 @@ enum yfs_cm_operation {
EM(afs_edit_dir_delete, "delete") \
EM(afs_edit_dir_delete_error, "d_err ") \
EM(afs_edit_dir_delete_inval, "d_invl") \
- E_(afs_edit_dir_delete_noent, "d_nent")
+ EM(afs_edit_dir_delete_noent, "d_nent") \
+ EM(afs_edit_dir_mkdir, "mk_ent") \
+ EM(afs_edit_dir_update_dd, "u_ddot") \
+ EM(afs_edit_dir_update_error, "u_fail") \
+ EM(afs_edit_dir_update_inval, "u_invl") \
+ E_(afs_edit_dir_update_nodd, "u_nodd")
#define afs_edit_dir_reasons \
EM(afs_edit_dir_for_create, "Create") \
@@ -340,6 +404,7 @@ enum yfs_cm_operation {
EM(afs_edit_dir_for_rename_0, "Renam0") \
EM(afs_edit_dir_for_rename_1, "Renam1") \
EM(afs_edit_dir_for_rename_2, "Renam2") \
+ EM(afs_edit_dir_for_rename_sub, "RnmSub") \
EM(afs_edit_dir_for_rmdir, "RmDir ") \
EM(afs_edit_dir_for_silly_0, "S_Ren0") \
EM(afs_edit_dir_for_silly_1, "S_Ren1") \
@@ -380,6 +445,7 @@ enum yfs_cm_operation {
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
+ EM(afs_file_error_symlink_big, "SYM_BIG") \
EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
@@ -482,7 +548,9 @@ enum yfs_cm_operation {
enum afs_alist_trace { afs_alist_traces } __mode(byte);
enum afs_call_trace { afs_call_traces } __mode(byte);
enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte);
enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_dir_invalid_trace { afs_dir_invalid_traces } __mode(byte);
enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
@@ -493,6 +561,7 @@ enum afs_flock_operation { afs_flock_operations } __mode(byte);
enum afs_io_error { afs_io_errors } __mode(byte);
enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_vnode_invalid_trace { afs_vnode_invalid_traces } __mode(byte);
enum afs_volume_trace { afs_volume_traces } __mode(byte);
#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
@@ -508,8 +577,10 @@ enum afs_volume_trace { afs_volume_traces } __mode(byte);
afs_alist_traces;
afs_call_traces;
afs_cb_break_reasons;
+afs_cb_promise_traces;
afs_cell_traces;
afs_cm_operations;
+afs_dir_invalid_traces;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
@@ -521,6 +592,7 @@ afs_fs_operations;
afs_io_errors;
afs_rotate_traces;
afs_server_traces;
+afs_vnode_invalid_traces;
afs_vl_operations;
yfs_cm_operations;
@@ -597,19 +669,26 @@ TRACE_EVENT(afs_cb_call,
__field(unsigned int, call)
__field(u32, op)
__field(u16, service_id)
+ __field(u8, security_ix)
+ __field(u32, enctype)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
__entry->service_id = call->service_id;
+ __entry->security_ix = call->security_ix;
+ __entry->enctype = call->enctype;
),
- TP_printk("c=%08x %s",
+ TP_printk("c=%08x %s sv=%u sx=%u en=%u",
__entry->call,
__entry->service_id == 2501 ?
__print_symbolic(__entry->op, yfs_cm_operations) :
- __print_symbolic(__entry->op, afs_cm_operations))
+ __print_symbolic(__entry->op, afs_cm_operations),
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->enctype)
);
TRACE_EVENT(afs_call,
@@ -665,7 +744,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -699,7 +778,7 @@ TRACE_EVENT(afs_make_fs_calli,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -736,7 +815,7 @@ TRACE_EVENT(afs_make_fs_call1,
__entry->name[__len] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -777,7 +856,7 @@ TRACE_EVENT(afs_make_fs_call2,
__entry->name2[__len2] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -882,9 +961,9 @@ TRACE_EVENT(afs_sent_data,
);
TRACE_EVENT(afs_dir_check_failed,
- TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size),
+ TP_PROTO(struct afs_vnode *vnode, loff_t off),
- TP_ARGS(vnode, off, i_size),
+ TP_ARGS(vnode, off),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode)
@@ -895,7 +974,7 @@ TRACE_EVENT(afs_dir_check_failed,
TP_fast_assign(
__entry->vnode = vnode;
__entry->off = off;
- __entry->i_size = i_size;
+ __entry->i_size = i_size_read(&vnode->netfs.inode);
),
TP_printk("vn=%p %llx/%llx",
@@ -997,7 +1076,7 @@ TRACE_EVENT(afs_edit_dir,
__entry->name[__len] = 0;
),
- TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"",
+ TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"",
__entry->vnode, __entry->unique,
__print_symbolic(__entry->why, afs_edit_dir_reasons),
__print_symbolic(__entry->op, afs_edit_dir_ops),
@@ -1006,6 +1085,122 @@ TRACE_EVENT(afs_edit_dir,
__entry->name)
);
+TRACE_EVENT(afs_dir_invalid,
+ TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace),
+
+ TP_ARGS(dvnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_dir_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_dir_invalid_traces))
+ );
+
+TRACE_EVENT(afs_cb_promise,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_cb_promise_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_cb_promise_traces))
+ );
+
+TRACE_EVENT(afs_vnode_invalid,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_vnode_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_vnode_invalid_traces))
+ );
+
+TRACE_EVENT(afs_set_dv,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv),
+
+ TP_ARGS(dvnode, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(u64, old_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->old_dv = dvnode->status.data_version;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x dv=%llx -> dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->old_dv, __entry->new_dv)
+ );
+
+TRACE_EVENT(afs_dv_mismatch,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv),
+
+ TP_ARGS(dvnode, before_dv, delta, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(int, delta)
+ __field(u64, before_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->delta = delta;
+ __entry->before_dv = before_dv;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x xdv=%llx+%d dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->before_dv, __entry->delta, __entry->new_dv)
+ );
+
TRACE_EVENT(afs_protocol_error,
TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
@@ -1097,7 +1292,7 @@ TRACE_EVENT(afs_bulkstat_error,
);
TRACE_EVENT(afs_cm_no_server,
- TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+ TP_PROTO(struct afs_call *call, const struct sockaddr_rxrpc *srx),
TP_ARGS(call, srx),
@@ -1356,7 +1551,7 @@ TRACE_EVENT(afs_server,
__entry->reason = reason;
),
- TP_printk("s=%08x %s u=%d a=%d",
+ TP_printk("s=%08x %s r=%d a=%d",
__entry->server,
__print_symbolic(__entry->reason, afs_server_traces),
__entry->ref,
@@ -1364,25 +1559,29 @@ TRACE_EVENT(afs_server,
);
TRACE_EVENT(afs_volume,
- TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason),
+ TP_PROTO(unsigned int debug_id, afs_volid_t vid, int ref,
+ enum afs_volume_trace reason),
- TP_ARGS(vid, ref, reason),
+ TP_ARGS(debug_id, vid, ref, reason),
TP_STRUCT__entry(
+ __field(unsigned int, debug_id)
__field(afs_volid_t, vid)
__field(int, ref)
__field(enum afs_volume_trace, reason)
),
TP_fast_assign(
- __entry->vid = vid;
- __entry->ref = ref;
- __entry->reason = reason;
+ __entry->debug_id = debug_id;
+ __entry->vid = vid;
+ __entry->ref = ref;
+ __entry->reason = reason;
),
- TP_printk("V=%llx %s ur=%d",
- __entry->vid,
+ TP_printk("V=%08x %s vid=%llx r=%d",
+ __entry->debug_id,
__print_symbolic(__entry->reason, afs_volume_traces),
+ __entry->vid,
__entry->ref)
);
@@ -1606,6 +1805,36 @@ TRACE_EVENT(afs_make_call,
__entry->fid.unique)
);
+TRACE_EVENT(afs_read_recv,
+ TP_PROTO(const struct afs_operation *op, const struct afs_call *call),
+
+ TP_ARGS(op, call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, sreq)
+ __field(unsigned int, op)
+ __field(unsigned int, op_flags)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, call_state)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->sreq = op->fetch.subreq->debug_index;
+ __entry->rreq = op->fetch.subreq->rreq->debug_id;
+ __entry->op_flags = op->flags;
+ __entry->call = call->debug_id;
+ __entry->call_state = call->state;
+ ),
+
+ TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x",
+ __entry->rreq, __entry->sreq,
+ __entry->op,
+ __entry->call, __entry->call_state,
+ __entry->op_flags)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
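
Most of the afs.h churn above stays inside the EM()/E_() lists because the header expands each list three times: into an enum, into TRACE_DEFINE_ENUM() exports, and into a { value, "string" } table for __print_symbolic(). A sketch of the trick with a hypothetical list name:

    #define sample_traces \
            EM(sample_trace_alloc, "ALLOC") \
            E_(sample_trace_free,  "FREE ")

    /* Pass 1: enum constants (guarded once-only in the real header). */
    #undef EM
    #undef E_
    #define EM(a, b) a,
    #define E_(a, b) a
    enum sample_trace { sample_traces } __mode(byte);

    /* Pass 2: export the numeric values to the tracing interface. */
    #undef EM
    #undef E_
    #define EM(a, b) TRACE_DEFINE_ENUM(a);
    #define E_(a, b) TRACE_DEFINE_ENUM(a);
    sample_traces;

    /* Pass 3: translation table consumed in TP_printk(), e.g.
     * __print_symbolic(__entry->trace, sample_traces). */
    #undef EM
    #undef E_
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }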
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index 13483c7ca70b..8e9c76a7f21b 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -20,6 +20,7 @@ TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
{ 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
{ 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+#ifdef CONFIG_RTC_CLASS
TRACE_EVENT(alarmtimer_suspend,
TP_PROTO(ktime_t expires, int flag),
@@ -41,6 +42,7 @@ TRACE_EVENT(alarmtimer_suspend,
__entry->expires
)
);
+#endif /* CONFIG_RTC_CLASS */
DECLARE_EVENT_CLASS(alarm_class,
diff --git a/include/trace/events/amdxdna.h b/include/trace/events/amdxdna.h
new file mode 100644
index 000000000000..c6cb2da7b706
--- /dev/null
+++ b/include/trace/events/amdxdna.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdxdna
+
+#if !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AMDXDNA_H
+
+#include <drm/gpu_scheduler.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdxdna_debug_point,
+ TP_PROTO(const char *name, u64 number, const char *str),
+
+ TP_ARGS(name, number, str),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(u64, number)
+ __string(str, str)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->number = number;
+ __assign_str(str);),
+
+ TP_printk("%s:%llu %s", __get_str(name), __entry->number,
+ __get_str(str))
+);
+
+TRACE_EVENT(xdna_job,
+ TP_PROTO(struct drm_sched_job *sched_job, const char *name, const char *str, u64 seq),
+
+ TP_ARGS(sched_job, name, str, seq),
+
+ TP_STRUCT__entry(__string(name, name)
+ __string(str, str)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, seq)),
+
+ TP_fast_assign(__assign_str(name);
+ __assign_str(str);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->seq = seq;),
+
+ TP_printk("fence=(context:%llu, seqno:%lld), %s seq#:%lld %s",
+ __entry->fence_context, __entry->fence_seqno,
+ __get_str(name), __entry->seq,
+ __get_str(str))
+);
+
+DECLARE_EVENT_CLASS(xdna_mbox_msg,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 msg_id),
+
+ TP_ARGS(name, chann_id, opcode, msg_id),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(u32, chann_id)
+ __field(u32, opcode)
+ __field(u32, msg_id)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->chann_id = chann_id;
+ __entry->opcode = opcode;
+ __entry->msg_id = msg_id;),
+
+ TP_printk("%s.%d id 0x%x opcode 0x%x", __get_str(name),
+ __entry->chann_id, __entry->msg_id, __entry->opcode)
+);
+
+DEFINE_EVENT(xdna_mbox_msg, mbox_set_tail,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id),
+ TP_ARGS(name, chann_id, opcode, id)
+);
+
+DEFINE_EVENT(xdna_mbox_msg, mbox_set_head,
+ TP_PROTO(char *name, u8 chann_id, u32 opcode, u32 id),
+ TP_ARGS(name, chann_id, opcode, id)
+);
+
+TRACE_EVENT(mbox_irq_handle,
+ TP_PROTO(char *name, int irq),
+
+ TP_ARGS(name, irq),
+
+ TP_STRUCT__entry(__string(name, name)
+ __field(int, irq)),
+
+ TP_fast_assign(__assign_str(name);
+ __entry->irq = irq;),
+
+ TP_printk("%s.%d", __get_str(name), __entry->irq)
+);
+
+#endif /* !defined(_TRACE_AMDXDNA_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
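
The new amdxdna events rely on dynamic string fields: __string() reserves space and records the source, __assign_str() copies it in TP_fast_assign(), and __get_str() reads it back in TP_printk(). Note the single-argument __assign_str() form used here, which newer kernels require. A minimal sketch with a hypothetical event name:

    TRACE_EVENT(sample_str,
            TP_PROTO(const char *name, int value),
            TP_ARGS(name, value),

            TP_STRUCT__entry(__string(name, name)
                             __field(int, value)),

            TP_fast_assign(__assign_str(name);
                           __entry->value = value;),

            TP_printk("%s=%d", __get_str(name), __entry->value)
    );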
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 202fc3680c36..4a645549164e 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
#include <linux/ktime.h>
#include <linux/tracepoint.h>
#include <sound/jack.h>
+#include <sound/pcm.h>
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
@@ -26,8 +27,8 @@ DECLARE_EVENT_CLASS(snd_soc_dapm,
TP_ARGS(dapm, val),
TP_STRUCT__entry(
- __string( card_name, dapm->card->name)
- __string( comp_name, dapm->component ? dapm->component->name : "(none)")
+ __string( card_name, snd_soc_dapm_to_card(dapm)->name)
+ __string( comp_name, snd_soc_dapm_to_component(dapm) ? snd_soc_dapm_to_component(dapm)->name : "(none)")
__field( int, val)
),
@@ -212,7 +213,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
),
TP_printk("%s: found %d paths",
- __entry->stream ? "capture" : "playback", __entry->paths)
+ snd_pcm_direction_name(__entry->stream), __entry->paths)
);
TRACE_EVENT(snd_soc_jack_irq,
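
The asoc.h hunk above swaps the open-coded direction ternary for snd_pcm_direction_name() from <sound/pcm.h> (hence the added include), so ASoC trace output spells the stream direction consistently. The helper is roughly equivalent to this sketch (helper name hypothetical):

    static inline const char *direction_name(int stream)
    {
            /* SNDRV_PCM_STREAM_PLAYBACK == 0, SNDRV_PCM_STREAM_CAPTURE == 1 */
            return stream == SNDRV_PCM_STREAM_PLAYBACK ? "Playback" : "Capture";
    }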
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 0e128ad51460..6aa79e2d799c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -9,8 +9,16 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/tracepoint.h>
+#include <uapi/linux/ioprio.h>
-#define RWBS_LEN 8
+#define RWBS_LEN 10
+
+#define IOPRIO_CLASS_STRINGS \
+ { IOPRIO_CLASS_NONE, "none" }, \
+ { IOPRIO_CLASS_RT, "rt" }, \
+ { IOPRIO_CLASS_BE, "be" }, \
+ { IOPRIO_CLASS_IDLE, "idle" }, \
+	{ IOPRIO_CLASS_INVALID,	"invalid" }
#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
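
The hunks below feed each request's I/O priority into TP_printk() as a class,hint,level triple, decoded with the <linux/ioprio.h> accessors and the IOPRIO_CLASS_STRINGS table defined above. A sketch of the same unpacking outside the tracing macros (function name hypothetical):

    #include <linux/ioprio.h>

    static void sample_show_ioprio(unsigned short ioprio)
    {
            pr_info("class=%d hint=%u level=%u\n",
                    IOPRIO_PRIO_CLASS(ioprio),  /* none/rt/be/idle/invalid */
                    IOPRIO_PRIO_HINT(ioprio),   /* lifetime hint bits */
                    IOPRIO_PRIO_LEVEL(ioprio)); /* per-class priority level */
    }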
@@ -82,6 +90,7 @@ TRACE_EVENT(block_rq_requeue,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -90,16 +99,20 @@ TRACE_EVENT(block_rq_requeue,
__entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, 0)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), 0)
);
DECLARE_EVENT_CLASS(block_rq_completion,
@@ -113,6 +126,7 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int , error )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -122,16 +136,20 @@ DECLARE_EVENT_CLASS(block_rq_completion,
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
__entry->error = blk_status_to_errno(error);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
),
- TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ TP_printk("%d,%d %s (%s) %llu + %u %s,%u,%u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->error)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->error)
);
/**
@@ -180,6 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
+ __field( unsigned short, ioprio )
__array( char, rwbs, RWBS_LEN )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, 1 )
@@ -190,17 +209,21 @@ DECLARE_EVENT_CLASS(block_rq,
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
+ __entry->ioprio = req_get_ioprio(rq);
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
__get_str(cmd)[0] = '\0';
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
- TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ TP_printk("%d,%d %s %u (%s) %llu + %u %s,%u,%u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __entry->bytes, __get_str(cmd),
- (unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->comm)
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __print_symbolic(IOPRIO_PRIO_CLASS(__entry->ioprio),
+ IOPRIO_CLASS_STRINGS),
+ IOPRIO_PRIO_HINT(__entry->ioprio),
+ IOPRIO_PRIO_LEVEL(__entry->ioprio), __entry->comm)
);
/**
@@ -338,21 +361,6 @@ DECLARE_EVENT_CLASS(block_bio,
);
/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device. Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-DEFINE_EVENT(block_bio, block_bio_bounce,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
-);
-
-/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @bio: new block operation to merge
*
@@ -397,6 +405,17 @@ DEFINE_EVENT(block_bio, block_getrq,
);
/**
+ * blk_zone_append_update_request_bio - update bio sector after zone append
+ * @rq: the completed request that sets the bio sector
+ *
+ * Update the bio's bi_sector after a zone append command has been completed.
+ */
+DEFINE_EVENT(block_rq, blk_zone_append_update_request_bio,
+ TP_PROTO(struct request *rq),
+ TP_ARGS(rq)
+);
+
+/**
* block_plug - keep operations requests in request queue
* @q: request queue to plug
*
@@ -580,6 +599,84 @@ TRACE_EVENT(block_rq_remap,
(unsigned long long)__entry->old_sector, __entry->nr_bios)
);
+/**
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
+ * @bio: The block IO operation sent down to the device
+ * @nr_sectors: The number of sectors affected by this operation
+ *
+ * Execute a zone management operation on a specified range of zones. This
+ * range is encoded in %nr_sectors, which has to be a multiple of the zone
+ * size.
+ */
+TRACE_EVENT(blkdev_zone_mgmt,
+
+ TP_PROTO(struct bio *bio, sector_t nr_sectors),
+
+ TP_ARGS(bio, nr_sectors),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( sector_t, nr_sectors )
+ __array( char, rwbs, RWBS_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio_dev(bio);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sectors = bio_sectors(bio);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
+ ),
+
+ TP_printk("%d,%d %s %llu + %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sectors)
+);
+
+DECLARE_EVENT_CLASS(block_zwplug,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( unsigned int, zno )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sectors )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(q->disk);
+ __entry->zno = zno;
+ __entry->sector = sector;
+ __entry->nr_sectors = nr_sectors;
+ ),
+
+ TP_printk("%d,%d zone %u, BIO %llu + %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->zno,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sectors)
+);
+
+DEFINE_EVENT(block_zwplug, disk_zone_wplug_add_bio,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors)
+);
+
+DEFINE_EVENT(block_zwplug, blk_zone_wplug_bio,
+
+ TP_PROTO(struct request_queue *q, unsigned int zno, sector_t sector,
+ unsigned int nr_sectors),
+
+ TP_ARGS(q, zno, sector, nr_sectors)
+);
+
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
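
Per the blkdev_zone_mgmt kernel-doc above, @nr_sectors must be a whole multiple of the zone size, so the event's sector range always maps onto whole zones. A minimal sketch of that mapping, assuming the usual power-of-two zone size in sectors (helper name hypothetical):

    #include <linux/log2.h>

    static inline unsigned int sample_zone_no(sector_t sector,
                                              sector_t zone_sectors)
    {
            /* zoned block devices expose a power-of-two zone size */
            return sector >> ilog2(zone_sectors);
    }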
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index fadf406b5260..7e418f065b94 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -100,7 +100,8 @@ struct find_free_extent_ctl;
EM( ALLOC_CHUNK, "ALLOC_CHUNK") \
EM( ALLOC_CHUNK_FORCE, "ALLOC_CHUNK_FORCE") \
EM( RUN_DELAYED_IPUTS, "RUN_DELAYED_IPUTS") \
- EMe(COMMIT_TRANS, "COMMIT_TRANS")
+ EM( COMMIT_TRANS, "COMMIT_TRANS") \
+ EMe(RESET_ZONES, "RESET_ZONES")
/*
* First define the enums in the above macros to be exported to userspace via
@@ -142,9 +143,9 @@ FLUSH_STATES
#define EXTENT_FLAGS \
{ EXTENT_DIRTY, "DIRTY"}, \
- { EXTENT_UPTODATE, "UPTODATE"}, \
{ EXTENT_LOCKED, "LOCKED"}, \
- { EXTENT_NEW, "NEW"}, \
+ { EXTENT_DIRTY_LOG1, "DIRTY_LOG1"}, \
+ { EXTENT_DIRTY_LOG2, "DIRTY_LOG2"}, \
{ EXTENT_DELALLOC, "DELALLOC"}, \
{ EXTENT_DEFRAG, "DEFRAG"}, \
{ EXTENT_BOUNDARY, "BOUNDARY"}, \
@@ -223,8 +224,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
__entry->generation = BTRFS_I(inode)->generation;
__entry->last_trans = BTRFS_I(inode)->last_trans;
__entry->logged_trans = BTRFS_I(inode)->logged_trans;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%llu blocks=%llu "
@@ -291,35 +291,24 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, ino )
__field( u64, start )
__field( u64, len )
- __field( u64, orig_start )
- __field( u64, block_start )
- __field( u64, block_len )
__field( u32, flags )
__field( int, refs )
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = btrfs_ino(inode);
__entry->start = map->start;
__entry->len = map->len;
- __entry->orig_start = map->orig_start;
- __entry->block_start = map->block_start;
- __entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
- "orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
__entry->len,
- __entry->orig_start,
- show_map_type(__entry->block_start),
- __entry->block_len,
show_map_flags(__entry->flags),
__entry->refs)
);
@@ -386,7 +375,7 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
),
TP_fast_assign_btrfs(bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -437,7 +426,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign_btrfs(
bi->root->fs_info,
- __entry->root_obj = bi->root->root_key.objectid;
+ __entry->root_obj = btrfs_root_id(bi->root);
__entry->ino = btrfs_ino(bi);
__entry->isize = bi->vfs_inode.i_size;
__entry->disk_isize = bi->disk_i_size;
@@ -537,7 +526,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
__entry->refs = refcount_read(&ordered->refs);
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
__entry->truncated_len = ordered->truncated_len;
),
@@ -674,7 +663,7 @@ TRACE_EVENT(btrfs_finish_ordered_extent,
__entry->start = start;
__entry->len = len;
__entry->uptodate = uptodate;
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d",
@@ -685,10 +674,10 @@ TRACE_EVENT(btrfs_finish_ordered_extent,
DECLARE_EVENT_CLASS(btrfs__writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc),
+ TP_ARGS(folio, inode, wbc),
TP_STRUCT__entry_btrfs(
__field( u64, ino )
@@ -698,7 +687,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( loff_t, range_start )
__field( loff_t, range_end )
__field( char, for_kupdate )
- __field( char, for_reclaim )
__field( char, range_cyclic )
__field( unsigned long, writeback_index )
__field( u64, root_objectid )
@@ -706,38 +694,35 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = btrfs_ino(BTRFS_I(inode));
- __entry->index = page->index;
+ __entry->index = folio->index;
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
__entry->for_kupdate = wbc->for_kupdate;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
- "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
+ "range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
- __entry->for_kupdate,
- __entry->for_reclaim, __entry->range_cyclic,
+ __entry->for_kupdate, __entry->range_cyclic,
__entry->writeback_index)
);
-DEFINE_EVENT(btrfs__writepage, __extent_writepage,
+DEFINE_EVENT(btrfs__writepage, extent_writepage,
- TP_PROTO(const struct page *page, const struct inode *inode,
+ TP_PROTO(const struct folio *folio, const struct inode *inode,
const struct writeback_control *wbc),
- TP_ARGS(page, inode, wbc)
+ TP_ARGS(folio, inode, wbc)
);
TRACE_EVENT(btrfs_writepage_end_io_hook,
@@ -760,7 +745,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
__entry->start = start;
__entry->end = end;
__entry->uptodate = uptodate;
- __entry->root_objectid = inode->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(inode->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu end=%llu uptodate=%d",
@@ -790,8 +775,7 @@ TRACE_EVENT(btrfs_sync_file,
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent)));
__entry->datasync = datasync;
- __entry->root_objectid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
TP_printk_btrfs("root=%llu(%s) ino=%llu parent=%llu datasync=%d",
@@ -1062,7 +1046,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
- __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(fs_info->chunk_root);
),
TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
@@ -1107,9 +1091,9 @@ TRACE_EVENT(btrfs_cow_block,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->buf_start = buf->start;
- __entry->refs = atomic_read(&buf->refs);
+ __entry->refs = refcount_read(&buf->refs);
__entry->cow_start = cow->start;
__entry->buf_level = btrfs_header_level(buf);
__entry->cow_level = btrfs_header_level(cow);
@@ -1250,7 +1234,7 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TP_ARGS(fs_info, start, len)
);
-TRACE_EVENT(find_free_extent,
+TRACE_EVENT(btrfs_find_free_extent,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl),
@@ -1265,7 +1249,7 @@ TRACE_EVENT(find_free_extent,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1278,7 +1262,7 @@ TRACE_EVENT(find_free_extent,
BTRFS_GROUP_FLAGS))
);
-TRACE_EVENT(find_free_extent_search_loop,
+TRACE_EVENT(btrfs_find_free_extent_search_loop,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl),
@@ -1294,7 +1278,7 @@ TRACE_EVENT(find_free_extent_search_loop,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1308,7 +1292,7 @@ TRACE_EVENT(find_free_extent_search_loop,
__entry->loop)
);
-TRACE_EVENT(find_free_extent_have_block_group,
+TRACE_EVENT(btrfs_find_free_extent_have_block_group,
TP_PROTO(const struct btrfs_root *root,
const struct find_free_extent_ctl *ffe_ctl,
@@ -1328,7 +1312,7 @@ TRACE_EVENT(find_free_extent_have_block_group,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->num_bytes = ffe_ctl->num_bytes;
__entry->empty_size = ffe_ctl->empty_size;
__entry->flags = ffe_ctl->flags;
@@ -1490,7 +1474,7 @@ TRACE_EVENT(btrfs_setup_cluster,
);
struct extent_state;
-TRACE_EVENT(alloc_extent_state,
+TRACE_EVENT(btrfs_alloc_extent_state,
TP_PROTO(const struct extent_state *state,
gfp_t mask, unsigned long IP),
@@ -1513,7 +1497,7 @@ TRACE_EVENT(alloc_extent_state,
show_gfp_flags(__entry->mask), __entry->ip)
);
-TRACE_EVENT(free_extent_state,
+TRACE_EVENT(btrfs_free_extent_state,
TP_PROTO(const struct extent_state *state, unsigned long IP),
@@ -1682,8 +1666,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
- __entry->rootid =
- BTRFS_I(inode)->root->root_key.objectid;
+ __entry->rootid = btrfs_root_id(BTRFS_I(inode)->root);
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->start = start;
__entry->len = len;
@@ -1717,9 +1700,10 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data,
DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec),
+ TP_ARGS(fs_info, rec, bytenr),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -1727,7 +1711,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = rec->bytenr,
+ __entry->bytenr = bytenr;
__entry->num_bytes = rec->num_bytes;
),
@@ -1738,20 +1722,22 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_qgroup_extent_record *rec),
+ const struct btrfs_qgroup_extent_record *rec,
+ u64 bytenr),
- TP_ARGS(fs_info, rec)
+ TP_ARGS(fs_info, rec, bytenr)
);
-TRACE_EVENT(qgroup_num_dirty_extents,
+TRACE_EVENT(btrfs_qgroup_num_dirty_extents,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 transid,
u64 num_dirty_extents),
@@ -1805,7 +1791,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots)
);
-TRACE_EVENT(qgroup_update_counters,
+TRACE_EVENT(btrfs_qgroup_update_counters,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct btrfs_qgroup *qgroup,
@@ -1834,9 +1820,9 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_old_count, __entry->cur_new_count)
);
-TRACE_EVENT(qgroup_update_reserve,
+TRACE_EVENT(btrfs_qgroup_update_reserve,
- TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const struct btrfs_qgroup *qgroup,
s64 diff, int type),
TP_ARGS(fs_info, qgroup, diff, type),
@@ -1860,9 +1846,9 @@ TRACE_EVENT(qgroup_update_reserve,
__entry->cur_reserved, __entry->diff)
);
-TRACE_EVENT(qgroup_meta_reserve,
+TRACE_EVENT(btrfs_qgroup_meta_reserve,
- TP_PROTO(struct btrfs_root *root, s64 diff, int type),
+ TP_PROTO(const struct btrfs_root *root, s64 diff, int type),
TP_ARGS(root, diff, type),
@@ -1873,7 +1859,7 @@ TRACE_EVENT(qgroup_meta_reserve,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
__entry->type = type;
),
@@ -1883,9 +1869,9 @@ TRACE_EVENT(qgroup_meta_reserve,
__print_symbolic(__entry->type, QGROUP_RSV_TYPES), __entry->diff)
);
-TRACE_EVENT(qgroup_meta_convert,
+TRACE_EVENT(btrfs_qgroup_meta_convert,
- TP_PROTO(struct btrfs_root *root, s64 diff),
+ TP_PROTO(const struct btrfs_root *root, s64 diff),
TP_ARGS(root, diff),
@@ -1895,7 +1881,7 @@ TRACE_EVENT(qgroup_meta_convert,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
__entry->diff = diff;
),
@@ -1906,7 +1892,7 @@ TRACE_EVENT(qgroup_meta_convert,
__entry->diff)
);
-TRACE_EVENT(qgroup_meta_free_all_pertrans,
+TRACE_EVENT(btrfs_qgroup_meta_free_all_pertrans,
TP_PROTO(struct btrfs_root *root),
@@ -1919,7 +1905,7 @@ TRACE_EVENT(qgroup_meta_free_all_pertrans,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->refroot = root->root_key.objectid;
+ __entry->refroot = btrfs_root_id(root);
spin_lock(&root->qgroup_meta_rsv_lock);
__entry->diff = -(s64)root->qgroup_meta_rsv_pertrans;
spin_unlock(&root->qgroup_meta_rsv_lock);
@@ -1935,7 +1921,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref,
TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct prelim_ref *oldref,
const struct prelim_ref *newref, u64 tree_size),
- TP_ARGS(fs_info, newref, oldref, tree_size),
+ TP_ARGS(fs_info, oldref, newref, tree_size),
TP_STRUCT__entry_btrfs(
__field( u64, root_id )
@@ -2001,7 +1987,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
),
TP_fast_assign_btrfs(root->fs_info,
- __entry->root_objectid = root->root_key.objectid;
+ __entry->root_objectid = btrfs_root_id(root);
__entry->ino = ino;
__entry->mod = mod;
__entry->outstanding = outstanding;
@@ -2081,12 +2067,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2114,12 +2100,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2148,12 +2134,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
- const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
+ TP_fast_assign_btrfs(btrfs_extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = btrfs_extent_io_tree_to_inode(tree);
__entry->owner = tree->owner;
__entry->ino = inode ? btrfs_ino(inode) : 0;
- __entry->rootid = inode ? inode->root->root_key.objectid : 0;
+ __entry->rootid = inode ? btrfs_root_id(inode->root) : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2348,12 +2334,7 @@ DEFINE_EVENT(btrfs_locking_events, name, \
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
-DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
@@ -2394,6 +2375,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
DECLARE_EVENT_CLASS(btrfs_raid56_bio,
TP_PROTO(const struct btrfs_raid_bio *rbio,
@@ -2556,9 +2545,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count,
TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
- TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, long nr),
- TP_ARGS(fs_info, nr_to_scan, nr),
+ TP_ARGS(fs_info, nr),
TP_STRUCT__entry_btrfs(
__field( long, nr_to_scan )
@@ -2568,10 +2557,11 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter,
),
TP_fast_assign_btrfs(fs_info,
- __entry->nr_to_scan = nr_to_scan;
+ __entry->nr_to_scan = \
+ atomic64_read(&fs_info->em_shrinker_nr_to_scan);
__entry->nr = nr;
- __entry->last_root_id = fs_info->extent_map_shrinker_last_root;
- __entry->last_ino = fs_info->extent_map_shrinker_last_ino;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
),
TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2595,8 +2585,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit,
TP_fast_assign_btrfs(fs_info,
__entry->nr_dropped = nr_dropped;
__entry->nr = nr;
- __entry->last_root_id = fs_info->extent_map_shrinker_last_root;
- __entry->last_ino = fs_info->extent_map_shrinker_last_ino;
+ __entry->last_root_id = fs_info->em_shrinker_last_root;
+ __entry->last_ino = fs_info->em_shrinker_last_ino;
),
TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu",
@@ -2615,24 +2605,20 @@ TRACE_EVENT(btrfs_extent_map_shrinker_remove_em,
__field( u64, root_id )
__field( u64, start )
__field( u64, len )
- __field( u64, block_start )
__field( u32, flags )
),
TP_fast_assign_btrfs(inode->root->fs_info,
__entry->ino = btrfs_ino(inode);
- __entry->root_id = inode->root->root_key.objectid;
+ __entry->root_id = btrfs_root_id(inode->root);
__entry->start = em->start;
__entry->len = em->len;
- __entry->block_start = em->block_start;
__entry->flags = em->flags;
),
- TP_printk_btrfs(
-"ino=%llu root=%llu(%s) start=%llu len=%llu block_start=%llu(%s) flags=%s",
+ TP_printk_btrfs("ino=%llu root=%llu(%s) start=%llu len=%llu flags=%s",
__entry->ino, show_root_type(__entry->root_id),
__entry->start, __entry->len,
- show_map_type(__entry->block_start),
show_map_flags(__entry->flags))
);
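
A recurring substitution throughout the btrfs hunks is root->root_key.objectid becoming btrfs_root_id(root), funnelling every root-ID read through one accessor. Its shape is roughly the following (see fs/btrfs/ctree.h for the authoritative definition):

    static inline u64 btrfs_root_id(const struct btrfs_root *root)
    {
            return root->root_key.objectid;
    }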
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index cf4b98b9a9ed..a743b2a35ea7 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
+ cachefiles_obj_get_read_req,
+ cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
+ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
+ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
+ EM(cachefiles_obj_get_read_req, "GET read_req") \
+ E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \
@@ -217,10 +223,10 @@ TRACE_EVENT(cachefiles_ref,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, cookie )
- __field(enum cachefiles_obj_ref_trace, why )
- __field(int, usage )
+ __field(unsigned int, obj)
+ __field(unsigned int, cookie)
+ __field(enum cachefiles_obj_ref_trace, why)
+ __field(int, usage)
),
TP_fast_assign(
@@ -243,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(short, error )
- __field(unsigned long, dino )
- __field(unsigned long, ino )
+ __field(unsigned int, obj)
+ __field(short, error)
+ __field(unsigned long, dino)
+ __field(unsigned long, ino)
),
TP_fast_assign(
@@ -267,8 +273,8 @@ TRACE_EVENT(cachefiles_mkdir,
TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(unsigned int, dir )
- __field(unsigned int, subdir )
+ __field(unsigned int, dir)
+ __field(unsigned int, subdir)
),
TP_fast_assign(
@@ -287,8 +293,8 @@ TRACE_EVENT(cachefiles_tmpfile,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -307,8 +313,8 @@ TRACE_EVENT(cachefiles_link,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -330,9 +336,9 @@ TRACE_EVENT(cachefiles_unlink,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -355,9 +361,9 @@ TRACE_EVENT(cachefiles_rename,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -374,17 +380,20 @@ TRACE_EVENT(cachefiles_rename,
TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
ino_t ino,
+ u64 disk_aux,
enum cachefiles_content content,
enum cachefiles_coherency_trace why),
- TP_ARGS(obj, ino, content, why),
+ TP_ARGS(obj, ino, disk_aux, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(enum cachefiles_coherency_trace, why )
- __field(enum cachefiles_content, content )
- __field(u64, ino )
+ __field(unsigned int, obj)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(enum cachefiles_content, content)
+ __field(u64, ino)
+ __field(u64, aux)
+ __field(u64, disk_aux)
),
TP_fast_assign(
@@ -392,13 +401,17 @@ TRACE_EVENT(cachefiles_coherency,
__entry->why = why;
__entry->content = content;
__entry->ino = ino;
+ __entry->aux = be64_to_cpup((__be64 *)obj->cookie->inline_aux);
+ __entry->disk_aux = disk_aux;
),
- TP_printk("o=%08x %s B=%llx c=%u",
+ TP_printk("o=%08x %s B=%llx c=%u aux=%llx dsk=%llx",
__entry->obj,
__print_symbolic(__entry->why, cachefiles_coherency_traces),
__entry->ino,
- __entry->content)
+ __entry->content,
+ __entry->aux,
+ __entry->disk_aux)
);
TRACE_EVENT(cachefiles_vol_coherency,
@@ -410,9 +423,9 @@ TRACE_EVENT(cachefiles_vol_coherency,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, vol )
- __field(enum cachefiles_coherency_trace, why )
- __field(u64, ino )
+ __field(unsigned int, vol)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(u64, ino)
),
TP_fast_assign(
@@ -439,14 +452,14 @@ TRACE_EVENT(cachefiles_prep_read,
TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum cachefiles_prepare_read_trace, why )
- __field(size_t, len )
- __field(loff_t, start )
- __field(unsigned int, netfs_inode )
- __field(unsigned int, cache_inode )
+ __field(unsigned int, obj)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum cachefiles_prepare_read_trace, why)
+ __field(size_t, len)
+ __field(loff_t, start)
+ __field(unsigned int, netfs_inode)
+ __field(unsigned int, cache_inode)
),
TP_fast_assign(
@@ -478,10 +491,10 @@ TRACE_EVENT(cachefiles_read,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -507,10 +520,10 @@ TRACE_EVENT(cachefiles_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -534,11 +547,11 @@ TRACE_EVENT(cachefiles_trunc,
TP_ARGS(obj, backer, from, to, why),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_trunc_trace, why )
- __field(loff_t, from )
- __field(loff_t, to )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_trunc_trace, why)
+ __field(loff_t, from)
+ __field(loff_t, to)
),
TP_fast_assign(
@@ -565,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -586,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -607,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -627,10 +640,10 @@ TRACE_EVENT(cachefiles_vfs_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -654,10 +667,10 @@ TRACE_EVENT(cachefiles_io_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -681,11 +694,11 @@ TRACE_EVENT(cachefiles_ondemand_open,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(unsigned int, fd )
- __field(unsigned int, flags )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(unsigned int, fd)
+ __field(unsigned int, flags)
),
TP_fast_assign(
@@ -711,9 +724,9 @@ TRACE_EVENT(cachefiles_ondemand_copen,
TP_ARGS(obj, msg_id, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(long, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(long, len)
),
TP_fast_assign(
@@ -734,9 +747,9 @@ TRACE_EVENT(cachefiles_ondemand_close,
TP_ARGS(obj, msg),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
@@ -758,11 +771,11 @@ TRACE_EVENT(cachefiles_ondemand_read,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -787,8 +800,8 @@ TRACE_EVENT(cachefiles_ondemand_cread,
TP_ARGS(obj, msg_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
),
TP_fast_assign(
@@ -808,10 +821,10 @@ TRACE_EVENT(cachefiles_ondemand_fd_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -834,8 +847,8 @@ TRACE_EVENT(cachefiles_ondemand_fd_release,
TP_ARGS(obj, object_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
diff --git a/include/trace/events/capability.h b/include/trace/events/capability.h
new file mode 100644
index 000000000000..17340257946c
--- /dev/null
+++ b/include/trace/events/capability.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM capability
+
+#if !defined(_TRACE_CAPABILITY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CAPABILITY_H
+
+#include <linux/cred.h>
+#include <linux/tracepoint.h>
+#include <linux/user_namespace.h>
+
+/**
+ * cap_capable - called after it's determined if a task has a particular
+ * effective capability
+ *
+ * @cred: The credentials used
+ * @target_ns: The user namespace of the resource being accessed
+ * @capable_ns: The user namespace in which the credential provides the
+ * capability to access the targeted resource.
+ * This will be NULL if ret is not 0.
+ * @cap: The capability to check for
+ * @ret: The return value of the check: 0 if the task has the capability, negative if it does not
+ *
+ * Allows tracing of calls to cap_capable() in commoncap.c.
+ */
+TRACE_EVENT(cap_capable,
+
+ TP_PROTO(const struct cred *cred, struct user_namespace *target_ns,
+ const struct user_namespace *capable_ns, int cap, int ret),
+
+ TP_ARGS(cred, target_ns, capable_ns, cap, ret),
+
+ TP_STRUCT__entry(
+ __field(const struct cred *, cred)
+ __field(struct user_namespace *, target_ns)
+ __field(const struct user_namespace *, capable_ns)
+ __field(int, cap)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = cred;
+ __entry->target_ns = target_ns;
+ __entry->capable_ns = ret == 0 ? capable_ns : NULL;
+ __entry->cap = cap;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("cred %p, target_ns %p, capable_ns %p, cap %d, ret %d",
+ __entry->cred, __entry->target_ns, __entry->capable_ns, __entry->cap,
+ __entry->ret)
+);
+
+#endif /* _TRACE_CAPABILITY_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
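
As a rough illustration of how the new cap_capable tracepoint can be consumed, the sketch below registers a probe from a module. The module and probe names are assumptions for illustration; register_trace_cap_capable()/unregister_trace_cap_capable() are what the TRACE_EVENT() above generates.

#include <linux/module.h>
#include <trace/events/capability.h>

/* Probe signature: a void *data first, then the TP_PROTO arguments. */
static void probe_cap_capable(void *data, const struct cred *cred,
			      struct user_namespace *target_ns,
			      const struct user_namespace *capable_ns,
			      int cap, int ret)
{
	pr_info("cap_capable: cap=%d ret=%d\n", cap, ret);
}

static int __init cap_probe_init(void)
{
	return register_trace_cap_capable(probe_cap_capable, NULL);
}

static void __exit cap_probe_exit(void)
{
	unregister_trace_cap_capable(probe_cap_capable, NULL);
	tracepoint_synchronize_unregister();
}

module_init(cap_probe_init);
module_exit(cap_probe_exit);
MODULE_LICENSE("GPL");
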
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index af2755bda6eb..ba9229af9a34 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -231,7 +231,11 @@ DECLARE_EVENT_CLASS(cgroup_rstat,
__entry->cpu, __entry->contended)
);
-/* Related to global: cgroup_rstat_lock */
+/*
+ * Related to locks:
+ * global rstat_base_lock for base stats
+ * cgroup_subsys::rstat_ss_lock for subsystem stats
+ */
DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended,
TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
@@ -253,49 +257,6 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
TP_ARGS(cgrp, cpu, contended)
);
-/* Related to per CPU: cgroup_rstat_cpu_lock */
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
#endif /* _TRACE_CGROUP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 383c09f583ac..37195edf2498 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -38,25 +38,32 @@ TRACE_EVENT(cma_release,
TRACE_EVENT(cma_alloc_start,
- TP_PROTO(const char *name, unsigned long count, unsigned int align),
+ TP_PROTO(const char *name, unsigned long request_count, unsigned long available_count,
+ unsigned long total_count, unsigned int align),
- TP_ARGS(name, count, align),
+ TP_ARGS(name, request_count, available_count, total_count, align),
TP_STRUCT__entry(
__string(name, name)
- __field(unsigned long, count)
+ __field(unsigned long, request_count)
+ __field(unsigned long, available_count)
+ __field(unsigned long, total_count)
__field(unsigned int, align)
),
TP_fast_assign(
__assign_str(name);
- __entry->count = count;
+ __entry->request_count = request_count;
+ __entry->available_count = available_count;
+ __entry->total_count = total_count;
__entry->align = align;
),
- TP_printk("name=%s count=%lu align=%u",
+ TP_printk("name=%s request_count=%lu available_count=%lu total_count=%lu align=%u",
__get_str(name),
- __entry->count,
+ __entry->request_count,
+ __entry->available_count,
+ __entry->total_count,
__entry->align)
);
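
For context, a call site updated for the widened cma_alloc_start prototype would look roughly like the sketch below. The struct cma layout (available_count, count fields) is an assumption inferred from the new trace arguments, not taken from this diff.

#include <trace/events/cma.h>
#include "cma.h"	/* mm-internal header; struct cma fields assumed */

static void report_alloc_start(struct cma *cma, unsigned long count,
			       unsigned int align)
{
	/* request vs. available vs. total, matching the new arguments */
	trace_cma_alloc_start(cma->name, count, cma->available_count,
			      cma->count, align);
}
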
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 23200aabccac..852d725afea2 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,13 +9,37 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_esz,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned long esz),
+
+ TP_ARGS(context_idx, scheme_idx, esz),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, esz)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->esz = esz;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u esz=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->esz)
+);
+
TRACE_EVENT_CONDITION(damos_before_apply,
TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
unsigned int target_idx, struct damon_region *r,
unsigned int nr_regions, bool do_trace),
- TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
+ TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
TP_CONDITION(do_trace),
@@ -48,6 +72,23 @@ TRACE_EVENT_CONDITION(damos_before_apply,
__entry->nr_accesses, __entry->age)
);
+TRACE_EVENT(damon_monitor_intervals_tune,
+
+ TP_PROTO(unsigned long sample_us),
+
+ TP_ARGS(sample_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, sample_us)
+ ),
+
+ TP_fast_assign(
+ __entry->sample_us = sample_us;
+ ),
+
+ TP_printk("sample_us=%lu", __entry->sample_us)
+);
+
TRACE_EVENT(damon_aggregated,
TP_PROTO(unsigned int target_id, struct damon_region *r,
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
new file mode 100644
index 000000000000..b3fef140ae15
--- /dev/null
+++ b/include/trace/events/dma.h
@@ -0,0 +1,475 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma
+
+#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_H
+
+#include <linux/tracepoint.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <trace/events/mmflags.h>
+
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define decode_dma_data_direction(dir) \
+ __print_symbolic(dir, \
+ { DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+ { DMA_NONE, "NONE" })
+
+#define decode_dma_attrs(attrs) \
+ __print_flags(attrs, "|", \
+ { DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
+ { DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
+ { DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
+ { DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
+ { DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
+ { DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
+ { DMA_ATTR_NO_WARN, "NO_WARN" }, \
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
+ { DMA_ATTR_MMIO, "MMIO" })
+
+DECLARE_EVENT_CLASS(dma_map,
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->phys_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_MAP_EVENT(name) \
+DEFINE_EVENT(dma_map, name, \
+ TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
+
+DEFINE_MAP_EVENT(dma_map_phys);
+
+DECLARE_EVENT_CLASS(dma_unmap,
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->addr,
+ __entry->size,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_UNMAP_EVENT(name) \
+DEFINE_EVENT(dma_unmap, name, \
+ TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, \
+ enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, addr, size, dir, attrs))
+
+DEFINE_UNMAP_EVENT(dma_unmap_phys);
+
+DECLARE_EVENT_CLASS(dma_alloc_class,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, gfp_t flags,
+ unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(void *, virt_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(gfp_t, flags)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->virt_addr = virt_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->flags = flags;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->virt_addr,
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_ALLOC_EVENT(name) \
+DEFINE_EVENT(dma_alloc_class, name, \
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, gfp_t flags, \
+ unsigned long attrs), \
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs))
+
+DEFINE_ALLOC_EVENT(dma_alloc);
+DEFINE_ALLOC_EVENT(dma_alloc_pages);
+DEFINE_ALLOC_EVENT(dma_alloc_sgt_err);
+
+TRACE_EVENT(dma_alloc_sgt,
+ TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
+ enum dma_data_direction dir, gfp_t flags, unsigned long attrs),
+ TP_ARGS(dev, sgt, size, dir, flags, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, sgt->orig_nents)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(gfp_t, flags)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->dma_addr = sg_dma_address(sgt->sgl);
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->flags = flags;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ show_gfp_flags(__entry->flags),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_free_class,
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(void *, virt_addr)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->virt_addr = virt_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __entry->virt_addr,
+ decode_dma_attrs(__entry->attrs))
+);
+
+#define DEFINE_FREE_EVENT(name) \
+DEFINE_EVENT(dma_free_class, name, \
+ TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \
+ size_t size, enum dma_data_direction dir, unsigned long attrs), \
+ TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs))
+
+DEFINE_FREE_EVENT(dma_free);
+DEFINE_FREE_EVENT(dma_free_pages);
+
+TRACE_EVENT(dma_free_sgt,
+ TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgt, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, sgt->orig_nents)
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->dma_addr = sg_dma_address(sgt->sgl);
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size,
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)))
+);
+
+TRACE_EVENT(dma_map_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int ents, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, ents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __dynamic_array(u64, dma_addrs, ents)
+ __dynamic_array(unsigned int, lengths, ents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ for_each_sg(sgl, sg, ents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_map_sg_err,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ int err, enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, err, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, phys_addrs, nents)
+ __field(int, err)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
+ __entry->err = err;
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(phys_addrs),
+ __get_dynamic_array_len(phys_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __entry->err,
+ decode_dma_attrs(__entry->attrs))
+);
+
+TRACE_EVENT(dma_unmap_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir, unsigned long attrs),
+ TP_ARGS(dev, sgl, nents, dir, attrs),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, addrs, nents)
+ __field(enum dma_data_direction, dir)
+ __field(unsigned long, attrs)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i)
+ ((u64 *)__get_dynamic_array(addrs))[i] = sg_phys(sg);
+ __entry->dir = dir;
+ __entry->attrs = attrs;
+ ),
+
+ TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(addrs),
+ __get_dynamic_array_len(addrs) /
+ sizeof(u64), sizeof(u64)),
+ decode_dma_attrs(__entry->attrs))
+);
+
+DECLARE_EVENT_CLASS(dma_sync_single,
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, dma_addr, size, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __field(u64, dma_addr)
+ __field(size_t, size)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __entry->dma_addr = dma_addr;
+ __entry->size = size;
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addr=%llx size=%zu",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __entry->dma_addr,
+ __entry->size)
+);
+
+#define DEFINE_SYNC_SINGLE_EVENT(name) \
+DEFINE_EVENT(dma_sync_single, name, \
+ TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size, \
+ enum dma_data_direction dir), \
+ TP_ARGS(dev, dma_addr, size, dir))
+
+DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_cpu);
+DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_device);
+
+DECLARE_EVENT_CLASS(dma_sync_sg,
+ TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
+ enum dma_data_direction dir),
+ TP_ARGS(dev, sgl, nents, dir),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __dynamic_array(u64, dma_addrs, nents)
+ __dynamic_array(unsigned int, lengths, nents)
+ __field(enum dma_data_direction, dir)
+ ),
+
+ TP_fast_assign(
+ struct scatterlist *sg;
+ int i;
+
+ __assign_str(device);
+ for_each_sg(sgl, sg, nents, i) {
+ ((u64 *)__get_dynamic_array(dma_addrs))[i] =
+ sg_dma_address(sg);
+ ((unsigned int *)__get_dynamic_array(lengths))[i] =
+ sg_dma_len(sg);
+ }
+ __entry->dir = dir;
+ ),
+
+ TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
+ __get_str(device),
+ decode_dma_data_direction(__entry->dir),
+ __print_array(__get_dynamic_array(dma_addrs),
+ __get_dynamic_array_len(dma_addrs) /
+ sizeof(u64), sizeof(u64)),
+ __print_array(__get_dynamic_array(lengths),
+ __get_dynamic_array_len(lengths) /
+ sizeof(unsigned int), sizeof(unsigned int)))
+);
+
+#define DEFINE_SYNC_SG_EVENT(name) \
+DEFINE_EVENT(dma_sync_sg, name, \
+ TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, \
+ enum dma_data_direction dir), \
+ TP_ARGS(dev, sg, nents, dir))
+
+DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_cpu);
+DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_device);
+
+#endif /* _TRACE_DMA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
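
To make the event-class plumbing above concrete, here is a minimal sketch of emitting the generated dma_map_phys event. The wrapper function is illustrative; trace_dma_map_phys() is what DEFINE_MAP_EVENT(dma_map_phys) produces, and real emission points live in the DMA mapping core.

#include <trace/events/dma.h>

static void note_phys_mapping(struct device *dev, phys_addr_t phys,
			      dma_addr_t dma, size_t size,
			      enum dma_data_direction dir,
			      unsigned long attrs)
{
	trace_dma_map_phys(dev, phys, dma, size, dir, attrs);
}
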
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index a4de3df8500b..4814a65b68dc 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -16,6 +16,36 @@ DECLARE_EVENT_CLASS(dma_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
+ __string(driver, dma_fence_driver_name(fence))
+ __string(timeline, dma_fence_timeline_name(fence))
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver);
+ __assign_str(timeline);
+ __entry->context = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+
+ TP_printk("driver=%s timeline=%s context=%u seqno=%u",
+ __get_str(driver), __get_str(timeline), __entry->context,
+ __entry->seqno)
+);
+
+/*
+ * Safe only for call sites which are guaranteed not to race with fence
+ * signaling: either holding fence->lock with the fence checked as not yet
+ * signaled, or being the signaling path itself.
+ */
+DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+
+ TP_PROTO(struct dma_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
@@ -34,14 +64,14 @@ DECLARE_EVENT_CLASS(dma_fence,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence, dma_fence_emit,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_init,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -55,14 +85,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_signaled,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
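
A call site honoring the dma_fence_unsignaled contract above might look like this sketch: take fence->lock, confirm the fence has not signaled, then emit. The helper name is illustrative; the lock, flag bit, and generated trace function are the standard dma-fence API.

#include <linux/dma-fence.h>
#include <trace/events/dma_fence.h>

static void emit_enable_signal(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		trace_dma_fence_enable_signal(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
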
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index b9bbfd855f2a..dad7360f42f9 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -75,7 +75,7 @@ TRACE_EVENT(erofs_fill_inode,
__entry->ofs = erofs_blkoff(inode->i_sb, erofs_iloc(inode));
),
- TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u",
+ TP_printk("dev = (%d,%d), nid = %llu, blkaddr %llu ofs %u",
show_dev_nid(__entry),
__entry->blkaddr, __entry->ofs)
);
@@ -113,7 +113,7 @@ TRACE_EVENT(erofs_read_folio,
__entry->raw)
);
-TRACE_EVENT(erofs_readpages,
+TRACE_EVENT(erofs_readahead,
TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
bool raw),
@@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages,
__entry->raw)
);
-DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
+TRACE_EVENT(erofs_map_blocks_enter,
+
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags),
@@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter,
__entry->flags ? show_map_flags(__entry->flags) : "NULL")
);
-DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags),
-
- TP_ARGS(inode, map, flags)
-);
-
-DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags),
-
- TP_ARGS(inode, map, flags)
-);
+TRACE_EVENT(erofs_map_blocks_exit,
-DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
unsigned int flags, int ret),
@@ -223,38 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit,
show_mflags(__entry->mflags), __entry->ret)
);
-DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit,
- TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
- unsigned int flags, int ret),
-
- TP_ARGS(inode, map, flags, ret)
-);
-
-TRACE_EVENT(erofs_destroy_inode,
- TP_PROTO(struct inode *inode),
-
- TP_ARGS(inode),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( erofs_nid_t, nid )
- ),
-
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->nid = EROFS_I(inode)->nid;
- ),
-
- TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry))
-);
-
#endif /* _TRACE_EROFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/exceptions.h b/include/trace/events/exceptions.h
new file mode 100644
index 000000000000..a631f8de8917
--- /dev/null
+++ b/include/trace/events/exceptions.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(exceptions,
+
+ TP_PROTO(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code),
+
+ TP_ARGS(address, regs, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, address )
+ __field( unsigned long, ip )
+ __field( unsigned long, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->ip = instruction_pointer(regs);
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address=%ps ip=%ps error_code=0x%lx",
+ (void *)__entry->address, (void *)__entry->ip,
+ __entry->error_code) );
+
+DEFINE_EVENT(exceptions, page_fault_user,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+DEFINE_EVENT(exceptions, page_fault_kernel,
+ TP_PROTO(unsigned long address, struct pt_regs *regs, unsigned long error_code),
+ TP_ARGS(address, regs, error_code));
+
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
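
An architecture wiring up these events would do so from its fault handler, roughly as below. The helper name is an assumption; trace_page_fault_user()/trace_page_fault_kernel() come from the DEFINE_EVENT() lines above, and user_mode() is the standard ptrace helper.

#include <linux/ptrace.h>
#include <trace/events/exceptions.h>

static void trace_fault_entry(struct pt_regs *regs, unsigned long address,
			      unsigned long error_code)
{
	if (user_mode(regs))
		trace_page_fault_user(address, regs, error_code);
	else
		trace_page_fault_kernel(address, regs, error_code);
}
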
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index a697f4b77162..fd76d14c2776 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -23,10 +23,7 @@ struct partial_cluster;
#define show_mballoc_flags(flags) __print_flags(flags, "|", \
{ EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \
- { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \
- { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \
{ EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \
- { EXT4_MB_HINT_BEST, "HINT_BEST" }, \
{ EXT4_MB_HINT_DATA, "HINT_DATA" }, \
{ EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \
{ EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \
@@ -42,7 +39,7 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
{ EXT4_GET_BLOCKS_UNWRIT_EXT, "UNWRIT" }, \
{ EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \
- { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \
+ { EXT4_GET_BLOCKS_SPLIT_NOMERGE, "SPLIT_NOMERGE" }, \
{ EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
@@ -91,9 +88,9 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B);
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
{ FALLOC_FL_PUNCH_HOLE, "PUNCH_HOLE"}, \
- { FALLOC_FL_NO_HIDE_STALE, "NO_HIDE_STALE"}, \
{ FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \
- { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"})
+ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}, \
+ { FALLOC_FL_WRITE_ZEROES, "WRITE_ZEROES"})
TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR);
TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME);
@@ -483,16 +480,17 @@ TRACE_EVENT(ext4_writepages,
(unsigned long) __entry->writeback_index)
);
-TRACE_EVENT(ext4_da_write_pages,
- TP_PROTO(struct inode *inode, pgoff_t first_page,
+TRACE_EVENT(ext4_da_write_folios_start,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
struct writeback_control *wbc),
- TP_ARGS(inode, first_page, wbc),
+ TP_ARGS(inode, start_pos, next_pos, wbc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( pgoff_t, first_page )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
__field( long, nr_to_write )
__field( int, sync_mode )
),
@@ -500,18 +498,48 @@ TRACE_EVENT(ext4_da_write_pages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->first_page = first_page;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
__entry->nr_to_write = wbc->nr_to_write;
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
- "sync_mode %d",
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->first_page,
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->sync_mode)
);
+TRACE_EVENT(ext4_da_write_folios_end,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
+ struct writeback_control *wbc, int ret),
+
+ TP_ARGS(inode, start_pos, next_pos, wbc, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
+ __field( long, nr_to_write )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->nr_to_write, __entry->ret)
+);
+
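
The start/end pair above is meant to bracket one delalloc writeback pass. A condensed, illustrative shape of the caller (not ext4's actual code) is:

#include <trace/events/ext4.h>

static int da_writeback_range(struct inode *inode, loff_t start_pos,
			      loff_t next_pos, struct writeback_control *wbc)
{
	int ret;

	trace_ext4_da_write_folios_start(inode, start_pos, next_pos, wbc);
	ret = 0;	/* stands in for the actual folio writeback loop */
	trace_ext4_da_write_folios_end(inode, start_pos, next_pos, wbc, ret);
	return ret;
}
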
TRACE_EVENT(ext4_da_write_pages_extent,
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
@@ -1246,14 +1274,15 @@ TRACE_EVENT(ext4_da_update_reserve_space,
);
TRACE_EVENT(ext4_da_reserve_space,
- TP_PROTO(struct inode *inode),
+ TP_PROTO(struct inode *inode, int nr_resv),
- TP_ARGS(inode),
+ TP_ARGS(inode, nr_resv),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u64, i_blocks )
+ __field( int, reserve_blocks )
__field( int, reserved_data_blocks )
__field( __u16, mode )
),
@@ -1262,16 +1291,17 @@ TRACE_EVENT(ext4_da_reserve_space,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->i_blocks = inode->i_blocks;
+ __entry->reserve_blocks = nr_resv;
__entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
__entry->mode = inode->i_mode;
),
- TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu "
+ TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu reserve_blocks %d"
"reserved_data_blocks %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->mode, __entry->i_blocks,
- __entry->reserved_data_blocks)
+ __entry->reserve_blocks, __entry->reserved_data_blocks)
);
TRACE_EVENT(ext4_da_release_space,
@@ -2180,7 +2210,8 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__field( ext4_lblk_t, lblk )
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
- __field( char, status )
+ __field( char, status )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2190,13 +2221,15 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__entry->len = es->es_len;
__entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
- __entry->pblk, show_extent_status(__entry->status))
+ __entry->pblk, show_extent_status(__entry->status),
+ __entry->seq)
);
DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
@@ -2221,6 +2254,7 @@ TRACE_EVENT(ext4_es_remove_extent,
__field( ino_t, ino )
__field( loff_t, lblk )
__field( loff_t, len )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2228,12 +2262,13 @@ TRACE_EVENT(ext4_es_remove_extent,
__entry->ino = inode->i_ino;
__entry->lblk = lblk;
__entry->len = len;
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+ TP_printk("dev %d,%d ino %lu es [%lld/%lld) seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
- __entry->lblk, __entry->len)
+ __entry->lblk, __entry->len, __entry->seq)
);
TRACE_EVENT(ext4_es_find_extent_range_enter,
@@ -2478,11 +2513,11 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
-TRACE_EVENT(ext4_es_insert_delayed_block,
+TRACE_EVENT(ext4_es_insert_delayed_extent,
TP_PROTO(struct inode *inode, struct extent_status *es,
- bool allocated),
+ bool lclu_allocated, bool end_allocated),
- TP_ARGS(inode, es, allocated),
+ TP_ARGS(inode, es, lclu_allocated, end_allocated),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -2491,7 +2526,9 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__field( ext4_lblk_t, len )
__field( ext4_fsblk_t, pblk )
__field( char, status )
- __field( bool, allocated )
+ __field( bool, lclu_allocated )
+ __field( bool, end_allocated )
+ __field( u64, seq )
),
TP_fast_assign(
@@ -2501,16 +2538,18 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__entry->len = es->es_len;
__entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
- __entry->allocated = allocated;
+ __entry->lclu_allocated = lclu_allocated;
+ __entry->end_allocated = end_allocated;
+ __entry->seq = EXT4_I(inode)->i_es_seq;
),
- TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s "
- "allocated %d",
+ TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s allocated %d %d seq %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
__entry->lblk, __entry->len,
__entry->pblk, show_extent_status(__entry->status),
- __entry->allocated)
+ __entry->lclu_allocated, __entry->end_allocated,
+ __entry->seq)
);
/* fsmap traces */
@@ -2977,6 +3016,80 @@ TRACE_EVENT(ext4_update_sb,
__entry->fsblk, __entry->flags)
);
+TRACE_EVENT(ext4_move_extent_enter,
+ TP_PROTO(struct inode *orig_inode, struct ext4_map_blocks *orig_map,
+ struct inode *donor_inode, ext4_lblk_t donor_lblk),
+
+ TP_ARGS(orig_inode, orig_map, donor_inode, donor_lblk),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, orig_ino)
+ __field(ext4_lblk_t, orig_lblk)
+ __field(unsigned int, orig_flags)
+ __field(ino_t, donor_ino)
+ __field(ext4_lblk_t, donor_lblk)
+ __field(unsigned int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = orig_inode->i_sb->s_dev;
+ __entry->orig_ino = orig_inode->i_ino;
+ __entry->orig_lblk = orig_map->m_lblk;
+ __entry->orig_flags = orig_map->m_flags;
+ __entry->donor_ino = donor_inode->i_ino;
+ __entry->donor_lblk = donor_lblk;
+ __entry->len = orig_map->m_len;
+ ),
+
+ TP_printk("dev %d,%d origin ino %lu lblk %u flags %s donor ino %lu lblk %u len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->orig_ino, __entry->orig_lblk,
+ show_mflags(__entry->orig_flags),
+ (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->len)
+);
+
+TRACE_EVENT(ext4_move_extent_exit,
+ TP_PROTO(struct inode *orig_inode, ext4_lblk_t orig_lblk,
+ struct inode *donor_inode, ext4_lblk_t donor_lblk,
+ unsigned int m_len, u64 move_len, int move_type, int ret),
+
+ TP_ARGS(orig_inode, orig_lblk, donor_inode, donor_lblk, m_len,
+ move_len, move_type, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, orig_ino)
+ __field(ext4_lblk_t, orig_lblk)
+ __field(ino_t, donor_ino)
+ __field(ext4_lblk_t, donor_lblk)
+ __field(unsigned int, m_len)
+ __field(u64, move_len)
+ __field(int, move_type)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = orig_inode->i_sb->s_dev;
+ __entry->orig_ino = orig_inode->i_ino;
+ __entry->orig_lblk = orig_lblk;
+ __entry->donor_ino = donor_inode->i_ino;
+ __entry->donor_lblk = donor_lblk;
+ __entry->m_len = m_len;
+ __entry->move_len = move_len;
+ __entry->move_type = move_type;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d origin ino %lu lblk %u donor ino %lu lblk %u m_len %u, move_len %llu type %d ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->orig_ino, __entry->orig_lblk,
+ (unsigned long) __entry->donor_ino, __entry->donor_lblk,
+ __entry->m_len, __entry->move_len, __entry->move_type,
+ __entry->ret)
+);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index ed794b5fefbe..df4017dcc701 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -50,6 +50,9 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
TRACE_DEFINE_ENUM(CP_RESIZE);
TRACE_DEFINE_ENUM(EX_READ);
TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
+TRACE_DEFINE_ENUM(CP_PHASE_START_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_CHECKPOINT);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -139,7 +142,8 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
{ CP_SPEC_LOG_NUM, "log type is 2" }, \
- { CP_RECOVER_DIR, "dir needs recovery" })
+ { CP_RECOVER_DIR, "dir needs recovery" }, \
+ { CP_XATTR_DIR, "dir's xattr updated" })
#define show_shutdown_mode(type) \
__print_symbolic(type, \
@@ -174,6 +178,12 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
S_IRWXU | S_IRWXG | S_IRWXO)
+#define show_cp_phase(phase) \
+ __print_symbolic(phase, \
+ { CP_PHASE_START_BLOCK_OPS, "start block_ops" }, \
+ { CP_PHASE_FINISH_BLOCK_OPS, "finish block_ops" }, \
+ { CP_PHASE_FINISH_CHECKPOINT, "finish checkpoint" })
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -203,7 +213,7 @@ DECLARE_EVENT_CLASS(f2fs__inode,
__entry->pino = F2FS_I(inode)->i_pino;
__entry->mode = inode->i_mode;
__entry->nlink = inode->i_nlink;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->advise = F2FS_I(inode)->i_advise;
),
@@ -352,7 +362,7 @@ TRACE_EVENT(f2fs_unlink_enter,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->size = dir->i_size;
+ __entry->size = i_size_read(dir);
__entry->blocks = dir->i_blocks;
__assign_str(name);
),
@@ -432,7 +442,7 @@ DECLARE_EVENT_CLASS(f2fs__truncate_op,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->from = from;
),
@@ -585,6 +595,38 @@ TRACE_EVENT(f2fs_file_write_iter,
__entry->ret)
);
+TRACE_EVENT(f2fs_fadvise,
+
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int advice),
+
+ TP_ARGS(inode, offset, len, advice),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(int, advice)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->size = i_size_read(inode);
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->advice = advice;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld offset:%llu, len:%llu, advise:%d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->size,
+ __entry->offset,
+ __entry->len,
+ __entry->advice)
+);
+
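
A plausible emission point for the event above is f2fs's fadvise handler, sketched here; the handler body is an assumption, with generic_fadvise() doing the actual work.

#include <linux/fs.h>
#include <trace/events/f2fs.h>

static int f2fs_file_fadvise(struct file *file, loff_t offset, loff_t len,
			     int advice)
{
	trace_f2fs_fadvise(file_inode(file), offset, len, advice);
	return generic_fadvise(file, offset, len, advice);
}
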
TRACE_EVENT(f2fs_map_blocks,
TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
int ret),
@@ -1005,7 +1047,7 @@ TRACE_EVENT(f2fs_fallocate,
__entry->mode = mode;
__entry->offset = offset;
__entry->len = len;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->ret = ret;
),
@@ -1118,11 +1160,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
(unsigned long long)__entry->count)
);
-DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
+DECLARE_EVENT_CLASS(f2fs__submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -1137,9 +1179,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_fast_assign(
- __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
- __entry->ino = page_file_mapping(page)->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
__entry->old_blkaddr = fio->old_blkaddr;
__entry->new_blkaddr = fio->new_blkaddr;
__entry->op = fio->op;
@@ -1148,7 +1190,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->type = fio->type;
),
- TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+ TP_printk("dev = (%d,%d), ino = %lu, folio_index = 0x%lx, "
"oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
@@ -1159,22 +1201,22 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
show_block_type(__entry->type))
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_bio,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
-DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write,
+DEFINE_EVENT_CONDITION(f2fs__submit_folio_bio, f2fs_submit_folio_write,
- TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+ TP_PROTO(struct folio *folio, struct f2fs_io_info *fio),
- TP_ARGS(page, fio),
+ TP_ARGS(folio, fio),
- TP_CONDITION(page->mapping)
+ TP_CONDITION(folio->mapping)
);
DECLARE_EVENT_CLASS(f2fs__bio,
@@ -1321,12 +1363,11 @@ DECLARE_EVENT_CLASS(f2fs__folio,
),
TP_fast_assign(
- __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev;
- __entry->ino = folio_file_mapping(folio)->host->i_ino;
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->ino = folio->mapping->host->i_ino;
__entry->type = type;
- __entry->dir =
- S_ISDIR(folio_file_mapping(folio)->host->i_mode);
- __entry->index = folio_index(folio);
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
__entry->dirty = folio_test_dirty(folio);
__entry->uptodate = folio_test_uptodate(folio);
),
@@ -1472,7 +1513,6 @@ TRACE_EVENT(f2fs_writepages,
__field(char, for_kupdate)
__field(char, for_background)
__field(char, tagged_writepages)
- __field(char, for_reclaim)
__field(char, range_cyclic)
__field(char, for_sync)
),
@@ -1491,14 +1531,13 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
__entry->tagged_writepages = wbc->tagged_writepages;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->for_sync = wbc->for_sync;
),
TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, "
"skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, "
- "kupdate %u background %u tagged %u reclaim %u cyclic %u sync %u",
+ "kupdate %u background %u tagged %u cyclic %u sync %u",
show_dev_ino(__entry),
show_block_type(__entry->type),
show_file_type(__entry->dir),
@@ -1511,7 +1550,6 @@ TRACE_EVENT(f2fs_writepages,
__entry->for_kupdate,
__entry->for_background,
__entry->tagged_writepages,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->for_sync)
);
@@ -1544,26 +1582,26 @@ TRACE_EVENT(f2fs_readpages,
TRACE_EVENT(f2fs_write_checkpoint,
- TP_PROTO(struct super_block *sb, int reason, const char *msg),
+ TP_PROTO(struct super_block *sb, int reason, u16 phase),
- TP_ARGS(sb, reason, msg),
+ TP_ARGS(sb, reason, phase),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, reason)
- __string(dest_msg, msg)
+ __field(u16, phase)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->reason = reason;
- __assign_str(dest_msg);
+ __entry->phase = phase;
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
show_dev(__entry->dev),
show_cpreason(__entry->reason),
- __get_str(dest_msg))
+ show_cp_phase(__entry->phase))
);
DECLARE_EVENT_CLASS(f2fs_discard,
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 20b914250ce9..feb28b359eff 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -7,6 +7,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <linux/tracepoint.h>
@@ -44,7 +46,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->err = err;
__entry->oif = flp->flowi4_oif;
__entry->iif = flp->flowi4_iif;
- __entry->tos = flp->flowi4_tos;
+ __entry->tos = inet_dscp_to_dsfield(flp->flowi4_dscp);
__entry->scope = flp->flowi4_scope;
__entry->flags = flp->flowi4_flags;
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 5d7ee2610728..8d22b2e98d48 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib6_table_lookup,
__field( int, err )
__field( int, oif )
__field( int, iif )
+ __field( u32, flowlabel )
__field( __u8, tos )
__field( __u8, scope )
__field( __u8, flags )
@@ -42,6 +43,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->err = ip6_rt_type_to_error(res->fib6_type);
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
+ __entry->flowlabel = ntohl(flowi6_get_flowlabel(flp));
__entry->tos = ip6_tclass(flp->flowlabel);
__entry->scope = flp->flowi6_scope;
__entry->flags = flp->flowi6_flags;
@@ -76,11 +78,11 @@ TRACE_EVENT(fib6_table_lookup,
}
),
- TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u tos %d scope %d flags %x ==> dev %s gw %pI6c err %d",
+ TP_printk("table %3u oif %d iif %d proto %u %pI6c/%u -> %pI6c/%u flowlabel %#x tos %d scope %d flags %x ==> dev %s gw %pI6c err %d",
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
- __entry->tos, __entry->scope, __entry->flags,
- __entry->name, __entry->gw, __entry->err)
+ __entry->flowlabel, __entry->tos, __entry->scope,
+ __entry->flags, __entry->name, __entry->gw, __entry->err)
);
#endif /* _TRACE_FIB6_H */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index b8d1e00a7982..370016c38a5b 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -27,7 +27,8 @@
{ FL_SLEEP, "FL_SLEEP" }, \
{ FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \
{ FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \
- { FL_OFDLCK, "FL_OFDLCK" })
+ { FL_OFDLCK, "FL_OFDLCK" }, \
+ { FL_RECLAIM, "FL_RECLAIM" })
#define show_fl_type(val) \
__print_symbolic(val, \
@@ -189,7 +190,7 @@ TRACE_EVENT(generic_add_lease,
__entry->i_ino = inode->i_ino;
__entry->wcount = atomic_read(&inode->i_writecount);
__entry->rcount = atomic_read(&inode->i_readcount);
- __entry->icount = atomic_read(&inode->i_count);
+ __entry->icount = icount_read(inode);
__entry->owner = fl->c.flc_owner;
__entry->flags = fl->c.flc_flags;
__entry->type = fl->c.flc_type;
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 46c89c1e460c..f48fe637bfd2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -56,6 +56,90 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(folio)
);
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache_range,
+
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+
+ TP_ARGS(mapping, index, last_index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ __field(unsigned long, last_index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ __entry->last_index = last_index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld-%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT,
+ ((((loff_t)__entry->last_index + 1) << PAGE_SHIFT) - 1)
+ )
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_get_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache_range, mm_filemap_map_pages,
+ TP_PROTO(
+ struct address_space *mapping,
+ pgoff_t index,
+ pgoff_t last_index
+ ),
+ TP_ARGS(mapping, index, last_index)
+);
+
+TRACE_EVENT(mm_filemap_fault,
+ TP_PROTO(struct address_space *mapping, pgoff_t index),
+
+ TP_ARGS(mapping, index),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(unsigned long, index)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ if (mapping->host->i_sb)
+ __entry->s_dev =
+ mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ __entry->index = index;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx ofs=%lld",
+ MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino,
+ ((loff_t)__entry->index) << PAGE_SHIFT
+ )
+);
+
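
For illustration, a fault path could emit mm_filemap_fault just before calling into the generic handler. The wrapper is hypothetical; filemap_fault(), vmf->pgoff, and f_mapping are the standard kernel API.

#include <linux/mm.h>
#include <trace/events/filemap.h>

static vm_fault_t traced_filemap_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;

	trace_mm_filemap_fault(mapping, vmf->pgoff);
	return filemap_fault(vmf);
}
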
TRACE_EVENT(filemap_set_wb_err,
TP_PROTO(struct address_space *mapping, errseq_t eseq),
diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h
index d695a560673f..ad0e0cf82b9c 100644
--- a/include/trace/events/firewire.h
+++ b/include/trace/events/firewire.h
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
// Copyright (c) 2024 Takashi Sakamoto
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM firewire
#if !defined(_FIREWIRE_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +12,7 @@
#include <linux/firewire-constants.h>
-#include "../../../drivers/firewire/packet-header-definitions.h"
+// Some macros are defined in 'drivers/firewire/packet-header-definitions.h'.
// The content of TP_printk field is preprocessed, then put to the module binary.
#define ASYNC_HEADER_GET_DESTINATION(header) \
@@ -36,10 +37,11 @@
#define QUADLET_SIZE 4
DECLARE_EVENT_CLASS(async_outbound_initiate_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__array(u32, header, ASYNC_HEADER_QUADLET_COUNT)
@@ -47,6 +49,7 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT);
@@ -54,8 +57,9 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
),
// This format is for the request subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -71,10 +75,11 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_outbound_complete_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -82,14 +87,16 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -99,10 +106,11 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template,
// The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_inbound_template,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_STRUCT__entry(
__field(u64, transaction)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, scode)
__field(u8, status)
@@ -112,6 +120,7 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
TP_fast_assign(
__entry->transaction = transaction;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->scode = scode;
__entry->status = status;
@@ -121,8 +130,9 @@ DECLARE_EVENT_CLASS(async_inbound_template,
),
// This format is for the response subaction.
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -139,26 +149,27 @@ DECLARE_EVENT_CLASS(async_inbound_template,
);
DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count)
);
DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
DEFINE_EVENT(async_inbound_template, async_response_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count)
);
DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
__entry->status,
@@ -175,11 +186,12 @@ DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound,
);
DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
- TP_ARGS(transaction, generation, scode, header, data, data_count),
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count),
+ TP_ARGS(transaction, card_index, generation, scode, header, data, data_count),
TP_printk(
- "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
+ "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s",
__entry->transaction,
+ __entry->card_index,
__entry->generation,
__entry->scode,
ASYNC_HEADER_GET_DESTINATION(__entry->header),
@@ -194,8 +206,8 @@ DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_ini
);
DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
- TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
- TP_ARGS(transaction, generation, scode, status, timestamp)
+ TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp),
+ TP_ARGS(transaction, card_index, generation, scode, status, timestamp)
);
#undef ASYNC_HEADER_GET_DESTINATION
@@ -206,23 +218,26 @@ DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete,
#undef ASYNC_HEADER_GET_RCODE
TRACE_EVENT(async_phy_outbound_initiate,
- TP_PROTO(u64 packet, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u32, first_quadlet)
__field(u32, second_quadlet)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->first_quadlet = first_quadlet;
 __entry->second_quadlet = second_quadlet;
),
TP_printk(
- "packet=0x%llx generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->first_quadlet,
__entry->second_quadlet
@@ -230,23 +245,26 @@ TRACE_EVENT(async_phy_outbound_initiate,
);
TRACE_EVENT(async_phy_outbound_complete,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp),
- TP_ARGS(packet, generation, status, timestamp),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp),
+ TP_ARGS(packet, card_index, generation, status, timestamp),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
),
TP_fast_assign(
__entry->packet = packet;
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->status = status;
__entry->timestamp = timestamp;
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp
@@ -254,10 +272,11 @@ TRACE_EVENT(async_phy_outbound_complete,
);
TRACE_EVENT(async_phy_inbound,
- TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
- TP_ARGS(packet, generation, status, timestamp, first_quadlet, second_quadlet),
+ TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet),
+ TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet),
TP_STRUCT__entry(
__field(u64, packet)
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, status)
__field(u16, timestamp)
@@ -273,8 +292,9 @@ TRACE_EVENT(async_phy_inbound,
 __entry->second_quadlet = second_quadlet;
),
TP_printk(
- "packet=0x%llx generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
+ "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x",
__entry->packet,
+ __entry->card_index,
__entry->generation,
__entry->status,
__entry->timestamp,
@@ -284,55 +304,61 @@ TRACE_EVENT(async_phy_inbound,
);
DECLARE_EVENT_CLASS(bus_reset_arrange_template,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset),
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(bool, short_reset)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->short_reset = short_reset;
),
TP_printk(
- "generation=%u short_reset=%s",
+ "card_index=%u generation=%u short_reset=%s",
+ __entry->card_index,
__entry->generation,
__entry->short_reset ? "true" : "false"
)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone,
- TP_PROTO(unsigned int generation, bool short_reset),
- TP_ARGS(generation, short_reset)
+ TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset),
+ TP_ARGS(card_index, generation, short_reset)
);
TRACE_EVENT(bus_reset_handle,
- TP_PROTO(unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
- TP_ARGS(generation, node_id, bm_abdicate, self_ids, self_id_count),
+ TP_PROTO(unsigned int card_index, unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count),
+ TP_ARGS(card_index, generation, node_id, bm_abdicate, self_ids, self_id_count),
TP_STRUCT__entry(
+ __field(u8, card_index)
__field(u8, generation)
__field(u8, node_id)
__field(bool, bm_abdicate)
__dynamic_array(u32, self_ids, self_id_count)
),
TP_fast_assign(
+ __entry->card_index = card_index;
__entry->generation = generation;
__entry->node_id = node_id;
__entry->bm_abdicate = bm_abdicate;
memcpy(__get_dynamic_array(self_ids), self_ids, __get_dynamic_array_len(self_ids));
),
TP_printk(
- "generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ "card_index=%u generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s",
+ __entry->card_index,
__entry->generation,
__entry->node_id,
__entry->bm_abdicate ? "true" : "false",
@@ -341,6 +367,544 @@ TRACE_EVENT(bus_reset_handle,
)
);
+// Some macros are defined in 'drivers/firewire/phy-packet-definitions.h'.
+
+// The content of the TP_printk field is preprocessed, then placed into the module binary.
+
+#define PHY_PACKET_SELF_ID_GET_PHY_ID(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_PHY_ID_MASK) >> SELF_ID_PHY_ID_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_LINK_ACTIVE_MASK) >> SELF_ID_ZERO_LINK_ACTIVE_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_GAP_COUNT(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_GAP_COUNT_MASK) >> SELF_ID_ZERO_GAP_COUNT_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_SCODE(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_SCODE_MASK) >> SELF_ID_ZERO_SCODE_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_CONTENDER(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_CONTENDER_MASK) >> SELF_ID_ZERO_CONTENDER_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_POWER_CLASS(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_POWER_CLASS_MASK) >> SELF_ID_ZERO_POWER_CLASS_SHIFT)
+
+#define PHY_PACKET_SELF_ID_GET_INITIATED_RESET(quads) \
+ ((((const u32 *)quads)[0] & SELF_ID_ZERO_INITIATED_RESET_MASK) >> SELF_ID_ZERO_INITIATED_RESET_SHIFT)
+
+TRACE_EVENT(self_id_sequence,
+ TP_PROTO(unsigned int card_index, const u32 *self_id_sequence, unsigned int quadlet_count, unsigned int generation),
+ TP_ARGS(card_index, self_id_sequence, quadlet_count, generation),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u8, generation)
+ __dynamic_array(u8, port_status, self_id_sequence_get_port_capacity(quadlet_count))
+ __dynamic_array(u32, self_id_sequence, quadlet_count)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->generation = generation;
+ {
+ u8 *port_status = __get_dynamic_array(port_status);
+ unsigned int port_index;
+
+ for (port_index = 0; port_index < __get_dynamic_array_len(port_status); ++port_index) {
+ port_status[port_index] =
+ self_id_sequence_get_port_status(self_id_sequence,
+ quadlet_count, port_index);
+ }
+ }
+ memcpy(__get_dynamic_array(self_id_sequence), self_id_sequence,
+ __get_dynamic_array_len(self_id_sequence));
+ ),
+ TP_printk(
+ "card_index=%u generation=%u phy_id=0x%02x link_active=%s gap_count=%u scode=%u contender=%s power_class=%u initiated_reset=%s port_status=%s self_id_sequence=%s",
+ __entry->card_index,
+ __entry->generation,
+ PHY_PACKET_SELF_ID_GET_PHY_ID(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_LINK_ACTIVE(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ PHY_PACKET_SELF_ID_GET_GAP_COUNT(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_SCODE(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_CONTENDER(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ PHY_PACKET_SELF_ID_GET_POWER_CLASS(__get_dynamic_array(self_id_sequence)),
+ PHY_PACKET_SELF_ID_GET_INITIATED_RESET(__get_dynamic_array(self_id_sequence)) ? "true" : "false",
+ __print_array(__get_dynamic_array(port_status), __get_dynamic_array_len(port_status), 1),
+ __print_array(__get_dynamic_array(self_id_sequence),
+ __get_dynamic_array_len(self_id_sequence) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+#undef PHY_PACKET_SELF_ID_GET_PHY_ID
+#undef PHY_PACKET_SELF_ID_GET_LINK_ACTIVE
+#undef PHY_PACKET_SELF_ID_GET_GAP_COUNT
+#undef PHY_PACKET_SELF_ID_GET_SCODE
+#undef PHY_PACKET_SELF_ID_GET_CONTENDER
+#undef PHY_PACKET_SELF_ID_GET_POWER_CLASS
+#undef PHY_PACKET_SELF_ID_GET_INITIATED_RESET
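
A minimal sketch of a call site for the event above, assuming a struct fw_card *card and the quadlets gathered at bus-reset time; the helper name is illustrative, not part of the patch:

	static void note_self_id_sequence(struct fw_card *card, const u32 *sequence,
					  unsigned int quadlet_count, unsigned int generation)
	{
		// One event per self-ID sequence in the received topology map.
		trace_self_id_sequence(card->index, sequence, quadlet_count, generation);
	}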
+
+TRACE_EVENT_CONDITION(isoc_outbound_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int scode),
+ TP_ARGS(ctx, channel, scode),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u8, channel)
+ __field(u8, scode)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channel = channel;
+ __entry->scode = scode;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channel=%u scode=%u",
+ __entry->context,
+ __entry->card_index,
+ __entry->channel,
+ __entry->scode
+ )
+);
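
With TRACE_EVENT_CONDITION, an event records only when TP_CONDITION evaluates true, so a single call site can fire all three allocate events (the other two are defined just below) and only the one matching ctx->type produces a record. A minimal sketch, with the helper name assumed:

	static void trace_iso_ctx_allocate(const struct fw_iso_context *ctx,
					   unsigned int channel, unsigned int scode,
					   unsigned int header_size)
	{
		// Exactly one of these records anything, keyed off ctx->type.
		trace_isoc_outbound_allocate(ctx, channel, scode);
		trace_isoc_inbound_single_allocate(ctx, channel, header_size);
		trace_isoc_inbound_multiple_allocate(ctx);
	}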
+
+TRACE_EVENT_CONDITION(isoc_inbound_single_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int channel, unsigned int header_size),
+ TP_ARGS(ctx, channel, header_size),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u8, channel)
+ __field(u8, header_size)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channel = channel;
+ __entry->header_size = header_size;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channel=%u header_size=%u",
+ __entry->context,
+ __entry->card_index,
+ __entry->channel,
+ __entry->header_size
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_multiple_allocate,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DECLARE_EVENT_CLASS(isoc_destroy_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_outbound_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_single_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_destroy_template, isoc_inbound_multiple_destroy,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+TRACE_EVENT(isoc_inbound_multiple_channels,
+ TP_PROTO(const struct fw_iso_context *ctx, u64 channels),
+ TP_ARGS(ctx, channels),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u64, channels)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->channels = channels;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u channels=0x%016llx",
+ __entry->context,
+ __entry->card_index,
+ __entry->channels
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_outbound_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match),
+ TP_ARGS(ctx, cycle_match),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(bool, cycle_match)
+ __field(u16, cycle)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->cycle_match = cycle_match < 0 ? false : true;
+ __entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x",
+ __entry->context,
+ __entry->card_index,
+ __entry->cycle_match ? "true" : "false",
+ __entry->cycle
+ )
+);
+
+DECLARE_EVENT_CLASS(isoc_inbound_start_template,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(bool, cycle_match)
+ __field(u16, cycle)
+ __field(u8, sync)
+ __field(u8, tags)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->cycle_match = cycle_match < 0 ? false : true;
+ __entry->cycle = __entry->cycle_match ? (u16)cycle_match : 0;
+ __entry->sync = sync;
+ __entry->tags = tags;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u cycle_match=%s cycle=0x%04x sync=%u tags=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->cycle_match ? "true" : "false",
+ __entry->cycle,
+ __entry->sync,
+ __print_flags(__entry->tags, "|",
+ { FW_ISO_CONTEXT_MATCH_TAG0, "0" },
+ { FW_ISO_CONTEXT_MATCH_TAG1, "1" },
+ { FW_ISO_CONTEXT_MATCH_TAG2, "2" },
+ { FW_ISO_CONTEXT_MATCH_TAG3, "3" }
+ )
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_single_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_inbound_start_template, isoc_inbound_multiple_start,
+ TP_PROTO(const struct fw_iso_context *ctx, int cycle_match, unsigned int sync, unsigned int tags),
+ TP_ARGS(ctx, cycle_match, sync, tags),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_stop_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_outbound_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_single_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_stop_template, isoc_inbound_multiple_stop,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_flush_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_outbound_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_single_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_template, isoc_inbound_multiple_flush,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+DECLARE_EVENT_CLASS(isoc_flush_completions_template,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u",
+ __entry->context,
+ __entry->card_index
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_outbound_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_single_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+DEFINE_EVENT_CONDITION(isoc_flush_completions_template, isoc_inbound_multiple_flush_completions,
+ TP_PROTO(const struct fw_iso_context *ctx),
+ TP_ARGS(ctx),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+);
+
+#define TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet) \
+ TP_STRUCT__entry( \
+ __field(u64, context) \
+ __field(u8, card_index) \
+ __field(u32, buffer_offset) \
+ __field(bool, interrupt) \
+ __field(bool, skip) \
+ __field(u8, sy) \
+ __field(u8, tag) \
+ __dynamic_array(u32, header, packet->header_length / QUADLET_SIZE) \
+ )
+
+#define TP_fast_assign_iso_packet(ctx, buffer_offset, packet) \
+ TP_fast_assign( \
+ __entry->context = (uintptr_t)ctx; \
+ __entry->card_index = ctx->card->index; \
+ __entry->buffer_offset = buffer_offset; \
+ __entry->interrupt = packet->interrupt; \
+ __entry->skip = packet->skip; \
+ __entry->sy = packet->sy; \
+ __entry->tag = packet->tag; \
+ memcpy(__get_dynamic_array(header), packet->header, \
+ __get_dynamic_array_len(header)); \
+ )
+
+TRACE_EVENT_CONDITION(isoc_outbound_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s sy=%d tag=%u header=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false",
+ __entry->skip ? "true" : "false",
+ __entry->sy,
+ __entry->tag,
+ __print_array(__get_dynamic_array(header),
+ __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_single_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s skip=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false",
+ __entry->skip ? "true" : "false"
+ )
+);
+
+TRACE_EVENT_CONDITION(isoc_inbound_multiple_queue,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned long buffer_offset, const struct fw_iso_packet *packet),
+ TP_ARGS(ctx, buffer_offset, packet),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL),
+ TP_STRUCT__entry_iso_packet(ctx, buffer_offset, packet),
+ TP_fast_assign_iso_packet(ctx, buffer_offset, packet),
+ TP_printk(
+ "context=0x%llx card_index=%u buffer_offset=0x%x interrupt=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->buffer_offset,
+ __entry->interrupt ? "true" : "false"
+ )
+);
+
+#undef TP_STRUCT__entry_iso_packet
+#undef TP_fast_assign_iso_packet
+
+#ifndef show_cause
+enum fw_iso_context_completions_cause {
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH = 0,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT,
+ FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW,
+};
+#define show_cause(cause) \
+ __print_symbolic(cause, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_FLUSH, "FLUSH" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_INTERRUPT, "INTERRUPT" }, \
+ { FW_ISO_CONTEXT_COMPLETIONS_CAUSE_HEADER_OVERFLOW, "HEADER_OVERFLOW" } \
+ )
+#endif
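
The #ifndef guard keeps the enum and macro from being redefined when this header is re-read under TRACE_HEADER_MULTI_READ; __print_symbolic() then renders the stored cause value as its name. An illustrative output line (all values invented):

	context=0xffff88810034a000 card_index=0 timestamp=0x0bad cause=INTERRUPT header=...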
+
+DECLARE_EVENT_CLASS(isoc_single_completions_template,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u16, timestamp)
+ __field(u8, cause)
+ __dynamic_array(u32, header, header_length / QUADLET_SIZE)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->timestamp = timestamp;
+ __entry->cause = cause;
+ memcpy(__get_dynamic_array(header), header, __get_dynamic_array_len(header));
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u timestamp=0x%04x cause=%s header=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->timestamp,
+ show_cause(__entry->cause),
+ __print_array(__get_dynamic_array(header),
+ __get_dynamic_array_len(header) / QUADLET_SIZE, QUADLET_SIZE)
+ )
+);
+
+DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_outbound_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_TRANSMIT)
+);
+
+DEFINE_EVENT_CONDITION(isoc_single_completions_template, isoc_inbound_single_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, u16 timestamp, enum fw_iso_context_completions_cause cause, const u32 *header, unsigned int header_length),
+ TP_ARGS(ctx, timestamp, cause, header, header_length),
+ TP_CONDITION(ctx->type == FW_ISO_CONTEXT_RECEIVE)
+);
+
+TRACE_EVENT(isoc_inbound_multiple_completions,
+ TP_PROTO(const struct fw_iso_context *ctx, unsigned int completed, enum fw_iso_context_completions_cause cause),
+ TP_ARGS(ctx, completed, cause),
+ TP_STRUCT__entry(
+ __field(u64, context)
+ __field(u8, card_index)
+ __field(u16, completed)
+ __field(u8, cause)
+ ),
+ TP_fast_assign(
+ __entry->context = (uintptr_t)ctx;
+ __entry->card_index = ctx->card->index;
+ __entry->completed = completed;
+ __entry->cause = cause;
+ ),
+ TP_printk(
+ "context=0x%llx card_index=%u completed=%u cause=%s",
+ __entry->context,
+ __entry->card_index,
+ __entry->completed,
+ show_cause(__entry->cause)
+ )
+);
+
#undef QUADLET_SIZE
#endif // _FIREWIRE_TRACE_EVENT_H
diff --git a/include/trace/events/firewire_ohci.h b/include/trace/events/firewire_ohci.h
new file mode 100644
index 000000000000..4f9a7f2577f3
--- /dev/null
+++ b/include/trace/events/firewire_ohci.h
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Takashi Sakamoto
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM firewire_ohci
+
+#if !defined(_FIREWIRE_OHCI_TRACE_EVENT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _FIREWIRE_OHCI_TRACE_EVENT_H
+
+#include <linux/tracepoint.h>
+
+// Some macros and helper functions are defined in 'drivers/firewire/ohci.c'.
+
+TRACE_EVENT(irqs,
+ TP_PROTO(unsigned int card_index, u32 events),
+ TP_ARGS(card_index, events),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u32, events)
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->events = events;
+ ),
+ TP_printk(
+ "card_index=%u events=%s",
+ __entry->card_index,
+ __print_flags(__entry->events, "|",
+ { OHCI1394_selfIDComplete, "selfIDComplete" },
+ { OHCI1394_RQPkt, "RQPkt" },
+ { OHCI1394_RSPkt, "RSPkt" },
+ { OHCI1394_reqTxComplete, "reqTxComplete" },
+ { OHCI1394_respTxComplete, "respTxComplete" },
+ { OHCI1394_isochRx, "isochRx" },
+ { OHCI1394_isochTx, "isochTx" },
+ { OHCI1394_postedWriteErr, "postedWriteErr" },
+ { OHCI1394_cycleTooLong, "cycleTooLong" },
+ { OHCI1394_cycle64Seconds, "cycle64Seconds" },
+ { OHCI1394_cycleInconsistent, "cycleInconsistent" },
+ { OHCI1394_regAccessFail, "regAccessFail" },
+ { OHCI1394_unrecoverableError, "unrecoverableError" },
+ { OHCI1394_busReset, "busReset" }
+ )
+ )
+);
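
A minimal sketch of the intended emission point, assuming the accessor and register names used elsewhere in the OHCI driver:

	static irqreturn_t irq_handler(int irq, void *data)
	{
		struct fw_ohci *ohci = data;
		u32 events = reg_read(ohci, OHCI1394_IntEventClear);	// names assumed

		trace_irqs(ohci->card.index, events);
		// ...dispatch on the individual event bits as before...
		return IRQ_HANDLED;
	}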
+
+#define QUADLET_SIZE 4
+
+#define SELF_ID_COUNT_IS_ERROR(reg) \
+ (!!(((reg) & OHCI1394_SelfIDCount_selfIDError_MASK) >> OHCI1394_SelfIDCount_selfIDError_SHIFT))
+
+#define SELF_ID_COUNT_GET_GENERATION(reg) \
+ (((reg) & OHCI1394_SelfIDCount_selfIDGeneration_MASK) >> OHCI1394_SelfIDCount_selfIDGeneration_SHIFT)
+
+#define SELF_ID_RECEIVE_Q0_GET_GENERATION(quadlet) \
+ (((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_GENERATION_SHIFT)
+
+#define SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(quadlet) \
+ (((quadlet) & OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_MASK) >> OHCI1394_SELF_ID_RECEIVE_Q0_TIMESTAMP_SHIFT)
+
+TRACE_EVENT(self_id_complete,
+ TP_PROTO(unsigned int card_index, u32 reg, const __le32 *self_id_receive, bool has_be_header_quirk),
+ TP_ARGS(card_index, reg, self_id_receive, has_be_header_quirk),
+ TP_STRUCT__entry(
+ __field(u8, card_index)
+ __field(u32, reg)
+ __dynamic_array(u32, self_id_receive, ohci1394_self_id_count_get_size(reg))
+ ),
+ TP_fast_assign(
+ __entry->card_index = card_index;
+ __entry->reg = reg;
+ {
+ u32 *ptr = __get_dynamic_array(self_id_receive);
+ int i;
+
+ for (i = 0; i < __get_dynamic_array_len(self_id_receive) / QUADLET_SIZE; ++i)
+ ptr[i] = cond_le32_to_cpu(self_id_receive[i], has_be_header_quirk);
+ }
+ ),
+ TP_printk(
+ "card_index=%u is_error=%s generation_at_bus_reset=%u generation_at_completion=%u timestamp=0x%04x packet_data=%s",
+ __entry->card_index,
+ SELF_ID_COUNT_IS_ERROR(__entry->reg) ? "true" : "false",
+ SELF_ID_COUNT_GET_GENERATION(__entry->reg),
+ SELF_ID_RECEIVE_Q0_GET_GENERATION(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
+ SELF_ID_RECEIVE_Q0_GET_TIMESTAMP(((const u32 *)__get_dynamic_array(self_id_receive))[0]),
+ __print_array(((const u32 *)__get_dynamic_array(self_id_receive)) + 1,
+ (__get_dynamic_array_len(self_id_receive) / QUADLET_SIZE) - 1, QUADLET_SIZE)
+ )
+);
+
+#undef SELF_ID_COUNT_IS_ERROR
+#undef SELF_ID_COUNT_GET_GENERATION
+#undef SELF_ID_RECEIVE_Q0_GET_GENERATION
+#undef SELF_ID_RECEIVE_Q0_GET_TIMESTAMP
+
+#undef QUADLET_SIZE
+
+#endif // _FIREWIRE_OHCI_TRACE_EVENT_H
+
+#include <trace/define_trace.h>
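
As with any trace header, exactly one compilation unit must instantiate these events by defining CREATE_TRACE_POINTS before the include; a sketch of what the driver side would contain:

	#define CREATE_TRACE_POINTS
	#include <trace/events/firewire_ohci.h>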
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 86fe6aecff1e..50ebc1290ab0 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -15,7 +15,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
__field(unsigned long, ino)
__field(unsigned long, vm_start)
__field(unsigned long, vm_end)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(pgoff_t, max_pgoff)
@@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(struct folio *, zero_folio)
__field(void *, radix_entry)
@@ -102,60 +102,12 @@ DEFINE_EVENT(dax_pmd_load_hole_class, name, \
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
-DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
- TP_PROTO(struct inode *inode, struct vm_fault *vmf,
- long length, pfn_t pfn, void *radix_entry),
- TP_ARGS(inode, vmf, length, pfn, radix_entry),
- TP_STRUCT__entry(
- __field(unsigned long, ino)
- __field(unsigned long, vm_flags)
- __field(unsigned long, address)
- __field(long, length)
- __field(u64, pfn_val)
- __field(void *, radix_entry)
- __field(dev_t, dev)
- __field(int, write)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->vm_flags = vmf->vma->vm_flags;
- __entry->address = vmf->address;
- __entry->write = vmf->flags & FAULT_FLAG_WRITE;
- __entry->length = length;
- __entry->pfn_val = pfn.val;
- __entry->radix_entry = radix_entry;
- ),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx length %#lx "
- "pfn %#llx %s radix_entry %#lx",
- MAJOR(__entry->dev),
- MINOR(__entry->dev),
- __entry->ino,
- __entry->vm_flags & VM_SHARED ? "shared" : "private",
- __entry->write ? "write" : "read",
- __entry->address,
- __entry->length,
- __entry->pfn_val & ~PFN_FLAGS_MASK,
- __print_flags_u64(__entry->pfn_val & PFN_FLAGS_MASK, "|",
- PFN_FLAGS_TRACE),
- (unsigned long)__entry->radix_entry
- )
-)
-
-#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
-DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
- TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
- long length, pfn_t pfn, void *radix_entry), \
- TP_ARGS(inode, vmf, length, pfn, radix_entry))
-
-DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
-
DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
TP_ARGS(inode, vmf, result),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(dev_t, dev)
@@ -194,36 +146,6 @@ DEFINE_PTE_FAULT_EVENT(dax_load_hole);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite_no_entry);
DEFINE_PTE_FAULT_EVENT(dax_insert_pfn_mkwrite);
-TRACE_EVENT(dax_insert_mapping,
- TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
- TP_ARGS(inode, vmf, radix_entry),
- TP_STRUCT__entry(
- __field(unsigned long, ino)
- __field(unsigned long, vm_flags)
- __field(unsigned long, address)
- __field(void *, radix_entry)
- __field(dev_t, dev)
- __field(int, write)
- ),
- TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
- __entry->vm_flags = vmf->vma->vm_flags;
- __entry->address = vmf->address;
- __entry->write = vmf->flags & FAULT_FLAG_WRITE;
- __entry->radix_entry = radix_entry;
- ),
- TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
- MAJOR(__entry->dev),
- MINOR(__entry->dev),
- __entry->ino,
- __entry->vm_flags & VM_SHARED ? "shared" : "private",
- __entry->write ? "write" : "read",
- __entry->address,
- (unsigned long)__entry->radix_entry
- )
-)
-
DECLARE_EVENT_CLASS(dax_writeback_range_class,
TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
TP_ARGS(inode, start_index, end_index),
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index a6190aa1b406..f1a73aa83fbb 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -35,12 +35,14 @@ enum fscache_volume_trace {
fscache_volume_get_cookie,
fscache_volume_get_create_work,
fscache_volume_get_hash_collision,
+ fscache_volume_get_withdraw,
fscache_volume_free,
fscache_volume_new_acquire,
fscache_volume_put_cookie,
fscache_volume_put_create_work,
fscache_volume_put_hash_collision,
fscache_volume_put_relinquish,
+ fscache_volume_put_withdraw,
fscache_volume_see_create_work,
fscache_volume_see_hash_wake,
fscache_volume_wait_create_work,
@@ -120,12 +122,14 @@ enum fscache_access_trace {
EM(fscache_volume_get_cookie, "GET cook ") \
EM(fscache_volume_get_create_work, "GET creat") \
EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_get_withdraw, "GET withd") \
EM(fscache_volume_free, "FREE ") \
EM(fscache_volume_new_acquire, "NEW acq ") \
EM(fscache_volume_put_cookie, "PUT cook ") \
EM(fscache_volume_put_create_work, "PUT creat") \
EM(fscache_volume_put_hash_collision, "PUT hcoll") \
EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_put_withdraw, "PUT withd") \
EM(fscache_volume_see_create_work, "SEE creat") \
EM(fscache_volume_see_hash_wake, "SEE hwake") \
E_(fscache_volume_wait_create_work, "WAIT crea")
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
index 4a2bb2c896d1..fa0d2c6bace4 100644
--- a/include/trace/events/habanalabs.h
+++ b/include/trace/events/habanalabs.h
@@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(habanalabs_comms_template,
__entry->op_str = op_str;
),
- TP_printk("%s: cms: %s",
+ TP_printk("%s: cmd: %s",
__get_str(dname),
__entry->op_str)
);
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index b5f5369b6300..4cde53b45a85 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -10,8 +10,7 @@
#define SCAN_STATUS \
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
- EM( SCAN_PMD_NULL, "pmd_null") \
- EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_NO_PTE_TABLE, "no_pte_table") \
EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
@@ -19,7 +18,6 @@
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
- EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
EM( SCAN_SCAN_ABORT, "scan_aborted") \
@@ -55,15 +53,14 @@ SCAN_STATUS
TRACE_EVENT(mm_khugepaged_scan_pmd,
- TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
+ TP_PROTO(struct mm_struct *mm, struct folio *folio,
int referenced, int none_or_zero, int status, int unmapped),
- TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
+ TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __field(bool, writable)
__field(int, referenced)
__field(int, none_or_zero)
__field(int, status)
@@ -72,18 +69,16 @@ TRACE_EVENT(mm_khugepaged_scan_pmd,
TP_fast_assign(
__entry->mm = mm;
- __entry->pfn = page ? page_to_pfn(page) : -1;
- __entry->writable = writable;
+ __entry->pfn = folio ? folio_pfn(folio) : -1;
__entry->referenced = referenced;
__entry->none_or_zero = none_or_zero;
__entry->status = status;
__entry->unmapped = unmapped;
),
- TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
+ TP_printk("mm=%p, scan_pfn=0x%lx, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
__entry->mm,
__entry->pfn,
- __entry->writable,
__entry->referenced,
__entry->none_or_zero,
__print_symbolic(__entry->status, SCAN_STATUS),
@@ -116,32 +111,29 @@ TRACE_EVENT(mm_collapse_huge_page,
TRACE_EVENT(mm_collapse_huge_page_isolate,
- TP_PROTO(struct page *page, int none_or_zero,
- int referenced, bool writable, int status),
+ TP_PROTO(struct folio *folio, int none_or_zero,
+ int referenced, int status),
- TP_ARGS(page, none_or_zero, referenced, writable, status),
+ TP_ARGS(folio, none_or_zero, referenced, status),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(int, none_or_zero)
__field(int, referenced)
- __field(bool, writable)
__field(int, status)
),
TP_fast_assign(
- __entry->pfn = page ? page_to_pfn(page) : -1;
+ __entry->pfn = folio ? folio_pfn(folio) : -1;
__entry->none_or_zero = none_or_zero;
__entry->referenced = referenced;
- __entry->writable = writable;
__entry->status = status;
),
- TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, writable=%d, status=%s",
+ TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, status=%s",
__entry->pfn,
__entry->none_or_zero,
__entry->referenced,
- __entry->writable,
__print_symbolic(__entry->status, SCAN_STATUS))
);
@@ -208,7 +200,7 @@ TRACE_EVENT(mm_khugepaged_scan_file,
TRACE_EVENT(mm_khugepaged_collapse_file,
TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
- bool is_shmem, unsigned long addr, struct file *file,
+ unsigned long addr, bool is_shmem, struct file *file,
int nr, int result),
TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
TP_STRUCT__entry(
@@ -233,7 +225,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
__entry->result = result;
),
- TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
+ TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s",
__entry->mm,
__entry->hpfn,
__entry->index,
diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h
new file mode 100644
index 000000000000..59605dfaeeb4
--- /dev/null
+++ b/include/trace/events/hugetlbfs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hugetlbfs
+
+#if !defined(_TRACE_HUGETLBFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HUGETLBFS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(hugetlbfs_alloc_inode,
+
+ TP_PROTO(struct inode *inode, struct inode *dir, int mode),
+
+ TP_ARGS(inode, dir, mode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(ino_t, dir)
+ __field(__u16, mode)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->dir = dir ? dir->i_ino : 0;
+ __entry->mode = mode;
+ ),
+
+ TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino,
+ (unsigned long) __entry->dir, __entry->mode)
+);
+
+DECLARE_EVENT_CLASS(hugetlbfs__inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(__u16, mode)
+ __field(loff_t, size)
+ __field(unsigned int, nlink)
+ __field(unsigned int, seals)
+ __field(blkcnt_t, blocks)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
+ __entry->size = inode->i_size;
+ __entry->nlink = inode->i_nlink;
+ __entry->seals = HUGETLBFS_I(inode)->seals;
+ __entry->blocks = inode->i_blocks;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino,
+ __entry->mode, __entry->size, __entry->nlink, __entry->seals,
+ (unsigned long long)__entry->blocks)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_evict_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_free_inode,
+
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode)
+);
+
+TRACE_EVENT(hugetlbfs_setattr,
+
+ TP_PROTO(struct inode *inode, struct dentry *dentry,
+ struct iattr *attr),
+
+ TP_ARGS(inode, dentry, attr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned int, d_len)
+ __string(d_name, dentry->d_name.name)
+ __field(unsigned int, ia_valid)
+ __field(unsigned int, ia_mode)
+ __field(loff_t, old_size)
+ __field(loff_t, ia_size)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->d_len = dentry->d_name.len;
+ __assign_str(d_name);
+ __entry->ia_valid = attr->ia_valid;
+ __entry->ia_mode = attr->ia_mode;
+ __entry->old_size = inode->i_size;
+ __entry->ia_size = attr->ia_size;
+ ),
+
+ TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld",
+ MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino,
+ __entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode,
+ __entry->old_size, __entry->ia_size)
+);
+
+TRACE_EVENT(hugetlbfs_fallocate,
+
+ TP_PROTO(struct inode *inode, int mode,
+ loff_t offset, loff_t len, int ret),
+
+ TP_ARGS(inode, mode, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(int, mode)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(loff_t, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->mode = mode;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->size = inode->i_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long)__entry->ino, __entry->mode,
+ (long long)__entry->offset,
+ (long long)__entry->len,
+ (long long)__entry->size,
+ __entry->ret)
+);
+
+#endif /* _TRACE_HUGETLBFS_H */
+
+ /* This part must be outside protection */
+#include <trace/define_trace.h>
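
A minimal sketch of a call site, assuming the event fires from the hugetlbfs fallocate handler with its final return value:

	trace_hugetlbfs_fallocate(inode, mode, offset, len, error);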
diff --git a/include/trace/events/hwmon.h b/include/trace/events/hwmon.h
index d1ff560cd9b5..3865098f21f1 100644
--- a/include/trace/events/hwmon.h
+++ b/include/trace/events/hwmon.h
@@ -9,14 +9,14 @@
DECLARE_EVENT_CLASS(hwmon_attr_class,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val),
TP_STRUCT__entry(
__field(int, index)
__string(attr_name, attr_name)
- __field(long, val)
+ __field(long long, val)
),
TP_fast_assign(
@@ -25,20 +25,20 @@ DECLARE_EVENT_CLASS(hwmon_attr_class,
__entry->val = val;
),
- TP_printk("index=%d, attr_name=%s, val=%ld",
+ TP_printk("index=%d, attr_name=%s, val=%lld",
__entry->index, __get_str(attr_name), __entry->val)
);
DEFINE_EVENT(hwmon_attr_class, hwmon_attr_show,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val)
);
DEFINE_EVENT(hwmon_attr_class, hwmon_attr_store,
- TP_PROTO(int index, const char *attr_name, long val),
+ TP_PROTO(int index, const char *attr_name, long long val),
TP_ARGS(index, attr_name, val)
);
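
Widening val from long to long long lets 64-bit attribute values round-trip intact on 32-bit kernels, where long is only 32 bits. A minimal sketch under that assumption, with the attribute name invented:

	static void hwmon_trace_example(int index)
	{
		long long val = 0x100000000LL;	/* would have truncated in a 32-bit long */

		trace_hwmon_attr_show(index, "temp1_input", val);
	}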
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
index 0d88ebf2c980..70323acde1de 100644
--- a/include/trace/events/intel_ifs.h
+++ b/include/trace/events/intel_ifs.h
@@ -35,6 +35,33 @@ TRACE_EVENT(ifs_status,
__entry->status)
);
+TRACE_EVENT(ifs_sbaf,
+
+ TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status),
+
+ TP_ARGS(batch, activate, status),
+
+ TP_STRUCT__entry(
+ __field( u64, status )
+ __field( int, batch )
+ __field( u16, bundle )
+ __field( u16, pgm )
+ ),
+
+ TP_fast_assign(
+ __entry->status = status.data;
+ __entry->batch = batch;
+ __entry->bundle = activate.bundle_idx;
+ __entry->pgm = activate.pgm_idx;
+ ),
+
+ TP_printk("batch: 0x%.2x, bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx",
+ __entry->batch,
+ __entry->bundle,
+ __entry->pgm,
+ __entry->status)
+);
+
#endif /* _TRACE_IFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 412c9c210a32..34b31a855ea4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -133,15 +133,15 @@ TRACE_EVENT(io_uring_file_get,
* io_uring_queue_async_work - called before submitting a new async work
*
* @req: pointer to a submitted request
- * @rw: type of workqueue, hashed or normal
+ * @hashed: whether async work is hashed
*
 * Allows tracing of asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(struct io_kiocb *req, int rw),
+ TP_PROTO(struct io_kiocb *req, bool hashed),
- TP_ARGS(req, rw),
+ TP_ARGS(req, hashed),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -150,7 +150,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__field( u8, opcode )
__field( unsigned long long, flags )
__field( struct io_wq_work *, work )
- __field( int, rw )
+ __field( bool, hashed )
__string( op_str, io_uring_get_opcode(req->opcode) )
),
@@ -162,7 +162,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__entry->flags = (__force unsigned long long) req->flags;
__entry->opcode = req->opcode;
__entry->work = &req->work;
- __entry->rw = rw;
+ __entry->hashed = hashed;
__assign_str(op_str);
),
@@ -170,7 +170,7 @@ TRACE_EVENT(io_uring_queue_async_work,
TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
__entry->ctx, __entry->req, __entry->user_data,
__get_str(op_str), __entry->flags,
- __entry->rw ? "hashed" : "normal", __entry->work)
+ __entry->hashed ? "hashed" : "normal", __entry->work)
);
/**
@@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link,
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
- * @req: pointer to a submitted request
- * @user_data: user data associated with the request
- * @res: result of the request
- * @cflags: completion flags
- * @extra1: extra 64-bit data for CQE32
- * @extra2: extra 64-bit data for CQE32
- *
+ * @req: (optional) pointer to a submitted request
+ * @cqe: pointer to the filled-in CQE being posted
*/
TRACE_EVENT(io_uring_complete,
- TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags,
- u64 extra1, u64 extra2),
+ TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
- TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2),
+ TP_ARGS(ctx, req, cqe),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete,
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
- __entry->user_data = user_data;
- __entry->res = res;
- __entry->cflags = cflags;
- __entry->extra1 = extra1;
- __entry->extra2 = extra2;
+ __entry->user_data = cqe->user_data;
+ __entry->res = cqe->res;
+ __entry->cflags = cqe->flags;
+ __entry->extra1 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[0] : 0;
+ __entry->extra2 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[1] : 0;
),
TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
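
Call sites now hand over the CQE itself rather than its unpacked fields; a minimal sketch of the new convention:

	trace_io_uring_complete(req->ctx, req, cqe);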
@@ -651,7 +645,7 @@ TRACE_EVENT(io_uring_short_write,
/*
* io_uring_local_work_run - ran ring local task work
*
- * @tctx: pointer to a io_uring_ctx
+ * @ctx: pointer to an io_ring_ctx
* @count: how many functions it ran
* @loops: how many loops it ran
*
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 3de9bfc982ce..9912f0ded81d 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -7,34 +7,6 @@
#include <linux/tracepoint.h>
-/**
- * ipi_raise - called when a smp cross call is made
- *
- * @mask: mask of recipient CPUs for the IPI
- * @reason: string identifying the IPI purpose
- *
- * It is necessary for @reason to be a static string declared with
- * __tracepoint_string.
- */
-TRACE_EVENT(ipi_raise,
-
- TP_PROTO(const struct cpumask *mask, const char *reason),
-
- TP_ARGS(mask, reason),
-
- TP_STRUCT__entry(
- __bitmask(target_cpus, nr_cpumask_bits)
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
- __entry->reason = reason;
- ),
-
- TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
-);
-
TRACE_EVENT(ipi_send_cpu,
TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
@@ -79,6 +51,35 @@ TRACE_EVENT(ipi_send_cpumask,
__get_cpumask(cpumask), __entry->callsite, __entry->callback)
);
+#ifdef CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS
+/**
+ * ipi_raise - called when a smp cross call is made
+ *
+ * @mask: mask of recipient CPUs for the IPI
+ * @reason: string identifying the IPI purpose
+ *
+ * It is necessary for @reason to be a static string declared with
+ * __tracepoint_string.
+ */
+TRACE_EVENT(ipi_raise,
+
+ TP_PROTO(const struct cpumask *mask, const char *reason),
+
+ TP_ARGS(mask, reason),
+
+ TP_STRUCT__entry(
+ __bitmask(target_cpus, nr_cpumask_bits)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
+ __entry->reason = reason;
+ ),
+
+ TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
+);
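
A minimal sketch of a caller, using tracepoint_string() (which places the literal in the __tracepoint_str section so tools can decode the pointer); the reason text is invented:

	static void send_resched_ipi(const struct cpumask *mask)
	{
		trace_ipi_raise(mask, tracepoint_string("RESCHEDULE"));
		/* ...architecture-specific IPI delivery follows... */
	}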
+
DECLARE_EVENT_CLASS(ipi_handler,
TP_PROTO(const char *reason),
@@ -127,6 +128,7 @@ DEFINE_EVENT(ipi_handler, ipi_exit,
TP_ARGS(reason)
);
+#endif /* CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS */
#endif /* _TRACE_IPI_H */
diff --git a/include/trace/events/irq_matrix.h b/include/trace/events/irq_matrix.h
index 267d4cbbf360..93244078b4e6 100644
--- a/include/trace/events/irq_matrix.h
+++ b/include/trace/events/irq_matrix.h
@@ -138,14 +138,6 @@ DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system,
TP_ARGS(bit, matrix)
);
-DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_reserved,
-
- TP_PROTO(int bit, unsigned int cpu,
- struct irq_matrix *matrix, struct cpumap *cmap),
-
- TP_ARGS(bit, cpu, matrix, cmap)
-);
-
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed,
TP_PROTO(int bit, unsigned int cpu,
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 8a829e0f6e55..7f93e754da5c 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -22,6 +22,7 @@ TRACE_EVENT(kmem_cache_alloc,
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
+ __string( name, s->name )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
@@ -32,18 +33,20 @@ TRACE_EVENT(kmem_cache_alloc,
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
+ __assign_str(name);
__entry->bytes_req = s->object_size;
__entry->bytes_alloc = s->size;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
- __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG) ?
((gfp_flags & __GFP_ACCOUNT) ||
(s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
+ TP_printk("call_site=%pS ptr=%p name=%s bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
+ __get_str(name),
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
@@ -87,7 +90,7 @@ TRACE_EVENT(kmalloc,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node,
- (IS_ENABLED(CONFIG_MEMCG_KMEM) &&
+ (IS_ENABLED(CONFIG_MEMCG) &&
(__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false")
);
@@ -304,44 +307,84 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
-TRACE_EVENT(mm_alloc_contig_migrate_range_info,
+TRACE_EVENT(mm_setup_per_zone_wmarks,
+
+ TP_PROTO(struct zone *zone),
+
+ TP_ARGS(zone),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __field(unsigned long, watermark_min)
+ __field(unsigned long, watermark_low)
+ __field(unsigned long, watermark_high)
+ __field(unsigned long, watermark_promo)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __entry->watermark_min = zone->_watermark[WMARK_MIN];
+ __entry->watermark_low = zone->_watermark[WMARK_LOW];
+ __entry->watermark_high = zone->_watermark[WMARK_HIGH];
+ __entry->watermark_promo = zone->_watermark[WMARK_PROMO];
+ ),
+
+ TP_printk("node_id=%d zone name=%s watermark min=%lu low=%lu high=%lu promo=%lu",
+ __entry->node_id,
+ __get_str(name),
+ __entry->watermark_min,
+ __entry->watermark_low,
+ __entry->watermark_high,
+ __entry->watermark_promo)
+);
+
+TRACE_EVENT(mm_setup_per_zone_lowmem_reserve,
+
+ TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),
+
+ TP_ARGS(zone, upper_zone, lowmem_reserve),
+
+ TP_STRUCT__entry(
+ __field(int, node_id)
+ __string(name, zone->name)
+ __string(upper_name, upper_zone->name)
+ __field(long, lowmem_reserve)
+ ),
+
+ TP_fast_assign(
+ __entry->node_id = zone->zone_pgdat->node_id;
+ __assign_str(name);
+ __assign_str(upper_name);
+ __entry->lowmem_reserve = lowmem_reserve;
+ ),
+
+ TP_printk("node_id=%d zone name=%s upper_zone name=%s lowmem_reserve_pages=%ld",
+ __entry->node_id,
+ __get_str(name),
+ __get_str(upper_name),
+ __entry->lowmem_reserve)
+);
+
+TRACE_EVENT(mm_calculate_totalreserve_pages,
- TP_PROTO(unsigned long start,
- unsigned long end,
- unsigned long nr_migrated,
- unsigned long nr_reclaimed,
- unsigned long nr_mapped,
- int migratetype),
+ TP_PROTO(unsigned long totalreserve_pages),
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
+ TP_ARGS(totalreserve_pages),
TP_STRUCT__entry(
- __field(unsigned long, start)
- __field(unsigned long, end)
- __field(unsigned long, nr_migrated)
- __field(unsigned long, nr_reclaimed)
- __field(unsigned long, nr_mapped)
- __field(int, migratetype)
+ __field(unsigned long, totalreserve_pages)
),
TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- __entry->nr_migrated = nr_migrated;
- __entry->nr_reclaimed = nr_reclaimed;
- __entry->nr_mapped = nr_mapped;
- __entry->migratetype = migratetype;
+ __entry->totalreserve_pages = totalreserve_pages;
),
- TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
- __entry->start,
- __entry->end,
- __entry->migratetype,
- __entry->nr_migrated,
- __entry->nr_reclaimed,
- __entry->nr_mapped)
+ TP_printk("totalreserve_pages=%lu", __entry->totalreserve_pages)
);
+
/*
* Required for uniquely and securely identifying mm in rss_stat tracepoint.
*/
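
The mm tracepoints above all follow the same TRACE_EVENT() shape: TP_PROTO/TP_ARGS declare the probe signature, TP_STRUCT__entry lays out the ring-buffer record, TP_fast_assign fills it, and TP_printk formats it on read. As a minimal userspace sketch of that flow (illustrative only; stdout stands in for the ring buffer):

/*
 * Conceptual expansion of TRACE_EVENT(mm_calculate_totalreserve_pages, ...).
 * The struct, assignment and printf mirror TP_STRUCT__entry, TP_fast_assign
 * and TP_printk; this is a sketch, not the real macro output.
 */
#include <stdio.h>

struct trace_entry_totalreserve {		/* TP_STRUCT__entry */
	unsigned long totalreserve_pages;
};

static void trace_mm_calculate_totalreserve_pages(unsigned long totalreserve_pages)
{
	struct trace_entry_totalreserve e;

	e.totalreserve_pages = totalreserve_pages;		/* TP_fast_assign */
	printf("totalreserve_pages=%lu\n", e.totalreserve_pages); /* TP_printk */
}

int main(void)
{
	trace_mm_calculate_totalreserve_pages(12345);
	return 0;
}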
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 74e40d5d4af4..b282e3a86769 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -82,95 +82,15 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
-
-#if defined(__KVM_HAVE_IOAPIC)
-#define kvm_deliver_mode \
- {0x0, "Fixed"}, \
- {0x1, "LowPrio"}, \
- {0x2, "SMI"}, \
- {0x3, "Res3"}, \
- {0x4, "NMI"}, \
- {0x5, "INIT"}, \
- {0x6, "SIPI"}, \
- {0x7, "ExtINT"}
-
-TRACE_EVENT(kvm_ioapic_set_irq,
- TP_PROTO(__u64 e, int pin, bool coalesced),
- TP_ARGS(e, pin, coalesced),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- __field( int, pin )
- __field( bool, coalesced )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- __entry->pin = pin;
- __entry->coalesced = coalesced;
- ),
-
- TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
- __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "",
- __entry->coalesced ? " (coalesced)" : "")
-);
-TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
- TP_PROTO(__u64 e),
- TP_ARGS(e),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- ),
-
- TP_printk("dst %x vec %u (%s|%s|%s%s)",
- (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "")
-);
-
-TRACE_EVENT(kvm_msi_set_irq,
- TP_PROTO(__u64 address, __u64 data),
- TP_ARGS(address, data),
-
- TP_STRUCT__entry(
- __field( __u64, address )
- __field( __u64, data )
- ),
-
- TP_fast_assign(
- __entry->address = address;
- __entry->data = data;
- ),
-
- TP_printk("dst %llx vec %u (%s|%s|%s%s)",
- (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
- (u8)__entry->data,
- __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
- (__entry->address & (1<<2)) ? "logical" : "physical",
- (__entry->data & (1<<15)) ? "level" : "edge",
- (__entry->address & (1<<3)) ? "|rh" : "")
-);
+#ifdef CONFIG_KVM_IOAPIC
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-#endif /* defined(__KVM_HAVE_IOAPIC) */
-
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#endif /* CONFIG_KVM_IOAPIC */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -438,6 +358,33 @@ TRACE_EVENT(kvm_dirty_ring_exit,
TP_printk("vcpu %d", __entry->vcpu_id)
);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+/*
+ * @start: Starting address of guest memory range
+ * @end: End address of guest memory range
+ * @attr: The value of the attribute being set.
+ */
+TRACE_EVENT(kvm_vm_set_mem_attributes,
+ TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
+ TP_ARGS(start, end, attr),
+
+ TP_STRUCT__entry(
+ __field(gfn_t, start)
+ __field(gfn_t, end)
+ __field(unsigned long, attr)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->attr = attr;
+ ),
+
+ TP_printk("%#016llx -- %#016llx [0x%lx]",
+ __entry->start, __entry->end, __entry->attr)
+);
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),
diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h
index f0f7b3cb2041..c1c50df9ecfd 100644
--- a/include/trace/events/mce.h
+++ b/include/trace/events/mce.h
@@ -19,9 +19,9 @@
TRACE_EVENT(mce_record,
- TP_PROTO(struct mce *m),
+ TP_PROTO(struct mce_hw_err *err),
- TP_ARGS(m),
+ TP_ARGS(err),
TP_STRUCT__entry(
__field( u64, mcgcap )
@@ -43,31 +43,33 @@ TRACE_EVENT(mce_record,
__field( u8, bank )
__field( u8, cpuvendor )
__field( u32, microcode )
+ __dynamic_array(u8, v_data, sizeof(err->vendor))
),
TP_fast_assign(
- __entry->mcgcap = m->mcgcap;
- __entry->mcgstatus = m->mcgstatus;
- __entry->status = m->status;
- __entry->addr = m->addr;
- __entry->misc = m->misc;
- __entry->synd = m->synd;
- __entry->ipid = m->ipid;
- __entry->ip = m->ip;
- __entry->tsc = m->tsc;
- __entry->ppin = m->ppin;
- __entry->walltime = m->time;
- __entry->cpu = m->extcpu;
- __entry->cpuid = m->cpuid;
- __entry->apicid = m->apicid;
- __entry->socketid = m->socketid;
- __entry->cs = m->cs;
- __entry->bank = m->bank;
- __entry->cpuvendor = m->cpuvendor;
- __entry->microcode = m->microcode;
+ __entry->mcgcap = err->m.mcgcap;
+ __entry->mcgstatus = err->m.mcgstatus;
+ __entry->status = err->m.status;
+ __entry->addr = err->m.addr;
+ __entry->misc = err->m.misc;
+ __entry->synd = err->m.synd;
+ __entry->ipid = err->m.ipid;
+ __entry->ip = err->m.ip;
+ __entry->tsc = err->m.tsc;
+ __entry->ppin = err->m.ppin;
+ __entry->walltime = err->m.time;
+ __entry->cpu = err->m.extcpu;
+ __entry->cpuid = err->m.cpuid;
+ __entry->apicid = err->m.apicid;
+ __entry->socketid = err->m.socketid;
+ __entry->cs = err->m.cs;
+ __entry->bank = err->m.bank;
+ __entry->cpuvendor = err->m.cpuvendor;
+ __entry->microcode = err->m.microcode;
+ memcpy(__get_dynamic_array(v_data), &err->vendor, sizeof(err->vendor));
),
- TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x",
+ TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016llx, IPID: %016llx, ADDR: %016llx, MISC: %016llx, SYND: %016llx, RIP: %02x:<%016llx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x, vendor data: %s",
__entry->cpu,
__entry->mcgcap, __entry->mcgstatus,
__entry->bank, __entry->status,
@@ -83,7 +85,8 @@ TRACE_EVENT(mce_record,
__entry->walltime,
__entry->socketid,
__entry->apicid,
- __entry->microcode)
+ __entry->microcode,
+ __print_dynamic_array(v_data, sizeof(u8)))
);
#endif /* _TRACE_MCE_H */
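
The mce_record hunk stores the opaque vendor blob with __dynamic_array() and a memcpy() at assign time, then dumps it byte-wise at print time. A userspace sketch of that copy-then-dump pattern (the 16-byte length is a stand-in for sizeof(err->vendor)):

/*
 * Copy the raw vendor data verbatim into the record, decode only when
 * printing -- the same split the trace event uses between
 * TP_fast_assign() and __print_dynamic_array().
 */
#include <stdio.h>
#include <string.h>

#define VENDOR_LEN 16	/* stand-in for sizeof(err->vendor) */

static void dump_vendor(const unsigned char *src)
{
	unsigned char v_data[VENDOR_LEN];
	size_t i;

	memcpy(v_data, src, sizeof(v_data));	/* assign-time copy */
	for (i = 0; i < sizeof(v_data); i++)	/* print-time decode */
		printf("%02x%s", v_data[i],
		       i + 1 < sizeof(v_data) ? " " : "\n");
}

int main(void)
{
	unsigned char vendor[VENDOR_LEN] = { 0xde, 0xad, 0xbe, 0xef };

	dump_vendor(vendor);
	return 0;
}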
diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h
new file mode 100644
index 000000000000..dfe2f51019b4
--- /dev/null
+++ b/include/trace/events/memcg.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM memcg
+
+#if !defined(_TRACE_MEMCG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MEMCG_H
+
+#include <linux/memcontrol.h>
+#include <linux/tracepoint.h>
+
+
+DECLARE_EVENT_CLASS(memcg_rstat_stats,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(int, item)
+ __field(int, val)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->item = item;
+ __entry->val = val;
+ ),
+
+ TP_printk("memcg_id=%llu item=%d val=%d",
+ __entry->id, __entry->item, __entry->val)
+);
+
+DEFINE_EVENT(memcg_rstat_stats, mod_memcg_state,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+DEFINE_EVENT(memcg_rstat_stats, mod_memcg_lruvec_state,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, int val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+DECLARE_EVENT_CLASS(memcg_rstat_events,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
+
+ TP_ARGS(memcg, item, val),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(int, item)
+ __field(unsigned long, val)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->item = item;
+ __entry->val = val;
+ ),
+
+ TP_printk("memcg_id=%llu item=%d val=%lu",
+ __entry->id, __entry->item, __entry->val)
+);
+
+DEFINE_EVENT(memcg_rstat_events, count_memcg_events,
+
+ TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val),
+
+ TP_ARGS(memcg, item, val)
+);
+
+TRACE_EVENT(memcg_flush_stats,
+
+ TP_PROTO(struct mem_cgroup *memcg, s64 stats_updates,
+ bool force, bool needs_flush),
+
+ TP_ARGS(memcg, stats_updates, force, needs_flush),
+
+ TP_STRUCT__entry(
+ __field(u64, id)
+ __field(s64, stats_updates)
+ __field(bool, force)
+ __field(bool, needs_flush)
+ ),
+
+ TP_fast_assign(
+ __entry->id = cgroup_id(memcg->css.cgroup);
+ __entry->stats_updates = stats_updates;
+ __entry->force = force;
+ __entry->needs_flush = needs_flush;
+ ),
+
+ TP_printk("memcg_id=%llu stats_updates=%lld force=%d needs_flush=%d",
+ __entry->id, __entry->stats_updates,
+ __entry->force, __entry->needs_flush)
+);
+
+#endif /* _TRACE_MEMCG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
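
The new memcg header declares each record layout once with DECLARE_EVENT_CLASS() and stamps out mod_memcg_state and mod_memcg_lruvec_state from it with DEFINE_EVENT(), so the entry struct and format string live in a single place. A plain-C sketch of that template/instance split (not kernel code):

/*
 * One template carries the layout and formatting; each DEFINE_EVENT
 * contributes only a name.
 */
#include <stdio.h>

#define DECLARE_EVENT_CLASS(class)					\
	static void class##_emit(const char *name, unsigned long long id, \
				 int item, int val)			\
	{								\
		printf("%s: memcg_id=%llu item=%d val=%d\n",		\
		       name, id, item, val);				\
	}

#define DEFINE_EVENT(class, name)					\
	static void trace_##name(unsigned long long id, int item, int val) \
	{								\
		class##_emit(#name, id, item, val);			\
	}

DECLARE_EVENT_CLASS(memcg_rstat_stats)
DEFINE_EVENT(memcg_rstat_stats, mod_memcg_state)
DEFINE_EVENT(memcg_rstat_stats, mod_memcg_lruvec_state)

int main(void)
{
	trace_mod_memcg_state(42, 1, 10);
	trace_mod_memcg_lruvec_state(42, 2, -3);
	return 0;
}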
diff --git a/include/trace/events/memory-failure.h b/include/trace/events/memory-failure.h
new file mode 100644
index 000000000000..aa57cc8f896b
--- /dev/null
+++ b/include/trace/events/memory-failure.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM memory_failure
+#define TRACE_INCLUDE_FILE memory-failure
+
+#if !defined(_TRACE_MEMORY_FAILURE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MEMORY_FAILURE_H
+
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+
+/*
+ * memory-failure recovery action result event
+ *
+ * unsigned long pfn - Page Frame Number of the corrupted page
+ * int type - Page type of the corrupted page
+ * int result - Result of recovery action
+ */
+
+#define MF_ACTION_RESULT \
+ EM ( MF_IGNORED, "Ignored" ) \
+ EM ( MF_FAILED, "Failed" ) \
+ EM ( MF_DELAYED, "Delayed" ) \
+ EMe ( MF_RECOVERED, "Recovered" )
+
+#define MF_PAGE_TYPE \
+ EM ( MF_MSG_KERNEL, "reserved kernel page" ) \
+ EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
+ EM ( MF_MSG_HUGE, "huge page" ) \
+ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
+ EM ( MF_MSG_GET_HWPOISON, "get hwpoison page" ) \
+ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \
+ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \
+ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \
+ EM ( MF_MSG_DIRTY_MLOCKED_LRU, "dirty mlocked LRU page" ) \
+ EM ( MF_MSG_CLEAN_MLOCKED_LRU, "clean mlocked LRU page" ) \
+ EM ( MF_MSG_DIRTY_UNEVICTABLE_LRU, "dirty unevictable LRU page" ) \
+ EM ( MF_MSG_CLEAN_UNEVICTABLE_LRU, "clean unevictable LRU page" ) \
+ EM ( MF_MSG_DIRTY_LRU, "dirty LRU page" ) \
+ EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
+ EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
+ EM ( MF_MSG_BUDDY, "free buddy page" ) \
+ EM ( MF_MSG_DAX, "dax page" ) \
+ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
+ EM ( MF_MSG_ALREADY_POISONED, "already poisoned" ) \
+ EM ( MF_MSG_PFN_MAP, "non struct page pfn" ) \
+ EMe ( MF_MSG_UNKNOWN, "unknown page" )
+
+/*
+ * First define the enums in MF_ACTION_RESULT and MF_PAGE_TYPE to be
+ * exported to userspace via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+MF_ACTION_RESULT
+MF_PAGE_TYPE
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(memory_failure_event,
+ TP_PROTO(unsigned long pfn,
+ int type,
+ int result),
+
+ TP_ARGS(pfn, type, result),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(int, type)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->pfn = pfn;
+ __entry->type = type;
+ __entry->result = result;
+ ),
+
+ TP_printk("pfn %#lx: recovery action for %s: %s",
+ __entry->pfn,
+ __print_symbolic(__entry->type, MF_PAGE_TYPE),
+ __print_symbolic(__entry->result, MF_ACTION_RESULT)
+ )
+);
+#endif /* _TRACE_MEMORY_FAILURE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
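
The EM()/EMe() lists above are classic X-macros: the same list is expanded once to register the enum values and once to build the value-to-string table, with EMe marking the final entry so the generated table has no trailing comma. A runnable demonstration of the technique, abbreviated to the MF_ACTION_RESULT entries:

/*
 * One list, two expansions: first as an enum, then as an
 * enum-to-string table.
 */
#include <stdio.h>

#define MF_ACTION_RESULT		\
	EM(MF_IGNORED,    "Ignored")	\
	EM(MF_FAILED,     "Failed")	\
	EM(MF_DELAYED,    "Delayed")	\
	EMe(MF_RECOVERED, "Recovered")

/* First expansion: the enum itself. */
#define EM(a, b)  a,
#define EMe(a, b) a
enum mf_action_result { MF_ACTION_RESULT };
#undef EM
#undef EMe

/* Second expansion: the string table. */
#define EM(a, b)  { a, b },
#define EMe(a, b) { a, b }
static const struct { int val; const char *name; } mf_names[] = {
	MF_ACTION_RESULT
};

int main(void)
{
	printf("%d -> %s\n", MF_DELAYED, mf_names[MF_DELAYED].name);
	return 0;
}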
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 0190ef725b43..cd01dd7b3640 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -22,7 +22,8 @@
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
EM( MR_CONTIG_RANGE, "contig_range") \
EM( MR_LONGTERM_PIN, "longterm_pin") \
- EMe(MR_DEMOTION, "demotion")
+ EM( MR_DEMOTION, "demotion") \
+ EMe(MR_DAMON, "damon")
/*
* First define the enums in the above macros to be exported to userspace
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index f8d61485de16..ee2843a5daef 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -43,58 +43,6 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset)
);
-TRACE_EVENT(vma_mas_szero,
- TP_PROTO(struct maple_tree *mt, unsigned long start,
- unsigned long end),
-
- TP_ARGS(mt, start, end),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
- __entry->mt,
- (unsigned long) __entry->start,
- (unsigned long) __entry->end
- )
-);
-
-TRACE_EVENT(vma_store,
- TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
-
- TP_ARGS(mt, vma),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(struct vm_area_struct *, vma)
- __field(unsigned long, vm_start)
- __field(unsigned long, vm_end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->vma = vma;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end - 1;
- ),
-
- TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
- __entry->mt, __entry->vma,
- (unsigned long) __entry->vm_start,
- (unsigned long) __entry->vm_end
- )
-);
-
-
TRACE_EVENT(exit_mmap,
TP_PROTO(struct mm_struct *mm),
diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h
index f2827f98a44f..cf9f9faf8914 100644
--- a/include/trace/events/mmap_lock.h
+++ b/include/trace/events/mmap_lock.h
@@ -5,80 +5,72 @@
#if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MMAP_LOCK_H
+#include <linux/memcontrol.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
struct mm_struct;
-extern int trace_mmap_lock_reg(void);
-extern void trace_mmap_lock_unreg(void);
-
DECLARE_EVENT_CLASS(mmap_lock,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write),
+ TP_PROTO(struct mm_struct *mm, bool write),
- TP_ARGS(mm, memcg_path, write),
+ TP_ARGS(mm, write),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s",
- __entry->mm,
- __get_str(memcg_path),
+ "mm=%p memcg_id=%llu write=%s",
+ __entry->mm, __entry->memcg_id,
__entry->write ? "true" : "false"
)
);
#define DEFINE_MMAP_LOCK_EVENT(name) \
- DEFINE_EVENT_FN(mmap_lock, name, \
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, \
- bool write), \
- TP_ARGS(mm, memcg_path, write), \
- trace_mmap_lock_reg, trace_mmap_lock_unreg)
+ DEFINE_EVENT(mmap_lock, name, \
+ TP_PROTO(struct mm_struct *mm, bool write), \
+ TP_ARGS(mm, write))
DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking);
DEFINE_MMAP_LOCK_EVENT(mmap_lock_released);
-TRACE_EVENT_FN(mmap_lock_acquire_returned,
+TRACE_EVENT(mmap_lock_acquire_returned,
- TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write,
- bool success),
+ TP_PROTO(struct mm_struct *mm, bool write, bool success),
- TP_ARGS(mm, memcg_path, write, success),
+ TP_ARGS(mm, write, success),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
- __string(memcg_path, memcg_path)
+ __field(u64, memcg_id)
__field(bool, write)
__field(bool, success)
),
TP_fast_assign(
__entry->mm = mm;
- __assign_str(memcg_path);
+ __entry->memcg_id = cgroup_id_from_mm(mm);
__entry->write = write;
__entry->success = success;
),
TP_printk(
- "mm=%p memcg_path=%s write=%s success=%s",
+ "mm=%p memcg_id=%llu write=%s success=%s",
__entry->mm,
- __get_str(memcg_path),
+ __entry->memcg_id,
__entry->write ? "true" : "false",
__entry->success ? "true" : "false"
- ),
-
- trace_mmap_lock_reg, trace_mmap_lock_unreg
+ )
);
#endif /* _TRACE_MMAP_LOCK_H */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index e46d6e82765e..a6e5a44c9b42 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,8 +13,78 @@
* Thus most bits set go first.
*/
+/* These define the values that are enums (the bits) */
+#define TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_EM(DMA) \
+ TRACE_GFP_EM(HIGHMEM) \
+ TRACE_GFP_EM(DMA32) \
+ TRACE_GFP_EM(MOVABLE) \
+ TRACE_GFP_EM(RECLAIMABLE) \
+ TRACE_GFP_EM(HIGH) \
+ TRACE_GFP_EM(IO) \
+ TRACE_GFP_EM(FS) \
+ TRACE_GFP_EM(ZERO) \
+ TRACE_GFP_EM(DIRECT_RECLAIM) \
+ TRACE_GFP_EM(KSWAPD_RECLAIM) \
+ TRACE_GFP_EM(WRITE) \
+ TRACE_GFP_EM(NOWARN) \
+ TRACE_GFP_EM(RETRY_MAYFAIL) \
+ TRACE_GFP_EM(NOFAIL) \
+ TRACE_GFP_EM(NORETRY) \
+ TRACE_GFP_EM(MEMALLOC) \
+ TRACE_GFP_EM(COMP) \
+ TRACE_GFP_EM(NOMEMALLOC) \
+ TRACE_GFP_EM(HARDWALL) \
+ TRACE_GFP_EM(THISNODE) \
+ TRACE_GFP_EM(ACCOUNT) \
+ TRACE_GFP_EM(ZEROTAGS)
+
+#ifdef CONFIG_KASAN_HW_TAGS
+# define TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_EM(SKIP_ZERO) \
+ TRACE_GFP_EM(SKIP_KASAN)
+#else
+# define TRACE_GFP_FLAGS_KASAN
+#endif
+
+#ifdef CONFIG_LOCKDEP
+# define TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_EM(NOLOCKDEP)
+#else
+# define TRACE_GFP_FLAGS_LOCKDEP
+#endif
+
+#ifdef CONFIG_SLAB_OBJ_EXT
+# define TRACE_GFP_FLAGS_SLAB \
+ TRACE_GFP_EM(NO_OBJ_EXT)
+#else
+# define TRACE_GFP_FLAGS_SLAB
+#endif
+
+#define TRACE_GFP_FLAGS \
+ TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_FLAGS_SLAB
+
+#undef TRACE_GFP_EM
+#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT);
+
+TRACE_GFP_FLAGS
+
+/* Just in case these are ever used */
+TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT);
+TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
+
#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
+/*
+ * For the flags that map to single bits, use TRACE_GFP_FLAGS so that
+ * any future additions to the list are picked up automatically.
+ */
+#undef TRACE_GFP_EM
+#define TRACE_GFP_EM(a) gfpflag_string(__GFP_##a),
+
#define __def_gfpflag_names \
gfpflag_string(GFP_TRANSHUGE), \
gfpflag_string(GFP_TRANSHUGE_LIGHT), \
@@ -28,41 +98,13 @@
gfpflag_string(GFP_NOIO), \
gfpflag_string(GFP_NOWAIT), \
gfpflag_string(GFP_DMA), \
- gfpflag_string(__GFP_HIGHMEM), \
gfpflag_string(GFP_DMA32), \
- gfpflag_string(__GFP_HIGH), \
- gfpflag_string(__GFP_IO), \
- gfpflag_string(__GFP_FS), \
- gfpflag_string(__GFP_NOWARN), \
- gfpflag_string(__GFP_RETRY_MAYFAIL), \
- gfpflag_string(__GFP_NOFAIL), \
- gfpflag_string(__GFP_NORETRY), \
- gfpflag_string(__GFP_COMP), \
- gfpflag_string(__GFP_ZERO), \
- gfpflag_string(__GFP_NOMEMALLOC), \
- gfpflag_string(__GFP_MEMALLOC), \
- gfpflag_string(__GFP_HARDWALL), \
- gfpflag_string(__GFP_THISNODE), \
- gfpflag_string(__GFP_RECLAIMABLE), \
- gfpflag_string(__GFP_MOVABLE), \
- gfpflag_string(__GFP_ACCOUNT), \
- gfpflag_string(__GFP_WRITE), \
gfpflag_string(__GFP_RECLAIM), \
- gfpflag_string(__GFP_DIRECT_RECLAIM), \
- gfpflag_string(__GFP_KSWAPD_RECLAIM), \
- gfpflag_string(__GFP_ZEROTAGS)
-
-#ifdef CONFIG_KASAN_HW_TAGS
-#define __def_gfpflag_names_kasan , \
- gfpflag_string(__GFP_SKIP_ZERO), \
- gfpflag_string(__GFP_SKIP_KASAN)
-#else
-#define __def_gfpflag_names_kasan
-#endif
+ TRACE_GFP_FLAGS \
+ { 0, NULL }
#define show_gfp_flags(flags) \
- (flags) ? __print_flags(flags, "|", \
- __def_gfpflag_names __def_gfpflag_names_kasan \
+ (flags) ? __print_flags(flags, "|", __def_gfpflag_names \
) : "none"
#ifdef CONFIG_MMU
@@ -71,12 +113,6 @@
#define IF_HAVE_PG_MLOCK(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-#define IF_HAVE_PG_UNCACHED(_name) ,{1UL << PG_##_name, __stringify(_name)}
-#else
-#define IF_HAVE_PG_UNCACHED(_name)
-#endif
-
#ifdef CONFIG_MEMORY_FAILURE
#define IF_HAVE_PG_HWPOISON(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
@@ -89,10 +125,16 @@
#define IF_HAVE_PG_IDLE(_name)
#endif
-#ifdef CONFIG_ARCH_USES_PG_ARCH_X
-#define IF_HAVE_PG_ARCH_X(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(_name) ,{1UL << PG_##_name, __stringify(_name)}
#else
-#define IF_HAVE_PG_ARCH_X(_name)
+#define IF_HAVE_PG_ARCH_2(_name)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+#define IF_HAVE_PG_ARCH_3(_name) ,{1UL << PG_##_name, __stringify(_name)}
+#else
+#define IF_HAVE_PG_ARCH_3(_name)
#endif
#define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) }
@@ -100,7 +142,6 @@
#define __def_pageflag_names \
DEF_PAGEFLAG_NAME(locked), \
DEF_PAGEFLAG_NAME(waiters), \
- DEF_PAGEFLAG_NAME(error), \
DEF_PAGEFLAG_NAME(referenced), \
DEF_PAGEFLAG_NAME(uptodate), \
DEF_PAGEFLAG_NAME(dirty), \
@@ -108,42 +149,30 @@
DEF_PAGEFLAG_NAME(active), \
DEF_PAGEFLAG_NAME(workingset), \
DEF_PAGEFLAG_NAME(owner_priv_1), \
+ DEF_PAGEFLAG_NAME(owner_2), \
DEF_PAGEFLAG_NAME(arch_1), \
DEF_PAGEFLAG_NAME(reserved), \
DEF_PAGEFLAG_NAME(private), \
DEF_PAGEFLAG_NAME(private_2), \
DEF_PAGEFLAG_NAME(writeback), \
DEF_PAGEFLAG_NAME(head), \
- DEF_PAGEFLAG_NAME(mappedtodisk), \
DEF_PAGEFLAG_NAME(reclaim), \
DEF_PAGEFLAG_NAME(swapbacked), \
- DEF_PAGEFLAG_NAME(unevictable) \
+ DEF_PAGEFLAG_NAME(unevictable), \
+ DEF_PAGEFLAG_NAME(dropbehind) \
IF_HAVE_PG_MLOCK(mlocked) \
-IF_HAVE_PG_UNCACHED(uncached) \
IF_HAVE_PG_HWPOISON(hwpoison) \
IF_HAVE_PG_IDLE(idle) \
IF_HAVE_PG_IDLE(young) \
-IF_HAVE_PG_ARCH_X(arch_2) \
-IF_HAVE_PG_ARCH_X(arch_3)
+IF_HAVE_PG_ARCH_2(arch_2) \
+IF_HAVE_PG_ARCH_3(arch_3)
#define show_page_flags(flags) \
(flags) ? __print_flags(flags, "|", \
__def_pageflag_names \
) : "none"
-#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
-
-#define __def_pagetype_names \
- DEF_PAGETYPE_NAME(slab), \
- DEF_PAGETYPE_NAME(hugetlb), \
- DEF_PAGETYPE_NAME(offline), \
- DEF_PAGETYPE_NAME(guard), \
- DEF_PAGETYPE_NAME(table), \
- DEF_PAGETYPE_NAME(buddy)
-
-#if defined(CONFIG_X86)
-#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
+#if defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
@@ -165,6 +194,12 @@ IF_HAVE_PG_ARCH_X(arch_3)
# define IF_HAVE_UFFD_MINOR(flag, name)
#endif
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
+# define IF_HAVE_VM_DROPPABLE(flag, name) {flag, name},
+#else
+# define IF_HAVE_VM_DROPPABLE(flag, name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -178,6 +213,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
{VM_UFFD_MISSING, "uffd_missing" }, \
IF_HAVE_UFFD_MINOR(VM_UFFD_MINOR, "uffd_minor" ) \
{VM_PFNMAP, "pfnmap" }, \
+ {VM_MAYBE_GUARD, "maybe_guard" }, \
{VM_UFFD_WP, "uffd_wp" }, \
{VM_LOCKED, "locked" }, \
{VM_IO, "io" }, \
@@ -197,6 +233,7 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
{VM_HUGEPAGE, "hugepage" }, \
{VM_NOHUGEPAGE, "nohugepage" }, \
+IF_HAVE_VM_DROPPABLE(VM_DROPPABLE, "droppable" ) \
{VM_MERGEABLE, "mergeable" } \
#define show_vma_flags(flags) \
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 09e72215b9f9..085b749cdd97 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
struct sock *ssk;
__entry->active = mptcp_subflow_active(subflow);
- __entry->backup = subflow->backup;
+ __entry->backup = subflow->backup || subflow->request_bkup;
if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
__entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index d55162c12f90..fdd9ad474ce3 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -35,6 +35,7 @@ TRACE_EVENT(net_dev_start_xmit,
__field( u16, gso_size )
__field( u16, gso_segs )
__field( u16, gso_type )
+ __field( u64, net_cookie )
),
TP_fast_assign(
@@ -57,16 +58,18 @@ TRACE_EVENT(net_dev_start_xmit,
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->net_cookie = dev_net(dev)->net_cookie;
),
- TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x",
+ TP_printk("dev=%s queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d len=%u data_len=%u network_offset=%d transport_offset_valid=%d transport_offset=%d tx_flags=%d gso_size=%d gso_segs=%d gso_type=%#x net_cookie=%llu",
__get_str(name), __entry->queue_mapping, __entry->skbaddr,
__entry->vlan_tagged, __entry->vlan_proto, __entry->vlan_tci,
__entry->protocol, __entry->ip_summed, __entry->len,
__entry->data_len,
__entry->network_offset, __entry->transport_offset_valid,
__entry->transport_offset, __entry->tx_flags,
- __entry->gso_size, __entry->gso_segs, __entry->gso_type)
+ __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->net_cookie)
);
TRACE_EVENT(net_dev_xmit,
@@ -83,17 +86,21 @@ TRACE_EVENT(net_dev_xmit,
__field( unsigned int, len )
__field( int, rc )
__string( name, dev->name )
+ __field( u64, net_cookie )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb_len;
__entry->rc = rc;
+ __entry->net_cookie = dev_net(dev)->net_cookie;
__assign_str(name);
),
- TP_printk("dev=%s skbaddr=%p len=%u rc=%d",
- __get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
+ TP_printk("dev=%s skbaddr=%p len=%u rc=%d net_cookie=%llu",
+ __get_str(name), __entry->skbaddr,
+ __entry->len, __entry->rc,
+ __entry->net_cookie)
);
TRACE_EVENT(net_dev_xmit_timeout,
@@ -107,16 +114,19 @@ TRACE_EVENT(net_dev_xmit_timeout,
__string( name, dev->name )
__string( driver, netdev_drivername(dev))
__field( int, queue_index )
+ __field( u64, net_cookie )
),
TP_fast_assign(
__assign_str(name);
__assign_str(driver);
__entry->queue_index = queue_index;
+ __entry->net_cookie = dev_net(dev)->net_cookie;
),
- TP_printk("dev=%s driver=%s queue=%d",
- __get_str(name), __get_str(driver), __entry->queue_index)
+ TP_printk("dev=%s driver=%s queue=%d net_cookie=%llu",
+ __get_str(name), __get_str(driver),
+ __entry->queue_index, __entry->net_cookie)
);
DECLARE_EVENT_CLASS(net_dev_template,
@@ -129,16 +139,20 @@ DECLARE_EVENT_CLASS(net_dev_template,
__field( void *, skbaddr )
__field( unsigned int, len )
__string( name, skb->dev->name )
+ __field( u64, net_cookie )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb->len;
+ __entry->net_cookie = dev_net(skb->dev)->net_cookie;
__assign_str(name);
),
- TP_printk("dev=%s skbaddr=%p len=%u",
- __get_str(name), __entry->skbaddr, __entry->len)
+ TP_printk("dev=%s skbaddr=%p len=%u net_cookie=%llu",
+ __get_str(name), __entry->skbaddr,
+ __entry->len,
+ __entry->net_cookie)
)
DEFINE_EVENT(net_dev_template, net_dev_queue,
@@ -188,6 +202,7 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
__field( unsigned char, nr_frags )
__field( u16, gso_size )
__field( u16, gso_type )
+ __field( u64, net_cookie )
),
TP_fast_assign(
@@ -214,16 +229,18 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
__entry->nr_frags = skb_shinfo(skb)->nr_frags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->net_cookie = dev_net(skb->dev)->net_cookie;
),
- TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+ TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x net_cookie=%llu",
__get_str(name), __entry->napi_id, __entry->queue_mapping,
__entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
__entry->vlan_tci, __entry->protocol, __entry->ip_summed,
__entry->hash, __entry->l4_hash, __entry->len,
__entry->data_len, __entry->truesize,
__entry->mac_header_valid, __entry->mac_header,
- __entry->nr_frags, __entry->gso_size, __entry->gso_type)
+ __entry->nr_frags, __entry->gso_size,
+ __entry->gso_type, __entry->net_cookie)
);
DEFINE_EVENT(net_dev_rx_verbose_template, napi_gro_frags_entry,
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index da23484268df..64a382fbc31a 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -20,6 +20,8 @@
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_read_gaps, "READ-GAPS") \
+ EM(netfs_read_trace_read_single, "READ-SNGL") \
EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
@@ -28,57 +30,100 @@
EM(netfs_write_trace_dio_write, "DIO-WRITE") \
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
EM(netfs_write_trace_writeback, "WRITEBACK") \
+ EM(netfs_write_trace_writeback_single, "WB-SINGLE") \
E_(netfs_write_trace_writethrough, "WRITETHRU")
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
+ EM(NETFS_READ_GAPS, "RG") \
+ EM(NETFS_READ_SINGLE, "R1") \
EM(NETFS_READ_FOR_WRITE, "RW") \
- EM(NETFS_COPY_TO_CACHE, "CC") \
+ EM(NETFS_UNBUFFERED_READ, "UR") \
+ EM(NETFS_DIO_READ, "DR") \
EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITEBACK_SINGLE, "W1") \
EM(NETFS_WRITETHROUGH, "WT") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
- EM(NETFS_DIO_READ, "DR") \
- E_(NETFS_DIO_WRITE, "DW")
+ EM(NETFS_DIO_WRITE, "DW") \
+ E_(NETFS_PGPRIV2_COPY_TO_CACHE, "2C")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
- EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_collect, "COLLECT") \
+ EM(netfs_rreq_trace_complete, "COMPLET") \
+ EM(netfs_rreq_trace_copy, "COPY ") \
+ EM(netfs_rreq_trace_dirty, "DIRTY ") \
EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
+ EM(netfs_rreq_trace_recollect, "RECLLCT") \
EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \
EM(netfs_rreq_trace_set_pause, "PAUSE ") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
+ EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \
EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_unpause, "UNPAUSE") \
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
- EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \
+ EM(netfs_rreq_trace_wait_pause, "--PAUSED--") \
+ EM(netfs_rreq_trace_wait_quiesce, "WAIT-QUIESCE") \
+ EM(netfs_rreq_trace_waited_ip, "DONE-IP") \
+ EM(netfs_rreq_trace_waited_pause, "--UNPAUSED--") \
+ EM(netfs_rreq_trace_waited_quiesce, "DONE-QUIESCE") \
EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
- EM(netfs_rreq_trace_unpause, "UNPAUSE") \
+ EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \
E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
+ EM(NETFS_SOURCE_UNKNOWN, "----") \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
EM(NETFS_INVALID_READ, "INVL") \
EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
- EM(NETFS_WRITE_TO_CACHE, "WRIT") \
- E_(NETFS_INVALID_WRITE, "INVL")
+ E_(NETFS_WRITE_TO_CACHE, "WRIT")
#define netfs_sreq_traces \
+ EM(netfs_sreq_trace_abandoned, "ABNDN") \
+ EM(netfs_sreq_trace_add_donations, "+DON ") \
+ EM(netfs_sreq_trace_added, "ADD ") \
+ EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \
+ EM(netfs_sreq_trace_cache_prepare, "CA-PR") \
+ EM(netfs_sreq_trace_cache_write, "CA-WR") \
+ EM(netfs_sreq_trace_cancel, "CANCL") \
+ EM(netfs_sreq_trace_clear, "CLEAR") \
+ EM(netfs_sreq_trace_consumed, "CONSM") \
EM(netfs_sreq_trace_discard, "DSCRD") \
+ EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
+ EM(netfs_sreq_trace_donate_to_next, "DON-N") \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_fail, "FAIL ") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_hit_eof, "EOF ") \
+ EM(netfs_sreq_trace_io_bad, "I-BAD") \
+ EM(netfs_sreq_trace_io_malformed, "I-MLF") \
+ EM(netfs_sreq_trace_io_unknown, "I-UNK") \
+ EM(netfs_sreq_trace_io_progress, "I-OK ") \
+ EM(netfs_sreq_trace_io_req_submitted, "I-RSB") \
+ EM(netfs_sreq_trace_io_retry_needed, "I-RTR") \
EM(netfs_sreq_trace_limited, "LIMIT") \
+ EM(netfs_sreq_trace_need_clear, "N-CLR") \
+ EM(netfs_sreq_trace_partial_read, "PARTR") \
+ EM(netfs_sreq_trace_need_retry, "ND-RT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_prep_failed, "PRPFL") \
- EM(netfs_sreq_trace_resubmit_short, "SHORT") \
+ EM(netfs_sreq_trace_progress, "PRGRS") \
+ EM(netfs_sreq_trace_reprep_failed, "REPFL") \
EM(netfs_sreq_trace_retry, "RETRY") \
+ EM(netfs_sreq_trace_short, "SHORT") \
+ EM(netfs_sreq_trace_split, "SPLIT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
+ EM(netfs_sreq_trace_superfluous, "SPRFL") \
EM(netfs_sreq_trace_terminated, "TERM ") \
+ EM(netfs_sreq_trace_wait_for, "_WAIT") \
EM(netfs_sreq_trace_write, "WRITE") \
EM(netfs_sreq_trace_write_skip, "SKIP ") \
E_(netfs_sreq_trace_write_term, "WTERM")
@@ -96,28 +141,27 @@
#define netfs_rreq_ref_traces \
EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
- EM(netfs_rreq_trace_get_work, "GET WORK ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
- EM(netfs_rreq_trace_put_work, "PUT WORK ") \
- EM(netfs_rreq_trace_put_work_complete, "PUT WORK CP") \
- EM(netfs_rreq_trace_put_work_nq, "PUT WORK NQ") \
+ EM(netfs_rreq_trace_put_work_ip, "PUT WORK IP ") \
EM(netfs_rreq_trace_see_work, "SEE WORK ") \
+ EM(netfs_rreq_trace_see_work_complete, "SEE WORK CP") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \
- EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \
- EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \
+ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMT") \
+ EM(netfs_sreq_trace_get_submit, "GET SUBMIT ") \
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \
EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
- EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
+ EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
EM(netfs_sreq_trace_put_done, "PUT DONE ") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
@@ -128,7 +172,6 @@
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
#define netfs_folio_traces \
- /* The first few correspond to enum netfs_how_to_modify */ \
EM(netfs_folio_is_uptodate, "mod-uptodate") \
EM(netfs_just_prefetch, "mod-prefetch") \
EM(netfs_whole_folio_modify, "mod-whole-f") \
@@ -138,13 +181,16 @@
EM(netfs_flush_content, "flush") \
EM(netfs_streaming_filled_page, "mod-streamw-f") \
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
- /* The rest are for writeback */ \
+ EM(netfs_folio_trace_abandon, "abandon") \
+ EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \
EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
+ EM(netfs_folio_trace_cancel_store, "cancel-store") \
EM(netfs_folio_trace_clear, "clear") \
EM(netfs_folio_trace_clear_cc, "clear-cc") \
EM(netfs_folio_trace_clear_g, "clear-g") \
EM(netfs_folio_trace_clear_s, "clear-s") \
EM(netfs_folio_trace_copy_to_cache, "mark-copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
EM(netfs_folio_trace_kill, "kill") \
EM(netfs_folio_trace_kill_cc, "kill-cc") \
@@ -153,7 +199,12 @@
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
EM(netfs_folio_trace_not_under_wback, "!wback") \
+ EM(netfs_folio_trace_not_locked, "!locked") \
+ EM(netfs_folio_trace_put, "put") \
+ EM(netfs_folio_trace_read, "read") \
+ EM(netfs_folio_trace_read_done, "read-done") \
EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_read_unlock, "read-unlock") \
EM(netfs_folio_trace_redirtied, "redirtied") \
EM(netfs_folio_trace_store, "store") \
EM(netfs_folio_trace_store_copy, "store-copy") \
@@ -166,6 +217,20 @@
EM(netfs_contig_trace_jump, "-->JUMP-->") \
E_(netfs_contig_trace_unlock, "Unlock")
+#define netfs_donate_traces \
+ EM(netfs_trace_donate_tail_to_prev, "tail-to-prev") \
+ EM(netfs_trace_donate_to_prev, "to-prev") \
+ EM(netfs_trace_donate_to_next, "to-next") \
+ E_(netfs_trace_donate_to_deferred_next, "defer-next")
+
+#define netfs_folioq_traces \
+ EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \
+ EM(netfs_trace_folioq_clear, "clear") \
+ EM(netfs_trace_folioq_delete, "delete") \
+ EM(netfs_trace_folioq_make_space, "make-space") \
+ EM(netfs_trace_folioq_rollbuf_init, "roll-init") \
+ E_(netfs_trace_folioq_read_progress, "r-progress")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -183,6 +248,8 @@ enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
+enum netfs_donate_trace { netfs_donate_traces } __mode(byte);
+enum netfs_folioq_trace { netfs_folioq_traces } __mode(byte);
#endif
@@ -205,6 +272,8 @@ netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
netfs_folio_traces;
netfs_collect_contig_traces;
+netfs_donate_traces;
+netfs_folioq_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -223,29 +292,31 @@ TRACE_EVENT(netfs_read,
TP_ARGS(rreq, start, len, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, cookie )
- __field(loff_t, start )
- __field(size_t, len )
- __field(enum netfs_read_trace, what )
- __field(unsigned int, netfs_inode )
+ __field(unsigned int, rreq)
+ __field(unsigned int, cookie)
+ __field(loff_t, i_size)
+ __field(loff_t, start)
+ __field(size_t, len)
+ __field(enum netfs_read_trace, what)
+ __field(unsigned int, netfs_inode)
),
TP_fast_assign(
__entry->rreq = rreq->debug_id;
__entry->cookie = rreq->cache_resources.debug_id;
+ __entry->i_size = rreq->i_size;
__entry->start = start;
__entry->len = len;
__entry->what = what;
__entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx l=%zx sz=%llx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
__entry->netfs_inode,
- __entry->start, __entry->len)
+ __entry->start, __entry->len, __entry->i_size)
);
TRACE_EVENT(netfs_rreq,
@@ -255,10 +326,10 @@ TRACE_EVENT(netfs_rreq,
TP_ARGS(rreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, flags )
- __field(enum netfs_io_origin, origin )
- __field(enum netfs_rreq_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, flags)
+ __field(enum netfs_io_origin, origin)
+ __field(enum netfs_rreq_trace, what)
),
TP_fast_assign(
@@ -282,15 +353,16 @@ TRACE_EVENT(netfs_sreq,
TP_ARGS(sreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_sreq_trace, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(unsigned short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_sreq_trace, what)
+ __field(u8, slot)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -303,15 +375,16 @@ TRACE_EVENT(netfs_sreq,
__entry->len = sreq->len;
__entry->transferred = sreq->transferred;
__entry->start = sreq->start;
+ __entry->slot = sreq->io_iter.folioq_slot;
),
- TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ TP_printk("R=%08x[%x] %s %s f=%03x s=%llx %zx/%zx s=%u e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->what, netfs_sreq_traces),
__entry->flags,
__entry->start, __entry->transferred, __entry->len,
- __entry->error)
+ __entry->slot, __entry->error)
);
TRACE_EVENT(netfs_failure,
@@ -322,15 +395,15 @@ TRACE_EVENT(netfs_failure,
TP_ARGS(rreq, sreq, error, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_failure, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_failure, what)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -361,9 +434,9 @@ TRACE_EVENT(netfs_rreq_ref,
TP_ARGS(rreq_debug_id, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(int, ref )
- __field(enum netfs_rreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(int, ref)
+ __field(enum netfs_rreq_ref_trace, what)
),
TP_fast_assign(
@@ -385,10 +458,10 @@ TRACE_EVENT(netfs_sreq_ref,
TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, subreq )
- __field(int, ref )
- __field(enum netfs_sreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, subreq)
+ __field(int, ref)
+ __field(enum netfs_sreq_ref_trace, what)
),
TP_fast_assign(
@@ -418,9 +491,10 @@ TRACE_EVENT(netfs_folio,
),
TP_fast_assign(
- __entry->ino = folio->mapping->host->i_ino;
+ struct address_space *__m = READ_ONCE(folio->mapping);
+ __entry->ino = __m ? __m->host->i_ino : 0;
__entry->why = why;
- __entry->index = folio_index(folio);
+ __entry->index = folio->index;
__entry->nr = folio_nr_pages(folio);
),
@@ -435,10 +509,10 @@ TRACE_EVENT(netfs_write_iter,
TP_ARGS(iocb, from),
TP_STRUCT__entry(
- __field(unsigned long long, start )
- __field(size_t, len )
- __field(unsigned int, flags )
- __field(unsigned int, ino )
+ __field(unsigned long long, start)
+ __field(size_t, len)
+ __field(unsigned int, flags)
+ __field(unsigned int, ino)
),
TP_fast_assign(
@@ -459,12 +533,12 @@ TRACE_EVENT(netfs_write,
TP_ARGS(wreq, what),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, cookie )
- __field(unsigned int, ino )
- __field(enum netfs_write_trace, what )
- __field(unsigned long long, start )
- __field(unsigned long long, len )
+ __field(unsigned int, wreq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ __field(enum netfs_write_trace, what)
+ __field(unsigned long long, start)
+ __field(unsigned long long, len)
),
TP_fast_assign(
@@ -486,56 +560,58 @@ TRACE_EVENT(netfs_write,
__entry->start, __entry->start + __entry->len - 1)
);
-TRACE_EVENT(netfs_collect,
- TP_PROTO(const struct netfs_io_request *wreq),
+TRACE_EVENT(netfs_copy2cache,
+ TP_PROTO(const struct netfs_io_request *rreq,
+ const struct netfs_io_request *creq),
- TP_ARGS(wreq),
+ TP_ARGS(rreq, creq),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, len )
- __field(unsigned long long, transferred )
- __field(unsigned long long, start )
+ __field(unsigned int, rreq)
+ __field(unsigned int, creq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
),
TP_fast_assign(
- __entry->wreq = wreq->debug_id;
- __entry->start = wreq->start;
- __entry->len = wreq->len;
- __entry->transferred = wreq->transferred;
+ struct netfs_inode *__ctx = netfs_inode(rreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->rreq = rreq->debug_id;
+ __entry->creq = creq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->ino = rreq->inode->i_ino;
),
- TP_printk("R=%08x s=%llx-%llx",
- __entry->wreq,
- __entry->start + __entry->transferred,
- __entry->start + __entry->len)
+ TP_printk("R=%08x CR=%08x c=%08x i=%x ",
+ __entry->rreq,
+ __entry->creq,
+ __entry->cookie,
+ __entry->ino)
);
-TRACE_EVENT(netfs_collect_contig,
- TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to,
- enum netfs_collect_contig_trace type),
+TRACE_EVENT(netfs_collect,
+ TP_PROTO(const struct netfs_io_request *wreq),
- TP_ARGS(wreq, to, type),
+ TP_ARGS(wreq),
TP_STRUCT__entry(
__field(unsigned int, wreq)
- __field(enum netfs_collect_contig_trace, type)
- __field(unsigned long long, contiguity)
- __field(unsigned long long, to)
+ __field(unsigned int, len)
+ __field(unsigned long long, transferred)
+ __field(unsigned long long, start)
),
TP_fast_assign(
__entry->wreq = wreq->debug_id;
- __entry->type = type;
- __entry->contiguity = wreq->contiguity;
- __entry->to = to;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ __entry->transferred = wreq->transferred;
),
- TP_printk("R=%08x %llx -> %llx %s",
+ TP_printk("R=%08x s=%llx-%llx",
__entry->wreq,
- __entry->contiguity,
- __entry->to,
- __print_symbolic(__entry->type, netfs_collect_contig_traces))
+ __entry->start + __entry->transferred,
+ __entry->start + __entry->len)
);
TRACE_EVENT(netfs_collect_sreq,
@@ -545,12 +621,12 @@ TRACE_EVENT(netfs_collect_sreq,
TP_ARGS(wreq, subreq),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, subreq )
- __field(unsigned int, stream )
- __field(unsigned int, len )
- __field(unsigned int, transferred )
- __field(unsigned long long, start )
+ __field(unsigned int, wreq)
+ __field(unsigned int, subreq)
+ __field(unsigned int, stream)
+ __field(unsigned int, len)
+ __field(unsigned int, transferred)
+ __field(unsigned long long, start)
),
TP_fast_assign(
@@ -576,11 +652,11 @@ TRACE_EVENT(netfs_collect_folio,
TP_ARGS(wreq, folio, fend, collected_to),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned long, index )
- __field(unsigned long long, fend )
- __field(unsigned long long, cleaned_to )
- __field(unsigned long long, collected_to )
+ __field(unsigned int, wreq)
+ __field(unsigned long, index)
+ __field(unsigned long long, fend)
+ __field(unsigned long long, cleaned_to)
+ __field(unsigned long long, collected_to)
),
TP_fast_assign(
@@ -605,11 +681,10 @@ TRACE_EVENT(netfs_collect_state,
TP_ARGS(wreq, collected_to, notes),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, notes )
- __field(unsigned long long, collected_to )
- __field(unsigned long long, cleaned_to )
- __field(unsigned long long, contiguity )
+ __field(unsigned int, wreq)
+ __field(unsigned int, notes)
+ __field(unsigned long long, collected_to)
+ __field(unsigned long long, cleaned_to)
),
TP_fast_assign(
@@ -617,12 +692,11 @@ TRACE_EVENT(netfs_collect_state,
__entry->notes = notes;
__entry->collected_to = collected_to;
__entry->cleaned_to = wreq->cleaned_to;
- __entry->contiguity = wreq->contiguity;
),
- TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x",
+ TP_printk("R=%08x col=%llx cln=%llx n=%x",
__entry->wreq, __entry->collected_to,
- __entry->cleaned_to, __entry->contiguity,
+ __entry->cleaned_to,
__entry->notes)
);
@@ -679,6 +753,29 @@ TRACE_EVENT(netfs_collect_stream,
__entry->collected_to, __entry->front)
);
+TRACE_EVENT(netfs_folioq,
+ TP_PROTO(const struct folio_queue *fq,
+ enum netfs_folioq_trace trace),
+
+ TP_ARGS(fq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, id)
+ __field(enum netfs_folioq_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->rreq = fq ? fq->rreq_id : 0;
+ __entry->id = fq ? fq->debug_id : 0;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("R=%08x fq=%x %s",
+ __entry->rreq, __entry->id,
+ __print_symbolic(__entry->trace, netfs_folioq_traces))
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
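
Nearly every netfs trace line is rendered through __print_symbolic(), an exact-match lookup of an enum value in a {value, string} table such as netfs_rreq_origins. A sketch of that lookup (table abbreviated from the list above):

#include <stdio.h>

struct sym { int val; const char *str; };

static const char *print_symbolic(int val, const struct sym *t)
{
	for (; t->str; t++)
		if (t->val == val)
			return t->str;
	return "?";	/* unknown value */
}

int main(void)
{
	enum { NETFS_READAHEAD, NETFS_READPAGE, NETFS_WRITEBACK };
	static const struct sym origins[] = {
		{ NETFS_READAHEAD, "RA" },
		{ NETFS_READPAGE,  "RP" },
		{ NETFS_WRITEBACK, "WB" },
		{ 0, NULL }
	};

	printf("origin=%s\n", print_symbolic(NETFS_READPAGE, origins));
	return 0;
}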
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..9f0a5d1482c4 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,8 +55,8 @@ TRACE_EVENT(reclaim_retry_zone,
),
TP_fast_assign(
- __entry->node = zone_to_nid(zoneref->zone);
- __entry->zone_idx = zoneref->zone_idx;
+ __entry->node = zonelist_node_idx(zoneref);
+ __entry->zone_idx = zonelist_zone_idx(zoneref);
__entry->order = order;
__entry->reclaimable = reclaimable;
__entry->available = available;
diff --git a/include/trace/events/osnoise.h b/include/trace/events/osnoise.h
index a2379a4f0684..3f4273623801 100644
--- a/include/trace/events/osnoise.h
+++ b/include/trace/events/osnoise.h
@@ -3,9 +3,105 @@
#define TRACE_SYSTEM osnoise
#if !defined(_OSNOISE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#ifndef _OSNOISE_TRACE_H
#define _OSNOISE_TRACE_H
+/*
+ * osnoise sample structure definition. Used to store the statistics of a
+ * sample run.
+ */
+struct osnoise_sample {
+ u64 runtime; /* runtime */
+ u64 noise; /* noise */
+ u64 max_sample; /* max single noise sample */
+ int hw_count; /* # HW (incl. hypervisor) interference */
+ int nmi_count; /* # NMIs during this sample */
+ int irq_count; /* # IRQs during this sample */
+ int softirq_count; /* # softirqs during this sample */
+ int thread_count; /* # threads during this sample */
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat sample structure definition. Used to store the statistics of
+ * a sample run.
+ */
+struct timerlat_sample {
+ u64 timer_latency; /* timer_latency */
+ unsigned int seqnum; /* unique sequence */
+ int context; /* timer context */
+};
+#endif // CONFIG_TIMERLAT_TRACER
+#endif // _OSNOISE_TRACE_H
#include <linux/tracepoint.h>
+TRACE_EVENT(osnoise_sample,
+
+ TP_PROTO(struct osnoise_sample *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field( u64, runtime )
+ __field( u64, noise )
+ __field( u64, max_sample )
+ __field( int, hw_count )
+ __field( int, irq_count )
+ __field( int, nmi_count )
+ __field( int, softirq_count )
+ __field( int, thread_count )
+ ),
+
+ TP_fast_assign(
+ __entry->runtime = s->runtime;
+ __entry->noise = s->noise;
+ __entry->max_sample = s->max_sample;
+ __entry->hw_count = s->hw_count;
+ __entry->irq_count = s->irq_count;
+ __entry->nmi_count = s->nmi_count;
+ __entry->softirq_count = s->softirq_count;
+ __entry->thread_count = s->thread_count;
+ ),
+
+ TP_printk("runtime=%llu noise=%llu max_sample=%llu hw_count=%d"
+ " irq_count=%d nmi_count=%d softirq_count=%d"
+ " thread_count=%d",
+ __entry->runtime,
+ __entry->noise,
+ __entry->max_sample,
+ __entry->hw_count,
+ __entry->irq_count,
+ __entry->nmi_count,
+ __entry->softirq_count,
+ __entry->thread_count)
+);
+
+#ifdef CONFIG_TIMERLAT_TRACER
+TRACE_EVENT(timerlat_sample,
+
+ TP_PROTO(struct timerlat_sample *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field( u64, timer_latency )
+ __field( unsigned int, seqnum )
+ __field( int, context )
+ ),
+
+ TP_fast_assign(
+ __entry->timer_latency = s->timer_latency;
+ __entry->seqnum = s->seqnum;
+ __entry->context = s->context;
+ ),
+
+ TP_printk("timer_latency=%llu seqnum=%u context=%d",
+ __entry->timer_latency,
+ __entry->seqnum,
+ __entry->context)
+);
+#endif // CONFIG_TIMERLAT_TRACER
+
TRACE_EVENT(thread_noise,
TP_PROTO(struct task_struct *t, u64 start, u64 duration),
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 6834356b2d2a..31825ed30032 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release,
TRACE_EVENT(page_pool_state_release,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 release),
+ netmem_ref netmem, u32 release),
- TP_ARGS(pool, page, release),
+ TP_ARGS(pool, netmem, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, release)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->release)
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx release=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
TP_PROTO(const struct page_pool *pool,
- const struct page *page, u32 hold),
+ netmem_ref netmem, u32 hold),
- TP_ARGS(pool, page, hold),
+ TP_ARGS(pool, netmem, hold),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
- __field(const struct page *, page)
+ __field(unsigned long, netmem)
__field(u32, hold)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
- __entry->page = page;
+ __entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
- __entry->pfn = page_to_pfn(page);
+ __entry->pfn = netmem_pfn_trace(netmem);
),
- TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
- __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+ TP_printk("page_pool=%p netmem=%p is_net_iov=%lu, pfn=0x%lx hold=%u",
+ __entry->pool, (void *)__entry->netmem,
+ __entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
);
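
The page_pool events now record the netmem_ref as a plain unsigned long and report `netmem & NET_IOV`, i.e. the reference is a tagged pointer whose low bit distinguishes a net_iov from a page. A sketch of that bit test (bit-0 tagging and the addresses are assumptions for illustration):

#include <stdio.h>

#define NET_IOV 0x01UL	/* assumed low-bit tag, as the mask test implies */

int main(void)
{
	unsigned long netmem_page   = 0xffff888012345000UL;	/* plain page */
	unsigned long netmem_netiov = 0xffff888012346000UL | NET_IOV;

	printf("netmem=%p is_net_iov=%lu\n",
	       (void *)netmem_page, netmem_page & NET_IOV);
	printf("netmem=%p is_net_iov=%lu\n",
	       (void *)netmem_netiov, netmem_netiov & NET_IOV);
	return 0;
}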
TRACE_EVENT(page_pool_update_nid,
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index fe33a255b7d0..ea6b5c4baf3d 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
@@ -77,7 +77,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index d2349b6b531a..370f8df2fdb4 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -62,28 +62,45 @@ TRACE_EVENT(cpu_idle_miss,
(unsigned long)__entry->state, (__entry->below)?"below":"above")
);
-TRACE_EVENT(powernv_throttle,
+#ifdef CONFIG_ARM_PSCI_CPUIDLE
+DECLARE_EVENT_CLASS(psci_domain_idle,
- TP_PROTO(int chip_id, const char *reason, int pmax),
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
- TP_ARGS(chip_id, reason, pmax),
+ TP_ARGS(cpu_id, state, s2idle),
TP_STRUCT__entry(
- __field(int, chip_id)
- __string(reason, reason)
- __field(int, pmax)
+ __field(u32, cpu_id)
+ __field(u32, state)
+ __field(bool, s2idle)
),
TP_fast_assign(
- __entry->chip_id = chip_id;
- __assign_str(reason);
- __entry->pmax = pmax;
+ __entry->cpu_id = cpu_id;
+ __entry->state = state;
+ __entry->s2idle = s2idle;
),
- TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
- __entry->pmax, __get_str(reason))
+ TP_printk("cpu_id=%lu state=0x%lx is_s2idle=%s",
+ (unsigned long)__entry->cpu_id, (unsigned long)__entry->state,
+ (__entry->s2idle)?"yes":"no")
+);
+
+DEFINE_EVENT(psci_domain_idle, psci_domain_idle_enter,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
+
+ TP_ARGS(cpu_id, state, s2idle)
);
+DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,
+
+ TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
+
+ TP_ARGS(cpu_id, state, s2idle)
+);
+#endif
+
TRACE_EVENT(pstate_sample,
TP_PROTO(u32 core_busy,
@@ -162,7 +179,8 @@ TRACE_EVENT(pstate_sample,
{ PM_EVENT_HIBERNATE, "hibernate" }, \
{ PM_EVENT_THAW, "thaw" }, \
{ PM_EVENT_RESTORE, "restore" }, \
- { PM_EVENT_RECOVER, "recover" })
+ { PM_EVENT_RECOVER, "recover" }, \
+ { PM_EVENT_POWEROFF, "poweroff" })
DEFINE_EVENT(cpu, cpu_frequency,
@@ -195,6 +213,7 @@ TRACE_EVENT(cpu_frequency_limits,
(unsigned long)__entry->cpu_id)
);
+#ifdef CONFIG_PM_SLEEP
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -243,6 +262,7 @@ TRACE_EVENT(device_pm_callback_end,
TP_printk("%s %s, err=%d",
__get_str(driver), __get_str(device), __entry->error)
);
+#endif
TRACE_EVENT(suspend_resume,
@@ -300,53 +320,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
TP_ARGS(name, state)
);
-/*
- * The clock events are used for clock enable/disable and for
- * clock rate change
- */
-DECLARE_EVENT_CLASS(clock,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id),
-
- TP_STRUCT__entry(
- __string( name, name )
- __field( u64, state )
- __field( u64, cpu_id )
- ),
-
- TP_fast_assign(
- __assign_str(name);
- __entry->state = state;
- __entry->cpu_id = cpu_id;
- ),
-
- TP_printk("%s state=%lu cpu_id=%lu", __get_str(name),
- (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_enable,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_disable,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
-DEFINE_EVENT(clock, clock_set_rate,
-
- TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id),
-
- TP_ARGS(name, state, cpu_id)
-);
-
+#ifdef CONFIG_ARCH_OMAP2PLUS
/*
* The power domain events are used for power domains transitions
*/
@@ -378,6 +352,7 @@ DEFINE_EVENT(power_domain, power_domain_target,
TP_ARGS(name, state, cpu_id)
);
+#endif
/*
* CPU latency QoS events used for global CPU latency QoS list updates
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 3f249e150c0c..f99562d2b496 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -43,8 +43,6 @@ DEFINE_EVENT(preemptirq_template, irq_enable,
#else
#define trace_irq_enable(...)
#define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
#endif
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
@@ -58,8 +56,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#else
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
#endif
#endif /* _TRACE_PREEMPTIRQ_H */
@@ -69,10 +65,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
#define trace_irq_enable(...)
#define trace_irq_disable(...)
-#define trace_irq_enable_rcuidle(...)
-#define trace_irq_disable_rcuidle(...)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
-#define trace_preempt_enable_rcuidle(...)
-#define trace_preempt_disable_rcuidle(...)
#endif
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
index 12b35e4ff917..8ba898fd335c 100644
--- a/include/trace/events/pwm.h
+++ b/include/trace/events/pwm.h
@@ -8,14 +8,135 @@
#include <linux/pwm.h>
#include <linux/tracepoint.h>
+#define TP_PROTO_pwm(args...) \
+ TP_PROTO(struct pwm_device *pwm, args)
+
+#define TP_ARGS_pwm(args...) \
+ TP_ARGS(pwm, args)
+
+#define TP_STRUCT__entry_pwm(args...) \
+ TP_STRUCT__entry( \
+ __field(unsigned int, chipid) \
+ __field(unsigned int, hwpwm) \
+ args)
+
+#define TP_fast_assign_pwm(args...) \
+ TP_fast_assign( \
+ __entry->chipid = pwm->chip->id; \
+ __entry->hwpwm = pwm->hwpwm; \
+ args)
+
+#define TP_printk_pwm(fmt, args...) \
+ TP_printk("pwmchip%u.%u: " fmt, __entry->chipid, __entry->hwpwm, args)
+
+#define __field_pwmwf(wf) \
+ __field(u64, wf ## _period_length_ns) \
+ __field(u64, wf ## _duty_length_ns) \
+ __field(u64, wf ## _duty_offset_ns) \
+
+#define fast_assign_pwmwf(wf) \
+ __entry->wf ## _period_length_ns = wf->period_length_ns; \
+ __entry->wf ## _duty_length_ns = wf->duty_length_ns; \
+ __entry->wf ## _duty_offset_ns = wf->duty_offset_ns
+
+#define printk_pwmwf_format(wf) \
+ "%lld/%lld [+%lld]"
+
+#define printk_pwmwf_formatargs(wf) \
+ __entry->wf ## _duty_length_ns, __entry->wf ## _period_length_ns, __entry->wf ## _duty_offset_ns
+
+TRACE_EVENT(pwm_round_waveform_tohw,
+
+ TP_PROTO_pwm(const struct pwm_waveform *wf, void *wfhw, int err),
+
+ TP_ARGS_pwm(wf, wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field_pwmwf(wf)
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ fast_assign_pwmwf(wf);
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm(printk_pwmwf_format(wf) " > %p err=%d",
+ printk_pwmwf_formatargs(wf), __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_round_waveform_fromhw,
+
+ TP_PROTO_pwm(const void *wfhw, struct pwm_waveform *wf, int err),
+
+ TP_ARGS_pwm(wfhw, wf, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field_pwmwf(wf)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ fast_assign_pwmwf(wf);
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p > " printk_pwmwf_format(wf) " err=%d",
+ __entry->wfhw, printk_pwmwf_formatargs(wf), __entry->err)
+);
+
+TRACE_EVENT(pwm_read_waveform,
+
+ TP_PROTO_pwm(void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+TRACE_EVENT(pwm_write_waveform,
+
+ TP_PROTO_pwm(const void *wfhw, int err),
+
+ TP_ARGS_pwm(wfhw, err),
+
+ TP_STRUCT__entry_pwm(
+ __field(const void *, wfhw)
+ __field(int, err)
+ ),
+
+ TP_fast_assign_pwm(
+ __entry->wfhw = wfhw;
+ __entry->err = err;
+ ),
+
+ TP_printk_pwm("%p err=%d",
+ __entry->wfhw, __entry->err)
+);
+
+
DECLARE_EVENT_CLASS(pwm,
TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err),
TP_ARGS(pwm, state, err),
- TP_STRUCT__entry(
- __field(struct pwm_device *, pwm)
+ TP_STRUCT__entry_pwm(
__field(u64, period)
__field(u64, duty_cycle)
__field(enum pwm_polarity, polarity)
@@ -23,8 +144,7 @@ DECLARE_EVENT_CLASS(pwm,
__field(int, err)
),
- TP_fast_assign(
- __entry->pwm = pwm;
+ TP_fast_assign_pwm(
__entry->period = state->period;
__entry->duty_cycle = state->duty_cycle;
__entry->polarity = state->polarity;
@@ -32,8 +152,8 @@ DECLARE_EVENT_CLASS(pwm,
__entry->err = err;
),
- TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
- __entry->pwm, __entry->period, __entry->duty_cycle,
+ TP_printk_pwm("period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d",
+ __entry->period, __entry->duty_cycle,
__entry->polarity, __entry->enabled, __entry->err)
);
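The TP_PROTO_pwm()/TP_ARGS_pwm() helpers defined at the top of this hunk prepend the pwm_device argument and capture chip id plus channel in every event, so each event above only declares its extra payload. Shown as an illustration, not literal preprocessor output, here is the effective shape of pwm_read_waveform and a matching call site (the call-site variable names are assumed):

/* After expansion, pwm_read_waveform behaves as if declared with:
 *
 *	TP_PROTO(struct pwm_device *pwm, void *wfhw, int err)
 *	TP_ARGS(pwm, wfhw, err)
 *
 * so the PWM core would emit it as: */
trace_pwm_read_waveform(pwm, wfhw, err);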
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index f1b5e816e7e5..ff33f41a9db7 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,7 +81,7 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q)->name )
+ __string( dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)" )
__string( kind, q->ops->id )
__field( u32, parent )
__field( u32, handle )
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 31b3e0d3e65f..5fbdabe3faea 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -207,7 +207,7 @@ TRACE_EVENT_RCU(rcu_exp_grace_period,
__entry->gpevent = gpevent;
),
- TP_printk("%s %ld %s",
+ TP_printk("%s %#lx %s",
__entry->rcuname, __entry->gpseq, __entry->gpevent)
);
@@ -466,40 +466,40 @@ TRACE_EVENT(rcu_stall_warning,
/*
* Tracepoint for dyntick-idle entry/exit events. These take 2 strings
* as argument:
- * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not
- * being in dyntick-idle mode.
+ * polarity: "Start", "End", "StillWatching" for entering, exiting or still not
+ * being in EQS mode.
* context: "USER" or "IDLE" or "IRQ".
- * NMIs nested in IRQs are inferred with dynticks_nesting > 1 in IRQ context.
+ * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context.
*
* These events also take a pair of numbers, which indicate the nesting
* depth before and after the event of interest, and a third number that is
- * the ->dynticks counter. Note that task-related and interrupt-related
+ * the RCU_WATCHING counter. Note that task-related and interrupt-related
* events use two separate counters, and that the "++=" and "--=" events
* for irq/NMI will change the counter by two, otherwise by one.
*/
-TRACE_EVENT_RCU(rcu_dyntick,
+TRACE_EVENT_RCU(rcu_watching,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter),
- TP_ARGS(polarity, oldnesting, newnesting, dynticks),
+ TP_ARGS(polarity, oldnesting, newnesting, counter),
TP_STRUCT__entry(
__field(const char *, polarity)
__field(long, oldnesting)
__field(long, newnesting)
- __field(int, dynticks)
+ __field(int, counter)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = dynticks;
+ __entry->counter = counter;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
__entry->oldnesting, __entry->newnesting,
- __entry->dynticks & 0xfff)
+ __entry->counter & 0xfff)
);
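The renamed event keeps the same shape, so an emit site passes the polarity string, the nesting pair and the snapshot counter directly; a hedged sketch (the locals are assumptions):

/* Sketch: one record per EQS transition, per the comment above. */
trace_rcu_watching("Start", oldnesting, newnesting, counter);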
/*
@@ -561,40 +561,6 @@ TRACE_EVENT_RCU(rcu_segcb_stats,
);
/*
- * Tracepoint for the registration of a single RCU callback of the special
- * kvfree() form. The first argument is the RCU type, the second argument
- * is a pointer to the RCU callback, the third argument is the offset
- * of the callback within the enclosing RCU-protected data structure,
- * the fourth argument is the number of lazy callbacks queued, and the
- * fifth argument is the total number of callbacks queued.
- */
-TRACE_EVENT_RCU(rcu_kvfree_callback,
-
- TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen),
-
- TP_ARGS(rcuname, rhp, offset, qlen),
-
- TP_STRUCT__entry(
- __field(const char *, rcuname)
- __field(void *, rhp)
- __field(unsigned long, offset)
- __field(long, qlen)
- ),
-
- TP_fast_assign(
- __entry->rcuname = rcuname;
- __entry->rhp = rhp;
- __entry->offset = offset;
- __entry->qlen = qlen;
- ),
-
- TP_printk("%s rhp=%p func=%ld %ld",
- __entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen)
-);
-
-/*
* Tracepoint for marking the beginning rcu_do_batch, performed to start
* RCU callback invocation. The first argument is the RCU flavor,
* the second is the number of lazy callbacks queued, the third is
diff --git a/include/trace/events/readahead.h b/include/trace/events/readahead.h
new file mode 100644
index 000000000000..0997ac5eceab
--- /dev/null
+++ b/include/trace/events/readahead.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM readahead
+
+#if !defined(_TRACE_READAHEAD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_READAHEAD_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+TRACE_EVENT(page_cache_ra_unbounded,
+ TP_PROTO(struct inode *inode, pgoff_t index, unsigned long nr_to_read,
+ unsigned long lookahead_size),
+
+ TP_ARGS(inode, index, nr_to_read, lookahead_size),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned long, nr_to_read)
+ __field(unsigned long, lookahead_size)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->nr_to_read = nr_to_read;
+ __entry->lookahead_size = lookahead_size;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu nr_to_read=%lu lookahead_size=%lu",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->nr_to_read, __entry->lookahead_size
+ )
+);
+
+TRACE_EVENT(page_cache_ra_order,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra),
+
+ TP_ARGS(inode, index, ra),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->order, __entry->size,
+ __entry->async_size, __entry->ra_pages
+ )
+);
+
+DECLARE_EVENT_CLASS(page_cache_ra_op,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+
+ TP_ARGS(inode, index, ra, req_count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ __field(unsigned int, mmap_miss)
+ __field(loff_t, prev_pos)
+ __field(unsigned long, req_count)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ __entry->mmap_miss = ra->mmap_miss;
+ __entry->prev_pos = ra->prev_pos;
+ __entry->req_count = req_count;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->req_count, __entry->order,
+ __entry->size, __entry->async_size, __entry->ra_pages,
+ __entry->mmap_miss, __entry->prev_pos
+ )
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_sync_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_async_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+#endif /* _TRACE_READAHEAD_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
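These events map onto the readahead entry points in mm/readahead.c; a hedged sketch of how page_cache_ra_unbounded() might emit its event (ractl and the other locals are assumptions about that function, not quoted code):

/* Sketch: the inode comes from the readahead control's mapping. */
trace_page_cache_ra_unbounded(ractl->mapping->host, index,
			      nr_to_read, lookahead_size);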
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 7f0c1ceae726..8aeae06cf434 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -54,7 +54,7 @@ TRACE_DEFINE_ENUM(GSS_S_UNSEQ_TOKEN);
TRACE_DEFINE_ENUM(GSS_S_GAP_TOKEN);
#define show_gss_status(x) \
- __print_flags(x, "|", \
+ __print_symbolic(x, \
{ GSS_S_BAD_MECH, "GSS_S_BAD_MECH" }, \
{ GSS_S_BAD_NAME, "GSS_S_BAD_NAME" }, \
{ GSS_S_BAD_NAMETYPE, "GSS_S_BAD_NAMETYPE" }, \
@@ -409,7 +409,7 @@ TRACE_EVENT(rpcgss_seqno,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->seqno = rqst->rq_seqno;
+ __entry->seqno = *rqst->rq_seqnos;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u",
@@ -440,7 +440,7 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
__entry->seq_xmit = seq_xmit;
- __entry->seqno = task->tk_rqstp->rq_seqno;
+ __entry->seqno = *task->tk_rqstp->rq_seqnos;
__entry->ret = ret;
),
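GSS major-status values are discrete codes, not OR-able flag bits, so __print_flags() could render one status as several spurious names joined with '|'; __print_symbolic() matches the value as a whole. A small userspace analogue of the corrected behaviour; the two table entries are abbreviated assumptions, not the full list above:

#include <stdio.h>

struct sym { unsigned long val; const char *name; };

/* Analogue of __print_symbolic(): whole-value match, not (x & val). */
static const char *show_symbolic(unsigned long x,
				 const struct sym *t, int n)
{
	for (int i = 0; i < n; i++)
		if (t[i].val == x)
			return t[i].name;
	return "UNKNOWN";
}

int main(void)
{
	static const struct sym gss[] = {
		{ 0x10000, "GSS_S_BAD_MECH" },
		{ 0x20000, "GSS_S_BAD_NAME" },
	};

	printf("%s\n", show_symbolic(0x10000, gss, 2));
	return 0;
}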
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 14392652273a..e6a72646c507 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -669,6 +669,29 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
+TRACE_EVENT(xprtrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
TRACE_EVENT(xprtrdma_op_connect,
@@ -2149,6 +2172,29 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
+TRACE_EVENT(svcrdma_device_removal,
+ TP_PROTO(
+ const struct rdma_cm_id *id
+ ),
+
+ TP_ARGS(id),
+
+ TP_STRUCT__entry(
+ __string(name, id->device->name)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ memcpy(__entry->addr, &id->route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+
+ TP_printk("device %s to be removed, disconnecting %pISpc\n",
+ __get_str(name), __entry->addr
+ )
+);
+
DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
@@ -2220,6 +2266,76 @@ TRACE_EVENT(svcrdma_sq_post_err,
)
);
+DECLARE_EVENT_CLASS(rpcrdma_client_device_class,
+ TP_PROTO(
+ const struct ib_device *device
+ ),
+
+ TP_ARGS(device),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ ),
+
+ TP_printk("device=%s",
+ __get_str(name)
+ )
+);
+
+#define DEFINE_CLIENT_DEVICE_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_client_device_class, name, \
+ TP_PROTO( \
+ const struct ib_device *device \
+ ), \
+ TP_ARGS(device) \
+ )
+
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_completion);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_add_one);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_wait_on);
+DEFINE_CLIENT_DEVICE_EVENT(rpcrdma_client_remove_one_done);
+
+DECLARE_EVENT_CLASS(rpcrdma_client_register_class,
+ TP_PROTO(
+ const struct ib_device *device,
+ const struct rpcrdma_notification *rn
+ ),
+
+ TP_ARGS(device, rn),
+
+ TP_STRUCT__entry(
+ __string(name, device->name)
+ __field(void *, callback)
+ __field(u32, index)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->callback = rn->rn_done;
+ __entry->index = rn->rn_index;
+ ),
+
+ TP_printk("device=%s index=%u done callback=%pS\n",
+ __get_str(name), __entry->index, __entry->callback
+ )
+);
+
+#define DEFINE_CLIENT_REGISTER_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_client_register_class, name, \
+ TP_PROTO( \
+ const struct ib_device *device, \
+ const struct rpcrdma_notification *rn \
+ ), \
+ TP_ARGS(device, rn))
+
+DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_register);
+DEFINE_CLIENT_REGISTER_EVENT(rpcrdma_client_unregister);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
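DEFINE_CLIENT_DEVICE_EVENT() stamps five events out of one class, each taking only the ib_device. A hedged sketch of emit sites in the client's add/remove callbacks; the function shapes follow the generic ib_client hooks and are assumptions, not the xprtrdma code:

/* Sketch: log device arrival and departure via the new class. */
static int demo_rpcrdma_add_one(struct ib_device *device)
{
	trace_rpcrdma_client_add_one(device);
	return 0;
}

static void demo_rpcrdma_remove_one(struct ib_device *device, void *data)
{
	trace_rpcrdma_client_remove_one(device);
}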
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
index 823b47d1ba1e..ce85d650bf4b 100644
--- a/include/trace/events/rseq.h
+++ b/include/trace/events/rseq.h
@@ -21,9 +21,9 @@ TRACE_EVENT(rseq_update,
),
TP_fast_assign(
- __entry->cpu_id = raw_smp_processor_id();
+ __entry->cpu_id = t->rseq.ids.cpu_id;
__entry->node_id = cpu_to_node(__entry->cpu_id);
- __entry->mm_cid = task_mm_cid(t);
+ __entry->mm_cid = t->rseq.ids.mm_cid;
),
TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
diff --git a/include/trace/events/rust_sample.h b/include/trace/events/rust_sample.h
new file mode 100644
index 000000000000..dbc80ca2e465
--- /dev/null
+++ b/include/trace/events/rust_sample.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Tracepoints for `samples/rust/rust_print.rs`.
+ *
+ * Copyright (C) 2024 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rust_sample
+
+#if !defined(_RUST_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_SAMPLE_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_sample_loaded,
+ TP_PROTO(int magic_number),
+ TP_ARGS(magic_number),
+ TP_STRUCT__entry(
+ __field(int, magic_number)
+ ),
+ TP_fast_assign(
+ __entry->magic_number = magic_number;
+ ),
+ TP_printk("magic=%d", __entry->magic_number)
+);
+
+#endif /* _RUST_SAMPLE_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
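Although the consumer is a Rust sample, the header is ordinary C: TRACE_EVENT() generates a C-callable trace_rust_sample_loaded(int) that the Rust side reaches through a binding layer. For illustration, a C translation unit could create and emit it directly:

/* Illustrative only; the sample itself calls this from Rust. */
#define CREATE_TRACE_POINTS
#include <trace/events/rust_sample.h>

static void demo_load(void)
{
	trace_rust_sample_loaded(42);	/* records "magic=42" */
}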
diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h
deleted file mode 100644
index 56592da9301c..000000000000
--- a/include/trace/events/rv.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rv
-
-#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_RV_H
-
-#include <linux/rv.h>
-#include <linux/tracepoint.h>
-
-#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT
-DECLARE_EVENT_CLASS(event_da_monitor,
-
- TP_PROTO(char *state, char *event, char *next_state, bool final_state),
-
- TP_ARGS(state, event, next_state, final_state),
-
- TP_STRUCT__entry(
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- __array( char, next_state, MAX_DA_NAME_LEN )
- __field( bool, final_state )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
- __entry->final_state = final_state;
- ),
-
- TP_printk("%s x %s -> %s %s",
- __entry->state,
- __entry->event,
- __entry->next_state,
- __entry->final_state ? "(final)" : "")
-);
-
-DECLARE_EVENT_CLASS(error_da_monitor,
-
- TP_PROTO(char *state, char *event),
-
- TP_ARGS(state, event),
-
- TP_STRUCT__entry(
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- ),
-
- TP_printk("event %s not expected in the state %s",
- __entry->event,
- __entry->state)
-);
-
-#ifdef CONFIG_RV_MON_WIP
-DEFINE_EVENT(event_da_monitor, event_wip,
- TP_PROTO(char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor, error_wip,
- TP_PROTO(char *state, char *event),
- TP_ARGS(state, event));
-#endif /* CONFIG_RV_MON_WIP */
-#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
-
-#ifdef CONFIG_DA_MON_EVENTS_ID
-DECLARE_EVENT_CLASS(event_da_monitor_id,
-
- TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
-
- TP_ARGS(id, state, event, next_state, final_state),
-
- TP_STRUCT__entry(
- __field( int, id )
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- __array( char, next_state, MAX_DA_NAME_LEN )
- __field( bool, final_state )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN);
- __entry->id = id;
- __entry->final_state = final_state;
- ),
-
- TP_printk("%d: %s x %s -> %s %s",
- __entry->id,
- __entry->state,
- __entry->event,
- __entry->next_state,
- __entry->final_state ? "(final)" : "")
-);
-
-DECLARE_EVENT_CLASS(error_da_monitor_id,
-
- TP_PROTO(int id, char *state, char *event),
-
- TP_ARGS(id, state, event),
-
- TP_STRUCT__entry(
- __field( int, id )
- __array( char, state, MAX_DA_NAME_LEN )
- __array( char, event, MAX_DA_NAME_LEN )
- ),
-
- TP_fast_assign(
- memcpy(__entry->state, state, MAX_DA_NAME_LEN);
- memcpy(__entry->event, event, MAX_DA_NAME_LEN);
- __entry->id = id;
- ),
-
- TP_printk("%d: event %s not expected in the state %s",
- __entry->id,
- __entry->event,
- __entry->state)
-);
-
-#ifdef CONFIG_RV_MON_WWNR
-/* id is the pid of the task */
-DEFINE_EVENT(event_da_monitor_id, event_wwnr,
- TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
- TP_ARGS(id, state, event, next_state, final_state));
-
-DEFINE_EVENT(error_da_monitor_id, error_wwnr,
- TP_PROTO(int id, char *state, char *event),
- TP_ARGS(id, state, event));
-#endif /* CONFIG_RV_MON_WWNR */
-
-#endif /* CONFIG_DA_MON_EVENTS_ID */
-#endif /* _TRACE_RV_H */
-
-/* This part ust be outside protection */
-#undef TRACE_INCLUDE_PATH
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index a1b126a6b0d7..de6f6d25767c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -25,6 +25,7 @@
EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \
EM(afs_abort_send_data_error, "afs-send-data") \
EM(afs_abort_unmarshal_error, "afs-unmarshal") \
+ EM(afs_abort_unsupported_sec_class, "afs-unsup-sec-class") \
/* rxperf errors */ \
EM(rxperf_abort_general_error, "rxperf-error") \
EM(rxperf_abort_oom, "rxperf-oom") \
@@ -68,6 +69,38 @@
EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \
EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \
EM(rxkad_abort_resp_version, "rxkad-resp-version") \
+ /* RxGK security errors */ \
+ EM(rxgk_abort_1_verify_mic_eproto, "rxgk1-vfy-mic-eproto") \
+ EM(rxgk_abort_2_decrypt_eproto, "rxgk2-dec-eproto") \
+ EM(rxgk_abort_2_short_data, "rxgk2-short-data") \
+ EM(rxgk_abort_2_short_encdata, "rxgk2-short-encdata") \
+ EM(rxgk_abort_2_short_header, "rxgk2-short-hdr") \
+ EM(rxgk_abort_bad_key_number, "rxgk-bad-key-num") \
+ EM(rxgk_abort_chall_key_expired, "rxgk-chall-key-exp") \
+ EM(rxgk_abort_chall_no_key, "rxgk-chall-nokey") \
+ EM(rxgk_abort_chall_short, "rxgk-chall-short") \
+ EM(rxgk_abort_resp_auth_dec, "rxgk-resp-auth-dec") \
+ EM(rxgk_abort_resp_bad_callid, "rxgk-resp-bad-callid") \
+ EM(rxgk_abort_resp_bad_nonce, "rxgk-resp-bad-nonce") \
+ EM(rxgk_abort_resp_bad_param, "rxgk-resp-bad-param") \
+ EM(rxgk_abort_resp_call_ctr, "rxgk-resp-call-ctr") \
+ EM(rxgk_abort_resp_call_state, "rxgk-resp-call-state") \
+ EM(rxgk_abort_resp_internal_error, "rxgk-resp-int-error") \
+ EM(rxgk_abort_resp_nopkg, "rxgk-resp-nopkg") \
+ EM(rxgk_abort_resp_short_applen, "rxgk-resp-short-applen") \
+ EM(rxgk_abort_resp_short_auth, "rxgk-resp-short-auth") \
+ EM(rxgk_abort_resp_short_call_list, "rxgk-resp-short-callls") \
+ EM(rxgk_abort_resp_short_packet, "rxgk-resp-short-packet") \
+ EM(rxgk_abort_resp_short_yfs_klen, "rxgk-resp-short-yfs-klen") \
+ EM(rxgk_abort_resp_short_yfs_key, "rxgk-resp-short-yfs-key") \
+ EM(rxgk_abort_resp_short_yfs_tkt, "rxgk-resp-short-yfs-tkt") \
+ EM(rxgk_abort_resp_tok_dec, "rxgk-resp-tok-dec") \
+ EM(rxgk_abort_resp_tok_internal_error, "rxgk-resp-tok-int-err") \
+ EM(rxgk_abort_resp_tok_keyerr, "rxgk-resp-tok-keyerr") \
+ EM(rxgk_abort_resp_tok_nokey, "rxgk-resp-tok-nokey") \
+ EM(rxgk_abort_resp_tok_nopkg, "rxgk-resp-tok-nopkg") \
+ EM(rxgk_abort_resp_tok_short, "rxgk-resp-tok-short") \
+ EM(rxgk_abort_resp_xdr_align, "rxgk-resp-xdr-align") \
/* rxrpc errors */ \
EM(rxrpc_abort_call_improper_term, "call-improper-term") \
EM(rxrpc_abort_call_reset, "call-reset") \
@@ -77,6 +110,7 @@
EM(rxrpc_abort_call_timeout, "call-timeout") \
EM(rxrpc_abort_no_service_key, "no-serv-key") \
EM(rxrpc_abort_nomem, "nomem") \
+ EM(rxrpc_abort_response_sendmsg, "resp-sendmsg") \
EM(rxrpc_abort_service_not_offered, "serv-not-offered") \
EM(rxrpc_abort_shut_down, "shut-down") \
EM(rxrpc_abort_unsupported_security, "unsup-sec") \
@@ -117,8 +151,10 @@
#define rxrpc_call_poke_traces \
EM(rxrpc_call_poke_abort, "Abort") \
EM(rxrpc_call_poke_complete, "Compl") \
+ EM(rxrpc_call_poke_conn_abort, "Conn-abort") \
EM(rxrpc_call_poke_error, "Error") \
EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_rx_packet, "Rx-packet") \
EM(rxrpc_call_poke_set_timeout, "Set-timo") \
EM(rxrpc_call_poke_start, "Start") \
EM(rxrpc_call_poke_timer, "Timer") \
@@ -127,28 +163,37 @@
#define rxrpc_skb_traces \
EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_call_rx, "GET call-rx ") \
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
- EM(rxrpc_skb_get_last_nack, "GET last-nack") \
EM(rxrpc_skb_get_local_work, "GET locl-work") \
+ EM(rxrpc_skb_get_post_oob, "GET post-oob ") \
EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
+ EM(rxrpc_skb_new_response_rxgk, "NEW resp-rxgk") \
+ EM(rxrpc_skb_new_response_rxkad, "NEW resp-rxkd") \
EM(rxrpc_skb_new_unshared, "NEW unshared ") \
+ EM(rxrpc_skb_put_call_rx, "PUT call-rx ") \
+ EM(rxrpc_skb_put_challenge, "PUT challenge") \
EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \
EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
EM(rxrpc_skb_put_error_report, "PUT error-rep") \
EM(rxrpc_skb_put_input, "PUT input ") \
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
- EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
+ EM(rxrpc_skb_put_oob, "PUT oob ") \
EM(rxrpc_skb_put_purge, "PUT purge ") \
+ EM(rxrpc_skb_put_purge_oob, "PUT purge-oob") \
+ EM(rxrpc_skb_put_response, "PUT response ") \
EM(rxrpc_skb_put_rotate, "PUT rotate ") \
EM(rxrpc_skb_put_unknown, "PUT unknown ") \
EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+ EM(rxrpc_skb_see_oob_challenge, "SEE oob-chall") \
EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_recvmsg_oob, "SEE recvm-oob") \
EM(rxrpc_skb_see_reject, "SEE reject ") \
EM(rxrpc_skb_see_rotate, "SEE rotate ") \
E_(rxrpc_skb_see_version, "SEE version ")
@@ -214,19 +259,24 @@
EM(rxrpc_conn_free, "FREE ") \
EM(rxrpc_conn_get_activate_call, "GET act-call") \
EM(rxrpc_conn_get_call_input, "GET inp-call") \
+ EM(rxrpc_conn_get_challenge_input, "GET inp-chal") \
EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
EM(rxrpc_conn_get_idle, "GET idle ") \
EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \
+ EM(rxrpc_conn_get_poke_response, "GET response") \
+ EM(rxrpc_conn_get_poke_secured, "GET secured ") \
EM(rxrpc_conn_get_poke_timer, "GET poke ") \
EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
EM(rxrpc_conn_new_client, "NEW client ") \
EM(rxrpc_conn_new_service, "NEW service ") \
EM(rxrpc_conn_put_call, "PUT call ") \
EM(rxrpc_conn_put_call_input, "PUT inp-call") \
+ EM(rxrpc_conn_put_challenge_input, "PUT inp-chal") \
EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \
EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \
EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \
EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \
+ EM(rxrpc_conn_put_oob, "PUT oob ") \
EM(rxrpc_conn_put_poke, "PUT poke ") \
EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \
EM(rxrpc_conn_put_unbundle, "PUT unbundle") \
@@ -272,26 +322,31 @@
EM(rxrpc_call_put_kernel, "PUT kernel ") \
EM(rxrpc_call_put_poke, "PUT poke ") \
EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_release_recvmsg_q, "PUT rls-rcmq") \
EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
- EM(rxrpc_call_put_unnotify, "PUT unnotify") \
EM(rxrpc_call_put_userid_exists, "PUT u-exists") \
EM(rxrpc_call_put_userid, "PUT user-id ") \
EM(rxrpc_call_see_accept, "SEE accept ") \
EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
+ EM(rxrpc_call_see_already_released, "SEE alrdy-rl") \
EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
EM(rxrpc_call_see_connected, "SEE connect ") \
+ EM(rxrpc_call_see_conn_abort, "SEE conn-abt") \
+ EM(rxrpc_call_see_discard, "SEE discard ") \
EM(rxrpc_call_see_disconnected, "SEE disconn ") \
EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
EM(rxrpc_call_see_input, "SEE input ") \
+ EM(rxrpc_call_see_notify_released, "SEE nfy-rlsd") \
+ EM(rxrpc_call_see_recvmsg, "SEE recvmsg ") \
EM(rxrpc_call_see_release, "SEE release ") \
EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ EM(rxrpc_call_see_waiting_call, "SEE q-conn ") \
E_(rxrpc_call_see_zap, "SEE zap ")
#define rxrpc_txqueue_traces \
EM(rxrpc_txqueue_await_reply, "AWR") \
- EM(rxrpc_txqueue_dequeue, "DEQ") \
EM(rxrpc_txqueue_end, "END") \
EM(rxrpc_txqueue_queue, "QUE") \
EM(rxrpc_txqueue_queue_last, "QLS") \
@@ -299,6 +354,13 @@
EM(rxrpc_txqueue_rotate_last, "RLS") \
E_(rxrpc_txqueue_wait, "WAI")
+#define rxrpc_txdata_traces \
+ EM(rxrpc_txdata_inject_loss, " *INJ-LOSS*") \
+ EM(rxrpc_txdata_new_data, " ") \
+ EM(rxrpc_txdata_retransmit, " *RETRANS*") \
+ EM(rxrpc_txdata_tlp_new_data, " *TLP-NEW*") \
+ E_(rxrpc_txdata_tlp_retransmit, " *TLP-RETRANS*")
+
#define rxrpc_receive_traces \
EM(rxrpc_receive_end, "END") \
EM(rxrpc_receive_front, "FRN") \
@@ -320,6 +382,7 @@
EM(rxrpc_recvmsg_full, "FULL") \
EM(rxrpc_recvmsg_hole, "HOLE") \
EM(rxrpc_recvmsg_next, "NEXT") \
+ EM(rxrpc_recvmsg_oobq, "OOBQ") \
EM(rxrpc_recvmsg_requeue, "REQU") \
EM(rxrpc_recvmsg_return, "RETN") \
EM(rxrpc_recvmsg_terminal, "TERM") \
@@ -334,11 +397,10 @@
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
- EM(rxrpc_rtt_rx_other_ack, "OACK") \
+ EM(rxrpc_rtt_rx_data_ack, "DACK") \
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
EM(rxrpc_rtt_rx_lost, "LOST") \
- EM(rxrpc_rtt_rx_ping_response, "PONG") \
- E_(rxrpc_rtt_rx_requested_ack, "RACK")
+ E_(rxrpc_rtt_rx_ping_response, "PONG")
#define rxrpc_timer_traces \
EM(rxrpc_timer_trace_delayed_ack, "DelayAck ") \
@@ -346,11 +408,12 @@
EM(rxrpc_timer_trace_hard, "HardLimit") \
EM(rxrpc_timer_trace_idle, "IdleLimit") \
EM(rxrpc_timer_trace_keepalive, "KeepAlive") \
- EM(rxrpc_timer_trace_lost_ack, "LostAck ") \
EM(rxrpc_timer_trace_ping, "DelayPing") \
- EM(rxrpc_timer_trace_resend, "Resend ") \
- EM(rxrpc_timer_trace_resend_reset, "ResendRst") \
- E_(rxrpc_timer_trace_resend_tx, "ResendTx ")
+ EM(rxrpc_timer_trace_rack_off, "RACK-OFF ") \
+ EM(rxrpc_timer_trace_rack_zwp, "RACK-ZWP ") \
+ EM(rxrpc_timer_trace_rack_reo, "RACK-Reo ") \
+ EM(rxrpc_timer_trace_rack_tlp_pto, "TLP-PTO ") \
+ E_(rxrpc_timer_trace_rack_rto, "RTO ")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
@@ -361,22 +424,24 @@
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
EM(rxrpc_propose_ack_ping_for_0_retrans, "0-Retrn") \
+ EM(rxrpc_propose_ack_ping_for_mtu_probe, "MTUProb") \
EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \
EM(rxrpc_propose_ack_processing_op, "ProcOp ") \
EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
+ EM(rxrpc_propose_ack_retransmit, "Retrans") \
EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \
E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
-#define rxrpc_congest_modes \
- EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
- EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \
- EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \
- E_(RXRPC_CALL_SLOW_START, "SlowStart")
+#define rxrpc_ca_states \
+ EM(RXRPC_CA_CONGEST_AVOIDANCE, "CongAvoid") \
+ EM(RXRPC_CA_FAST_RETRANSMIT, "FastReTx ") \
+ EM(RXRPC_CA_PACKET_LOSS, "PktLoss ") \
+ E_(RXRPC_CA_SLOW_START, "SlowStart")
#define rxrpc_congest_changes \
EM(rxrpc_cong_begin_retransmission, " Retrans") \
@@ -442,14 +507,15 @@
EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
EM(rxrpc_tx_point_reject, "Reject") \
+ EM(rxrpc_tx_point_rxgk_challenge, "RxGKChall") \
EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
- EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
+ EM(rxrpc_tx_point_response, "Response") \
EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
E_(rxrpc_tx_point_version_reply, "VerReply")
#define rxrpc_req_ack_traces \
EM(rxrpc_reqack_ack_lost, "ACK-LOST ") \
- EM(rxrpc_reqack_already_on, "ALREADY-ON") \
+ EM(rxrpc_reqack_app_stall, "APP-STALL ") \
EM(rxrpc_reqack_more_rtt, "MORE-RTT ") \
EM(rxrpc_reqack_no_srv_last, "NO-SRVLAST") \
EM(rxrpc_reqack_old_rtt, "OLD-RTT ") \
@@ -459,21 +525,62 @@
/* ---- Must update size of stat_why_req_ack[] if more are added! */
#define rxrpc_txbuf_traces \
- EM(rxrpc_txbuf_alloc_ack, "ALLOC ACK ") \
EM(rxrpc_txbuf_alloc_data, "ALLOC DATA ") \
+ EM(rxrpc_txbuf_alloc_response, "ALLOC RESP ") \
EM(rxrpc_txbuf_free, "FREE ") \
EM(rxrpc_txbuf_get_buffer, "GET BUFFER ") \
EM(rxrpc_txbuf_get_trans, "GET TRANS ") \
EM(rxrpc_txbuf_get_retrans, "GET RETRANS") \
- EM(rxrpc_txbuf_put_ack_tx, "PUT ACK TX ") \
EM(rxrpc_txbuf_put_cleaned, "PUT CLEANED") \
EM(rxrpc_txbuf_put_nomem, "PUT NOMEM ") \
EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \
+ EM(rxrpc_txbuf_put_response_tx, "PUT RESP TX") \
EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \
EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \
+ EM(rxrpc_txbuf_see_lost, "SEE LOST ") \
EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \
- EM(rxrpc_txbuf_see_send_more, "SEE SEND+ ") \
- E_(rxrpc_txbuf_see_unacked, "SEE UNACKED")
+ E_(rxrpc_txbuf_see_send_more, "SEE SEND+ ")
+
+#define rxrpc_tq_traces \
+ EM(rxrpc_tq_alloc, "ALLOC") \
+ EM(rxrpc_tq_cleaned, "CLEAN") \
+ EM(rxrpc_tq_decant, "DCNT ") \
+ EM(rxrpc_tq_decant_advance, "DCNT>") \
+ EM(rxrpc_tq_queue, "QUEUE") \
+ EM(rxrpc_tq_queue_dup, "QUE!!") \
+ EM(rxrpc_tq_rotate, "ROT ") \
+ EM(rxrpc_tq_rotate_and_free, "ROT-F") \
+ EM(rxrpc_tq_rotate_and_keep, "ROT-K") \
+ EM(rxrpc_tq_transmit, "XMIT ") \
+ E_(rxrpc_tq_transmit_advance, "XMIT>")
+
+#define rxrpc_pmtud_reduce_traces \
+ EM(rxrpc_pmtud_reduce_ack, "Ack ") \
+ EM(rxrpc_pmtud_reduce_icmp, "Icmp ") \
+ E_(rxrpc_pmtud_reduce_route, "Route")
+
+#define rxrpc_rotate_traces \
+ EM(rxrpc_rotate_trace_hack, "hard-ack") \
+ EM(rxrpc_rotate_trace_sack, "soft-ack") \
+ E_(rxrpc_rotate_trace_snak, "soft-nack")
+
+#define rxrpc_rack_timer_modes \
+ EM(RXRPC_CALL_RACKTIMER_OFF, "---") \
+ EM(RXRPC_CALL_RACKTIMER_RACK_REORDER, "REO") \
+ EM(RXRPC_CALL_RACKTIMER_TLP_PTO, "TLP") \
+ E_(RXRPC_CALL_RACKTIMER_RTO, "RTO")
+
+#define rxrpc_tlp_probe_traces \
+ EM(rxrpc_tlp_probe_trace_busy, "busy") \
+ EM(rxrpc_tlp_probe_trace_transmit_new, "transmit-new") \
+ E_(rxrpc_tlp_probe_trace_retransmit, "retransmit")
+
+#define rxrpc_tlp_ack_traces \
+ EM(rxrpc_tlp_ack_trace_acked, "acked") \
+ EM(rxrpc_tlp_ack_trace_dup_acked, "dup-acked") \
+ EM(rxrpc_tlp_ack_trace_hard_beyond, "hard-beyond") \
+ EM(rxrpc_tlp_ack_trace_incomplete, "incomplete") \
+ E_(rxrpc_tlp_ack_trace_new_data, "new-data")
/*
* Generate enums for tracing information.
@@ -495,18 +602,24 @@ enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
enum rxrpc_conn_trace { rxrpc_conn_traces } __mode(byte);
enum rxrpc_local_trace { rxrpc_local_traces } __mode(byte);
enum rxrpc_peer_trace { rxrpc_peer_traces } __mode(byte);
+enum rxrpc_pmtud_reduce_trace { rxrpc_pmtud_reduce_traces } __mode(byte);
enum rxrpc_propose_ack_outcome { rxrpc_propose_ack_outcomes } __mode(byte);
enum rxrpc_propose_ack_trace { rxrpc_propose_ack_traces } __mode(byte);
enum rxrpc_receive_trace { rxrpc_receive_traces } __mode(byte);
enum rxrpc_recvmsg_trace { rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace { rxrpc_req_ack_traces } __mode(byte);
+enum rxrpc_rotate_trace { rxrpc_rotate_traces } __mode(byte);
enum rxrpc_rtt_rx_trace { rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace { rxrpc_rtt_tx_traces } __mode(byte);
enum rxrpc_sack_trace { rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace { rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace { rxrpc_timer_traces } __mode(byte);
+enum rxrpc_tlp_ack_trace { rxrpc_tlp_ack_traces } __mode(byte);
+enum rxrpc_tlp_probe_trace { rxrpc_tlp_probe_traces } __mode(byte);
+enum rxrpc_tq_trace { rxrpc_tq_traces } __mode(byte);
enum rxrpc_tx_point { rxrpc_tx_points } __mode(byte);
enum rxrpc_txbuf_trace { rxrpc_txbuf_traces } __mode(byte);
+enum rxrpc_txdata_trace { rxrpc_txdata_traces } __mode(byte);
enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -524,24 +637,31 @@ enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
rxrpc_abort_reasons;
rxrpc_bundle_traces;
+rxrpc_ca_states;
rxrpc_call_poke_traces;
rxrpc_call_traces;
rxrpc_client_traces;
rxrpc_congest_changes;
-rxrpc_congest_modes;
rxrpc_conn_traces;
rxrpc_local_traces;
+rxrpc_pmtud_reduce_traces;
rxrpc_propose_ack_traces;
+rxrpc_rack_timer_modes;
rxrpc_receive_traces;
rxrpc_recvmsg_traces;
rxrpc_req_ack_traces;
+rxrpc_rotate_traces;
rxrpc_rtt_rx_traces;
rxrpc_rtt_tx_traces;
rxrpc_sack_traces;
rxrpc_skb_traces;
rxrpc_timer_traces;
+rxrpc_tlp_ack_traces;
+rxrpc_tlp_probe_traces;
+rxrpc_tq_traces;
rxrpc_tx_points;
rxrpc_txbuf_traces;
+rxrpc_txdata_traces;
rxrpc_txqueue_traces;
/*
@@ -580,6 +700,20 @@ TRACE_EVENT(rxrpc_local,
__entry->usage)
);
+TRACE_EVENT(rxrpc_iothread_rx,
+ TP_PROTO(struct rxrpc_local *local, unsigned int nr_rx),
+ TP_ARGS(local, nr_rx),
+ TP_STRUCT__entry(
+ __field(unsigned int, local)
+ __field(unsigned int, nr_rx)
+ ),
+ TP_fast_assign(
+ __entry->local = local->debug_id;
+ __entry->nr_rx = nr_rx;
+ ),
+ TP_printk("L=%08x nrx=%u", __entry->local, __entry->nr_rx)
+ );
+
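The new I/O-thread event records how many packets one wakeup drained. A hedged sketch of the emit site; the queue name and loop shape are assumptions about the rxrpc I/O thread, not quoted code:

/* Sketch: count packets pulled off the local endpoint per wakeup. */
struct sk_buff *skb;
unsigned int nr_rx = 0;

while ((skb = skb_dequeue(&local->rx_queue)) != NULL) {
	nr_rx++;
	/* ... dispatch skb into the call/connection machinery ... */
}
trace_rxrpc_iothread_rx(local, nr_rx);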
TRACE_EVENT(rxrpc_peer,
TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),
@@ -772,6 +906,31 @@ TRACE_EVENT(rxrpc_rx_done,
TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
);
+TRACE_EVENT(rxrpc_abort_call,
+ TP_PROTO(const struct rxrpc_call *call, int abort_code),
+
+ TP_ARGS(call, abort_code),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_nr)
+ __field(enum rxrpc_abort_reason, why)
+ __field(int, abort_code)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->call_nr = call->debug_id;
+ __entry->why = call->send_abort_why;
+ __entry->abort_code = abort_code;
+ __entry->error = call->send_abort_err;
+ ),
+
+ TP_printk("c=%08x a=%d e=%d %s",
+ __entry->call_nr,
+ __entry->abort_code, __entry->error,
+ __print_symbolic(__entry->why, rxrpc_abort_reasons))
+ );
+
TRACE_EVENT(rxrpc_abort,
TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
@@ -839,34 +998,101 @@ TRACE_EVENT(rxrpc_txqueue,
TP_STRUCT__entry(
__field(unsigned int, call)
__field(enum rxrpc_txqueue_trace, why)
- __field(rxrpc_seq_t, acks_hard_ack)
__field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, acks_hard_ack)
__field(rxrpc_seq_t, tx_top)
- __field(rxrpc_seq_t, tx_prepared)
+ __field(rxrpc_seq_t, send_top)
__field(int, tx_winsize)
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_bottom = call->tx_bottom;
+ __entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_top = call->tx_top;
- __entry->tx_prepared = call->tx_prepared;
+ __entry->send_top = call->send_top;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u",
+ TP_printk("c=%08x %s b=%08x h=%08x n=%u/%u/%u/%u",
__entry->call,
__print_symbolic(__entry->why, rxrpc_txqueue_traces),
__entry->tx_bottom,
__entry->acks_hard_ack,
- __entry->tx_top - __entry->tx_bottom,
+ __entry->acks_hard_ack - __entry->tx_bottom,
__entry->tx_top - __entry->acks_hard_ack,
- __entry->tx_prepared - __entry->tx_bottom,
+ __entry->send_top - __entry->tx_top,
__entry->tx_winsize)
);
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t send_top, int space),
+
+ TP_ARGS(call, send_top, space),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(u16, space)
+ __field(u16, tx_winsize)
+ __field(u16, cong_cwnd)
+ __field(u16, cong_extra)
+ __field(u16, in_flight)
+ __field(u16, prepared)
+ __field(u16, pmtud_jumbo)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->seq = call->tx_top + 1;
+ __entry->space = space;
+ __entry->tx_winsize = call->tx_winsize;
+ __entry->cong_cwnd = call->cong_cwnd;
+ __entry->cong_extra = call->cong_extra;
+ __entry->prepared = send_top - call->tx_bottom;
+ __entry->in_flight = call->tx_top - call->tx_bottom;
+ __entry->pmtud_jumbo = call->peer->pmtud_jumbo;
+ ),
+
+ TP_printk("c=%08x q=%08x sp=%u tw=%u cw=%u+%u pr=%u if=%u pj=%u",
+ __entry->call,
+ __entry->seq,
+ __entry->space,
+ __entry->tx_winsize,
+ __entry->cong_cwnd,
+ __entry->cong_extra,
+ __entry->prepared,
+ __entry->in_flight,
+ __entry->pmtud_jumbo)
+ );
+
+TRACE_EVENT(rxrpc_tx_rotate,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_seq_t to),
+
+ TP_ARGS(call, seq, to),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, seq)
+ __field(rxrpc_seq_t, to)
+ __field(rxrpc_seq_t, top)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->seq = seq;
+ __entry->to = to;
+ __entry->top = call->tx_top;
+ ),
+
+ TP_printk("c=%08x q=%08x-%08x-%08x",
+ __entry->call,
+ __entry->seq,
+ __entry->to,
+ __entry->top)
+ );
+
TRACE_EVENT(rxrpc_rx_data,
TP_PROTO(unsigned int call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags),
@@ -895,11 +1121,9 @@ TRACE_EVENT(rxrpc_rx_data,
);
TRACE_EVENT(rxrpc_rx_ack,
- TP_PROTO(struct rxrpc_call *call,
- rxrpc_serial_t serial, rxrpc_serial_t ack_serial,
- rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks),
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_skb_priv *sp),
- TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
+ TP_ARGS(call, sp),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -909,23 +1133,26 @@ TRACE_EVENT(rxrpc_rx_ack,
__field(rxrpc_seq_t, prev)
__field(u8, reason)
__field(u8, n_acks)
+ __field(u8, user_status)
),
TP_fast_assign(
- __entry->call = call->debug_id;
- __entry->serial = serial;
- __entry->ack_serial = ack_serial;
- __entry->first = first;
- __entry->prev = prev;
- __entry->reason = reason;
- __entry->n_acks = n_acks;
+ __entry->call = call->debug_id;
+ __entry->serial = sp->hdr.serial;
+ __entry->user_status = sp->hdr.userStatus;
+ __entry->ack_serial = sp->ack.acked_serial;
+ __entry->first = sp->ack.first_ack;
+ __entry->prev = sp->ack.prev_ack;
+ __entry->reason = sp->ack.reason;
+ __entry->n_acks = sp->ack.nr_acks;
),
- TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u",
+ TP_printk("c=%08x %08x %s r=%08x us=%02x f=%08x p=%08x n=%u",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_serial,
+ __entry->user_status,
__entry->first,
__entry->prev,
__entry->n_acks)
@@ -955,6 +1182,62 @@ TRACE_EVENT(rxrpc_rx_abort,
__entry->abort_code)
);
+TRACE_EVENT(rxrpc_rx_conn_abort,
+ TP_PROTO(const struct rxrpc_connection *conn, const struct sk_buff *skb),
+
+ TP_ARGS(conn, skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, abort_code)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = rxrpc_skb(skb)->hdr.serial;
+ __entry->abort_code = skb->priority;
+ ),
+
+ TP_printk("C=%08x ABORT %08x ac=%d",
+ __entry->conn,
+ __entry->serial,
+ __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_tx_challenge,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 nonce),
+
+ TP_ARGS(conn, serial, version, nonce),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(u32, version)
+ __field(u32, nonce)
+ __field(u16, service_id)
+ __field(u8, security_ix)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->nonce = nonce;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
+ ),
+
+ TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->version,
+ __entry->nonce)
+ );
+
TRACE_EVENT(rxrpc_rx_challenge,
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
u32 version, u32 nonce, u32 min_level),
@@ -967,6 +1250,8 @@ TRACE_EVENT(rxrpc_rx_challenge,
__field(u32, version)
__field(u32, nonce)
__field(u32, min_level)
+ __field(u16, service_id)
+ __field(u8, security_ix)
),
TP_fast_assign(
@@ -975,16 +1260,60 @@ TRACE_EVENT(rxrpc_rx_challenge,
__entry->version = version;
__entry->nonce = nonce;
__entry->min_level = min_level;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
),
- TP_printk("C=%08x CHALLENGE %08x v=%x n=%x ml=%x",
+ TP_printk("C=%08x CHALLENGE r=%08x sv=%u+%u v=%x n=%x ml=%x",
__entry->conn,
__entry->serial,
+ __entry->service_id,
+ __entry->security_ix,
__entry->version,
__entry->nonce,
__entry->min_level)
);
+TRACE_EVENT(rxrpc_tx_response,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ struct rxrpc_skb_priv *rsp),
+
+ TP_ARGS(conn, serial, rsp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_serial_t, challenge)
+ __field(u32, version)
+ __field(u32, kvno)
+ __field(u16, ticket_len)
+ __field(u16, appdata_len)
+ __field(u16, service_id)
+ __field(u8, security_ix)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->challenge = rsp->resp.challenge_serial;
+ __entry->version = rsp->resp.version;
+ __entry->kvno = rsp->resp.kvno;
+ __entry->ticket_len = rsp->resp.ticket_len;
+ __entry->service_id = conn->service_id;
+ __entry->security_ix = conn->security_ix;
+ ),
+
+ TP_printk("C=%08x RESPONSE r=%08x cr=%08x sv=%u+%u v=%x kv=%x tl=%u",
+ __entry->conn,
+ __entry->serial,
+ __entry->challenge,
+ __entry->service_id,
+ __entry->security_ix,
+ __entry->version,
+ __entry->kvno,
+ __entry->ticket_len)
+ );
+
TRACE_EVENT(rxrpc_rx_response,
TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
u32 version, u32 kvno, u32 ticket_len),
@@ -997,6 +1326,7 @@ TRACE_EVENT(rxrpc_rx_response,
__field(u32, version)
__field(u32, kvno)
__field(u32, ticket_len)
+ __field(u8, security_ix)
),
TP_fast_assign(
@@ -1005,11 +1335,13 @@ TRACE_EVENT(rxrpc_rx_response,
__entry->version = version;
__entry->kvno = kvno;
__entry->ticket_len = ticket_len;
+ __entry->security_ix = conn->security_ix;
),
- TP_printk("C=%08x RESPONSE %08x v=%x kvno=%x tl=%x",
+ TP_printk("C=%08x RESPONSE r=%08x sx=%u v=%x kvno=%x tl=%x",
__entry->conn,
__entry->serial,
+ __entry->security_ix,
__entry->version,
__entry->kvno,
__entry->ticket_len)
@@ -1076,9 +1408,10 @@ TRACE_EVENT(rxrpc_tx_packet,
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, unsigned int flags, bool lose),
+ rxrpc_serial_t serial, unsigned int flags,
+ enum rxrpc_txdata_trace trace),
- TP_ARGS(call, seq, serial, flags, lose),
+ TP_ARGS(call, seq, serial, flags, trace),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1087,7 +1420,7 @@ TRACE_EVENT(rxrpc_tx_data,
__field(u32, cid)
__field(u32, call_id)
__field(u16, flags)
- __field(bool, lose)
+ __field(enum rxrpc_txdata_trace, trace)
),
TP_fast_assign(
@@ -1097,26 +1430,26 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
- __entry->lose = lose;
+ __entry->trace = trace;
),
- TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s",
__entry->call,
__entry->cid,
__entry->call_id,
__entry->serial,
__entry->seq,
__entry->flags & RXRPC_TXBUF_WIRE_FLAGS,
- __entry->flags & RXRPC_TXBUF_RESENT ? " *RETRANS*" : "",
- __entry->lose ? " *LOSE*" : "")
+ __print_symbolic(__entry->trace, rxrpc_txdata_traces))
);
TRACE_EVENT(rxrpc_tx_ack,
TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
- u8 reason, u8 n_acks, u16 rwind),
+ u8 reason, u8 n_acks, u16 rwind,
+ enum rxrpc_propose_ack_trace trace),
- TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind),
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind, trace),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1126,6 +1459,7 @@ TRACE_EVENT(rxrpc_tx_ack,
__field(u8, reason)
__field(u8, n_acks)
__field(u16, rwind)
+ __field(enum rxrpc_propose_ack_trace, trace)
),
TP_fast_assign(
@@ -1136,16 +1470,18 @@ TRACE_EVENT(rxrpc_tx_ack,
__entry->reason = reason;
__entry->n_acks = n_acks;
__entry->rwind = rwind;
+ __entry->trace = trace;
),
- TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u",
+ TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u %s",
__entry->call,
__entry->serial,
__print_symbolic(__entry->reason, rxrpc_ack_names),
__entry->ack_first,
__entry->ack_serial,
__entry->n_acks,
- __entry->rwind)
+ __entry->rwind,
+ __print_symbolic(__entry->trace, rxrpc_propose_ack_traces))
);
TRACE_EVENT(rxrpc_receive,
@@ -1270,9 +1606,9 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
int slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
- u32 rtt, u32 rto),
+ u32 rtt, u32 srtt, u32 rto),
- TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
+ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, srtt, rto),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1281,7 +1617,9 @@ TRACE_EVENT(rxrpc_rtt_rx,
__field(rxrpc_serial_t, send_serial)
__field(rxrpc_serial_t, resp_serial)
__field(u32, rtt)
+ __field(u32, srtt)
__field(u32, rto)
+ __field(u32, min_rtt)
),
TP_fast_assign(
@@ -1291,17 +1629,21 @@ TRACE_EVENT(rxrpc_rtt_rx,
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
__entry->rtt = rtt;
+ __entry->srtt = srtt;
__entry->rto = rto;
+		__entry->min_rtt = minmax_get(&call->min_rtt);
),
- TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
+ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u srtt=%u rto=%u min=%u",
__entry->call,
__entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
__entry->rtt,
- __entry->rto)
+ __entry->srtt / 8,
+ __entry->rto,
+ __entry->min_rtt)
);
TRACE_EVENT(rxrpc_timer_set,
@@ -1518,112 +1860,125 @@ TRACE_EVENT(rxrpc_drop_ack,
);
TRACE_EVENT(rxrpc_retransmit,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
- rxrpc_serial_t serial, ktime_t expiry),
+ TP_PROTO(struct rxrpc_call *call,
+ struct rxrpc_send_data_req *req,
+ struct rxrpc_txbuf *txb),
- TP_ARGS(call, seq, serial, expiry),
+ TP_ARGS(call, req, txb),
TP_STRUCT__entry(
__field(unsigned int, call)
+ __field(unsigned int, qbase)
__field(rxrpc_seq_t, seq)
__field(rxrpc_serial_t, serial)
- __field(ktime_t, expiry)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->seq = seq;
- __entry->serial = serial;
- __entry->expiry = expiry;
+ __entry->qbase = req->tq->qbase;
+ __entry->seq = req->seq;
+ __entry->serial = txb->serial;
),
- TP_printk("c=%08x q=%x r=%x xp=%lld",
+ TP_printk("c=%08x tq=%x q=%x r=%x",
__entry->call,
+ __entry->qbase,
__entry->seq,
- __entry->serial,
- ktime_to_us(__entry->expiry))
+ __entry->serial)
);
TRACE_EVENT(rxrpc_congest,
- TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
- rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
- TP_ARGS(call, summary, ack_serial, change),
+ TP_ARGS(call, summary),
TP_STRUCT__entry(
__field(unsigned int, call)
- __field(enum rxrpc_congest_change, change)
+ __field(enum rxrpc_ca_state, ca_state)
__field(rxrpc_seq_t, hard_ack)
__field(rxrpc_seq_t, top)
__field(rxrpc_seq_t, lowest_nak)
- __field(rxrpc_serial_t, ack_serial)
+ __field(u16, nr_sacks)
+ __field(u16, nr_snacks)
+ __field(u16, cwnd)
+ __field(u16, ssthresh)
+ __field(u16, cumul_acks)
+ __field(u16, dup_acks)
__field_struct(struct rxrpc_ack_summary, sum)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->change = change;
+ __entry->ca_state = call->cong_ca_state;
__entry->hard_ack = call->acks_hard_ack;
__entry->top = call->tx_top;
__entry->lowest_nak = call->acks_lowest_nak;
- __entry->ack_serial = ack_serial;
+ __entry->nr_sacks = call->acks_nr_sacks;
+ __entry->nr_snacks = call->acks_nr_snacks;
+ __entry->cwnd = call->cong_cwnd;
+ __entry->ssthresh = call->cong_ssthresh;
+ __entry->cumul_acks = call->cong_cumul_acks;
+ __entry->dup_acks = call->cong_dup_acks;
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u A=%u+%u/%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
- __entry->ack_serial,
+ __entry->sum.acked_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
__entry->hard_ack,
- __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
- __entry->sum.cwnd,
- __entry->sum.ssthresh,
- __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
- __entry->sum.nr_new_acks,
- __entry->sum.nr_new_nacks,
+ __print_symbolic(__entry->ca_state, rxrpc_ca_states),
+ __entry->cwnd,
+ __entry->ssthresh,
+ __entry->nr_sacks, __entry->sum.nr_new_sacks,
+ __entry->nr_snacks, __entry->sum.nr_new_snacks,
+ __entry->sum.nr_new_hacks,
__entry->top - __entry->hard_ack,
- __entry->sum.cumulative_acks,
- __entry->sum.dup_acks,
- __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
- __print_symbolic(__entry->change, rxrpc_congest_changes),
+ __entry->cumul_acks,
+ __entry->dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_snack ? "!" : "",
+ __print_symbolic(__entry->sum.change, rxrpc_congest_changes),
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
TRACE_EVENT(rxrpc_reset_cwnd,
- TP_PROTO(struct rxrpc_call *call, ktime_t now),
+ TP_PROTO(struct rxrpc_call *call, ktime_t since_last_tx, ktime_t rtt),
- TP_ARGS(call, now),
+ TP_ARGS(call, since_last_tx, rtt),
TP_STRUCT__entry(
__field(unsigned int, call)
- __field(enum rxrpc_congest_mode, mode)
+ __field(enum rxrpc_ca_state, ca_state)
__field(unsigned short, cwnd)
__field(unsigned short, extra)
__field(rxrpc_seq_t, hard_ack)
__field(rxrpc_seq_t, prepared)
__field(ktime_t, since_last_tx)
+ __field(ktime_t, rtt)
__field(bool, has_data)
),
TP_fast_assign(
__entry->call = call->debug_id;
- __entry->mode = call->cong_mode;
+ __entry->ca_state = call->cong_ca_state;
__entry->cwnd = call->cong_cwnd;
__entry->extra = call->cong_extra;
__entry->hard_ack = call->acks_hard_ack;
- __entry->prepared = call->tx_prepared - call->tx_bottom;
- __entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
- __entry->has_data = !list_empty(&call->tx_sendmsg);
+ __entry->prepared = call->send_top - call->tx_bottom;
+ __entry->since_last_tx = since_last_tx;
+ __entry->rtt = rtt;
+ __entry->has_data = call->tx_bottom != call->tx_top;
),
- TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
+ TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu/%llu d=%u",
__entry->call,
__entry->hard_ack,
- __print_symbolic(__entry->mode, rxrpc_congest_modes),
+ __print_symbolic(__entry->ca_state, rxrpc_ca_states),
__entry->cwnd,
__entry->extra,
__entry->prepared,
- ktime_to_ns(__entry->since_last_tx),
+ ktime_to_us(__entry->since_last_tx),
+ ktime_to_us(__entry->rtt),
__entry->has_data)
);
@@ -1696,10 +2051,36 @@ TRACE_EVENT(rxrpc_connect_call,
&__entry->srx.transport)
);
+TRACE_EVENT(rxrpc_apply_acks,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq),
+
+ TP_ARGS(call, tq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(unsigned int, nr_rep)
+ __field(rxrpc_seq_t, qbase)
+ __field(unsigned long, acks)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->acks = tq->segment_acked;
+ __entry->nr_rep = tq->nr_reported_acks;
+ ),
+
+ TP_printk("c=%08x tq=%x acks=%016lx rep=%u",
+ __entry->call,
+ __entry->qbase,
+ __entry->acks,
+ __entry->nr_rep)
+ );
+
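+The acks=%016lx field is a per-txqueue SACK bitmap. Assuming bit n of
+segment_acked covers sequence qbase + n (consistent with the qbase
+field traced above), it can be decoded with a hypothetical helper like
+this:
+
+#include <stdio.h>
+
+static void dump_sacked(unsigned int qbase, unsigned long acks)
+{
+        for (int n = 0; n < 64; n++)
+                if (acks & (1UL << n))
+                        printf("seq %#x acked\n", qbase + n);
+}
+
+int main(void)
+{
+        dump_sacked(0x40, 0x5UL);       /* reports 0x40 and 0x42 */
+        return 0;
+}
+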
TRACE_EVENT(rxrpc_resend,
- TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t ack_serial),
- TP_ARGS(call, ack),
+ TP_ARGS(call, ack_serial),
TP_STRUCT__entry(
__field(unsigned int, call)
@@ -1709,11 +2090,10 @@ TRACE_EVENT(rxrpc_resend,
),
TP_fast_assign(
- struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL;
__entry->call = call->debug_id;
__entry->seq = call->acks_hard_ack;
__entry->transmitted = call->tx_transmitted;
- __entry->ack_serial = sp ? sp->hdr.serial : 0;
+ __entry->ack_serial = ack_serial;
),
TP_printk("c=%08x r=%x q=%x tq=%x",
@@ -1723,6 +2103,63 @@ TRACE_EVENT(rxrpc_resend,
__entry->transmitted)
);
+TRACE_EVENT(rxrpc_resend_lost,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq, unsigned long lost),
+
+ TP_ARGS(call, tq, lost),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(u8, nr_rep)
+ __field(unsigned long, lost)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->nr_rep = tq->nr_reported_acks;
+ __entry->lost = lost;
+ ),
+
+ TP_printk("c=%08x tq=%x lost=%016lx nr=%u",
+ __entry->call,
+ __entry->qbase,
+ __entry->lost,
+ __entry->nr_rep)
+ );
+
+TRACE_EVENT(rxrpc_rotate,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
+ struct rxrpc_ack_summary *summary, rxrpc_seq_t seq,
+ enum rxrpc_rotate_trace trace),
+
+ TP_ARGS(call, tq, summary, seq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, seq)
+ __field(unsigned int, nr_rep)
+ __field(enum rxrpc_rotate_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->seq = seq;
+ __entry->nr_rep = tq->nr_reported_acks;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x tq=%x q=%x nr=%x %s",
+ __entry->call,
+ __entry->qbase,
+ __entry->seq,
+ __entry->nr_rep,
+ __print_symbolic(__entry->trace, rxrpc_rotate_traces))
+ );
+
TRACE_EVENT(rxrpc_rx_icmp,
TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
struct sockaddr_rxrpc *srx),
@@ -1832,38 +2269,36 @@ TRACE_EVENT(rxrpc_notify_socket,
);
TRACE_EVENT(rxrpc_rx_discard_ack,
- TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
- rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
- rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt),
- TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
- prev_pkt, call_ackr_prev),
+ TP_ARGS(call, serial, hard_ack, prev_pkt),
TP_STRUCT__entry(
__field(unsigned int, debug_id)
__field(rxrpc_serial_t, serial)
- __field(rxrpc_seq_t, first_soft_ack)
- __field(rxrpc_seq_t, call_ackr_first)
+ __field(rxrpc_seq_t, hard_ack)
__field(rxrpc_seq_t, prev_pkt)
- __field(rxrpc_seq_t, call_ackr_prev)
+ __field(rxrpc_seq_t, acks_hard_ack)
+ __field(rxrpc_seq_t, acks_prev_seq)
),
TP_fast_assign(
- __entry->debug_id = debug_id;
+ __entry->debug_id = call->debug_id;
__entry->serial = serial;
- __entry->first_soft_ack = first_soft_ack;
- __entry->call_ackr_first = call_ackr_first;
+ __entry->hard_ack = hard_ack;
__entry->prev_pkt = prev_pkt;
- __entry->call_ackr_prev = call_ackr_prev;
+ __entry->acks_hard_ack = call->acks_hard_ack;
+ __entry->acks_prev_seq = call->acks_prev_seq;
),
TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
__entry->debug_id,
__entry->serial,
- __entry->first_soft_ack,
- __entry->call_ackr_first,
+ __entry->hard_ack,
+ __entry->acks_hard_ack,
__entry->prev_pkt,
- __entry->call_ackr_prev)
+ __entry->acks_prev_seq)
);
TRACE_EVENT(rxrpc_req_ack,
@@ -1921,6 +2356,33 @@ TRACE_EVENT(rxrpc_txbuf,
__entry->ref)
);
+TRACE_EVENT(rxrpc_tq,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_txqueue *tq,
+ rxrpc_seq_t seq, enum rxrpc_tq_trace trace),
+
+ TP_ARGS(call, tq, seq, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_tq_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->qbase = tq ? tq->qbase : call->tx_qbase;
+ __entry->seq = seq;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x bq=%08x q=%08x %s",
+ __entry->call_debug_id,
+ __entry->qbase,
+ __entry->seq,
+ __print_symbolic(__entry->trace, rxrpc_tq_traces))
+ );
+
TRACE_EVENT(rxrpc_poke_call,
TP_PROTO(struct rxrpc_call *call, bool busy,
enum rxrpc_call_poke_trace what),
@@ -1989,6 +2451,384 @@ TRACE_EVENT(rxrpc_sack,
__entry->sack)
);
+TRACE_EVENT(rxrpc_pmtud_tx,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(unsigned short, pmtud_trial)
+ __field(unsigned short, pmtud_good)
+ __field(unsigned short, pmtud_bad)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = call->peer->debug_id;
+ __entry->call_debug_id = call->debug_id;
+ __entry->ping_serial = call->conn->pmtud_probe;
+ __entry->pmtud_trial = call->peer->pmtud_trial;
+ __entry->pmtud_good = call->peer->pmtud_good;
+ __entry->pmtud_bad = call->peer->pmtud_bad;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x %u-%u-%u",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->pmtud_good,
+ __entry->pmtud_trial,
+ __entry->pmtud_bad)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_rx,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
+
+ TP_ARGS(conn, resp_serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ __field(unsigned short, max_data)
+ __field(u8, jumbo_max)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = conn->peer->debug_id;
+ __entry->call_debug_id = conn->pmtud_call;
+ __entry->ping_serial = conn->pmtud_probe;
+ __entry->resp_serial = resp_serial;
+ __entry->max_data = conn->peer->max_data;
+ __entry->jumbo_max = conn->peer->pmtud_jumbo;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x rr=%08x max=%u jm=%u",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->resp_serial,
+ __entry->max_data,
+ __entry->jumbo_max)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_lost,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t resp_serial),
+
+ TP_ARGS(conn, resp_serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(unsigned int, call_debug_id)
+ __field(rxrpc_serial_t, ping_serial)
+ __field(rxrpc_serial_t, resp_serial)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = conn->peer->debug_id;
+ __entry->call_debug_id = conn->pmtud_call;
+ __entry->ping_serial = conn->pmtud_probe;
+ __entry->resp_serial = resp_serial;
+ ),
+
+ TP_printk("P=%08x c=%08x pr=%08x rr=%08x",
+ __entry->peer_debug_id,
+ __entry->call_debug_id,
+ __entry->ping_serial,
+ __entry->resp_serial)
+ );
+
+TRACE_EVENT(rxrpc_pmtud_reduce,
+ TP_PROTO(struct rxrpc_peer *peer, rxrpc_serial_t serial,
+ unsigned int max_data, enum rxrpc_pmtud_reduce_trace reason),
+
+ TP_ARGS(peer, serial, max_data, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, peer_debug_id)
+ __field(rxrpc_serial_t, serial)
+ __field(unsigned int, max_data)
+ __field(enum rxrpc_pmtud_reduce_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->peer_debug_id = peer->debug_id;
+ __entry->serial = serial;
+ __entry->max_data = max_data;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("P=%08x %s r=%08x m=%u",
+ __entry->peer_debug_id,
+ __print_symbolic(__entry->reason, rxrpc_pmtud_reduce_traces),
+ __entry->serial, __entry->max_data)
+ );
+
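+The good-trial-bad triple printed by rxrpc_pmtud_tx suggests the probe
+size is searched between the largest size known to work and the
+smallest known to fail. A sketch of one bisection step (an assumed
+model for illustration, not rxrpc's exact algorithm):
+
+#include <stdio.h>
+
+struct pmtud { unsigned short good, trial, bad; };
+
+static void pmtud_next_probe(struct pmtud *p, int trial_worked)
+{
+        if (trial_worked)
+                p->good = p->trial;             /* raise the floor */
+        else
+                p->bad = p->trial;              /* lower the ceiling */
+        p->trial = (p->good + p->bad) / 2;
+}
+
+int main(void)
+{
+        struct pmtud p = { .good = 1280, .trial = 5140, .bad = 9000 };
+
+        pmtud_next_probe(&p, 0);                /* probe was lost */
+        printf("%u-%u-%u\n", p.good, p.trial, p.bad);   /* 1280-3210-5140 */
+        return 0;
+}
+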
+TRACE_EVENT(rxrpc_rack,
+ TP_PROTO(struct rxrpc_call *call, ktime_t timo),
+
+ TP_ARGS(call, timo),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_rack_timer_mode, mode)
+ __field(unsigned short, nr_sent)
+ __field(unsigned short, nr_lost)
+ __field(unsigned short, nr_resent)
+ __field(unsigned short, nr_sacked)
+ __field(ktime_t, timo)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = call->rack_end_seq;
+ __entry->mode = call->rack_timer_mode;
+ __entry->nr_sent = call->tx_nr_sent;
+ __entry->nr_lost = call->tx_nr_lost;
+ __entry->nr_resent = call->tx_nr_resent;
+ __entry->nr_sacked = call->acks_nr_sacks;
+ __entry->timo = timo;
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x %s slrs=%u,%u,%u,%u t=%lld",
+ __entry->call, __entry->ack_serial, __entry->seq,
+ __print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
+ __entry->nr_sent, __entry->nr_lost,
+ __entry->nr_resent, __entry->nr_sacked,
+ ktime_to_us(__entry->timo))
+ );
+
+TRACE_EVENT(rxrpc_rack_update,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary),
+
+ TP_ARGS(call, summary),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ __field(int, xmit_ts)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = call->rack_end_seq;
+ __entry->xmit_ts = ktime_sub(call->acks_latest_ts, call->rack_xmit_ts);
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x xt=%lld",
+ __entry->call, __entry->ack_serial, __entry->seq,
+ ktime_to_us(__entry->xmit_ts))
+ );
+
+TRACE_EVENT(rxrpc_rack_scan_loss,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(ktime_t, rack_rtt)
+ __field(ktime_t, rack_reo_wnd)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->rack_rtt = call->rack_rtt;
+ __entry->rack_reo_wnd = call->rack_reo_wnd;
+ ),
+
+ TP_printk("c=%08x rtt=%lld reow=%lld",
+ __entry->call, ktime_to_us(__entry->rack_rtt),
+ ktime_to_us(__entry->rack_reo_wnd))
+ );
+
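+rack_rtt and rack_reo_wnd feed the RACK loss test. Per RFC 8985, which
+this appears to follow (the exact rxrpc bookkeeping may differ), a
+sent-but-unacked segment is declared lost once it was transmitted
+before the most recently acked segment and rack_rtt + reo_wnd has
+since elapsed:
+
+#include <stdbool.h>
+#include <stdio.h>
+
+typedef long long us_t;
+
+static bool rack_lost(us_t now, us_t seg_sent, us_t newest_acked_sent,
+                      us_t rack_rtt, us_t reo_wnd)
+{
+        return seg_sent < newest_acked_sent &&
+               now - seg_sent >= rack_rtt + reo_wnd;
+}
+
+int main(void)
+{
+        /* sent at t=0, a later segment (sent at t=10) was acked,
+         * rtt=100us, reo_wnd=25us: lost from t=125 onwards */
+        printf("%d %d\n", rack_lost(120, 0, 10, 100, 25),
+                          rack_lost(125, 0, 10, 100, 25));     /* 0 1 */
+        return 0;
+}
+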
+TRACE_EVENT(rxrpc_rack_scan_loss_tq,
+ TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq,
+ unsigned long nacks),
+
+ TP_ARGS(call, tq, nacks),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(unsigned long, nacks)
+ __field(unsigned long, lost)
+ __field(unsigned long, retrans)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->nacks = nacks;
+ __entry->lost = tq->segment_lost;
+ __entry->retrans = tq->segment_retransmitted;
+ ),
+
+ TP_printk("c=%08x q=%08x n=%lx l=%lx r=%lx",
+ __entry->call, __entry->qbase,
+ __entry->nacks, __entry->lost, __entry->retrans)
+ );
+
+TRACE_EVENT(rxrpc_rack_detect_loss,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_seq_t seq),
+
+ TP_ARGS(call, summary, seq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, ack_serial)
+ __field(rxrpc_seq_t, seq)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->ack_serial = call->rx_serial;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("c=%08x r=%08x q=%08x",
+ __entry->call, __entry->ack_serial, __entry->seq)
+ );
+
+TRACE_EVENT(rxrpc_rack_mark_loss_tq,
+ TP_PROTO(struct rxrpc_call *call, const struct rxrpc_txqueue *tq),
+
+ TP_ARGS(call, tq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_seq_t, qbase)
+ __field(rxrpc_seq_t, trans)
+ __field(unsigned long, acked)
+ __field(unsigned long, lost)
+ __field(unsigned long, retrans)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->qbase = tq->qbase;
+ __entry->trans = call->tx_transmitted;
+ __entry->acked = tq->segment_acked;
+ __entry->lost = tq->segment_lost;
+ __entry->retrans = tq->segment_retransmitted;
+ ),
+
+ TP_printk("c=%08x tq=%08x txq=%08x a=%lx l=%lx r=%lx",
+ __entry->call, __entry->qbase, __entry->trans,
+ __entry->acked, __entry->lost, __entry->retrans)
+ );
+
+TRACE_EVENT(rxrpc_tlp_probe,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_tlp_probe_trace trace),
+
+ TP_ARGS(call, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, seq)
+ __field(enum rxrpc_tlp_probe_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->serial = call->tlp_serial;
+ __entry->seq = call->tlp_seq;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x r=%08x pq=%08x %s",
+ __entry->call, __entry->serial, __entry->seq,
+ __print_symbolic(__entry->trace, rxrpc_tlp_probe_traces))
+ );
+
+TRACE_EVENT(rxrpc_tlp_ack,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ enum rxrpc_tlp_ack_trace trace),
+
+ TP_ARGS(call, summary, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(rxrpc_serial_t, serial)
+ __field(rxrpc_seq_t, tlp_seq)
+ __field(rxrpc_seq_t, hard_ack)
+ __field(enum rxrpc_tlp_ack_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->serial = call->tlp_serial;
+ __entry->tlp_seq = call->tlp_seq;
+ __entry->hard_ack = call->acks_hard_ack;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("c=%08x r=%08x pq=%08x hq=%08x %s",
+ __entry->call, __entry->serial,
+ __entry->tlp_seq, __entry->hard_ack,
+ __print_symbolic(__entry->trace, rxrpc_tlp_ack_traces))
+ );
+
+TRACE_EVENT(rxrpc_rack_timer,
+ TP_PROTO(struct rxrpc_call *call, ktime_t delay, bool exp),
+
+ TP_ARGS(call, delay, exp),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, exp)
+ __field(enum rxrpc_rack_timer_mode, mode)
+ __field(ktime_t, delay)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->exp = exp;
+ __entry->mode = call->rack_timer_mode;
+ __entry->delay = delay;
+ ),
+
+ TP_printk("c=%08x %s %s to=%lld",
+ __entry->call,
+ __entry->exp ? "Exp" : "Set",
+ __print_symbolic(__entry->mode, rxrpc_rack_timer_modes),
+ ktime_to_us(__entry->delay))
+ );
+
+TRACE_EVENT(rxrpc_rxgk_rekey,
+ TP_PROTO(struct rxrpc_connection *conn,
+ unsigned int current_key, unsigned int requested_key),
+
+ TP_ARGS(conn, current_key, requested_key),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn)
+ __field(unsigned int, current_key)
+ __field(unsigned int, requested_key)
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->current_key = current_key;
+ __entry->requested_key = requested_key;
+ ),
+
+ TP_printk("C=%08x cur=%x req=%x",
+ __entry->conn,
+ __entry->current_key,
+ __entry->requested_key)
+ );
+
#undef EM
#undef E_
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6df2b4685b08..7b2645b50e78 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -20,16 +20,16 @@ TRACE_EVENT(sched_kthread_stop,
TP_ARGS(t),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
+ __string( comm, t->comm )
+ __field( pid_t, pid )
),
TP_fast_assign(
- memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = t->pid;
),
- TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+ TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
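
This hunk repeats one conversion across the sched events:
__array(char, comm, TASK_COMM_LEN) plus memcpy() becomes __string(),
__assign_str() and __get_str(), which record only strlen()+1 bytes in
the ring buffer instead of a fixed 16-byte slot. As the hunk shows,
__assign_str() now takes no source argument: __string() already
captured the pointer. A userspace analogy of the space saving
(illustration only, not the tracing macros themselves):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

int main(void)
{
        const char *comm = "kswapd0";
        size_t fixed   = TASK_COMM_LEN;         /* __array() cost */
        size_t dynamic = strlen(comm) + 1;      /* __string() cost */

        printf("fixed=%zu dynamic=%zu saved=%zu bytes/event\n",
               fixed, dynamic, fixed - dynamic);
        return 0;
}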
/*
@@ -193,9 +193,7 @@ static inline long __trace_sched_switch_state(bool preempt,
{
unsigned int state;
-#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
-#endif /* CONFIG_SCHED_DEBUG */
/*
* Preemption ignores task state, therefore preempted tasks are always
@@ -239,11 +237,11 @@ TRACE_EVENT(sched_switch,
),
TP_fast_assign(
- memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
- memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
/* XXX SCHED_DEADLINE */
@@ -278,15 +276,15 @@ TRACE_EVENT(sched_migrate_task,
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, prio )
- __field( int, orig_cpu )
- __field( int, dest_cpu )
+ __string( comm, p->comm )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( int, orig_cpu )
+ __field( int, dest_cpu )
),
TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->orig_cpu = task_cpu(p);
@@ -294,7 +292,7 @@ TRACE_EVENT(sched_migrate_task,
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
- __entry->comm, __entry->pid, __entry->prio,
+ __get_str(comm), __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
@@ -305,19 +303,19 @@ DECLARE_EVENT_CLASS(sched_process_template,
TP_ARGS(p),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, prio )
+ __string( comm, p->comm )
+ __field( pid_t, pid )
+ __field( int, prio )
),
TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = p->pid;
__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
- __entry->comm, __entry->pid, __entry->prio)
+ __get_str(comm), __entry->pid, __entry->prio)
);
/*
@@ -328,11 +326,37 @@ DEFINE_EVENT(sched_process_template, sched_process_free,
TP_ARGS(p));
/*
- * Tracepoint for a task exiting:
+ * Tracepoint for a task exiting.
+ * Note: it's a superset of sched_process_template and should be kept
+ * as compatible as possible. sched_process_exit has an extra
+ * `group_dead` argument, so sched_process_template can't be used,
+ * unfortunately, just like sched_migrate_task above.
*/
-DEFINE_EVENT(sched_process_template, sched_process_exit,
- TP_PROTO(struct task_struct *p),
- TP_ARGS(p));
+TRACE_EVENT(sched_process_exit,
+
+ TP_PROTO(struct task_struct *p, bool group_dead),
+
+ TP_ARGS(p, group_dead),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, prio )
+ __field( bool, group_dead )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+ __entry->pid = p->pid;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
+ __entry->group_dead = group_dead;
+ ),
+
+ TP_printk("comm=%s pid=%d prio=%d group_dead=%s",
+ __entry->comm, __entry->pid, __entry->prio,
+ __entry->group_dead ? "true" : "false"
+ )
+);
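+
+With the extra group_dead flag this event can no longer share the
+template, but it is consumed the same way as any tracefs event. A
+minimal reader, assuming tracefs is mounted at /sys/kernel/tracing and
+run as root (a sketch, not a hardened tool):
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+        int ev = open("/sys/kernel/tracing/events/sched/sched_process_exit/enable",
+                      O_WRONLY);
+        if (ev < 0 || write(ev, "1", 1) != 1) {
+                perror("enable");
+                return 1;
+        }
+        close(ev);
+
+        FILE *pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
+        if (!pipe) {
+                perror("trace_pipe");
+                return 1;
+        }
+
+        char line[512];
+        while (fgets(line, sizeof(line), pipe))
+                fputs(line, stdout);    /* comm=... pid=... group_dead=... */
+        return 0;
+}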
/*
* Tracepoint for waiting on task to unschedule:
@@ -351,19 +375,19 @@ TRACE_EVENT(sched_process_wait,
TP_ARGS(pid),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
+ __string( comm, current->comm )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
- memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
- __entry->comm, __entry->pid, __entry->prio)
+ __get_str(comm), __entry->pid, __entry->prio)
);
/*
@@ -376,22 +400,22 @@ TRACE_EVENT(sched_process_fork,
TP_ARGS(parent, child),
TP_STRUCT__entry(
- __array( char, parent_comm, TASK_COMM_LEN )
- __field( pid_t, parent_pid )
- __array( char, child_comm, TASK_COMM_LEN )
- __field( pid_t, child_pid )
+ __string( parent_comm, parent->comm )
+ __field( pid_t, parent_pid )
+ __string( child_comm, child->comm )
+ __field( pid_t, child_pid )
),
TP_fast_assign(
- memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
+ __assign_str(parent_comm);
__entry->parent_pid = parent->pid;
- memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
+ __assign_str(child_comm);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
- __entry->parent_comm, __entry->parent_pid,
- __entry->child_comm, __entry->child_pid)
+ __get_str(parent_comm), __entry->parent_pid,
+ __get_str(child_comm), __entry->child_pid)
);
/*
@@ -475,19 +499,19 @@ DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,
TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( u64, delay )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( u64, delay )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->delay = delay;
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
- __entry->comm, __entry->pid,
+ __get_str(comm), __entry->pid,
(unsigned long long)__entry->delay)
);
@@ -533,19 +557,19 @@ DECLARE_EVENT_CLASS(sched_stat_runtime,
TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( u64, runtime )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( u64, runtime )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
- __entry->comm, __entry->pid,
+ __get_str(comm), __entry->pid,
(unsigned long long)__entry->runtime)
);
@@ -564,14 +588,14 @@ TRACE_EVENT(sched_pi_setprio,
TP_ARGS(tsk, pi_task),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
- __field( int, oldprio )
- __field( int, newprio )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
+ __field( int, oldprio )
+ __field( int, newprio )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = pi_task ?
@@ -581,7 +605,7 @@ TRACE_EVENT(sched_pi_setprio,
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
- __entry->comm, __entry->pid,
+ __get_str(comm), __entry->pid,
__entry->oldprio, __entry->newprio)
);
@@ -591,19 +615,20 @@ TRACE_EVENT(sched_process_hang,
TP_ARGS(tsk),
TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
- __field( pid_t, pid )
+ __string( comm, tsk->comm )
+ __field( pid_t, pid )
),
TP_fast_assign(
- memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __assign_str(comm);
__entry->pid = tsk->pid;
),
- TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+ TP_printk("comm=%s pid=%d", __get_str(comm), __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
+#ifdef CONFIG_NUMA_BALANCING
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes.
@@ -696,7 +721,6 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
-#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON \
EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
@@ -747,6 +771,39 @@ TRACE_EVENT(sched_skip_vma_numa,
__entry->vm_end,
__print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
);
+
+TRACE_EVENT(sched_skip_cpuset_numa,
+
+ TP_PROTO(struct task_struct *tsk, nodemask_t *mem_allowed_ptr),
+
+ TP_ARGS(tsk, mem_allowed_ptr),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( pid_t, tgid )
+ __field( pid_t, ngid )
+ __array( unsigned long, mem_allowed, BITS_TO_LONGS(MAX_NUMNODES))
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = task_pid_nr(tsk);
+ __entry->tgid = task_tgid_nr(tsk);
+ __entry->ngid = task_numa_group_id(tsk);
+ BUILD_BUG_ON(sizeof(nodemask_t) != \
+ BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long));
+ memcpy(__entry->mem_allowed, mem_allowed_ptr->bits,
+ sizeof(__entry->mem_allowed));
+ ),
+
+ TP_printk("comm=%s pid=%d tgid=%d ngid=%d mem_nodes_allowed=%*pbl",
+ __entry->comm,
+ __entry->pid,
+ __entry->tgid,
+ __entry->ngid,
+ MAX_NUMNODES, __entry->mem_allowed)
+);
#endif /* CONFIG_NUMA_BALANCING */
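
The mem_nodes_allowed=%*pbl specifier renders the nodemask as a ranged
list such as "0-2,4-5". A standalone re-implementation of that
rendering for a single word (for illustration; the kernel's version
lives in lib/vsprintf.c and handles arbitrary widths):

#include <stdio.h>

static void print_bitmap_list(unsigned long bits, int nbits)
{
        const char *sep = "";
        int start = -1;

        for (int i = 0; i <= nbits; i++) {
                int set = i < nbits && (bits >> i) & 1;

                if (set && start < 0)
                        start = i;
                if (!set && start >= 0) {
                        if (start == i - 1)
                                printf("%s%d", sep, start);
                        else
                                printf("%s%d-%d", sep, start, i - 1);
                        sep = ",";
                        start = -1;
                }
        }
        printf("\n");
}

int main(void)
{
        print_bitmap_list(0x37, 64);    /* prints 0-2,4-5 */
        return 0;
}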
/*
@@ -772,58 +829,73 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
/*
* Following tracepoints are not exported in tracefs and provide hooking
* mechanisms only for testing and debugging purposes.
- *
- * Postfixed with _tp to make them easily identifiable in the code.
*/
-DECLARE_TRACE(pelt_cfs_tp,
+DECLARE_TRACE(pelt_cfs,
TP_PROTO(struct cfs_rq *cfs_rq),
TP_ARGS(cfs_rq));
-DECLARE_TRACE(pelt_rt_tp,
+DECLARE_TRACE(pelt_rt,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_dl_tp,
+DECLARE_TRACE(pelt_dl,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_hw_tp,
+DECLARE_TRACE(pelt_hw,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_irq_tp,
+DECLARE_TRACE(pelt_irq,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(pelt_se_tp,
+DECLARE_TRACE(pelt_se,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
-DECLARE_TRACE(sched_cpu_capacity_tp,
+DECLARE_TRACE(sched_cpu_capacity,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
-DECLARE_TRACE(sched_overutilized_tp,
+DECLARE_TRACE(sched_overutilized,
TP_PROTO(struct root_domain *rd, bool overutilized),
TP_ARGS(rd, overutilized));
-DECLARE_TRACE(sched_util_est_cfs_tp,
+DECLARE_TRACE(sched_util_est_cfs,
TP_PROTO(struct cfs_rq *cfs_rq),
TP_ARGS(cfs_rq));
-DECLARE_TRACE(sched_util_est_se_tp,
+DECLARE_TRACE(sched_util_est_se,
TP_PROTO(struct sched_entity *se),
TP_ARGS(se));
-DECLARE_TRACE(sched_update_nr_running_tp,
+DECLARE_TRACE(sched_update_nr_running,
TP_PROTO(struct rq *rq, int change),
TP_ARGS(rq, change));
-DECLARE_TRACE(sched_compute_energy_tp,
+DECLARE_TRACE(sched_compute_energy,
TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
unsigned long max_util, unsigned long busy_time),
TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
+DECLARE_TRACE(sched_entry,
+ TP_PROTO(bool preempt),
+ TP_ARGS(preempt));
+
+DECLARE_TRACE(sched_exit,
+ TP_PROTO(bool is_switch),
+ TP_ARGS(is_switch));
+
+DECLARE_TRACE_CONDITION(sched_set_state,
+ TP_PROTO(struct task_struct *tsk, int state),
+ TP_ARGS(tsk, state),
+ TP_CONDITION(!!(tsk->__state) != !!state));
+
+DECLARE_TRACE(sched_set_need_resched,
+ TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+ TP_ARGS(tsk, cpu, tif));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched_ext.h b/include/trace/events/sched_ext.h
new file mode 100644
index 000000000000..d1bf5acd59c5
--- /dev/null
+++ b/include/trace/events/sched_ext.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched_ext
+
+#if !defined(_TRACE_SCHED_EXT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_EXT_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sched_ext_dump,
+
+ TP_PROTO(const char *line),
+
+ TP_ARGS(line),
+
+ TP_STRUCT__entry(
+ __string(line, line)
+ ),
+
+ TP_fast_assign(
+ __assign_str(line);
+ ),
+
+ TP_printk("%s",
+ __get_str(line)
+ )
+);
+
+TRACE_EVENT(sched_ext_event,
+ TP_PROTO(const char *name, __s64 delta),
+ TP_ARGS(name, delta),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field( __s64, delta )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->delta = delta;
+ ),
+
+ TP_printk("name %s delta %lld",
+ __get_str(name), __entry->delta
+ )
+);
+
+TRACE_EVENT(sched_ext_bypass_lb,
+
+ TP_PROTO(__u32 node, __u32 nr_cpus, __u32 nr_tasks, __u32 nr_balanced,
+ __u32 before_min, __u32 before_max,
+ __u32 after_min, __u32 after_max),
+
+ TP_ARGS(node, nr_cpus, nr_tasks, nr_balanced,
+ before_min, before_max, after_min, after_max),
+
+ TP_STRUCT__entry(
+ __field( __u32, node )
+ __field( __u32, nr_cpus )
+ __field( __u32, nr_tasks )
+ __field( __u32, nr_balanced )
+ __field( __u32, before_min )
+ __field( __u32, before_max )
+ __field( __u32, after_min )
+ __field( __u32, after_max )
+ ),
+
+ TP_fast_assign(
+ __entry->node = node;
+ __entry->nr_cpus = nr_cpus;
+ __entry->nr_tasks = nr_tasks;
+ __entry->nr_balanced = nr_balanced;
+ __entry->before_min = before_min;
+ __entry->before_max = before_max;
+ __entry->after_min = after_min;
+ __entry->after_max = after_max;
+ ),
+
+ TP_printk("node %u: nr_cpus=%u nr_tasks=%u nr_balanced=%u min=%u->%u max=%u->%u",
+ __entry->node, __entry->nr_cpus,
+ __entry->nr_tasks, __entry->nr_balanced,
+ __entry->before_min, __entry->after_min,
+ __entry->before_max, __entry->after_max
+ )
+);
+
+#endif /* _TRACE_SCHED_EXT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
index 127300481123..703b7bb68e44 100644
--- a/include/trace/events/scmi.h
+++ b/include/trace/events/scmi.h
@@ -36,8 +36,8 @@ TRACE_EVENT(scmi_fc_call,
TRACE_EVENT(scmi_xfer_begin,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- bool poll),
- TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),
+ bool poll, int inflight),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll, inflight),
TP_STRUCT__entry(
__field(int, transfer_id)
@@ -45,6 +45,7 @@ TRACE_EVENT(scmi_xfer_begin,
__field(u8, protocol_id)
__field(u16, seq)
__field(bool, poll)
+ __field(int, inflight)
),
TP_fast_assign(
@@ -53,11 +54,12 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->poll = poll;
+ __entry->inflight = inflight;
),
- TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u",
- __entry->protocol_id, __entry->msg_id, __entry->seq,
- __entry->transfer_id, __entry->poll)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u inflight=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->poll, __entry->inflight)
);
TRACE_EVENT(scmi_xfer_response_wait,
@@ -90,8 +92,8 @@ TRACE_EVENT(scmi_xfer_response_wait,
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
- int status),
- TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
+ int status, int inflight),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, status, inflight),
TP_STRUCT__entry(
__field(int, transfer_id)
@@ -99,6 +101,7 @@ TRACE_EVENT(scmi_xfer_end,
__field(u8, protocol_id)
__field(u16, seq)
__field(int, status)
+ __field(int, inflight)
),
TP_fast_assign(
@@ -107,11 +110,12 @@ TRACE_EVENT(scmi_xfer_end,
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->status = status;
+ __entry->inflight = inflight;
),
- TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d",
- __entry->protocol_id, __entry->msg_id, __entry->seq,
- __entry->transfer_id, __entry->status)
+ TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X s=%d inflight=%d",
+ __entry->protocol_id, __entry->msg_id, __entry->seq,
+ __entry->transfer_id, __entry->status, __entry->inflight)
);
TRACE_EVENT(scmi_rx_done,
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 8e2d9b1b0e77..c36c72ab7f2b 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -29,8 +29,8 @@
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
- scsi_opcode_name(RESERVE), \
- scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(RESERVE_6), \
+ scsi_opcode_name(RELEASE_6), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
@@ -102,6 +102,7 @@
scsi_opcode_name(WRITE_32), \
scsi_opcode_name(WRITE_SAME_32), \
scsi_opcode_name(ATA_16), \
+ scsi_opcode_name(WRITE_ATOMIC_16), \
scsi_opcode_name(ATA_12))
#define scsi_hostbyte_name(result) { result, #result }
@@ -199,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
+#define scsi_rtn_name(result) { result, #result }
+#define show_rtn_name(val) \
+ __print_symbolic(val, \
+ scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \
+ scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY))
+
TRACE_EVENT(scsi_dispatch_cmd_error,
TP_PROTO(struct scsi_cmnd *cmd, int rtn),
@@ -239,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
- " rtn=%d",
+ " rtn=%s",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
__entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- __entry->rtn)
+ show_rtn_name(__entry->rtn)
+ )
);
DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 07e0715628ec..b877133cd93a 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -24,13 +24,14 @@ DEFINE_DROP_REASON(FN, FN)
TRACE_EVENT(kfree_skb,
TP_PROTO(struct sk_buff *skb, void *location,
- enum skb_drop_reason reason),
+ enum skb_drop_reason reason, struct sock *rx_sk),
- TP_ARGS(skb, location, reason),
+ TP_ARGS(skb, location, reason, rx_sk),
TP_STRUCT__entry(
__field(void *, skbaddr)
__field(void *, location)
+ __field(void *, rx_sk)
__field(unsigned short, protocol)
__field(enum skb_drop_reason, reason)
),
@@ -38,12 +39,14 @@ TRACE_EVENT(kfree_skb,
TP_fast_assign(
__entry->skbaddr = skb;
__entry->location = location;
+ __entry->rx_sk = rx_sk;
__entry->protocol = ntohs(skb->protocol);
__entry->reason = reason;
),
- TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s",
- __entry->skbaddr, __entry->protocol, __entry->location,
+ TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s",
+ __entry->skbaddr, __entry->rx_sk, __entry->protocol,
+ __entry->location,
__print_symbolic(__entry->reason,
DEFINE_DROP_REASON(FN, FNe)))
);
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 3836de435d9d..b5310439536e 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -19,7 +19,6 @@
/* The protocol traced by inet_sock_set_state */
#define inet_protocol_names \
EM(IPPROTO_TCP) \
- EM(IPPROTO_DCCP) \
EM(IPPROTO_SCTP) \
EMe(IPPROTO_MPTCP)
diff --git a/include/trace/events/spi-mem.h b/include/trace/events/spi-mem.h
new file mode 100644
index 000000000000..d13f0bcff5e7
--- /dev/null
+++ b/include/trace/events/spi-mem.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spi-mem
+
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR spi_mem
+
+#if !defined(_TRACE_SPI_MEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPI_MEM_H
+
+#include <linux/tracepoint.h>
+#include <linux/spi/spi-mem.h>
+
+#define decode_dtr(dtr) \
+ __print_symbolic(dtr, \
+ { 0, "S" }, \
+ { 1, "D" })
+
+TRACE_EVENT(spi_mem_start_op,
+ TP_PROTO(struct spi_mem *mem, const struct spi_mem_op *op),
+ TP_ARGS(mem, op),
+
+ TP_STRUCT__entry(
+ __string(name, mem->name)
+ __dynamic_array(u8, op, 1 + op->addr.nbytes + op->dummy.nbytes)
+ __dynamic_array(u8, data, op->data.dir == SPI_MEM_DATA_OUT ?
+ min(op->data.nbytes, 64) : 0)
+ __field(u32, data_len)
+ __field(u32, max_freq)
+ __field(u8, cmd_buswidth)
+ __field(bool, cmd_dtr)
+ __field(u8, addr_buswidth)
+ __field(bool, addr_dtr)
+ __field(u8, dummy_nbytes)
+ __field(u8, data_buswidth)
+ __field(bool, data_dtr)
+ ),
+
+ TP_fast_assign(
+ int i;
+
+ __assign_str(name);
+ __entry->max_freq = op->max_freq ?: mem->spi->max_speed_hz;
+
+ __entry->cmd_buswidth = op->cmd.buswidth;
+ __entry->cmd_dtr = op->cmd.dtr;
+ *((u8 *)__get_dynamic_array(op)) = op->cmd.opcode;
+
+ __entry->addr_buswidth = op->addr.buswidth;
+ __entry->addr_dtr = op->addr.dtr;
+ for (i = 0; i < op->addr.nbytes; i++)
+ ((u8 *)__get_dynamic_array(op))[i + 1] =
+ op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ memset(((u8 *)__get_dynamic_array(op)) + op->addr.nbytes + 1,
+ 0xff, op->dummy.nbytes);
+
+ __entry->data_len = op->data.nbytes;
+ __entry->data_buswidth = op->data.buswidth;
+ __entry->data_dtr = op->data.dtr;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(__get_dynamic_array(data), op->data.buf.out,
+ __get_dynamic_array_len(data));
+ ),
+
+ TP_printk("%s %u%s-%u%s-%u%s @%u Hz op=[%*phD] len=%u tx=[%*phD]",
+ __get_str(name),
+ __entry->cmd_buswidth, decode_dtr(__entry->cmd_dtr),
+ __entry->addr_buswidth, decode_dtr(__entry->addr_dtr),
+ __entry->data_buswidth, decode_dtr(__entry->data_dtr),
+ __entry->max_freq,
+ __get_dynamic_array_len(op), __get_dynamic_array(op),
+ __entry->data_len,
+ __get_dynamic_array_len(data), __get_dynamic_array(data))
+);
+
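+The op[] dynamic array above is packed as one opcode byte, then the
+address in big-endian order, then 0xff filler per dummy byte. A
+standalone repack of the same layout with hypothetical values (0x0b
+fast-read, 3-byte address, 1 dummy byte):
+
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+        unsigned char opcode = 0x0b;
+        unsigned long long addr = 0x012345;
+        unsigned int addr_nbytes = 3, dummy_nbytes = 1;
+        unsigned char op[1 + 8 + 8];
+
+        op[0] = opcode;
+        for (unsigned int i = 0; i < addr_nbytes; i++)
+                op[i + 1] = addr >> (8 * (addr_nbytes - i - 1));
+        memset(op + 1 + addr_nbytes, 0xff, dummy_nbytes);
+
+        for (unsigned int i = 0; i < 1 + addr_nbytes + dummy_nbytes; i++)
+                printf("%02x ", op[i]);         /* 0b 01 23 45 ff */
+        printf("\n");
+        return 0;
+}
+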
+TRACE_EVENT(spi_mem_stop_op,
+ TP_PROTO(struct spi_mem *mem, const struct spi_mem_op *op),
+ TP_ARGS(mem, op),
+
+ TP_STRUCT__entry(
+ __string(name, mem->name)
+ __dynamic_array(u8, data, op->data.dir == SPI_MEM_DATA_IN ?
+ min(op->data.nbytes, 64) : 0)
+ __field(u32, data_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->data_len = op->data.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ memcpy(__get_dynamic_array(data), op->data.buf.in,
+ __get_dynamic_array_len(data));
+ ),
+
+ TP_printk("%s len=%u rx=[%*phD]",
+ __get_str(name),
+ __entry->data_len,
+ __get_dynamic_array_len(data), __get_dynamic_array(data))
+);
+
+
+#endif /* _TRACE_SPI_MEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 5e8495216689..750ecce56930 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -21,7 +21,6 @@ TRACE_DEFINE_ENUM(SOCK_DGRAM);
TRACE_DEFINE_ENUM(SOCK_RAW);
TRACE_DEFINE_ENUM(SOCK_RDM);
TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
-TRACE_DEFINE_ENUM(SOCK_DCCP);
TRACE_DEFINE_ENUM(SOCK_PACKET);
#define show_socket_type(type) \
@@ -31,7 +30,6 @@ TRACE_DEFINE_ENUM(SOCK_PACKET);
{ SOCK_RAW, "RAW" }, \
{ SOCK_RDM, "RDM" }, \
{ SOCK_SEQPACKET, "SEQPACKET" }, \
- { SOCK_DCCP, "DCCP" }, \
{ SOCK_PACKET, "PACKET" })
/* This list is known to be incomplete, add new enums as needed. */
@@ -343,6 +341,7 @@ TRACE_EVENT(rpc_request,
{ RPC_TASK_MOVEABLE, "MOVEABLE" }, \
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
+ { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
@@ -360,8 +359,7 @@ TRACE_EVENT(rpc_request,
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
- { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
DECLARE_EVENT_CLASS(rpc_task_running,
@@ -719,7 +717,7 @@ TRACE_EVENT(rpc_xdr_overflow,
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->requested, __entry->p, __entry->end,
@@ -777,7 +775,7 @@ TRACE_EVENT(rpc_xdr_alignment,
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n",
+ " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version, __get_str(procedure),
__entry->offset, __entry->copied,
@@ -1100,7 +1098,7 @@ TRACE_EVENT(xprt_transmit,
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->xid = be32_to_cpu(rqst->rq_xid);
- __entry->seqno = rqst->rq_seqno;
+ __entry->seqno = *rqst->rq_seqnos;
__entry->status = status;
),
@@ -1693,7 +1691,6 @@ SVC_RQST_FLAG_LIST
__print_flags(flags, "|", SVC_RQST_FLAG_LIST)
TRACE_DEFINE_ENUM(SVC_GARBAGE);
-TRACE_DEFINE_ENUM(SVC_SYSERR);
TRACE_DEFINE_ENUM(SVC_VALID);
TRACE_DEFINE_ENUM(SVC_NEGATIVE);
TRACE_DEFINE_ENUM(SVC_OK);
@@ -1706,7 +1703,6 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
- { SVC_SYSERR, "SVC_SYSERR" }, \
{ SVC_VALID, "SVC_VALID" }, \
{ SVC_NEGATIVE, "SVC_NEGATIVE" }, \
{ SVC_OK, "SVC_OK" }, \
@@ -2040,19 +2036,20 @@ TRACE_EVENT(svc_xprt_dequeue,
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
-
__field(unsigned long, wakeup)
+ __field(unsigned long, qtime)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+ ktime_t ktime = ktime_get();
- __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
- rqst->rq_qtime));
+ SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+ __entry->wakeup = ktime_to_us(ktime_sub(ktime, rqst->rq_qtime));
+ __entry->qtime = ktime_to_us(ktime_sub(ktime, rqst->rq_xprt->xpt_qtime));
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu qtime-us=%lu",
+ SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup, __entry->qtime)
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -2124,22 +2121,35 @@ TRACE_EVENT(svc_xprt_accept,
)
);
-TRACE_EVENT(svc_wake_up,
- TP_PROTO(int pid),
+DECLARE_EVENT_CLASS(svc_pool_thread_event,
+ TP_PROTO(const struct svc_pool *pool, pid_t pid),
- TP_ARGS(pid),
+ TP_ARGS(pool, pid),
TP_STRUCT__entry(
- __field(int, pid)
+ __field(unsigned int, pool_id)
+ __field(pid_t, pid)
),
TP_fast_assign(
+ __entry->pool_id = pool->sp_id;
__entry->pid = pid;
),
- TP_printk("pid=%d", __entry->pid)
+ TP_printk("pool=%u pid=%d", __entry->pool_id, __entry->pid)
);
+#define DEFINE_SVC_POOL_THREAD_EVENT(name) \
+ DEFINE_EVENT(svc_pool_thread_event, svc_pool_thread_##name, \
+ TP_PROTO( \
+ const struct svc_pool *pool, pid_t pid \
+ ), \
+ TP_ARGS(pool, pid))
+
+DEFINE_SVC_POOL_THREAD_EVENT(wake);
+DEFINE_SVC_POOL_THREAD_EVENT(running);
+DEFINE_SVC_POOL_THREAD_EVENT(noidle);
+
TRACE_EVENT(svc_alloc_arg_err,
TP_PROTO(
unsigned int requested,
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index b6e0cbc2c71f..f31ff446b468 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -15,7 +15,7 @@
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-TRACE_EVENT_FN(sys_enter,
+TRACE_EVENT_SYSCALL(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
@@ -41,7 +41,7 @@ TRACE_EVENT_FN(sys_enter,
TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
-TRACE_EVENT_FN(sys_exit,
+TRACE_EVENT_SYSCALL(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index a13cbf2b3405..7e2e20ba26f1 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -31,8 +31,8 @@
scsi_opcode_name(INQUIRY), \
scsi_opcode_name(RECOVER_BUFFERED_DATA), \
scsi_opcode_name(MODE_SELECT), \
- scsi_opcode_name(RESERVE), \
- scsi_opcode_name(RELEASE), \
+ scsi_opcode_name(RESERVE_6), \
+ scsi_opcode_name(RELEASE_6), \
scsi_opcode_name(COPY), \
scsi_opcode_name(ERASE), \
scsi_opcode_name(MODE_SENSE), \
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 47b527464d1a..4f0759634306 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -8,14 +8,14 @@
TRACE_EVENT(task_newtask,
- TP_PROTO(struct task_struct *task, unsigned long clone_flags),
+ TP_PROTO(struct task_struct *task, u64 clone_flags),
TP_ARGS(task, clone_flags),
TP_STRUCT__entry(
__field( pid_t, pid)
__array( char, comm, TASK_COMM_LEN)
- __field( unsigned long, clone_flags)
+ __field( u64, clone_flags)
__field( short, oom_score_adj)
),
@@ -26,7 +26,7 @@ TRACE_EVENT(task_newtask,
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d comm=%s clone_flags=%lx oom_score_adj=%hd",
+ TP_printk("pid=%d comm=%s clone_flags=%llx oom_score_adj=%hd",
__entry->pid, __entry->comm,
__entry->clone_flags, __entry->oom_score_adj)
);
@@ -38,22 +38,56 @@ TRACE_EVENT(task_rename,
TP_ARGS(task, comm),
TP_STRUCT__entry(
- __field( pid_t, pid)
__array( char, oldcomm, TASK_COMM_LEN)
__array( char, newcomm, TASK_COMM_LEN)
__field( short, oom_score_adj)
),
TP_fast_assign(
- __entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
- __entry->pid, __entry->oldcomm,
- __entry->newcomm, __entry->oom_score_adj)
+ TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd",
+ __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj)
+);
+
+/**
+ * task_prctl_unknown - called on unknown prctl() option
+ * @option: option passed
+ * @arg2: arg2 passed
+ * @arg3: arg3 passed
+ * @arg4: arg4 passed
+ * @arg5: arg5 passed
+ *
+ * Called on an unknown prctl() option.
+ */
+TRACE_EVENT(task_prctl_unknown,
+
+ TP_PROTO(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5),
+
+ TP_ARGS(option, arg2, arg3, arg4, arg5),
+
+ TP_STRUCT__entry(
+ __field( int, option)
+ __field( unsigned long, arg2)
+ __field( unsigned long, arg3)
+ __field( unsigned long, arg4)
+ __field( unsigned long, arg5)
+ ),
+
+ TP_fast_assign(
+ __entry->option = option;
+ __entry->arg2 = arg2;
+ __entry->arg3 = arg3;
+ __entry->arg4 = arg4;
+ __entry->arg5 = arg5;
+ ),
+
+ TP_printk("option=%d arg2=%ld arg3=%ld arg4=%ld arg5=%ld",
+ __entry->option, __entry->arg2, __entry->arg3, __entry->arg4, __entry->arg5)
);
#endif
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 49b5ee091cf6..6757233bd064 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -13,17 +13,11 @@
#include <linux/sock_diag.h>
#include <net/rstreason.h>
-/*
- * tcp event with arguments sk and skb
- *
- * Note: this class requires a valid sk pointer; while skb pointer could
- * be NULL.
- */
-DECLARE_EVENT_CLASS(tcp_event_sk_skb,
+TRACE_EVENT(tcp_retransmit_skb,
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb, int err),
- TP_ARGS(sk, skb),
+ TP_ARGS(sk, skb, err),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -36,6 +30,7 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
__array(__u8, daddr_v6, 16)
+ __field(int, err)
),
TP_fast_assign(
@@ -58,21 +53,17 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->err = err;
),
- TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s",
+ TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s err=%d",
__entry->skbaddr, __entry->skaddr,
show_family_name(__entry->family),
__entry->sport, __entry->dport, __entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
- show_tcp_state_name(__entry->state))
-);
-
-DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
-
- TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
-
- TP_ARGS(sk, skb)
+ show_tcp_state_name(__entry->state),
+ __entry->err)
);
#undef FN
@@ -91,10 +82,10 @@ DEFINE_RST_REASON(FN, FN)
TRACE_EVENT(tcp_send_reset,
TP_PROTO(const struct sock *sk,
- const struct sk_buff *skb,
+ const struct sk_buff *skb__nullable,
const enum sk_rst_reason reason),
- TP_ARGS(sk, skb, reason),
+ TP_ARGS(sk, skb__nullable, reason),
TP_STRUCT__entry(
__field(const void *, skbaddr)
@@ -106,7 +97,7 @@ TRACE_EVENT(tcp_send_reset,
),
TP_fast_assign(
- __entry->skbaddr = skb;
+ __entry->skbaddr = skb__nullable;
__entry->skaddr = sk;
/* Zero means unknown state. */
__entry->state = sk ? sk->sk_state : 0;
@@ -118,13 +109,13 @@ TRACE_EVENT(tcp_send_reset,
const struct inet_sock *inet = inet_sk(sk);
TP_STORE_ADDR_PORTS(__entry, inet, sk);
- } else if (skb) {
- const struct tcphdr *th = (const struct tcphdr *)skb->data;
+ } else if (skb__nullable) {
+ const struct tcphdr *th = (const struct tcphdr *)skb__nullable->data;
/*
* We should reverse the 4-tuple of skb, so later
* it can print the right flow direction of rst.
*/
- TP_STORE_ADDR_PORTS_SKB(skb, th, entry->daddr, entry->saddr);
+ TP_STORE_ADDR_PORTS_SKB(skb__nullable, th, entry->daddr, entry->saddr);
}
__entry->reason = reason;
),
@@ -213,6 +204,88 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust,
TP_ARGS(sk)
);
+TRACE_EVENT(tcp_rcvbuf_grow,
+
+ TP_PROTO(struct sock *sk, int time),
+
+ TP_ARGS(sk, time),
+
+ TP_STRUCT__entry(
+ __field(int, time)
+ __field(__u32, rtt_us)
+ __field(__u32, copied)
+ __field(__u32, inq)
+ __field(__u32, space)
+ __field(__u32, ooo_space)
+ __field(__u32, rcvbuf)
+ __field(__u32, rcv_ssthresh)
+ __field(__u32, window_clamp)
+ __field(__u32, rcv_wnd)
+ __field(__u8, scaling_ratio)
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+ __array(__u8, saddr, 4)
+ __array(__u8, daddr, 4)
+ __array(__u8, saddr_v6, 16)
+ __array(__u8, daddr_v6, 16)
+ __field(const void *, skaddr)
+ __field(__u64, sock_cookie)
+ ),
+
+ TP_fast_assign(
+ struct inet_sock *inet = inet_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ __be32 *p32;
+
+ __entry->time = time;
+ __entry->rtt_us = tp->rcv_rtt_est.rtt_us >> 3;
+ __entry->copied = tp->copied_seq - tp->rcvq_space.seq;
+ __entry->inq = tp->rcv_nxt - tp->copied_seq;
+ __entry->space = tp->rcvq_space.space;
+ __entry->ooo_space = RB_EMPTY_ROOT(&tp->out_of_order_queue) ? 0 :
+ TCP_SKB_CB(tp->ooo_last_skb)->end_seq -
+ tp->rcv_nxt;
+
+ __entry->rcvbuf = sk->sk_rcvbuf;
+ __entry->rcv_ssthresh = tp->rcv_ssthresh;
+ __entry->window_clamp = tp->window_clamp;
+ __entry->rcv_wnd = tp->rcv_wnd;
+ __entry->scaling_ratio = tp->scaling_ratio;
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ p32 = (__be32 *) __entry->saddr;
+ *p32 = inet->inet_saddr;
+
+ p32 = (__be32 *) __entry->daddr;
+ *p32 = inet->inet_daddr;
+
+ TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+ sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
+
+ __entry->skaddr = sk;
+ __entry->sock_cookie = sock_gen_cookie(sk);
+ ),
+
+ TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u "
+ "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u "
+ "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "
+ "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx",
+ __entry->time, __entry->rtt_us, __entry->copied,
+ __entry->inq, __entry->space, __entry->ooo_space,
+ __entry->scaling_ratio, __entry->rcvbuf,
+ __entry->rcv_ssthresh, __entry->window_clamp,
+ __entry->rcv_wnd,
+ show_family_name(__entry->family),
+ __entry->sport, __entry->dport,
+ __entry->saddr, __entry->daddr,
+ __entry->saddr_v6, __entry->daddr_v6,
+ __entry->skaddr,
+ __entry->sock_cookie)
+);
+
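+tcp_rcvbuf_grow instruments receive-buffer autotuning: copied is what
+the application drained over the last measurement window, space is the
+previous estimate, and the buffer grows when the former outpaces the
+latter. A toy model of that feedback loop (an assumed simplification,
+not tcp_rcv_space_adjust() verbatim):
+
+#include <stdio.h>
+
+static unsigned int space = 65536, rcvbuf = 131072;
+
+static void rcv_space_adjust(unsigned int copied)
+{
+        if (copied <= space)
+                return;                 /* estimate still sufficient */
+        space = copied * 2;             /* headroom for the next RTT */
+        if (rcvbuf < space)
+                rcvbuf = space;         /* grow the socket buffer */
+}
+
+int main(void)
+{
+        rcv_space_adjust(80000);
+        printf("space=%u rcvbuf=%u\n", space, rcvbuf);  /* 160000 160000 */
+        return 0;
+}
+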
TRACE_EVENT(tcp_retransmit_synack,
TP_PROTO(const struct sock *sk, const struct request_sock *req),
@@ -259,11 +332,41 @@ TRACE_EVENT(tcp_retransmit_synack,
__entry->saddr_v6, __entry->daddr_v6)
);
+TRACE_EVENT(tcp_sendmsg_locked,
+ TP_PROTO(const struct sock *sk, const struct msghdr *msg,
+ const struct sk_buff *skb, int size_goal),
+
+ TP_ARGS(sk, msg, skb, size_goal),
+
+ TP_STRUCT__entry(
+ __field(const void *, skb_addr)
+ __field(int, skb_len)
+ __field(int, msg_left)
+ __field(int, size_goal)
+ ),
+
+ TP_fast_assign(
+ __entry->skb_addr = skb;
+ __entry->skb_len = skb ? skb->len : 0;
+ __entry->msg_left = msg_data_left(msg);
+ __entry->size_goal = size_goal;
+ ),
+
+ TP_printk("skb_addr %p skb_len %d msg_left %d size_goal %d",
+ __entry->skb_addr, __entry->skb_len, __entry->msg_left,
+ __entry->size_goal));
+
+DECLARE_TRACE(tcp_cwnd_reduction,
+ TP_PROTO(const struct sock *sk, int newly_acked_sacked,
+ int newly_lost, int flag),
+ TP_ARGS(sk, newly_acked_sacked, newly_lost, flag)
+);
+
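+tcp_cwnd_reduction uses DECLARE_TRACE rather than TRACE_EVENT, so it
+gets no tracefs entry; kernel code attaches to it directly via the
+generated register_trace_##name() helper. A probe-module sketch (the
+probe signature must mirror TP_PROTO, with a leading void *data; error
+handling kept minimal):
+
+#include <linux/module.h>
+#include <net/sock.h>
+#include <trace/events/tcp.h>
+
+static void probe_cwnd_reduction(void *data, const struct sock *sk,
+                                 int newly_acked_sacked, int newly_lost,
+                                 int flag)
+{
+        pr_debug("cwnd reduction: acked+sacked=%d lost=%d flag=%#x\n",
+                 newly_acked_sacked, newly_lost, flag);
+}
+
+static int __init probe_init(void)
+{
+        return register_trace_tcp_cwnd_reduction(probe_cwnd_reduction, NULL);
+}
+
+static void __exit probe_exit(void)
+{
+        unregister_trace_tcp_cwnd_reduction(probe_cwnd_reduction, NULL);
+        tracepoint_synchronize_unregister();
+}
+
+module_init(probe_init);
+module_exit(probe_exit);
+MODULE_DESCRIPTION("tcp_cwnd_reduction probe sketch");
+MODULE_LICENSE("GPL");
+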
#include <trace/events/net_probe_common.h>
TRACE_EVENT(tcp_probe,
- TP_PROTO(struct sock *sk, struct sk_buff *skb),
+ TP_PROTO(struct sock *sk, const struct sk_buff *skb),
TP_ARGS(sk, skb),
@@ -411,6 +514,325 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->cong_state)
);
+DECLARE_EVENT_CLASS(tcp_hash_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c]",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ')
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_bad_header,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_unexpected,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_mismatch,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DEFINE_EVENT(tcp_hash_event, tcp_hash_ao_required,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
+ TP_ARGS(sk, skb)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event,
+
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+
+ TP_ARGS(sk, skb, keyid, rnext, maclen),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skbaddr)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(int, l3index)
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(bool, fin)
+ __field(bool, syn)
+ __field(bool, rst)
+ __field(bool, psh)
+ __field(bool, ack)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ __field(__u8, maclen)
+ ),
+
+ TP_fast_assign(
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skbaddr = skb;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr);
+ __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0;
+
+ /* For filtering use */
+ __entry->sport = ntohs(th->source);
+ __entry->dport = ntohs(th->dest);
+ __entry->family = sk->sk_family;
+
+ __entry->fin = th->fin;
+ __entry->syn = th->syn;
+ __entry->rst = th->rst;
+ __entry->psh = th->psh;
+ __entry->ack = th->ack;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ __entry->maclen = maclen;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c] keyid=%u rnext=%u maclen=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->l3index,
+ __entry->fin ? 'F' : ' ',
+ __entry->syn ? 'S' : ' ',
+ __entry->rst ? 'R' : ' ',
+ __entry->psh ? 'P' : ' ',
+ __entry->ack ? '.' : ' ',
+ __entry->keyid, __entry->rnext, __entry->maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+#ifdef CONFIG_TCP_AO
+DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_mismatch,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_key_not_found,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DEFINE_EVENT(tcp_ao_event, tcp_ao_rnext_request,
+ TP_PROTO(const struct sock *sk, const struct sk_buff *skb,
+ const __u8 keyid, const __u8 rnext, const __u8 maclen),
+ TP_ARGS(sk, skb, keyid, rnext, maclen)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sk,
+
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+
+ TP_ARGS(sk, keyid, rnext),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u8, keyid)
+ __field(__u8, rnext)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->keyid = keyid;
+ __entry->rnext = rnext;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc keyid=%u rnext=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->keyid, __entry->rnext)
+);
+
+DEFINE_EVENT(tcp_ao_event_sk, tcp_ao_synack_no_key,
+ TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext),
+ TP_ARGS(sk, keyid, rnext)
+);
+
+DECLARE_EVENT_CLASS(tcp_ao_event_sne,
+
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+
+ TP_ARGS(sk, new_sne),
+
+ TP_STRUCT__entry(
+ __field(__u64, net_cookie)
+ __field(const void *, skaddr)
+ __field(int, state)
+
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+
+ __field(__u16, sport)
+ __field(__u16, dport)
+ __field(__u16, family)
+
+ __field(__u32, new_sne)
+ ),
+
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ __entry->net_cookie = sock_net(sk)->net_cookie;
+ __entry->skaddr = sk;
+ __entry->state = sk->sk_state;
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ /* For filtering use */
+ __entry->sport = ntohs(inet->inet_sport);
+ __entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
+
+ __entry->new_sne = new_sne;
+ ),
+
+ TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc sne=%u",
+ __entry->net_cookie,
+ show_tcp_state_name(__entry->state),
+ show_family_name(__entry->family),
+ __entry->saddr, __entry->daddr,
+ __entry->new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_snd_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+
+DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update,
+ TP_PROTO(const struct sock *sk, __u32 new_sne),
+ TP_ARGS(sk, new_sne)
+);
+#endif /* CONFIG_TCP_AO */
+
#endif /* _TRACE_TCP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index f50048af5fcc..c8fe879d5828 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_EVENT_CLASS(hugepage_set,
TP_PROTO(unsigned long addr, unsigned long pte),
@@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud,
TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
TP_ARGS(addr, pud, clr, set)
);
+#endif /* CONFIG_PPC_BOOK3S_64 */
DECLARE_EVENT_CLASS(migration_pmd,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 1ef58a04fc57..1641ae3e6ca0 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -185,12 +185,12 @@ TRACE_EVENT(timer_base_idle,
{ HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
/**
- * hrtimer_init - called when the hrtimer is initialized
+ * hrtimer_setup - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
* @clockid: the hrtimers clock
* @mode: the hrtimers mode
*/
-TRACE_EVENT(hrtimer_init,
+TRACE_EVENT(hrtimer_setup,
TP_PROTO(struct hrtimer *hrtimer, clockid_t clockid,
enum hrtimer_mode mode),
@@ -235,7 +235,7 @@ TRACE_EVENT(hrtimer_start,
TP_fast_assign(
__entry->hrtimer = hrtimer;
- __entry->function = hrtimer->function;
+ __entry->function = ACCESS_PRIVATE(hrtimer, function);
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
__entry->mode = mode;
@@ -271,7 +271,7 @@ TRACE_EVENT(hrtimer_expire_entry,
TP_fast_assign(
__entry->hrtimer = hrtimer;
__entry->now = *now;
- __entry->function = hrtimer->function;
+ __entry->function = ACCESS_PRIVATE(hrtimer, function);
),
TP_printk("hrtimer=%p function=%ps now=%llu",
diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
index 79f19e76a80b..61171b13c687 100644
--- a/include/trace/events/timer_migration.h
+++ b/include/trace/events/timer_migration.h
@@ -43,7 +43,7 @@ TRACE_EVENT(tmigr_connect_child_parent,
__field( unsigned int, lvl )
__field( unsigned int, numa_node )
__field( unsigned int, num_children )
- __field( u32, childmask )
+ __field( u32, groupmask )
),
TP_fast_assign(
@@ -52,11 +52,11 @@ TRACE_EVENT(tmigr_connect_child_parent,
__entry->lvl = child->parent->level;
__entry->numa_node = child->parent->numa_node;
__entry->num_children = child->parent->num_children;
- __entry->childmask = child->childmask;
+ __entry->groupmask = child->groupmask;
),
- TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
- __entry->child, __entry->childmask, __entry->parent,
+ TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->child, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children)
);
@@ -72,7 +72,7 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__field( unsigned int, lvl )
__field( unsigned int, numa_node )
__field( unsigned int, num_children )
- __field( u32, childmask )
+ __field( u32, groupmask )
),
TP_fast_assign(
@@ -81,11 +81,11 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__entry->lvl = tmc->tmgroup->level;
__entry->numa_node = tmc->tmgroup->numa_node;
__entry->num_children = tmc->tmgroup->num_children;
- __entry->childmask = tmc->childmask;
+ __entry->groupmask = tmc->groupmask;
),
- TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
- __entry->cpu, __entry->childmask, __entry->parent,
+ TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
+ __entry->cpu, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children)
);
@@ -173,14 +173,14 @@ DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_available,
TP_PROTO(struct tmigr_cpu *tmc),
TP_ARGS(tmc)
);
-DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_unavailable,
TP_PROTO(struct tmigr_cpu *tmc),
diff --git a/include/trace/events/timestamp.h b/include/trace/events/timestamp.h
new file mode 100644
index 000000000000..c9e5ec930054
--- /dev/null
+++ b/include/trace/events/timestamp.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timestamp
+
+#if !defined(_TRACE_TIMESTAMP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMESTAMP_H
+
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+
+#define CTIME_QUERIED_FLAGS \
+ { I_CTIME_QUERIED, "Q" }
+
+DECLARE_EVENT_CLASS(ctime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+
+ TP_ARGS(inode, ctime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(u32, ctime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns
+ )
+);
+
+DEFINE_EVENT(ctime, inode_set_ctime_to_ts,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
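For reference, an event that depends on these locals passes its count
through the helper in TP_ARGS, e.g. (an illustrative sched_stat-style
usage, not from this patch):

    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(tsk, __perf_count(delay)),

__perf_count(delay) expands to (__count = (delay)), so the wrappers
above must declare __count for @args to compile.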
+DEFINE_EVENT(ctime, ctime_xchg_skip,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime),
+ TP_ARGS(inode, ctime));
+
+TRACE_EVENT(ctime_ns_xchg,
+ TP_PROTO(struct inode *inode,
+ u32 old,
+ u32 new,
+ u32 cur),
+
+ TP_ARGS(inode, old, new, cur),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(u32, gen)
+ __field(u32, old)
+ __field(u32, new)
+ __field(u32, cur)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->old = old;
+ __entry->new = new;
+ __entry->cur = cur;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->old & ~I_CTIME_QUERIED,
+ __print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS),
+ __entry->new,
+ __entry->cur & ~I_CTIME_QUERIED,
+ __print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS)
+ )
+);
+
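A sketch of the value being decoded here, assuming the ctime nanoseconds
field carries I_CTIME_QUERIED in an otherwise unused bit (the field name
i_ctime_nsec is an assumption, not shown in this patch):

    u32 raw = READ_ONCE(inode->i_ctime_nsec);  /* may carry the flag */
    u32 ns = raw & ~I_CTIME_QUERIED;           /* plain nanoseconds  */
    bool queried = raw & I_CTIME_QUERIED;      /* ctime was observed */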
+TRACE_EVENT(fill_mg_cmtime,
+ TP_PROTO(struct inode *inode,
+ struct timespec64 *ctime,
+ struct timespec64 *mtime),
+
+ TP_ARGS(inode, ctime, mtime),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(time64_t, ctime_s)
+ __field(time64_t, mtime_s)
+ __field(u32, ctime_ns)
+ __field(u32, mtime_ns)
+ __field(u32, gen)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->gen = inode->i_generation;
+ __entry->ctime_s = ctime->tv_sec;
+ __entry->mtime_s = mtime->tv_sec;
+ __entry->ctime_ns = ctime->tv_nsec;
+ __entry->mtime_ns = mtime->tv_nsec;
+ ),
+
+ TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen,
+ __entry->ctime_s, __entry->ctime_ns,
+ __entry->mtime_s, __entry->mtime_ns
+ )
+);
+#endif /* _TRACE_TIMESTAMP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/tsm_mr.h b/include/trace/events/tsm_mr.h
new file mode 100644
index 000000000000..f40de4ad3e2d
--- /dev/null
+++ b/include/trace/events/tsm_mr.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tsm_mr
+
+#if !defined(_TRACE_TSM_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TSM_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/tsm-mr.h>
+
+TRACE_EVENT(tsm_mr_read,
+
+ TP_PROTO(const struct tsm_measurement_register *mr),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __string(hash, mr->mr_flags & TSM_MR_F_NOHASH ?
+ "data" : hash_algo_name[mr->mr_hash])
+ __dynamic_array(u8, d, mr->mr_size)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __assign_str(hash);
+ memcpy(__get_dynamic_array(d), mr->mr_value, __get_dynamic_array_len(d));
+ ),
+
+ TP_printk("[%s] %s:%s", __get_str(mr), __get_str(hash),
+ __print_hex_str(__get_dynamic_array(d), __get_dynamic_array_len(d)))
+);
+
+TRACE_EVENT(tsm_mr_refresh,
+
+ TP_PROTO(const struct tsm_measurement_register *mr, int rc),
+
+ TP_ARGS(mr, rc),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __entry->rc = rc;
+ ),
+
+ TP_printk("[%s] %s:%d", __get_str(mr),
+ __entry->rc ? "failed" : "succeeded", __entry->rc)
+);
+
+TRACE_EVENT(tsm_mr_write,
+
+ TP_PROTO(const struct tsm_measurement_register *mr, const u8 *data),
+
+ TP_ARGS(mr, data),
+
+ TP_STRUCT__entry(
+ __string(mr, mr->mr_name)
+ __string(hash, mr->mr_flags & TSM_MR_F_NOHASH ?
+ "data" : hash_algo_name[mr->mr_hash])
+ __dynamic_array(u8, d, mr->mr_size)
+ ),
+
+ TP_fast_assign(
+ __assign_str(mr);
+ __assign_str(hash);
+ memcpy(__get_dynamic_array(d), data, __get_dynamic_array_len(d));
+ ),
+
+ TP_printk("[%s] %s:%s", __get_str(mr), __get_str(hash),
+ __print_hex_str(__get_dynamic_array(d), __get_dynamic_array_len(d)))
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
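A hypothetical measurement-register descriptor that these events would
report on; only the fields referenced above are shown, and the values
are illustrative:

    static const struct tsm_measurement_register mr = {
            .mr_name  = "RTMR0",            /* hypothetical register */
            .mr_size  = 48,                 /* SHA-384 digest length */
            .mr_hash  = HASH_ALGO_SHA384,
            .mr_flags = 0,                  /* hashed: TSM_MR_F_NOHASH clear */
    };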
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
deleted file mode 100644
index c4e209fbdfbb..000000000000
--- a/include/trace/events/ufs.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ufs
-
-#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_UFS_H
-
-#include <linux/tracepoint.h>
-
-#define str_opcode(opcode) \
- __print_symbolic(opcode, \
- { WRITE_16, "WRITE_16" }, \
- { WRITE_10, "WRITE_10" }, \
- { READ_16, "READ_16" }, \
- { READ_10, "READ_10" }, \
- { SYNCHRONIZE_CACHE, "SYNC" }, \
- { UNMAP, "UNMAP" })
-
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
- EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
- EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
- EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
- EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
- EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF, "CLKS_OFF") \
- EM(CLKS_ON, "CLKS_ON") \
- EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
- EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
-
-#define UFS_CMD_TRACE_STRINGS \
- EM(UFS_CMD_SEND, "send_req") \
- EM(UFS_CMD_COMP, "complete_rsp") \
- EM(UFS_DEV_COMP, "dev_complete") \
- EM(UFS_QUERY_SEND, "query_send") \
- EM(UFS_QUERY_COMP, "query_complete") \
- EM(UFS_QUERY_ERR, "query_complete_err") \
- EM(UFS_TM_SEND, "tm_send") \
- EM(UFS_TM_COMP, "tm_complete") \
- EMe(UFS_TM_ERR, "tm_complete_err")
-
-#define UFS_CMD_TRACE_TSF_TYPES \
- EM(UFS_TSF_CDB, "CDB") \
- EM(UFS_TSF_OSF, "OSF") \
- EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
- EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
-
-/* Enums require being exported to userspace, for user tool parsing */
-#undef EM
-#undef EMe
-#define EM(a, b) TRACE_DEFINE_ENUM(a);
-#define EMe(a, b) TRACE_DEFINE_ENUM(a);
-
-UFS_LINK_STATES;
-UFS_PWR_MODES;
-UFSCHD_CLK_GATING_STATES;
-UFS_CMD_TRACE_STRINGS
-UFS_CMD_TRACE_TSF_TYPES
-
-/*
- * Now redefine the EM() and EMe() macros to map the enums to the strings
- * that will be printed in the output.
- */
-#undef EM
-#undef EMe
-#define EM(a, b) {a, b},
-#define EMe(a, b) {a, b}
-
-#define show_ufs_cmd_trace_str(str_t) \
- __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
-#define show_ufs_cmd_trace_tsf(tsf) \
- __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
-
-TRACE_EVENT(ufshcd_clk_gating,
-
- TP_PROTO(const char *dev_name, int state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(int, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->state = state;
- ),
-
- TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
- __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
-);
-
-TRACE_EVENT(ufshcd_clk_scaling,
-
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
- u32 prev_state, u32 curr_state),
-
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- __string(clk, clk)
- __field(u32, prev_state)
- __field(u32, curr_state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(state);
- __assign_str(clk);
- __entry->prev_state = prev_state;
- __entry->curr_state = curr_state;
- ),
-
- TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
- __entry->prev_state, __entry->curr_state)
-);
-
-TRACE_EVENT(ufshcd_auto_bkops_state,
-
- TP_PROTO(const char *dev_name, const char *state),
-
- TP_ARGS(dev_name, state),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(state, state)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(state);
- ),
-
- TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
-);
-
-DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
-
- TP_ARGS(dev_name, profile_info, time_us, err),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __string(profile_info, profile_info)
- __field(s64, time_us)
- __field(int, err)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __assign_str(profile_info);
- __entry->time_us = time_us;
- __entry->err = err;
- ),
-
- TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
- __entry->time_us, __entry->err)
-);
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
- int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
-
-DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
-
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
-
- TP_STRUCT__entry(
- __field(s64, usecs)
- __field(int, err)
- __string(dev_name, dev_name)
- __field(int, dev_state)
- __field(int, link_state)
- ),
-
- TP_fast_assign(
- __entry->usecs = usecs;
- __entry->err = err;
- __assign_str(dev_name);
- __entry->dev_state = dev_state;
- __entry->link_state = link_state;
- ),
-
- TP_printk(
- "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
- __entry->usecs,
- __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
- __print_symbolic(__entry->link_state, UFS_LINK_STATES),
- __entry->err
- )
-);
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
- int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
-
-TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
- unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
- u32 intr, u64 lba, u8 opcode, u8 group_id),
-
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
- opcode, group_id),
-
- TP_STRUCT__entry(
- __field(struct scsi_device *, sdev)
- __field(enum ufs_trace_str_t, str_t)
- __field(unsigned int, tag)
- __field(u32, doorbell)
- __field(u32, hwq_id)
- __field(u32, intr)
- __field(u64, lba)
- __field(int, transfer_len)
- __field(u8, opcode)
- __field(u8, group_id)
- ),
-
- TP_fast_assign(
- __entry->sdev = sdev;
- __entry->str_t = str_t;
- __entry->tag = tag;
- __entry->doorbell = doorbell;
- __entry->hwq_id = hwq_id;
- __entry->intr = intr;
- __entry->lba = lba;
- __entry->transfer_len = transfer_len;
- __entry->opcode = opcode;
- __entry->group_id = group_id;
- ),
-
- TP_printk(
- "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
- show_ufs_cmd_trace_str(__entry->str_t),
- dev_name(&__entry->sdev->sdev_dev), __entry->tag,
- __entry->doorbell, __entry->transfer_len, __entry->intr,
- __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
- (u32)__entry->group_id, __entry->hwq_id
- )
-);
-
-TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
- u32 arg1, u32 arg2, u32 arg3),
-
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __field(u32, cmd)
- __field(u32, arg1)
- __field(u32, arg2)
- __field(u32, arg3)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->str_t = str_t;
- __entry->cmd = cmd;
- __entry->arg1 = arg1;
- __entry->arg2 = arg2;
- __entry->arg3 = arg3;
- ),
-
- TP_printk(
- "%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
- )
-);
-
-TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
- void *tsf, enum ufs_trace_tsf_t tsf_t),
-
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(enum ufs_trace_str_t, str_t)
- __array(unsigned char, hdr, 12)
- __array(unsigned char, tsf, 16)
- __field(enum ufs_trace_tsf_t, tsf_t)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->str_t = str_t;
- memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
- memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
- __entry->tsf_t = tsf_t;
- ),
-
- TP_printk(
- "%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __print_hex(__entry->hdr, sizeof(__entry->hdr)),
- show_ufs_cmd_trace_tsf(__entry->tsf_t),
- __print_hex(__entry->tsf, sizeof(__entry->tsf))
- )
-);
-
-TRACE_EVENT(ufshcd_exception_event,
-
- TP_PROTO(const char *dev_name, u16 status),
-
- TP_ARGS(dev_name, status),
-
- TP_STRUCT__entry(
- __string(dev_name, dev_name)
- __field(u16, status)
- ),
-
- TP_fast_assign(
- __assign_str(dev_name);
- __entry->status = status;
- ),
-
- TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
- )
-);
-
-#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 1a488c30afa5..490958fa10de 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -346,6 +346,51 @@ TRACE_EVENT(mm_vmscan_write_folio,
show_reclaim_flags(__entry->reclaim_flags))
);
+TRACE_EVENT(mm_vmscan_reclaim_pages,
+
+ TP_PROTO(int nid,
+ unsigned long nr_scanned, unsigned long nr_reclaimed,
+ struct reclaim_stat *stat),
+
+ TP_ARGS(nid, nr_scanned, nr_reclaimed, stat),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_reclaimed)
+ __field(unsigned long, nr_dirty)
+ __field(unsigned long, nr_writeback)
+ __field(unsigned long, nr_congested)
+ __field(unsigned long, nr_immediate)
+ __field(unsigned int, nr_activate0)
+ __field(unsigned int, nr_activate1)
+ __field(unsigned long, nr_ref_keep)
+ __field(unsigned long, nr_unmap_fail)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->nr_scanned = nr_scanned;
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->nr_dirty = stat->nr_dirty;
+ __entry->nr_writeback = stat->nr_writeback;
+ __entry->nr_congested = stat->nr_congested;
+ __entry->nr_immediate = stat->nr_immediate;
+ __entry->nr_activate0 = stat->nr_activate[0];
+ __entry->nr_activate1 = stat->nr_activate[1];
+ __entry->nr_ref_keep = stat->nr_ref_keep;
+ __entry->nr_unmap_fail = stat->nr_unmap_fail;
+ ),
+
+ TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld",
+ __entry->nid,
+ __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->nr_dirty, __entry->nr_writeback,
+ __entry->nr_congested, __entry->nr_immediate,
+ __entry->nr_activate0, __entry->nr_activate1,
+ __entry->nr_ref_keep, __entry->nr_unmap_fail)
+);
+
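A caller sketch (the reclaim call sites are outside this hunk; variable
names are illustrative):

    struct reclaim_stat stat = { };
    /* ... shrink a folio list, filling nr_scanned/nr_reclaimed/stat ... */
    trace_mm_vmscan_reclaim_pages(pgdat->node_id, nr_scanned,
                                  nr_reclaimed, &stat);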
TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 54e353c9f919..311a341e6fe4 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -20,7 +20,15 @@
{I_CLEAR, "I_CLEAR"}, \
{I_SYNC, "I_SYNC"}, \
{I_DIRTY_TIME, "I_DIRTY_TIME"}, \
- {I_REFERENCED, "I_REFERENCED"} \
+ {I_REFERENCED, "I_REFERENCED"}, \
+ {I_LINKABLE, "I_LINKABLE"}, \
+ {I_WB_SWITCH, "I_WB_SWITCH"}, \
+ {I_OVL_INUSE, "I_OVL_INUSE"}, \
+ {I_CREATING, "I_CREATING"}, \
+ {I_DONTCACHE, "I_DONTCACHE"}, \
+ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \
+ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \
+ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \
)
/* enums need to be exported to user space */
@@ -112,7 +120,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
/* may be called for files on pseudo FSes w/ unregistered bdi */
strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->flags = flags;
),
@@ -205,6 +213,35 @@ TRACE_EVENT(inode_foreign_history,
)
);
+TRACE_EVENT(inode_switch_wbs_queue,
+
+ TP_PROTO(struct bdi_writeback *old_wb, struct bdi_writeback *new_wb,
+ unsigned int count),
+
+ TP_ARGS(old_wb, new_wb, count),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(ino_t, old_cgroup_ino)
+ __field(ino_t, new_cgroup_ino)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32);
+ __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
+ __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
+ __entry->count = count;
+ ),
+
+ TP_printk("bdi %s: old_cgroup_ino=%lu new_cgroup_ino=%lu count=%u",
+ __entry->name,
+ (unsigned long)__entry->old_cgroup_ino,
+ (unsigned long)__entry->new_cgroup_ino,
+ __entry->count
+ )
+);
+
TRACE_EVENT(inode_switch_wbs,
TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb,
@@ -451,7 +488,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
- __field(int, for_reclaim)
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
@@ -465,23 +501,20 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%lu",
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
+ "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
__entry->sync_mode,
__entry->for_kupdate,
__entry->for_background,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
@@ -621,11 +654,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TRACE_EVENT(balance_dirty_pages,
TP_PROTO(struct bdi_writeback *wb,
- unsigned long thresh,
- unsigned long bg_thresh,
- unsigned long dirty,
- unsigned long bdi_thresh,
- unsigned long bdi_dirty,
+ struct dirty_throttle_control *dtc,
unsigned long dirty_ratelimit,
unsigned long task_ratelimit,
unsigned long dirtied,
@@ -633,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
long pause,
unsigned long start_time),
- TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+ TP_ARGS(wb, dtc,
dirty_ratelimit, task_ratelimit,
dirtied, period, pause, start_time),
@@ -642,8 +671,8 @@ TRACE_EVENT(balance_dirty_pages,
__field(unsigned long, limit)
__field(unsigned long, setpoint)
__field(unsigned long, dirty)
- __field(unsigned long, bdi_setpoint)
- __field(unsigned long, bdi_dirty)
+ __field(unsigned long, wb_setpoint)
+ __field(unsigned long, wb_dirty)
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned int, dirtied)
@@ -656,16 +685,15 @@ TRACE_EVENT(balance_dirty_pages,
),
TP_fast_assign(
- unsigned long freerun = (thresh + bg_thresh) / 2;
+ unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2;
strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
- __entry->limit = global_wb_domain.dirty_limit;
- __entry->setpoint = (global_wb_domain.dirty_limit +
- freerun) / 2;
- __entry->dirty = dirty;
- __entry->bdi_setpoint = __entry->setpoint *
- bdi_thresh / (thresh + 1);
- __entry->bdi_dirty = bdi_dirty;
+ __entry->limit = dtc->limit;
+ __entry->setpoint = (dtc->limit + freerun) / 2;
+ __entry->dirty = dtc->dirty;
+ __entry->wb_setpoint = __entry->setpoint *
+ dtc->wb_thresh / (dtc->thresh + 1);
+ __entry->wb_dirty = dtc->wb_dirty;
__entry->dirty_ratelimit = KBps(dirty_ratelimit);
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->dirtied = dirtied;
@@ -681,7 +709,7 @@ TRACE_EVENT(balance_dirty_pages,
TP_printk("bdi %s: "
"limit=%lu setpoint=%lu dirty=%lu "
- "bdi_setpoint=%lu bdi_dirty=%lu "
+ "wb_setpoint=%lu wb_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
"paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
@@ -689,8 +717,8 @@ TRACE_EVENT(balance_dirty_pages,
__entry->limit,
__entry->setpoint,
__entry->dirty,
- __entry->bdi_setpoint,
- __entry->bdi_dirty,
+ __entry->wb_setpoint,
+ __entry->wb_dirty,
__entry->dirty_ratelimit,
__entry->task_ratelimit,
__entry->dirtied,
@@ -720,7 +748,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
strscpy_pad(__entry->name,
bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->dirtied_when = inode->dirtied_when;
__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
@@ -759,7 +787,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
strscpy_pad(__entry->name,
bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->dirtied_when = inode->dirtied_when;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
@@ -811,7 +839,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->state = inode->i_state;
+ __entry->state = inode_state_read_once(inode);
__entry->mode = inode->i_mode;
__entry->dirtied_when = inode->dirtied_when;
),
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index a7e5452b5d21..18c0ac514fcb 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -168,25 +168,7 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
-/* not used anymore, but kept around so as not to break old programs */
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
-DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
- TP_PROTO(const struct net_device *dev,
- const struct bpf_prog *xdp,
- const void *tgt, int err,
- enum bpf_map_type map_type,
- u32 map_id, u32 index),
- TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
-);
-
+#ifdef CONFIG_BPF_SYSCALL
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -300,6 +282,7 @@ TRACE_EVENT(xdp_devmap_xmit,
__entry->sent, __entry->drops,
__entry->err)
);
+#endif /* CONFIG_BPF_SYSCALL */
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
#include <net/xdp_priv.h>
@@ -379,32 +362,6 @@ TRACE_EVENT(mem_connect,
)
);
-TRACE_EVENT(mem_return_failed,
-
- TP_PROTO(const struct xdp_mem_info *mem,
- const struct page *page),
-
- TP_ARGS(mem, page),
-
- TP_STRUCT__entry(
- __field(const struct page *, page)
- __field(u32, mem_id)
- __field(u32, mem_type)
- ),
-
- TP_fast_assign(
- __entry->page = page;
- __entry->mem_id = mem->id;
- __entry->mem_type = mem->type;
- ),
-
- TP_printk("mem_id=%d mem_type=%s page=%p",
- __entry->mem_id,
- __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
- __entry->page
- )
-);
-
TRACE_EVENT(bpf_xdp_link_attach_failed,
TP_PROTO(const char *msg),
diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
index 738b97f22f36..7ead1c61f0cb 100644
--- a/include/trace/misc/fs.h
+++ b/include/trace/misc/fs.h
@@ -120,3 +120,46 @@
{ LOOKUP_BENEATH, "BENEATH" }, \
{ LOOKUP_IN_ROOT, "IN_ROOT" }, \
{ LOOKUP_CACHED, "CACHED" })
+
+#define show_ia_valid_flags(flags) \
+ __print_flags(flags, "|", \
+ { ATTR_MODE, "MODE" }, \
+ { ATTR_UID, "UID" }, \
+ { ATTR_GID, "GID" }, \
+ { ATTR_SIZE, "SIZE" }, \
+ { ATTR_ATIME, "ATIME" }, \
+ { ATTR_MTIME, "MTIME" }, \
+ { ATTR_CTIME, "CTIME" }, \
+ { ATTR_ATIME_SET, "ATIME_SET" }, \
+ { ATTR_MTIME_SET, "MTIME_SET" }, \
+ { ATTR_FORCE, "FORCE" }, \
+ { ATTR_KILL_SUID, "KILL_SUID" }, \
+ { ATTR_KILL_SGID, "KILL_SGID" }, \
+ { ATTR_FILE, "FILE" }, \
+ { ATTR_KILL_PRIV, "KILL_PRIV" }, \
+ { ATTR_OPEN, "OPEN" }, \
+ { ATTR_TIMES_SET, "TIMES_SET" }, \
+		{ ATTR_TOUCH, "TOUCH" }, \
+		{ ATTR_DELEG, "DELEG" })
+
+#define show_statx_mask(flags) \
+ __print_flags(flags, "|", \
+ { STATX_TYPE, "TYPE" }, \
+ { STATX_MODE, "MODE" }, \
+ { STATX_NLINK, "NLINK" }, \
+ { STATX_UID, "UID" }, \
+ { STATX_GID, "GID" }, \
+ { STATX_ATIME, "ATIME" }, \
+ { STATX_MTIME, "MTIME" }, \
+ { STATX_CTIME, "CTIME" }, \
+ { STATX_INO, "INO" }, \
+ { STATX_SIZE, "SIZE" }, \
+ { STATX_BLOCKS, "BLOCKS" }, \
+ { STATX_BASIC_STATS, "BASIC_STATS" }, \
+ { STATX_BTIME, "BTIME" }, \
+ { STATX_MNT_ID, "MNT_ID" }, \
+ { STATX_DIOALIGN, "DIOALIGN" }, \
+ { STATX_MNT_ID_UNIQUE, "MNT_ID_UNIQUE" }, \
+ { STATX_SUBVOL, "SUBVOL" }, \
+ { STATX_WRITE_ATOMIC, "WRITE_ATOMIC" }, \
+ { STATX_DIO_READ_ALIGN, "DIO_READ_ALIGN" })
diff --git a/include/trace/misc/nfs.h b/include/trace/misc/nfs.h
index 7b221d51133a..c82233e950ac 100644
--- a/include/trace/misc/nfs.h
+++ b/include/trace/misc/nfs.h
@@ -51,6 +51,7 @@ TRACE_DEFINE_ENUM(NFSERR_JUKEBOX);
{ NFSERR_IO, "IO" }, \
{ NFSERR_NXIO, "NXIO" }, \
{ ECHILD, "CHILD" }, \
+ { ETIMEDOUT, "TIMEDOUT" }, \
{ NFSERR_EAGAIN, "AGAIN" }, \
{ NFSERR_ACCES, "ACCES" }, \
{ NFSERR_EXIST, "EXIST" }, \
diff --git a/include/trace/perf.h b/include/trace/perf.h
index 2c11181c82e0..a1754b73a8f5 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -12,10 +12,10 @@
#undef __perf_task
#define __perf_task(t) (__task = (t))
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+#undef __DECLARE_EVENT_CLASS
+#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-perf_trace_##call(void *__data, proto) \
+do_perf_trace_##call(void *__data, proto) \
{ \
struct trace_event_call *event_call = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
@@ -56,6 +56,41 @@ perf_trace_##call(void *__data, proto) \
}
/*
+ * Define unused __count and __task variables to use @args to pass
+ * arguments to do_perf_trace_##call. This is needed because the
+ * macros __perf_count and __perf_task introduce the side-effect to
+ * store copies into those local variables.
+ */
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ u64 __count __attribute__((unused)); \
+ struct task_struct *__task __attribute__((unused)); \
+ \
+ do_perf_trace_##call(__data, args); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+ u64 __count __attribute__((unused)); \
+ struct task_struct *__task __attribute__((unused)); \
+ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ do_perf_trace_##call(__data, args); \
+ preempt_enable_notrace(); \
+}
+
+/*
* This part is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the
* perf probe will fail to compile unless it too is updated.
@@ -73,4 +108,7 @@ static inline void perf_test_probe_##call(void) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __DECLARE_EVENT_CLASS
+
#endif /* CONFIG_PERF_EVENTS */
diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h
index c1fb1355d309..1e7b0bef95f5 100644
--- a/include/trace/stages/stage3_trace_output.h
+++ b/include/trace/stages/stage3_trace_output.h
@@ -119,6 +119,14 @@
trace_print_array_seq(p, array, count, el_size); \
})
+#undef __print_dynamic_array
+#define __print_dynamic_array(array, el_size) \
+ ({ \
+ __print_array(__get_dynamic_array(array), \
+ __get_dynamic_array_len(array) / (el_size), \
+ (el_size)); \
+ })
+
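Illustrative use in an event's TP_printk(), assuming the event declared
__dynamic_array(u32, vals, nr_vals) in its TP_STRUCT__entry:

    TP_printk("vals=%s",
              __print_dynamic_array(vals, sizeof(u32)))

The element count is derived as __get_dynamic_array_len() / el_size, so
the same call works for any number of records.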
#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii) \
diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h
index bcb960d16fc0..fcd564a590f4 100644
--- a/include/trace/stages/stage7_class_define.h
+++ b/include/trace/stages/stage7_class_define.h
@@ -22,6 +22,7 @@
#undef __get_rel_cpumask
#undef __get_rel_sockaddr
#undef __print_array
+#undef __print_dynamic_array
#undef __print_hex_dump
#undef __get_buf
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8e193f3a33b3..0dd7f2b33431 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -16,6 +16,9 @@
* @name: name of the syscall
* @syscall_nr: number of the syscall
* @nb_args: number of parameters it takes
+ * @user_arg_is_str: set if the argument sized by @user_arg_size is a string
+ * @user_arg_size: index of the @args entry that holds the size of the
+ *   user space data to read
+ * @user_mask: mask of the @args entries that will read user space
* @types: list of types as strings
* @args: list of args as strings (args[i] matches types[i])
* @enter_fields: list of fields for syscall_enter trace event
@@ -25,7 +28,10 @@
struct syscall_metadata {
const char *name;
int syscall_nr;
- int nb_args;
+ u8 nb_args:7;
+ u8 user_arg_is_str:1;
+ s8 user_arg_size;
+ short user_mask;
const char **types;
const char **args;
struct list_head enter_fields;
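A hypothetical encoding for write(fd, buf, count), to illustrate the new
fields (exact values depend on how the syscall layer populates them):

    meta->user_mask       = BIT(1); /* args[1] (buf) reads user memory  */
    meta->user_arg_size   = 2;      /* args[2] (count) holds its length */
    meta->user_arg_is_str = 0;      /* raw bytes, not a C string        */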
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index c2f9cabf154d..4f22136fd465 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -45,6 +45,16 @@
PARAMS(print)); \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+#undef TRACE_EVENT_SYSCALL
+#define TRACE_EVENT_SYSCALL(name, proto, args, tstruct, assign, print, reg, unreg) \
+ DECLARE_EVENT_SYSCALL_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
#include "stages/stage1_struct_define.h"
#undef DECLARE_EVENT_CLASS
@@ -57,6 +67,9 @@
\
static struct trace_event_class event_class_##name;
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
static struct trace_event_call __used \
@@ -117,6 +130,9 @@
tstruct; \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
@@ -208,6 +224,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
.trace = trace_raw_output_##call, \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
static notrace enum print_line_t \
@@ -244,6 +263,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
tstruct \
{} };
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
@@ -265,6 +287,9 @@ static inline notrace int trace_event_get_offsets_##call( \
return __data_size; \
}
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
@@ -374,11 +399,11 @@ static inline notrace int trace_event_get_offsets_##call( \
#include "stages/stage6_event_callback.h"
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
- \
+
+#undef __DECLARE_EVENT_CLASS
+#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
-trace_event_raw_event_##call(void *__data, proto) \
+do_trace_event_raw_event_##call(void *__data, proto) \
{ \
struct trace_event_file *trace_file = __data; \
struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
@@ -403,6 +428,30 @@ trace_event_raw_event_##call(void *__data, proto) \
\
trace_event_buffer_commit(&fbuffer); \
}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ do_trace_event_raw_event_##call(__data, args); \
+}
+
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
+__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
+ PARAMS(assign), PARAMS(print)) \
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ might_fault(); \
+ preempt_disable_notrace(); \
+ do_trace_event_raw_event_##call(__data, args); \
+ preempt_enable_notrace(); \
+}
+
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
* to make sure that if the tracepoint handling changes, the ftrace probe will
@@ -418,6 +467,8 @@ static inline void ftrace_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#undef __DECLARE_EVENT_CLASS
+
#include "stages/stage7_class_define.h"
#undef DECLARE_EVENT_CLASS
@@ -434,6 +485,9 @@ static struct trace_event_class __used __refdata event_class_##call = { \
_TRACE_PERF_INIT(call) \
};
+#undef DECLARE_EVENT_SYSCALL_CLASS
+#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS
+
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 80f37a0d40d7..613475285643 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -6,7 +6,6 @@
/*
* FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x4000000
* These cannot be used by userspace O_* until internal and external open
* flags are split.
* -Eric Paris
diff --git a/include/uapi/asm-generic/ioctl.h b/include/uapi/asm-generic/ioctl.h
index a84f4db8a250..e3290a5824c9 100644
--- a/include/uapi/asm-generic/ioctl.h
+++ b/include/uapi/asm-generic/ioctl.h
@@ -82,13 +82,13 @@
* NOTE: _IOW means userland is writing and kernel is reading. _IOR
* means userland is reading and kernel is writing.
*/
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOW(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOWR(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype)))
+#define _IOR_BAD(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),sizeof(argtype))
+#define _IOW_BAD(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),sizeof(argtype))
+#define _IOWR_BAD(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(argtype))
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
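The rename makes the contract explicit: the third argument is a type,
not a size, because _IOC_TYPECHECK() applies sizeof() itself. A sketch
with a hypothetical driver:

    struct mydrv_info {
            __u32 flags;
            __u64 addr;
    };
    #define MYDRV_GET_INFO _IOR('M', 0x01, struct mydrv_info) /* kernel writes */
    #define MYDRV_SET_INFO _IOW('M', 0x02, struct mydrv_info) /* kernel reads  */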
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 6ce1f1ceb432..ef1c27fa3c57 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -79,9 +79,13 @@
#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */
+#define MADV_GUARD_REMOVE 103 /* unguard range */
+
/* compatibility flags */
#define MAP_FILE 0
+#define PKEY_UNRESTRICTED 0x0
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h
index 57e8195d0b53..5e3d61ddbd8c 100644
--- a/include/uapi/asm-generic/mman.h
+++ b/include/uapi/asm-generic/mman.h
@@ -19,4 +19,8 @@
#define MCL_FUTURE 2 /* lock all future mappings */
#define MCL_ONFAULT 4 /* lock all pages that are faulted in */
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
+#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */
+
+
#endif /* __ASM_GENERIC_MMAN_H */
diff --git a/include/uapi/asm-generic/param.h b/include/uapi/asm-generic/param.h
index baad02ea7f93..3ed505dfea13 100644
--- a/include/uapi/asm-generic/param.h
+++ b/include/uapi/asm-generic/param.h
@@ -2,8 +2,12 @@
#ifndef _UAPI__ASM_GENERIC_PARAM_H
#define _UAPI__ASM_GENERIC_PARAM_H
+#ifndef __USER_HZ
+#define __USER_HZ 100
+#endif
+
#ifndef HZ
-#define HZ 100
+#define HZ __USER_HZ
#endif
#ifndef EXEC_PAGESIZE
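With the indirection, an architecture whose userspace tick differs from
100 can override it ahead of this header (a hypothetical override):

    #define __USER_HZ 1024
    #include <asm-generic/param.h>  /* HZ now expands to __USER_HZ */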
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index b5f7594eee7a..0a90ad92dbf3 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -86,6 +86,7 @@ typedef struct {
*/
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
+typedef unsigned long long __kernel_uoff_t;
typedef __kernel_long_t __kernel_old_time_t;
#ifndef __KERNEL__
typedef __kernel_long_t __kernel_time_t;
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index b7bc545ec3b2..5a1ca43b5fc6 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -46,7 +46,7 @@ union __sifields {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* Not used by the kernel. Historic leftover. Always 0. */
} _timer;
/* POSIX.1b signals */
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 8ce8a39a1e5f..53b5a8c002b1 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -135,6 +135,21 @@
#define SO_PASSPIDFD 76
#define SO_PEERPIDFD 77
+#define SO_DEVMEM_LINEAR 78
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 79
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 80
+
+#define SCM_TS_OPT_ID 81
+
+#define SO_RCVPRIORITY 82
+
+#define SO_PASSRIGHTS 83
+
+#define SO_INQ 84
+#define SCM_INQ SO_INQ
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index d983c48a3b6a..942370b3f5d2 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -737,7 +737,7 @@ __SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
#define __NR_ppoll_time64 414
__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
#define __NR_io_pgetevents_time64 416
-__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
+__SC_COMP(__NR_io_pgetevents_time64, sys_io_pgetevents, compat_sys_io_pgetevents_time64)
#define __NR_recvmmsg_time64 417
__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
#define __NR_mq_timedsend_time64 418
@@ -776,12 +776,8 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
-
-#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
-#endif
-
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
@@ -845,8 +841,27 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#define __NR_mseal 462
__SYSCALL(__NR_mseal, sys_mseal)
+#define __NR_setxattrat 463
+__SYSCALL(__NR_setxattrat, sys_setxattrat)
+#define __NR_getxattrat 464
+__SYSCALL(__NR_getxattrat, sys_getxattrat)
+#define __NR_listxattrat 465
+__SYSCALL(__NR_listxattrat, sys_listxattrat)
+#define __NR_removexattrat 466
+__SYSCALL(__NR_removexattrat, sys_removexattrat)
+#define __NR_open_tree_attr 467
+__SYSCALL(__NR_open_tree_attr, sys_open_tree_attr)
+
+/* fs/inode.c */
+#define __NR_file_getattr 468
+__SYSCALL(__NR_file_getattr, sys_file_getattr)
+#define __NR_file_setattr 469
+__SYSCALL(__NR_file_setattr, sys_file_setattr)
+#define __NR_listns 470
+__SYSCALL(__NR_listns, sys_listns)
+
#undef __NR_syscalls
-#define __NR_syscalls 463
+#define __NR_syscalls 471
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/cxl/features.h b/include/uapi/cxl/features.h
new file mode 100644
index 000000000000..490606d7694b
--- /dev/null
+++ b/include/uapi/cxl/features.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2024,2025, Intel Corporation
+ *
+ * These are definitions for the mailbox command interface of CXL subsystem.
+ */
+#ifndef _UAPI_CXL_FEATURES_H_
+#define _UAPI_CXL_FEATURES_H_
+
+#include <linux/types.h>
+
+typedef unsigned char __uapi_uuid_t[16];
+
+#ifdef __KERNEL__
+#include <linux/uuid.h>
+/*
+ * Note, __uapi_uuid_t is 1-byte aligned on modern compilers and 4-byte
+ * aligned on others. Ensure that __uapi_uuid_t in a struct is placed at
+ * a 4-byte aligned offset, or the structure is packed, to ensure
+ * consistent padding.
+ */
+static_assert(sizeof(__uapi_uuid_t) == sizeof(uuid_t));
+#define __uapi_uuid_t uuid_t
+#endif
+
+/*
+ * struct cxl_mbox_get_sup_feats_in - Get Supported Features input
+ *
+ * @count: bytes of Feature data to return in output
+ * @start_idx: index of first requested Supported Feature Entry, 0 based.
+ * @reserved: reserved field, must be 0s.
+ *
+ * Get Supported Features (opcode 0x500), CXL spec r3.2 section 8.2.9.6.1.
+ * Input block for Get Supported Features.
+ */
+struct cxl_mbox_get_sup_feats_in {
+ __le32 count;
+ __le16 start_idx;
+ __u8 reserved[2];
+} __attribute__ ((__packed__));
+
+/* CXL spec r3.2 Table 8-87 command effects */
+#define CXL_CMD_CONFIG_CHANGE_COLD_RESET BIT(0)
+#define CXL_CMD_CONFIG_CHANGE_IMMEDIATE BIT(1)
+#define CXL_CMD_DATA_CHANGE_IMMEDIATE BIT(2)
+#define CXL_CMD_POLICY_CHANGE_IMMEDIATE BIT(3)
+#define CXL_CMD_LOG_CHANGE_IMMEDIATE BIT(4)
+#define CXL_CMD_SECURITY_STATE_CHANGE BIT(5)
+#define CXL_CMD_BACKGROUND BIT(6)
+#define CXL_CMD_BGCMD_ABORT_SUPPORTED BIT(7)
+#define CXL_CMD_EFFECTS_VALID BIT(9)
+#define CXL_CMD_CONFIG_CHANGE_CONV_RESET BIT(10)
+#define CXL_CMD_CONFIG_CHANGE_CXL_RESET BIT(11)
+#define CXL_CMD_EFFECTS_RESERVED GENMASK(15, 12)
+
+/*
+ * struct cxl_feat_entry - Supported Feature Entry
+ * @uuid: UUID of the Feature
+ * @id: id identifying the Feature, 0 based
+ * @get_feat_size: max bytes required for Get Feature command for this Feature
+ * @set_feat_size: max bytes required for Set Feature command for this Feature
+ * @flags: attribute flags
+ * @get_feat_ver: Get Feature version
+ * @set_feat_ver: Set Feature version
+ * @effects: Set Feature command effects
+ * @reserved: reserved, must be 0
+ *
+ * CXL spec r3.2 Table 8-109
+ * Get Supported Features Supported Feature Entry
+ */
+struct cxl_feat_entry {
+ __uapi_uuid_t uuid;
+ __le16 id;
+ __le16 get_feat_size;
+ __le16 set_feat_size;
+ __le32 flags;
+ __u8 get_feat_ver;
+ __u8 set_feat_ver;
+ __le16 effects;
+ __u8 reserved[18];
+} __attribute__ ((__packed__));
+
+/* @flags field for 'struct cxl_feat_entry' */
+#define CXL_FEATURE_F_CHANGEABLE BIT(0)
+#define CXL_FEATURE_F_PERSIST_FW_UPDATE BIT(4)
+#define CXL_FEATURE_F_DEFAULT_SEL BIT(5)
+#define CXL_FEATURE_F_SAVED_SEL BIT(6)
+
+/*
+ * struct cxl_mbox_get_sup_feats_out - Get Supported Features output
+ * @num_entries: number of Supported Feature Entries returned
+ * @supported_feats: number of supported Features
+ * @reserved: reserved, must be 0s.
+ * @ents: Supported Feature Entries array
+ *
+ * CXL spec r3.2 Table 8-108
+ * Get Supported Features Output Payload
+ */
+struct cxl_mbox_get_sup_feats_out {
+ __struct_group(cxl_mbox_get_sup_feats_out_hdr, hdr, /* no attrs */,
+ __le16 num_entries;
+ __le16 supported_feats;
+ __u8 reserved[4];
+ );
+ struct cxl_feat_entry ents[] __counted_by_le(num_entries);
+} __attribute__ ((__packed__));
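As a usage sketch (not part of this header): a userspace consumer that has received a Get Supported Features response into a buffer could walk the entries as below. The transport that fills the buffer is assumed; byte order is handled with the <endian.h> helpers since all multi-byte fields are little-endian.

#include <endian.h>
#include <stdio.h>

static void dump_sup_feats(const struct cxl_mbox_get_sup_feats_out *out)
{
	unsigned int n = le16toh(out->num_entries);

	for (unsigned int i = 0; i < n; i++) {
		const struct cxl_feat_entry *e = &out->ents[i];

		printf("feature 0x%x: get %u bytes, set %u bytes, flags 0x%x\n",
		       le16toh(e->id), le16toh(e->get_feat_size),
		       le16toh(e->set_feat_size), le32toh(e->flags));
	}
}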
+
+/*
+ * Get Feature, CXL spec r3.2 section 8.2.9.6.2
+ */
+
+/*
+ * struct cxl_mbox_get_feat_in - Get Feature input
+ * @uuid: UUID for Feature
+ * @offset: offset of the first byte in Feature data for output payload
+ * @count: count in bytes of Feature data returned
+ * @selection: 0 current value, 1 default value, 2 saved value
+ *
+ * CXL spec r3.2 section 8.2.9.6.2 Table 8-99
+ */
+struct cxl_mbox_get_feat_in {
+ __uapi_uuid_t uuid;
+ __le16 offset;
+ __le16 count;
+ __u8 selection;
+} __attribute__ ((__packed__));
+
+/*
+ * enum cxl_get_feat_selection - selection field of Get Feature input
+ */
+enum cxl_get_feat_selection {
+ CXL_GET_FEAT_SEL_CURRENT_VALUE,
+ CXL_GET_FEAT_SEL_DEFAULT_VALUE,
+ CXL_GET_FEAT_SEL_SAVED_VALUE,
+ CXL_GET_FEAT_SEL_MAX
+};
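A minimal sketch of building the Get Feature input block (the mailbox transport that carries it is out of scope here):

#include <endian.h>
#include <string.h>

static void fill_get_feat(struct cxl_mbox_get_feat_in *in,
			  const unsigned char uuid[16],
			  unsigned int offset, unsigned int count)
{
	memcpy(&in->uuid, uuid, sizeof(in->uuid));
	in->offset = htole16(offset);	/* first byte of Feature data */
	in->count = htole16(count);	/* bytes of Feature data wanted */
	in->selection = CXL_GET_FEAT_SEL_CURRENT_VALUE;
}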
+
+/*
+ * Set Feature, CXL spec r3.2 section 8.2.9.6.3
+ */
+
+/*
+ * struct cxl_mbox_set_feat_in - Set Features input
+ * @uuid: UUID for Feature
+ * @flags: set feature flags
+ * @offset: byte offset of Feature data to update
+ * @version: Feature version of the data in Feature Data
+ * @rsvd: reserved, must be 0s.
+ * @feat_data: raw byte stream of Features data to update
+ *
+ * CXL spec r3.2 section 8.2.9.6.3 Table 8-101
+ */
+struct cxl_mbox_set_feat_in {
+ __struct_group(cxl_mbox_set_feat_hdr, hdr, /* no attrs */,
+ __uapi_uuid_t uuid;
+ __le32 flags;
+ __le16 offset;
+ __u8 version;
+ __u8 rsvd[9];
+ );
+ __u8 feat_data[];
+} __packed;
+
+/*
+ * enum cxl_set_feat_flag_data_transfer - Set Feature flags field
+ */
+enum cxl_set_feat_flag_data_transfer {
+ CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER = 0,
+ CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER,
+ CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER,
+ CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER,
+ CXL_SET_FEAT_FLAG_ABORT_DATA_TRANSFER,
+ CXL_SET_FEAT_FLAG_DATA_TRANSFER_MAX
+};
+
+#define CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK GENMASK(2, 0)
+#define CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET BIT(3)
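For example, the flags for the first chunk of a multi-part Set Feature transfer that should persist across reset could be composed as below (a sketch; note this header uses BIT()/GENMASK(), which userspace must provide, e.g. BIT(n) as 1U << n):

#include <endian.h>

static void set_feat_first_chunk(struct cxl_mbox_set_feat_in *in)
{
	__u32 flags = CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER; /* bits 2:0 */

	flags |= 1U << 3; /* CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET */
	in->flags = htole32(flags);
}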
+
+#endif
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 96e32dafd4f0..f80aa4c9d88f 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -54,6 +54,10 @@ extern "C" {
#define DRM_AMDGPU_VM 0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE 0x14
#define DRM_AMDGPU_SCHED 0x15
+#define DRM_AMDGPU_USERQ 0x16
+#define DRM_AMDGPU_USERQ_SIGNAL 0x17
+#define DRM_AMDGPU_USERQ_WAIT 0x18
+#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -71,6 +75,10 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_VM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
+#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
+#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
+#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
/**
* DOC: memory domains
@@ -97,6 +105,8 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
+ *
+ * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -105,13 +115,15 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
+#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA | \
- AMDGPU_GEM_DOMAIN_DOORBELL)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL | \
+ AMDGPU_GEM_DOMAIN_MMIO_REMAP)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -171,6 +183,8 @@ extern "C" {
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
+/* Set PTE.D and recompress during GTT->VRAM moves according to TILING flags. */
+#define AMDGPU_GEM_CREATE_GFX12_DCC (1 << 16)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -317,6 +331,260 @@ union drm_amdgpu_ctx {
union drm_amdgpu_ctx_out out;
};
+/* user queue IOCTL operations */
+#define AMDGPU_USERQ_OP_CREATE 1
+#define AMDGPU_USERQ_OP_FREE 2
+
+/* queue priority levels */
+/* low < normal low < normal high < high */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK 0x3
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_LOW 0
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW 1
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_NORMAL_HIGH 2
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH 3 /* admin only */
+/* for queues that need access to protected content */
+#define AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE (1 << 2)
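For instance (sketch only), a secure, low-priority queue would encode its creation flags as:

__u32 flags = (AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_LOW &
	       AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) <<
	      AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;

flags |= AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;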
+
+/*
+ * This structure is a container to pass input configuration
+ * info for all supported userqueue related operations.
+ * For operation AMDGPU_USERQ_OP_CREATE: the user is expected
+ * to set all fields, except the parameter 'queue_id'.
+ * For operation AMDGPU_USERQ_OP_FREE: the only input parameter expected
+ * to be set is 'queue_id'; everything else is ignored.
+ */
+struct drm_amdgpu_userq_in {
+ /** AMDGPU_USERQ_OP_* */
+ __u32 op;
+ /** Queue id passed for operation USERQ_OP_FREE */
+ __u32 queue_id;
+ /** the target GPU engine to execute workload (AMDGPU_HW_IP_*) */
+ __u32 ip_type;
+ /**
+ * @doorbell_handle: the handle of doorbell GEM object
+ * associated with this userqueue client.
+ */
+ __u32 doorbell_handle;
+ /**
+ * @doorbell_offset: 32-bit offset of the doorbell in the doorbell bo.
+ * Kernel will generate absolute doorbell offset using doorbell_handle
+ * and doorbell_offset in the doorbell bo.
+ */
+ __u32 doorbell_offset;
+ /**
+ * @flags: flags used for queue parameters
+ */
+ __u32 flags;
+ /**
+ * @queue_va: Virtual address of the GPU memory which holds the queue
+ * object. The queue holds the workload packets.
+ */
+ __u64 queue_va;
+ /**
+ * @queue_size: Size of the queue in bytes, this needs to be 256-byte
+ * aligned.
+ */
+ __u64 queue_size;
+ /**
+	 * @rptr_va: Virtual address of the GPU memory which holds the ring RPTR.
+	 * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
+ */
+ __u64 rptr_va;
+ /**
+	 * @wptr_va: Virtual address of the GPU memory which holds the ring WPTR.
+	 * This object must be at least 8 bytes in size and aligned to an 8-byte offset.
+ *
+ * Queue, RPTR and WPTR can come from the same object, as long as the size
+ * and alignment related requirements are met.
+ */
+ __u64 wptr_va;
+ /**
+ * @mqd: MQD (memory queue descriptor) is a set of parameters which allow
+ * the GPU to uniquely define and identify a usermode queue.
+ *
+ * MQD data can be of different size for different GPU IP/engine and
+ * their respective versions/revisions, so this points to a __u64 *
+ * which holds IP specific MQD of this usermode queue.
+ */
+ __u64 mqd;
+ /**
+	 * @mqd_size: size of MQD data in bytes; it must match the MQD
+	 * structure size of the respective engine/revision defined in the
+	 * UAPI. For example, for gfx11 workloads,
+	 * mqd_size = sizeof(drm_amdgpu_userq_mqd_gfx11).
+ */
+ __u64 mqd_size;
+};
+
+/* The structure to carry output of userqueue ops */
+struct drm_amdgpu_userq_out {
+ /**
+ * For operation AMDGPU_USERQ_OP_CREATE: This field contains a unique
+ * queue ID to represent the newly created userqueue in the system, otherwise
+ * it should be ignored.
+ */
+ __u32 queue_id;
+ __u32 _pad;
+};
+
+union drm_amdgpu_userq {
+ struct drm_amdgpu_userq_in in;
+ struct drm_amdgpu_userq_out out;
+};
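A sketch of creating a GFX user queue with this union (assumes an open DRM fd and pre-mapped queue/RPTR/WPTR VAs; error handling omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int userq_create_gfx(int fd, struct drm_amdgpu_userq_mqd_gfx11 *mqd,
			    __u32 doorbell_handle, __u64 queue_va,
			    __u64 queue_size, __u64 rptr_va, __u64 wptr_va)
{
	union drm_amdgpu_userq args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_USERQ_OP_CREATE;
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.doorbell_handle = doorbell_handle;
	args.in.queue_va = queue_va;
	args.in.queue_size = queue_size;	/* 256-byte aligned */
	args.in.rptr_va = rptr_va;		/* 8-byte aligned */
	args.in.wptr_va = wptr_va;
	args.in.mqd = (__u64)(uintptr_t)mqd;
	args.in.mqd_size = sizeof(*mqd);

	if (ioctl(fd, DRM_IOCTL_AMDGPU_USERQ, &args))
		return -1;

	return (int)args.out.queue_id;
}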
+
+/* GFX V11 IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_gfx11 {
+ /**
+ * @shadow_va: Virtual address of the GPU memory to hold the shadow buffer.
+ * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
+ */
+ __u64 shadow_va;
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+ * Use AMDGPU_INFO_IOCTL to find the exact size of the object.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 SDMA IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_sdma_gfx11 {
+ /**
+ * @csa_va: Virtual address of the GPU memory to hold the CSA buffer.
+	 * This must be from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 csa_va;
+};
+
+/* GFX V11 Compute IP specific MQD parameters */
+struct drm_amdgpu_userq_mqd_compute_gfx11 {
+ /**
+ * @eop_va: Virtual address of the GPU memory to hold the EOP buffer.
+	 * This must be from a separate GPU object; use the AMDGPU_INFO IOCTL
+ * to get the size.
+ */
+ __u64 eop_va;
+};
+
+/* userq signal/wait ioctl */
+struct drm_amdgpu_userq_signal {
+ /**
+ * @queue_id: Queue handle used by the userq fence creation function
+ * to retrieve the WPTR.
+ */
+ __u32 queue_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to be signaled.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u64 num_syncobj_handles;
+ /**
+ * @bo_read_handles: The list of BO handles that the submitted user queue job
+ * is using for read only. This will update BO fences in the kernel.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of BO handles that the submitted user queue job
+ * is using for write only. This will update BO fences in the kernel.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+};
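All handle lists above are user pointers packed into __u64 fields. A minimal sketch of signalling a single syncobj from a user queue (error handling omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int userq_signal_one(int fd, __u32 queue_id, __u32 syncobj)
{
	struct drm_amdgpu_userq_signal args;

	memset(&args, 0, sizeof(args));
	args.queue_id = queue_id;
	args.syncobj_handles = (__u64)(uintptr_t)&syncobj;
	args.num_syncobj_handles = 1;

	return ioctl(fd, DRM_IOCTL_AMDGPU_USERQ_SIGNAL, &args);
}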
+
+struct drm_amdgpu_userq_fence_info {
+ /**
+ * @va: A gpu address allocated for each queue which stores the
+ * read pointer (RPTR) value.
+ */
+ __u64 va;
+ /**
+ * @value: A 64 bit value represents the write pointer (WPTR) of the
+ * queue commands which compared with the RPTR value to signal the
+ * fences.
+ */
+ __u64 value;
+};
+
+struct drm_amdgpu_userq_wait {
+ /**
+ * @waitq_id: Queue handle used by the userq wait IOCTL to retrieve the
+ * wait queue and maintain the fence driver references in it.
+ */
+ __u32 waitq_id;
+ __u32 pad;
+ /**
+ * @syncobj_handles: The list of syncobj handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 syncobj_handles;
+ /**
+ * @syncobj_timeline_handles: The list of timeline syncobj handles submitted by
+ * the user queue job to get the va/value pairs at given @syncobj_timeline_points.
+ */
+ __u64 syncobj_timeline_handles;
+ /**
+ * @syncobj_timeline_points: The list of timeline syncobj points submitted by the
+ * user queue job for the corresponding @syncobj_timeline_handles.
+ */
+ __u64 syncobj_timeline_points;
+ /**
+ * @bo_read_handles: The list of read BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_read_handles;
+ /**
+ * @bo_write_handles: The list of write BO handles submitted by the user queue
+ * job to get the va/value pairs.
+ */
+ __u64 bo_write_handles;
+ /**
+ * @num_syncobj_timeline_handles: A count that represents the number of timeline
+ * syncobj handles in @syncobj_timeline_handles.
+ */
+ __u16 num_syncobj_timeline_handles;
+ /**
+ * @num_fences: This field can be used both as input and output. As input it defines
+ * the maximum number of fences that can be returned and as output it will specify
+ * how many fences were actually returned from the ioctl.
+ */
+ __u16 num_fences;
+ /**
+ * @num_syncobj_handles: A count that represents the number of syncobj handles in
+ * @syncobj_handles.
+ */
+ __u32 num_syncobj_handles;
+ /**
+ * @num_bo_read_handles: A count that represents the number of read BO handles in
+ * @bo_read_handles.
+ */
+ __u32 num_bo_read_handles;
+ /**
+ * @num_bo_write_handles: A count that represents the number of write BO handles in
+ * @bo_write_handles.
+ */
+ __u32 num_bo_write_handles;
+ /**
+ * @out_fences: The field is a return value from the ioctl containing the list of
+ * address/value pairs to wait for.
+ */
+ __u64 out_fences;
+};
+
/* vm ioctl */
#define AMDGPU_VM_OP_RESERVE_VMID 1
#define AMDGPU_VM_OP_UNRESERVE_VMID 2
@@ -392,7 +660,7 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
-/* GFX9 and later: */
+/* GFX9 - GFX11: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
@@ -406,6 +674,24 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_SCANOUT_SHIFT 63
#define AMDGPU_TILING_SCANOUT_MASK 0x1
+/* GFX12 and later: */
+#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
+#define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
+/* These are DCC recompression settings for memory management: */
+#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
+#define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3 /* 0:64B, 1:128B, 2:256B */
+#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
+#define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7 /* CB_COLOR0_INFO.NUMBER_TYPE */
+#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
+#define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f /* [0:4]:CB_COLOR0_INFO.FORMAT, [5]:MM */
+/* When clearing the buffer or moving it from VRAM to GTT, don't compress and set DCC metadata
+ * to uncompressed. Set when parts of an allocation bypass DCC and read raw data. */
+#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE_MASK 0x1
+/* bit gap */
+#define AMDGPU_TILING_GFX12_SCANOUT_SHIFT 63
+#define AMDGPU_TILING_GFX12_SCANOUT_MASK 0x1
+
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
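For example (sketch), GFX12 tiling metadata for a scanout surface with a 128B max compressed block could be packed as below; the matching AMDGPU_TILING_GET() helper (defined alongside SET in this header) extracts a field again.

__u64 tiling_flags = AMDGPU_TILING_SET(GFX12_SWIZZLE_MODE, 3) |
		     AMDGPU_TILING_SET(GFX12_DCC_MAX_COMPRESSED_BLOCK, 1) |
		     AMDGPU_TILING_SET(GFX12_SCANOUT, 1);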
@@ -520,6 +806,21 @@ union drm_amdgpu_wait_fences {
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+#define AMDGPU_GEM_OP_GET_MAPPING_INFO 2
+
+struct drm_amdgpu_gem_vm_entry {
+ /* Start of mapping (in bytes) */
+ __u64 addr;
+
+ /* Size of mapping (in bytes) */
+ __u64 size;
+
+ /* Mapping offset */
+ __u64 offset;
+
+ /* flags needed to recreate mapping */
+ __u64 flags;
+};
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
@@ -527,8 +828,44 @@ struct drm_amdgpu_gem_op {
__u32 handle;
/** AMDGPU_GEM_OP_* */
__u32 op;
- /** Input or return value */
+ /** Input or return value. For MAPPING_INFO op: pointer to array of struct drm_amdgpu_gem_vm_entry */
__u64 value;
+ /** For MAPPING_INFO op: number of mappings (in/out) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
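One plausible usage of the new MAPPING_INFO op, given the in/out num_entries field (a sketch; the DRM_IOCTL_AMDGPU_GEM_OP wrapper is defined earlier in this header): pass the array capacity in, and re-issue with a larger array if the returned count exceeds it.

#include <stdint.h>
#include <sys/ioctl.h>

static int get_mappings(int fd, __u32 handle,
			struct drm_amdgpu_gem_vm_entry *ents, __u32 *count)
{
	struct drm_amdgpu_gem_op op = {
		.handle = handle,
		.op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
		.value = (__u64)(uintptr_t)ents,
		.num_entries = *count,	/* in: capacity, out: actual */
	};
	int ret = ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op);

	*count = op.num_entries;
	return ret;
}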
+
+#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
+
+struct drm_amdgpu_gem_list_handles {
+ /* User pointer to array of drm_amdgpu_gem_bo_info_entry */
+ __u64 entries;
+
+	/* In: capacity of the entries buffer. Out: number of handles in the process (if larger than the capacity, the call must be retried with a bigger buffer) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
+
+struct drm_amdgpu_gem_list_handles_entry {
+ /* gem handle of buffer object */
+ __u32 gem_handle;
+
+ /* Currently just one flag: IS_IMPORT */
+ __u32 flags;
+
+ /* Size of bo */
+ __u64 size;
+
+ /* Preferred domains for GEM_CREATE */
+ __u64 preferred_domains;
+
+ /* GEM_CREATE flags for re-creation of buffer */
+ __u64 alloc_flags;
+
+ /* physical start_addr alignment in bytes for some HW requirements */
+ __u64 alignment;
};
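The "must retry" contract above lends itself to a simple loop (sketch; allocation error handling abbreviated):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct drm_amdgpu_gem_list_handles_entry *
list_all_handles(int fd, __u32 *count)
{
	struct drm_amdgpu_gem_list_handles args = {0};
	struct drm_amdgpu_gem_list_handles_entry *ents = NULL;
	__u32 cap = 64;

	for (;;) {
		ents = realloc(ents, cap * sizeof(*ents));
		args.entries = (__u64)(uintptr_t)ents;
		args.num_entries = cap;
		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args)) {
			free(ents);
			return NULL;
		}
		if (args.num_entries <= cap)
			break;
		cap = args.num_entries;	/* buffer too small: retry */
	}

	*count = args.num_entries;
	return ents;
}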
#define AMDGPU_VA_OP_MAP 1
@@ -579,6 +916,19 @@ struct drm_amdgpu_gem_va {
__u64 offset_in_bo;
/** Specify mapping size. Must be correctly aligned. */
__u64 map_size;
+ /**
+ * vm_timeline_point is a sequence number used to add new timeline point.
+ */
+ __u64 vm_timeline_point;
+ /**
+ * The vm page table update fence is installed in given vm_timeline_syncobj_out
+ * at vm_timeline_point.
+ */
+ __u32 vm_timeline_syncobj_out;
+ /** the number of syncobj handles in @input_fence_syncobj_handles */
+ __u32 num_syncobj_handles;
+ /** Array of sync object handle to wait for given input fences */
+ __u64 input_fence_syncobj_handles;
};
#define AMDGPU_HW_IP_GFX 0
@@ -738,10 +1088,21 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
 * Query h/w info: Flag that this is an integrated (a.k.a. fusion) GPU
*
*/
-#define AMDGPU_IDS_FLAGS_FUSION 0x1
-#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
-#define AMDGPU_IDS_FLAGS_TMZ 0x4
-#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
+#define AMDGPU_IDS_FLAGS_FUSION 0x01
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x02
+#define AMDGPU_IDS_FLAGS_TMZ 0x04
+#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x08
+#define AMDGPU_IDS_FLAGS_GANG_SUBMIT 0x10
+
+/*
+ * Query h/w info: Flag identifying VF/PF/PT mode
+ *
+ */
+#define AMDGPU_IDS_FLAGS_MODE_MASK 0x300
+#define AMDGPU_IDS_FLAGS_MODE_SHIFT 0x8
+#define AMDGPU_IDS_FLAGS_MODE_PF 0x0
+#define AMDGPU_IDS_FLAGS_MODE_VF 0x1
+#define AMDGPU_IDS_FLAGS_MODE_PT 0x2
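Decoding the mode from an ids_flags value is then a mask-and-shift (sketch):

static unsigned int amdgpu_ids_mode(__u64 ids_flags)
{
	/* yields AMDGPU_IDS_FLAGS_MODE_PF, _VF or _PT */
	return (ids_flags & AMDGPU_IDS_FLAGS_MODE_MASK) >>
	       AMDGPU_IDS_FLAGS_MODE_SHIFT;
}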
/* indicate if acceleration can be working */
#define AMDGPU_INFO_ACCEL_WORKING 0x00
@@ -910,6 +1271,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
#define AMDGPU_INFO_MAX_IBS 0x22
/* query last page fault info */
#define AMDGPU_INFO_GPUVM_FAULT 0x23
+/* query FW object size and alignment */
+#define AMDGPU_INFO_UQ_FW_AREAS 0x24
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -1063,6 +1426,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_DDR5 10
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
+#define AMDGPU_VRAM_TYPE_HBM3E 13
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1168,6 +1532,9 @@ struct drm_amdgpu_info_device {
__u32 csa_size;
/* context save area base virtual alignment for gfx11 */
__u32 csa_alignment;
+ /* Userq IP mask (1 << AMDGPU_HW_IP_*) */
+ __u32 userq_ip_mask;
+ __u32 pad;
};
struct drm_amdgpu_info_hw_ip {
@@ -1184,6 +1551,8 @@ struct drm_amdgpu_info_hw_ip {
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
+ /* Userq available slots */
+ __u32 userq_num_slots;
};
struct drm_amdgpu_info_num_handles {
@@ -1249,6 +1618,23 @@ struct drm_amdgpu_info_gpuvm_fault {
__u32 vmhub;
};
+struct drm_amdgpu_info_uq_metadata_gfx {
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
+};
+
+struct drm_amdgpu_info_uq_metadata {
+ union {
+ struct drm_amdgpu_info_uq_metadata_gfx gfx;
+ };
+};
+
/*
* Supported GPU families
*/
@@ -1268,6 +1654,7 @@ struct drm_amdgpu_info_gpuvm_fault {
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
+#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
new file mode 100644
index 000000000000..62c917fd4f7b
--- /dev/null
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -0,0 +1,698 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _UAPI_AMDXDNA_ACCEL_H_
+#define _UAPI_AMDXDNA_ACCEL_H_
+
+#include <linux/stddef.h>
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define AMDXDNA_INVALID_CMD_HANDLE (~0UL)
+#define AMDXDNA_INVALID_ADDR (~0UL)
+#define AMDXDNA_INVALID_CTX_HANDLE 0
+#define AMDXDNA_INVALID_BO_HANDLE 0
+#define AMDXDNA_INVALID_FENCE_HANDLE 0
+
+enum amdxdna_device_type {
+ AMDXDNA_DEV_TYPE_UNKNOWN = -1,
+ AMDXDNA_DEV_TYPE_KMQ,
+};
+
+enum amdxdna_drm_ioctl_id {
+ DRM_AMDXDNA_CREATE_HWCTX,
+ DRM_AMDXDNA_DESTROY_HWCTX,
+ DRM_AMDXDNA_CONFIG_HWCTX,
+ DRM_AMDXDNA_CREATE_BO,
+ DRM_AMDXDNA_GET_BO_INFO,
+ DRM_AMDXDNA_SYNC_BO,
+ DRM_AMDXDNA_EXEC_CMD,
+ DRM_AMDXDNA_GET_INFO,
+ DRM_AMDXDNA_SET_STATE,
+ DRM_AMDXDNA_GET_ARRAY = 10,
+};
+
+/**
+ * struct amdxdna_qos_info - QoS information for the driver.
+ * @gops: Giga operations per second.
+ * @fps: Frames per second.
+ * @dma_bandwidth: DMA bandwidth.
+ * @latency: Frame response latency.
+ * @frame_exec_time: Frame execution time.
+ * @priority: Request priority.
+ *
+ * User program can provide QoS hints to driver.
+ */
+struct amdxdna_qos_info {
+ __u32 gops;
+ __u32 fps;
+ __u32 dma_bandwidth;
+ __u32 latency;
+ __u32 frame_exec_time;
+ __u32 priority;
+};
+
+/**
+ * struct amdxdna_drm_create_hwctx - Create hardware context.
+ * @ext: MBZ.
+ * @ext_flags: MBZ.
+ * @qos_p: Address of QoS info.
+ * @umq_bo: BO handle for the user mode queue (UMQ).
+ * @log_buf_bo: BO handle for log buffer.
+ * @max_opc: Maximum operations per cycle.
+ * @num_tiles: Number of AIE tiles.
+ * @mem_size: Size of AIE tile memory.
+ * @umq_doorbell: Returned offset of doorbell associated with UMQ.
+ * @handle: Returned hardware context handle.
+ * @syncobj_handle: Returned syncobj handle for command completion.
+ */
+struct amdxdna_drm_create_hwctx {
+ __u64 ext;
+ __u64 ext_flags;
+ __u64 qos_p;
+ __u32 umq_bo;
+ __u32 log_buf_bo;
+ __u32 max_opc;
+ __u32 num_tiles;
+ __u32 mem_size;
+ __u32 umq_doorbell;
+ __u32 handle;
+ __u32 syncobj_handle;
+};
+
+/**
+ * struct amdxdna_drm_destroy_hwctx - Destroy hardware context.
+ * @handle: Hardware context handle.
+ * @pad: MBZ.
+ */
+struct amdxdna_drm_destroy_hwctx {
+ __u32 handle;
+ __u32 pad;
+};
+
+/**
+ * struct amdxdna_cu_config - configuration for one CU
+ * @cu_bo: CU configuration buffer bo handle.
+ * @cu_func: Function of a CU.
+ * @pad: MBZ.
+ */
+struct amdxdna_cu_config {
+ __u32 cu_bo;
+ __u8 cu_func;
+ __u8 pad[3];
+};
+
+/**
+ * struct amdxdna_hwctx_param_config_cu - configuration for CUs in hardware context
+ * @num_cus: Number of CUs to configure.
+ * @pad: MBZ.
+ * @cu_configs: Array of CU configurations of struct amdxdna_cu_config.
+ */
+struct amdxdna_hwctx_param_config_cu {
+ __u16 num_cus;
+ __u16 pad[3];
+ struct amdxdna_cu_config cu_configs[] __counted_by(num_cus);
+};
+
+enum amdxdna_drm_config_hwctx_param {
+ DRM_AMDXDNA_HWCTX_CONFIG_CU,
+ DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF,
+ DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF,
+};
+
+/**
+ * struct amdxdna_drm_config_hwctx - Configure hardware context.
+ * @handle: hardware context handle.
+ * @param_type: Value in enum amdxdna_drm_config_hwctx_param. Specifies the
+ * structure passed in via param_val.
+ * @param_val: A structure specified by the param_type struct member.
+ * @param_val_size: Size of the parameter buffer pointed to by param_val.
+ * If param_val is not a pointer, the driver can ignore this.
+ * @pad: MBZ.
+ *
+ * Note: if param_val is a pointer to a buffer, the maximum size of the
+ * buffer is 4 KiB (PAGE_SIZE).
+ */
+struct amdxdna_drm_config_hwctx {
+ __u32 handle;
+ __u32 param_type;
+ __u64 param_val;
+ __u32 param_val_size;
+ __u32 pad;
+};
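A sketch of configuring a single CU through DRM_IOCTL_AMDXDNA_CONFIG_HWCTX (defined at the bottom of this header); the flexible array in the CU config makes the allocation size explicit:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int config_one_cu(int fd, __u32 hwctx, __u32 cu_bo, __u8 cu_func)
{
	size_t sz = sizeof(struct amdxdna_hwctx_param_config_cu) +
		    sizeof(struct amdxdna_cu_config);
	struct amdxdna_hwctx_param_config_cu *cfg = calloc(1, sz);
	struct amdxdna_drm_config_hwctx args = {0};
	int ret;

	if (!cfg)
		return -1;
	cfg->num_cus = 1;
	cfg->cu_configs[0].cu_bo = cu_bo;
	cfg->cu_configs[0].cu_func = cu_func;

	args.handle = hwctx;
	args.param_type = DRM_AMDXDNA_HWCTX_CONFIG_CU;
	args.param_val = (__u64)(uintptr_t)cfg;
	args.param_val_size = sz;

	ret = ioctl(fd, DRM_IOCTL_AMDXDNA_CONFIG_HWCTX, &args);
	free(cfg);
	return ret;
}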
+
+enum amdxdna_bo_type {
+ AMDXDNA_BO_INVALID = 0,
+ AMDXDNA_BO_SHMEM,
+ AMDXDNA_BO_DEV_HEAP,
+ AMDXDNA_BO_DEV,
+ AMDXDNA_BO_CMD,
+};
+
+/**
+ * struct amdxdna_drm_va_entry
+ * @vaddr: Virtual address.
+ * @len: Size of entry.
+ */
+struct amdxdna_drm_va_entry {
+ __u64 vaddr;
+ __u64 len;
+};
+
+/**
+ * struct amdxdna_drm_va_tbl
+ * @dmabuf_fd: The fd of dmabuf.
+ * @num_entries: Number of va entries.
+ * @va_entries: Array of va entries.
+ *
+ * The input can be either a dmabuf fd or a virtual address entry table.
+ * When dmabuf_fd is used, num_entries must be zero.
+ */
+struct amdxdna_drm_va_tbl {
+ __s32 dmabuf_fd;
+ __u32 num_entries;
+ struct amdxdna_drm_va_entry va_entries[];
+};
+
+/**
+ * struct amdxdna_drm_create_bo - Create a buffer object.
+ * @flags: Buffer flags. MBZ.
+ * @vaddr: User VA of the buffer, if applicable; MBZ otherwise.
+ * @size: Size in bytes.
+ * @type: Buffer type.
+ * @handle: Returned DRM buffer object handle.
+ */
+struct amdxdna_drm_create_bo {
+ __u64 flags;
+ __u64 vaddr;
+ __u64 size;
+ __u32 type;
+ __u32 handle;
+};
+
+/**
+ * struct amdxdna_drm_get_bo_info - Get buffer object information.
+ * @ext: MBZ.
+ * @ext_flags: MBZ.
+ * @handle: DRM buffer object handle.
+ * @pad: MBZ.
+ * @map_offset: Returned DRM fake offset for mmap().
+ * @vaddr: Returned user VA of buffer. 0 in case user needs mmap().
+ * @xdna_addr: Returned XDNA device virtual address.
+ */
+struct amdxdna_drm_get_bo_info {
+ __u64 ext;
+ __u64 ext_flags;
+ __u32 handle;
+ __u32 pad;
+ __u64 map_offset;
+ __u64 vaddr;
+ __u64 xdna_addr;
+};
+
+/**
+ * struct amdxdna_drm_sync_bo - Sync buffer object.
+ * @handle: Buffer object handle.
+ * @direction: Direction of sync, can be from device or to device.
+ * @offset: Offset in the buffer to sync.
+ * @size: Size in bytes.
+ */
+struct amdxdna_drm_sync_bo {
+ __u32 handle;
+#define SYNC_DIRECT_TO_DEVICE 0U
+#define SYNC_DIRECT_FROM_DEVICE 1U
+ __u32 direction;
+ __u64 offset;
+ __u64 size;
+};
+
+enum amdxdna_cmd_type {
+ AMDXDNA_CMD_SUBMIT_EXEC_BUF = 0,
+ AMDXDNA_CMD_SUBMIT_DEPENDENCY,
+ AMDXDNA_CMD_SUBMIT_SIGNAL,
+};
+
+/**
+ * struct amdxdna_drm_exec_cmd - Execute command.
+ * @ext: MBZ.
+ * @ext_flags: MBZ.
+ * @hwctx: Hardware context handle.
+ * @type: One of command type in enum amdxdna_cmd_type.
+ * @cmd_handles: Array of command handles or the command handle itself
+ * in case of just one.
+ * @args: Array of arguments for all command handles.
+ * @cmd_count: Number of command handles in the cmd_handles array.
+ * @arg_count: Number of arguments in the args array.
+ * @seq: Returned sequence number for this command.
+ */
+struct amdxdna_drm_exec_cmd {
+ __u64 ext;
+ __u64 ext_flags;
+ __u32 hwctx;
+ __u32 type;
+ __u64 cmd_handles;
+ __u64 args;
+ __u32 cmd_count;
+ __u32 arg_count;
+ __u64 seq;
+};
+
+/**
+ * struct amdxdna_drm_query_aie_status - Query the status of the AIE hardware
+ * @buffer: The user space buffer that will return the AIE status.
+ * @buffer_size: The size of the user space buffer.
+ * @cols_filled: A bitmap of AIE columns whose data has been returned in the buffer.
+ */
+struct amdxdna_drm_query_aie_status {
+ __u64 buffer; /* out */
+ __u32 buffer_size; /* in */
+ __u32 cols_filled; /* out */
+};
+
+/**
+ * struct amdxdna_drm_query_aie_version - Query the version of the AIE hardware
+ * @major: The major version number.
+ * @minor: The minor version number.
+ */
+struct amdxdna_drm_query_aie_version {
+ __u32 major; /* out */
+ __u32 minor; /* out */
+};
+
+/**
+ * struct amdxdna_drm_query_aie_tile_metadata - Query the metadata of AIE tile (core, mem, shim)
+ * @row_count: The number of rows.
+ * @row_start: The starting row number.
+ * @dma_channel_count: The number of DMA channels.
+ * @lock_count: The number of locks.
+ * @event_reg_count: The number of events.
+ * @pad: Structure padding.
+ */
+struct amdxdna_drm_query_aie_tile_metadata {
+ __u16 row_count;
+ __u16 row_start;
+ __u16 dma_channel_count;
+ __u16 lock_count;
+ __u16 event_reg_count;
+ __u16 pad[3];
+};
+
+/**
+ * struct amdxdna_drm_query_aie_metadata - Query the metadata of the AIE hardware
+ * @col_size: The size of a column in bytes.
+ * @cols: The total number of columns.
+ * @rows: The total number of rows.
+ * @version: The version of the AIE hardware.
+ * @core: The metadata for all core tiles.
+ * @mem: The metadata for all mem tiles.
+ * @shim: The metadata for all shim tiles.
+ */
+struct amdxdna_drm_query_aie_metadata {
+ __u32 col_size;
+ __u16 cols;
+ __u16 rows;
+ struct amdxdna_drm_query_aie_version version;
+ struct amdxdna_drm_query_aie_tile_metadata core;
+ struct amdxdna_drm_query_aie_tile_metadata mem;
+ struct amdxdna_drm_query_aie_tile_metadata shim;
+};
+
+/**
+ * struct amdxdna_drm_query_clock - Metadata for a clock
+ * @name: The clock name.
+ * @freq_mhz: The clock frequency.
+ * @pad: Structure padding.
+ */
+struct amdxdna_drm_query_clock {
+ __u8 name[16];
+ __u32 freq_mhz;
+ __u32 pad;
+};
+
+/**
+ * struct amdxdna_drm_query_clock_metadata - Query metadata for clocks
+ * @mp_npu_clock: The metadata for MP-NPU clock.
+ * @h_clock: The metadata for H clock.
+ */
+struct amdxdna_drm_query_clock_metadata {
+ struct amdxdna_drm_query_clock mp_npu_clock;
+ struct amdxdna_drm_query_clock h_clock;
+};
+
+enum amdxdna_sensor_type {
+ AMDXDNA_SENSOR_TYPE_POWER
+};
+
+/**
+ * struct amdxdna_drm_query_sensor - The data for single sensor.
+ * @label: The name for a sensor.
+ * @input: The current value of the sensor.
+ * @max: The maximum value possible for the sensor.
+ * @average: The average value of the sensor.
+ * @highest: The highest recorded sensor value for this driver load for the sensor.
+ * @status: The sensor status.
+ * @units: The sensor units.
+ * @unitm: Translates value member variables into the correct unit via (pow(10, unitm) * value).
+ * @type: The sensor type from enum amdxdna_sensor_type.
+ * @pad: Structure padding.
+ */
+struct amdxdna_drm_query_sensor {
+ __u8 label[64];
+ __u32 input;
+ __u32 max;
+ __u32 average;
+ __u32 highest;
+ __u8 status[64];
+ __u8 units[16];
+ __s8 unitm;
+ __u8 type;
+ __u8 pad[6];
+};
+
+/**
+ * struct amdxdna_drm_query_hwctx - The data for single context.
+ * @context_id: The ID for this context.
+ * @start_col: The starting column for the partition assigned to this context.
+ * @num_col: The number of columns in the partition assigned to this context.
+ * @pad: Structure padding.
+ * @pid: The Process ID of the process that created this context.
+ * @command_submissions: The number of commands submitted to this context.
+ * @command_completions: The number of commands completed by this context.
+ * @migrations: The number of times this context has been moved to a different partition.
+ * @preemptions: The number of times this context has been preempted by another context in the
+ * same partition.
+ * @errors: The errors for this context.
+ */
+struct amdxdna_drm_query_hwctx {
+ __u32 context_id;
+ __u32 start_col;
+ __u32 num_col;
+ __u32 pad;
+ __s64 pid;
+ __u64 command_submissions;
+ __u64 command_completions;
+ __u64 migrations;
+ __u64 preemptions;
+ __u64 errors;
+};
+
+enum amdxdna_power_mode_type {
+ POWER_MODE_DEFAULT, /* Fallback to calculated DPM */
+ POWER_MODE_LOW, /* Set frequency to lowest DPM */
+ POWER_MODE_MEDIUM, /* Set frequency to medium DPM */
+ POWER_MODE_HIGH, /* Set frequency to highest DPM */
+ POWER_MODE_TURBO, /* Maximum power */
+};
+
+/**
+ * struct amdxdna_drm_get_power_mode - Get the configured power mode
+ * @power_mode: The mode type from enum amdxdna_power_mode_type
+ * @pad: Structure padding.
+ */
+struct amdxdna_drm_get_power_mode {
+ __u8 power_mode;
+ __u8 pad[7];
+};
+
+/**
+ * struct amdxdna_drm_query_firmware_version - Query the firmware version
+ * @major: The major version number
+ * @minor: The minor version number
+ * @patch: The patch level version number
+ * @build: The build ID
+ */
+struct amdxdna_drm_query_firmware_version {
+ __u32 major; /* out */
+ __u32 minor; /* out */
+ __u32 patch; /* out */
+ __u32 build; /* out */
+};
+
+enum amdxdna_drm_get_param {
+ DRM_AMDXDNA_QUERY_AIE_STATUS,
+ DRM_AMDXDNA_QUERY_AIE_METADATA,
+ DRM_AMDXDNA_QUERY_AIE_VERSION,
+ DRM_AMDXDNA_QUERY_CLOCK_METADATA,
+ DRM_AMDXDNA_QUERY_SENSORS,
+ DRM_AMDXDNA_QUERY_HW_CONTEXTS,
+ DRM_AMDXDNA_QUERY_FIRMWARE_VERSION = 8,
+ DRM_AMDXDNA_GET_POWER_MODE,
+ DRM_AMDXDNA_QUERY_TELEMETRY,
+ DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE,
+ DRM_AMDXDNA_QUERY_RESOURCE_INFO,
+ DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE,
+};
+
+/**
+ * struct amdxdna_drm_get_resource_info - Get resource information
+ */
+struct amdxdna_drm_get_resource_info {
+ /** @npu_clk_max: max H-Clocks */
+ __u64 npu_clk_max;
+ /** @npu_tops_max: max TOPs */
+ __u64 npu_tops_max;
+ /** @npu_task_max: max number of tasks */
+ __u64 npu_task_max;
+ /** @npu_tops_curr: current TOPs */
+ __u64 npu_tops_curr;
+ /** @npu_task_curr: current number of tasks */
+ __u64 npu_task_curr;
+};
+
+/**
+ * struct amdxdna_drm_attribute_state - State of an attribute
+ */
+struct amdxdna_drm_attribute_state {
+ /** @state: enabled or disabled */
+ __u8 state;
+ /** @pad: MBZ */
+ __u8 pad[7];
+};
+
+/**
+ * struct amdxdna_drm_query_telemetry_header - Telemetry data header
+ */
+struct amdxdna_drm_query_telemetry_header {
+ /** @major: Firmware telemetry interface major version number */
+ __u32 major;
+ /** @minor: Firmware telemetry interface minor version number */
+ __u32 minor;
+ /** @type: Telemetry query type */
+ __u32 type;
+ /** @map_num_elements: Total number of elements in the map table */
+ __u32 map_num_elements;
+ /** @map: Element map */
+ __u32 map[];
+};
+
+/**
+ * struct amdxdna_drm_get_info - Get some information from the AIE hardware.
+ * @param: Value in enum amdxdna_drm_get_param. Specifies the structure passed in the buffer.
+ * @buffer_size: Size of the input buffer; updated by the kernel to the size needed/written.
+ * @buffer: A structure specified by the param struct member.
+ */
+struct amdxdna_drm_get_info {
+ __u32 param; /* in */
+ __u32 buffer_size; /* in/out */
+ __u64 buffer; /* in/out */
+};
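For example (sketch), querying the firmware version goes through this single entry point:

#include <stdint.h>
#include <sys/ioctl.h>

static int query_fw_version(int fd,
			    struct amdxdna_drm_query_firmware_version *ver)
{
	struct amdxdna_drm_get_info args = {
		.param = DRM_AMDXDNA_QUERY_FIRMWARE_VERSION,
		.buffer_size = sizeof(*ver),
		.buffer = (__u64)(uintptr_t)ver,
	};

	return ioctl(fd, DRM_IOCTL_AMDXDNA_GET_INFO, &args);
}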
+
+#define AMDXDNA_HWCTX_STATE_IDLE 0
+#define AMDXDNA_HWCTX_STATE_ACTIVE 1
+
+/**
+ * struct amdxdna_drm_hwctx_entry - The hardware context array entry
+ */
+struct amdxdna_drm_hwctx_entry {
+ /** @context_id: Context ID. */
+ __u32 context_id;
+ /** @start_col: Start AIE array column assigned to context. */
+ __u32 start_col;
+ /** @num_col: Number of AIE array columns assigned to context. */
+ __u32 num_col;
+ /** @hwctx_id: The real hardware context id. */
+ __u32 hwctx_id;
+ /** @pid: ID of process which created this context. */
+ __s64 pid;
+ /** @command_submissions: Number of commands submitted. */
+ __u64 command_submissions;
+ /** @command_completions: Number of commands completed. */
+ __u64 command_completions;
+ /** @migrations: Number of times been migrated. */
+ __u64 migrations;
+ /** @preemptions: Number of times been preempted. */
+ __u64 preemptions;
+ /** @errors: Number of errors happened. */
+ __u64 errors;
+ /** @priority: Context priority. */
+ __u64 priority;
+ /** @heap_usage: Usage of device heap buffer. */
+ __u64 heap_usage;
+ /** @suspensions: Number of times been suspended. */
+ __u64 suspensions;
+ /**
+ * @state: Context state.
+ * %AMDXDNA_HWCTX_STATE_IDLE
+ * %AMDXDNA_HWCTX_STATE_ACTIVE
+ */
+ __u32 state;
+ /** @pasid: PASID been bound. */
+ __u32 pasid;
+ /** @gops: Giga operations per second. */
+ __u32 gops;
+ /** @fps: Frames per second. */
+ __u32 fps;
+ /** @dma_bandwidth: DMA bandwidth. */
+ __u32 dma_bandwidth;
+ /** @latency: Frame response latency. */
+ __u32 latency;
+ /** @frame_exec_time: Frame execution time. */
+ __u32 frame_exec_time;
+ /** @txn_op_idx: Index of last control code executed. */
+ __u32 txn_op_idx;
+ /** @ctx_pc: Program counter. */
+ __u32 ctx_pc;
+ /** @fatal_error_type: Fatal error type if context crashes. */
+ __u32 fatal_error_type;
+ /** @fatal_error_exception_type: Firmware exception type. */
+ __u32 fatal_error_exception_type;
+ /** @fatal_error_exception_pc: Firmware exception program counter. */
+ __u32 fatal_error_exception_pc;
+ /** @fatal_error_app_module: Exception module name. */
+ __u32 fatal_error_app_module;
+ /** @pad: Structure pad. */
+ __u32 pad;
+};
+
+/**
+ * struct amdxdna_async_error - XDNA async error structure
+ */
+struct amdxdna_async_error {
+ /** @err_code: Error code. */
+ __u64 err_code;
+ /** @ts_us: Timestamp. */
+	/** @ts_us: Timestamp in microseconds. */
+ /** @ex_err_code: Extra error code */
+ __u64 ex_err_code;
+};
+
+#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
+#define DRM_AMDXDNA_HW_LAST_ASYNC_ERR 2
+
+/**
+ * struct amdxdna_drm_get_array - Get information array.
+ */
+struct amdxdna_drm_get_array {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_AMDXDNA_HW_CONTEXT_ALL:
+ * Returns all created hardware contexts.
+ */
+ __u32 param;
+ /**
+ * @element_size:
+ *
+ * Specifies maximum element size and returns the actual element size.
+ */
+ __u32 element_size;
+ /**
+ * @num_element:
+ *
+ * Specifies maximum number of elements and returns the actual number
+ * of elements.
+ */
+ __u32 num_element; /* in/out */
+ /** @pad: MBZ */
+ __u32 pad;
+ /**
+ * @buffer:
+ *
+ * Specifies the match conditions and returns the matched information
+ * array.
+ */
+ __u64 buffer;
+};
+
+enum amdxdna_drm_set_param {
+ DRM_AMDXDNA_SET_POWER_MODE,
+ DRM_AMDXDNA_WRITE_AIE_MEM,
+ DRM_AMDXDNA_WRITE_AIE_REG,
+ DRM_AMDXDNA_SET_FORCE_PREEMPT,
+ DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT,
+};
+
+/**
+ * struct amdxdna_drm_set_state - Set the state of the AIE hardware.
+ * @param: Value in enum amdxdna_drm_set_param.
+ * @buffer_size: Size of the input param.
+ * @buffer: Pointer to the input param.
+ */
+struct amdxdna_drm_set_state {
+ __u32 param; /* in */
+ __u32 buffer_size; /* in */
+ __u64 buffer; /* in */
+};
+
+/**
+ * struct amdxdna_drm_set_power_mode - Set the power mode of the AIE hardware
+ * @power_mode: The sensor type from enum amdxdna_power_mode_type
+ * @pad: MBZ.
+ */
+struct amdxdna_drm_set_power_mode {
+ __u8 power_mode;
+ __u8 pad[7];
+};
+
+#define DRM_IOCTL_AMDXDNA_CREATE_HWCTX \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CREATE_HWCTX, \
+ struct amdxdna_drm_create_hwctx)
+
+#define DRM_IOCTL_AMDXDNA_DESTROY_HWCTX \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_DESTROY_HWCTX, \
+ struct amdxdna_drm_destroy_hwctx)
+
+#define DRM_IOCTL_AMDXDNA_CONFIG_HWCTX \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CONFIG_HWCTX, \
+ struct amdxdna_drm_config_hwctx)
+
+#define DRM_IOCTL_AMDXDNA_CREATE_BO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_CREATE_BO, \
+ struct amdxdna_drm_create_bo)
+
+#define DRM_IOCTL_AMDXDNA_GET_BO_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_BO_INFO, \
+ struct amdxdna_drm_get_bo_info)
+
+#define DRM_IOCTL_AMDXDNA_SYNC_BO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SYNC_BO, \
+ struct amdxdna_drm_sync_bo)
+
+#define DRM_IOCTL_AMDXDNA_EXEC_CMD \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_EXEC_CMD, \
+ struct amdxdna_drm_exec_cmd)
+
+#define DRM_IOCTL_AMDXDNA_GET_INFO \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_INFO, \
+ struct amdxdna_drm_get_info)
+
+#define DRM_IOCTL_AMDXDNA_SET_STATE \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SET_STATE, \
+ struct amdxdna_drm_set_state)
+
+#define DRM_IOCTL_AMDXDNA_GET_ARRAY \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_ARRAY, \
+ struct amdxdna_drm_get_array)
+
+#if defined(__cplusplus)
+} /* extern c end */
+#endif
+
+#endif /* _UAPI_AMDXDNA_ACCEL_H_ */
diff --git a/include/uapi/drm/asahi_drm.h b/include/uapi/drm/asahi_drm.h
new file mode 100644
index 000000000000..de67f1c603af
--- /dev/null
+++ b/include/uapi/drm/asahi_drm.h
@@ -0,0 +1,1194 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) The Asahi Linux Contributors
+ * Copyright (C) 2018-2023 Collabora Ltd.
+ * Copyright (C) 2014-2018 Broadcom
+ */
+#ifndef _ASAHI_DRM_H_
+#define _ASAHI_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: Introduction to the Asahi UAPI
+ *
+ * This documentation describes the Asahi IOCTLs.
+ *
+ * Just a few generic rules about the data passed to the Asahi IOCTLs (cribbed
+ * from Panthor):
+ *
+ * - Structures must be aligned on a 64-bit/8-byte boundary. If the object is not
+ * naturally aligned, a padding field must be added.
+ * - Fields must be explicitly aligned to their natural type alignment with
+ * pad[0..N] fields.
+ * - All padding fields will be checked by the driver to make sure they are
+ * zeroed.
+ * - Flags can be added, but not removed/replaced.
+ * - New fields can be added to the main structures (the structures
+ * directly passed to the ioctl). Those fields can be added at the end of
+ * the structure, or replace existing padding fields. Any new field being
+ * added must preserve the behavior that existed before those fields were
+ * added when a value of zero is passed.
+ * - New fields can be added to indirect objects (objects pointed by the
+ * main structure), iff those objects are passed a size to reflect the
+ * size known by the userspace driver (see
+ * drm_asahi_cmd_header::size).
+ * - If the kernel driver is too old to know some fields, those will be
+ * ignored if zero, and otherwise rejected (and so will be zero on output).
+ * - If userspace is too old to know some fields, those will be zeroed
+ * (input) before the structure is parsed by the kernel driver.
+ * - Each new flag/field addition must come with a driver version update so
+ * the userspace driver doesn't have to guess which flags are supported.
+ * - Structures should not contain unions, as this would defeat the
+ * extensibility of such structures.
+ * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
+ * at the end of the drm_asahi_ioctl_id enum.
+ */
+
+/**
+ * enum drm_asahi_ioctl_id - IOCTL IDs
+ *
+ * Place new ioctls at the end, don't re-order, don't replace or remove entries.
+ *
+ * These IDs are not meant to be used directly. Use the DRM_IOCTL_ASAHI_xxx
+ * definitions instead.
+ */
+enum drm_asahi_ioctl_id {
+ /** @DRM_ASAHI_GET_PARAMS: Query device properties. */
+ DRM_ASAHI_GET_PARAMS = 0,
+
+ /** @DRM_ASAHI_GET_TIME: Query device time. */
+ DRM_ASAHI_GET_TIME,
+
+ /** @DRM_ASAHI_VM_CREATE: Create a GPU VM address space. */
+ DRM_ASAHI_VM_CREATE,
+
+ /** @DRM_ASAHI_VM_DESTROY: Destroy a VM. */
+ DRM_ASAHI_VM_DESTROY,
+
+ /** @DRM_ASAHI_VM_BIND: Bind/unbind memory to a VM. */
+ DRM_ASAHI_VM_BIND,
+
+ /** @DRM_ASAHI_GEM_CREATE: Create a buffer object. */
+ DRM_ASAHI_GEM_CREATE,
+
+ /**
+ * @DRM_ASAHI_GEM_MMAP_OFFSET: Get offset to pass to mmap() to map a
+ * given GEM handle.
+ */
+ DRM_ASAHI_GEM_MMAP_OFFSET,
+
+ /** @DRM_ASAHI_GEM_BIND_OBJECT: Bind memory as a special object */
+ DRM_ASAHI_GEM_BIND_OBJECT,
+
+ /** @DRM_ASAHI_QUEUE_CREATE: Create a scheduling queue. */
+ DRM_ASAHI_QUEUE_CREATE,
+
+ /** @DRM_ASAHI_QUEUE_DESTROY: Destroy a scheduling queue. */
+ DRM_ASAHI_QUEUE_DESTROY,
+
+ /** @DRM_ASAHI_SUBMIT: Submit commands to a queue. */
+ DRM_ASAHI_SUBMIT,
+};
+
+#define DRM_ASAHI_MAX_CLUSTERS 64
+
+/**
+ * struct drm_asahi_params_global - Global parameters.
+ *
+ * This struct may be queried by drm_asahi_get_params.
+ */
+struct drm_asahi_params_global {
+ /** @features: Feature bits from drm_asahi_feature */
+ __u64 features;
+
+ /** @gpu_generation: GPU generation, e.g. 13 for G13G */
+ __u32 gpu_generation;
+
+ /** @gpu_variant: GPU variant as a character, e.g. 'C' for G13C */
+ __u32 gpu_variant;
+
+ /**
+ * @gpu_revision: GPU revision in BCD, e.g. 0x00 for 'A0' or
+ * 0x21 for 'C1'
+ */
+ __u32 gpu_revision;
+
+ /** @chip_id: Chip ID in BCD, e.g. 0x8103 for T8103 */
+ __u32 chip_id;
+
+ /** @num_dies: Number of dies in the SoC */
+ __u32 num_dies;
+
+ /** @num_clusters_total: Number of GPU clusters (across all dies) */
+ __u32 num_clusters_total;
+
+ /**
+ * @num_cores_per_cluster: Number of logical cores per cluster
+ * (including inactive/nonexistent)
+ */
+ __u32 num_cores_per_cluster;
+
+ /** @max_frequency_khz: Maximum GPU core clock frequency */
+ __u32 max_frequency_khz;
+
+ /** @core_masks: Bitmask of present/enabled cores per cluster */
+ __u64 core_masks[DRM_ASAHI_MAX_CLUSTERS];
+
+ /**
+ * @vm_start: VM range start VMA. Together with @vm_end, this defines
+ * the window of valid GPU VAs. Userspace is expected to subdivide VAs
+ * out of this window.
+ *
+ * This window contains all virtual addresses that userspace needs to
+ * know about. There may be kernel-internal GPU VAs outside this range,
+ * but that detail is not relevant here.
+ */
+ __u64 vm_start;
+
+ /** @vm_end: VM range end VMA */
+ __u64 vm_end;
+
+ /**
+ * @vm_kernel_min_size: Minimum kernel VMA window size.
+ *
+ * When creating a VM, userspace is required to carve out a section of
+ * virtual addresses (within the range given by @vm_start and
+ * @vm_end). The kernel will allocate various internal structures
+ * within the specified VA range.
+ *
+ * Allowing userspace to choose the VA range for the kernel, rather than
+ * the kernel reserving VAs and requiring userspace to cope, can assist
+ * in implementing SVM.
+ */
+ __u64 vm_kernel_min_size;
+
+ /**
+ * @max_commands_per_submission: Maximum number of supported commands
+ * per submission. This mirrors firmware limits. Userspace must split up
+ * larger command buffers, which may require inserting additional
+ * synchronization.
+ */
+ __u32 max_commands_per_submission;
+
+ /**
+ * @max_attachments: Maximum number of drm_asahi_attachment's per
+ * command
+ */
+ __u32 max_attachments;
+
+ /**
+ * @command_timestamp_frequency_hz: Timebase frequency for timestamps
+ * written during command execution, specified via drm_asahi_timestamp
+ * structures. As this rate is controlled by the firmware, it is a
+ * queryable parameter.
+ *
+ * Userspace must divide by this frequency to convert timestamps to
+ * seconds, rather than hardcoding a particular firmware's rate.
+ */
+ __u64 command_timestamp_frequency_hz;
+};
+
+/**
+ * enum drm_asahi_feature - Feature bits
+ *
+ * This covers only features that userspace cannot infer from the architecture
+ * version. Most features don't need to be here.
+ */
+enum drm_asahi_feature {
+ /**
+ * @DRM_ASAHI_FEATURE_SOFT_FAULTS: GPU has "soft fault" enabled. Shader
+ * loads of unmapped memory will return zero. Shader stores to unmapped
+ * memory will be silently discarded. Note that only shader load/store
+ * is affected. Other hardware units are not affected, notably including
+ * texture sampling.
+ *
+ * Soft fault is set when initializing the GPU and cannot be runtime
+ * toggled. Therefore, it is exposed as a feature bit and not a
+ * userspace-settable flag on the VM. When soft fault is enabled,
+ * userspace can speculate memory accesses more aggressively.
+ */
+ DRM_ASAHI_FEATURE_SOFT_FAULTS = (1UL) << 0,
+};
+
+/**
+ * struct drm_asahi_get_params - Arguments passed to DRM_IOCTL_ASAHI_GET_PARAMS
+ */
+struct drm_asahi_get_params {
+ /** @param_group: Parameter group to fetch (MBZ) */
+ __u32 param_group;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @pointer: User pointer to write parameter struct */
+ __u64 pointer;
+
+ /**
+ * @size: Size of the user buffer. In case of older userspace, this may
+ * be less than sizeof(struct drm_asahi_params_global). The kernel will
+ * not write past the length specified here, allowing extensibility.
+ */
+ __u64 size;
+};
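A sketch of the corresponding call (DRM_IOCTL_ASAHI_GET_PARAMS is defined further down in this header); passing sizeof() of the userspace struct is what makes the @size-based extensibility work:

#include <stdint.h>
#include <sys/ioctl.h>

static int asahi_get_params(int fd, struct drm_asahi_params_global *p)
{
	struct drm_asahi_get_params args = {
		.param_group = 0,
		.pointer = (__u64)(uintptr_t)p,
		.size = sizeof(*p),
	};

	return ioctl(fd, DRM_IOCTL_ASAHI_GET_PARAMS, &args);
}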
+
+/**
+ * struct drm_asahi_vm_create - Arguments passed to DRM_IOCTL_ASAHI_VM_CREATE
+ */
+struct drm_asahi_vm_create {
+ /**
+ * @kernel_start: Start of the kernel-reserved address range. See
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Both @kernel_start and @kernel_end must be within the range of
+ * valid VAs given by drm_asahi_params_global::vm_start and
+ * drm_asahi_params_global::vm_end. The size of the kernel range
+ * (@kernel_end - @kernel_start) must be at least
+ * drm_asahi_params_global::vm_kernel_min_size.
+ *
+ * Userspace must not bind any memory on this VM into this reserved
+ * range, it is for kernel use only.
+ */
+ __u64 kernel_start;
+
+ /**
+ * @kernel_end: End of the kernel-reserved address range. See
+ * @kernel_start.
+ */
+ __u64 kernel_end;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_vm_destroy - Arguments passed to DRM_IOCTL_ASAHI_VM_DESTROY
+ */
+struct drm_asahi_vm_destroy {
+ /** @vm_id: VM ID to be destroyed */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_gem_flags - Flags for GEM creation
+ */
+enum drm_asahi_gem_flags {
+ /**
+ * @DRM_ASAHI_GEM_WRITEBACK: BO should be CPU-mapped as writeback.
+ *
+ * Map as writeback instead of write-combine. This optimizes for CPU
+ * reads.
+ */
+ DRM_ASAHI_GEM_WRITEBACK = (1L << 0),
+
+ /**
+ * @DRM_ASAHI_GEM_VM_PRIVATE: BO is private to this GPU VM (no exports).
+ */
+ DRM_ASAHI_GEM_VM_PRIVATE = (1L << 1),
+};
+
+/**
+ * struct drm_asahi_gem_create - Arguments passed to DRM_IOCTL_ASAHI_GEM_CREATE
+ */
+struct drm_asahi_gem_create {
+ /** @size: Size of the BO */
+ __u64 size;
+
+ /** @flags: Combination of drm_asahi_gem_flags flags. */
+ __u32 flags;
+
+ /**
+ * @vm_id: VM ID to assign to the BO, if DRM_ASAHI_GEM_VM_PRIVATE is set
+ */
+ __u32 vm_id;
+
+ /** @handle: Returned GEM handle for the BO */
+ __u32 handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_asahi_gem_mmap_offset - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_MMAP_OFFSET
+ */
+struct drm_asahi_gem_mmap_offset {
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+};
+
+/**
+ * enum drm_asahi_bind_flags - Flags for GEM binding
+ */
+enum drm_asahi_bind_flags {
+ /**
+ * @DRM_ASAHI_BIND_UNBIND: Instead of binding a GEM object to the range,
+ * simply unbind the GPU VMA range.
+ */
+ DRM_ASAHI_BIND_UNBIND = (1L << 0),
+
+ /** @DRM_ASAHI_BIND_READ: Map BO with GPU read permission */
+ DRM_ASAHI_BIND_READ = (1L << 1),
+
+ /** @DRM_ASAHI_BIND_WRITE: Map BO with GPU write permission */
+ DRM_ASAHI_BIND_WRITE = (1L << 2),
+
+ /**
+ * @DRM_ASAHI_BIND_SINGLE_PAGE: Map a single page of the BO repeatedly
+ * across the VA range.
+ *
+ * This is useful to fill a VA range with scratch pages or zero pages.
+	 * It is intended as a mechanism to accelerate sparse resource binding.
+ */
+ DRM_ASAHI_BIND_SINGLE_PAGE = (1L << 3),
+};
+
+/**
+ * struct drm_asahi_gem_bind_op - Description of a single GEM bind operation.
+ */
+struct drm_asahi_gem_bind_op {
+ /** @flags: Combination of drm_asahi_bind_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind (except for UNBIND) */
+ __u32 handle;
+
+ /**
+ * @offset: Offset into the object (except for UNBIND).
+ *
+ * For a regular bind, this is the beginning of the region of the GEM
+ * object to bind.
+ *
+ * For a single-page bind, this is the offset to the single page that
+ * will be repeatedly bound.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 offset;
+
+ /**
+ * @range: Number of bytes to bind/unbind to @addr.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 range;
+
+ /**
+ * @addr: Address to bind to.
+ *
+ * Must be page-size aligned.
+ */
+ __u64 addr;
+};
+
+/**
+ * struct drm_asahi_vm_bind - Arguments passed to
+ * DRM_IOCTL_ASAHI_VM_BIND
+ */
+struct drm_asahi_vm_bind {
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /** @num_binds: number of binds in this IOCTL. */
+ __u32 num_binds;
+
+ /**
+ * @stride: Stride in bytes between consecutive binds. This allows
+ * extensibility of drm_asahi_gem_bind_op.
+ */
+ __u32 stride;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /**
+ * @userptr: User pointer to an array of @num_binds structures of type
+ * @drm_asahi_gem_bind_op and size @stride bytes.
+ */
+ __u64 userptr;
+};
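A sketch binding a single BO read/write at a fixed GPU VA (DRM_IOCTL_ASAHI_VM_BIND is defined later in this header; all offsets and sizes are assumed page aligned):

#include <stdint.h>
#include <sys/ioctl.h>

static int bind_one(int fd, __u32 vm_id, __u32 handle, __u64 va, __u64 size)
{
	struct drm_asahi_gem_bind_op op = {
		.flags = DRM_ASAHI_BIND_READ | DRM_ASAHI_BIND_WRITE,
		.handle = handle,
		.offset = 0,
		.range = size,
		.addr = va,
	};
	struct drm_asahi_vm_bind args = {
		.vm_id = vm_id,
		.num_binds = 1,
		.stride = sizeof(op),
		.userptr = (__u64)(uintptr_t)&op,
	};

	return ioctl(fd, DRM_IOCTL_ASAHI_VM_BIND, &args);
}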
+
+/**
+ * enum drm_asahi_bind_object_op - Special object bind operation
+ */
+enum drm_asahi_bind_object_op {
+ /** @DRM_ASAHI_BIND_OBJECT_OP_BIND: Bind a BO as a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_BIND = 0,
+
+ /** @DRM_ASAHI_BIND_OBJECT_OP_UNBIND: Unbind a special GPU object */
+ DRM_ASAHI_BIND_OBJECT_OP_UNBIND = 1,
+};
+
+/**
+ * enum drm_asahi_bind_object_flags - Special object bind flags
+ */
+enum drm_asahi_bind_object_flags {
+ /**
+ * @DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS: Map a BO as a timestamp
+ * buffer.
+ */
+ DRM_ASAHI_BIND_OBJECT_USAGE_TIMESTAMPS = (1L << 0),
+};
+
+/**
+ * struct drm_asahi_gem_bind_object - Arguments passed to
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT
+ */
+struct drm_asahi_gem_bind_object {
+ /** @op: Bind operation (enum drm_asahi_bind_object_op) */
+ __u32 op;
+
+ /** @flags: Combination of drm_asahi_bind_object_flags flags. */
+ __u32 flags;
+
+ /** @handle: GEM object to bind/unbind (BIND) */
+ __u32 handle;
+
+ /** @vm_id: The ID of the VM to operate on (MBZ currently) */
+ __u32 vm_id;
+
+ /** @offset: Offset into the object (BIND only) */
+ __u64 offset;
+
+ /** @range: Number of bytes to bind/unbind (BIND only) */
+ __u64 range;
+
+ /** @object_handle: Object handle (out for BIND, in for UNBIND) */
+ __u32 object_handle;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_cmd_type - Command type
+ */
+enum drm_asahi_cmd_type {
+ /**
+ * @DRM_ASAHI_CMD_RENDER: Render command, executing on the render
+ * subqueue. Combined vertex and fragment operation.
+ *
+ * Followed by a @drm_asahi_cmd_render payload.
+ */
+ DRM_ASAHI_CMD_RENDER = 0,
+
+ /**
+ * @DRM_ASAHI_CMD_COMPUTE: Compute command on the compute subqueue.
+ *
+ * Followed by a @drm_asahi_cmd_compute payload.
+ */
+ DRM_ASAHI_CMD_COMPUTE = 1,
+
+ /**
+ * @DRM_ASAHI_SET_VERTEX_ATTACHMENTS: Software command to set
+ * attachments for subsequent vertex shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_VERTEX_ATTACHMENTS = 2,
+
+ /**
+ * @DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS: Software command to set
+ * attachments for subsequent fragment shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_FRAGMENT_ATTACHMENTS = 3,
+
+ /**
+ * @DRM_ASAHI_SET_COMPUTE_ATTACHMENTS: Software command to set
+ * attachments for subsequent compute shaders in the same submit.
+ *
+ * Followed by (possibly multiple) @drm_asahi_attachment payloads.
+ */
+ DRM_ASAHI_SET_COMPUTE_ATTACHMENTS = 4,
+};
+
+/**
+ * enum drm_asahi_priority - Scheduling queue priority.
+ *
+ * These priorities are forwarded to the firmware to influence firmware
+ * scheduling. The exact policy is ultimately decided by firmware, but
+ * these enums allow userspace to communicate the intentions.
+ */
+enum drm_asahi_priority {
+ /** @DRM_ASAHI_PRIORITY_LOW: Low priority queue. */
+ DRM_ASAHI_PRIORITY_LOW = 0,
+
+ /** @DRM_ASAHI_PRIORITY_MEDIUM: Medium priority queue. */
+ DRM_ASAHI_PRIORITY_MEDIUM = 1,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_HIGH: High priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_HIGH = 2,
+
+ /**
+ * @DRM_ASAHI_PRIORITY_REALTIME: Real-time priority queue.
+ *
+ * Reserved for future extension.
+ */
+ DRM_ASAHI_PRIORITY_REALTIME = 3,
+};
+
+/**
+ * struct drm_asahi_queue_create - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_CREATE
+ */
+struct drm_asahi_queue_create {
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @vm_id: The ID of the VM this queue is bound to */
+ __u32 vm_id;
+
+ /** @priority: One of drm_asahi_priority */
+ __u32 priority;
+
+ /** @queue_id: The returned queue ID */
+ __u32 queue_id;
+
+ /**
+ * @usc_exec_base: GPU base address for all USC binaries (shaders) on
+ * this queue. USC addresses are 32-bit relative to this 64-bit base.
+ *
+ * This sets the following registers on all queue commands:
+ *
+ * USC_EXEC_BASE_TA (vertex)
+ * USC_EXEC_BASE_ISP (fragment)
+ * USC_EXEC_BASE_CP (compute)
+ *
+ * While the hardware lets us configure these independently per command,
+ * we do not have a use case for this. Instead, we expect userspace to
+ * fix a 4GiB VA carveout for USC memory and pass its base address here.
+ */
+ __u64 usc_exec_base;
+};
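+
+/*
+ * Illustrative queue creation sketch (not part of the UAPI). fd, vm_id, and
+ * usc_base are assumptions, with usc_base the start of a userspace-chosen
+ * 4GiB VA carveout for USC memory:
+ *
+ *    struct drm_asahi_queue_create create = {
+ *            .vm_id = vm_id,
+ *            .priority = DRM_ASAHI_PRIORITY_MEDIUM,
+ *            .usc_exec_base = usc_base,
+ *    };
+ *
+ *    if (ioctl(fd, DRM_IOCTL_ASAHI_QUEUE_CREATE, &create) == 0)
+ *            queue_id = create.queue_id;
+ */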
+
+/**
+ * struct drm_asahi_queue_destroy - Arguments passed to
+ * DRM_IOCTL_ASAHI_QUEUE_DESTROY
+ */
+struct drm_asahi_queue_destroy {
+ /** @queue_id: The queue ID to be destroyed */
+ __u32 queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/**
+ * enum drm_asahi_sync_type - Sync item type
+ */
+enum drm_asahi_sync_type {
+ /** @DRM_ASAHI_SYNC_SYNCOBJ: Binary sync object */
+ DRM_ASAHI_SYNC_SYNCOBJ = 0,
+
+ /** @DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ: Timeline sync object */
+ DRM_ASAHI_SYNC_TIMELINE_SYNCOBJ = 1,
+};
+
+/**
+ * struct drm_asahi_sync - Sync item
+ */
+struct drm_asahi_sync {
+ /** @sync_type: One of drm_asahi_sync_type */
+ __u32 sync_type;
+
+ /** @handle: The sync object handle */
+ __u32 handle;
+
+ /** @timeline_value: Timeline value for timeline sync objects */
+ __u64 timeline_value;
+};
+
+/**
+ * define DRM_ASAHI_BARRIER_NONE - Command index for no barrier
+ *
+ * This special value may be passed in to drm_asahi_command::vdm_barrier or
+ * drm_asahi_command::cdm_barrier to indicate that the respective subqueue
+ * should not wait on any previous work.
+ */
+#define DRM_ASAHI_BARRIER_NONE (0xFFFFu)
+
+/**
+ * struct drm_asahi_cmd_header - Top level command structure
+ *
+ * This struct is core to the command buffer definition and therefore is not
+ * extensible.
+ */
+struct drm_asahi_cmd_header {
+ /** @cmd_type: One of drm_asahi_cmd_type */
+ __u16 cmd_type;
+
+ /**
+ * @size: Size of this command, not including this header.
+ *
+ * For hardware commands, this enables extensibility of commands without
+ * requiring extra command types. Passing a command that is shorter
+ * than expected is explicitly allowed for backwards-compatibility.
+ * Truncated fields will be zeroed.
+ *
+ * For the synthetic attachment setting commands, this implicitly
+ * encodes the number of attachments. These commands take multiple
+ * fixed-size @drm_asahi_attachment structures as their payload, so size
+ * equals number of attachments * sizeof(struct drm_asahi_attachment).
+ */
+ __u16 size;
+
+ /**
+ * @vdm_barrier: VDM (render) command index to wait on.
+ *
+ * Barriers are indices relative to the beginning of a given submit. A
+ * barrier of 0 waits on commands submitted to the respective subqueue
+ * in previous submit ioctls. A barrier of N waits on N previous
+ * commands on the subqueue within the current submit ioctl. As a
+ * special case, passing @DRM_ASAHI_BARRIER_NONE avoids waiting on any
+ * commands in the subqueue.
+ *
+ * Examples:
+ *
+ * 0: This waits on all previous work.
+ *
+ * NONE: This does not wait for anything on this subqueue.
+ *
+ * 1: This waits on the first render command in the submit.
+ * This is valid only if there are multiple render commands in the
+ * same submit.
+ *
+ * Barriers are valid only for hardware commands. Synthetic software
+ * commands to set attachments must pass NONE here.
+ */
+ __u16 vdm_barrier;
+
+ /**
+ * @cdm_barrier: CDM (compute) command index to wait on.
+ *
+ * See @vdm_barrier, and replace VDM/render with CDM/compute.
+ */
+ __u16 cdm_barrier;
+};
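+
+/*
+ * Illustrative sketch of barrier use (not part of the UAPI). In a submit
+ * containing two render commands and one compute command, the following
+ * header makes the compute command wait on the first render command while
+ * ignoring prior compute work:
+ *
+ *    struct drm_asahi_cmd_header compute_hdr = {
+ *            .cmd_type = DRM_ASAHI_CMD_COMPUTE,
+ *            .size = sizeof(struct drm_asahi_cmd_compute),
+ *            .vdm_barrier = 1,
+ *            .cdm_barrier = DRM_ASAHI_BARRIER_NONE,
+ *    };
+ */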
+
+/**
+ * struct drm_asahi_submit - Arguments passed to DRM_IOCTL_ASAHI_SUBMIT
+ */
+struct drm_asahi_submit {
+ /**
+ * @syncs: An optional pointer to an array of drm_asahi_sync. The first
+ * @in_sync_count elements are in-syncs, then the remaining
+ * @out_sync_count elements are out-syncs. Using a single array with
+ * explicit partitioning simplifies handling.
+ */
+ __u64 syncs;
+
+ /**
+ * @cmdbuf: Pointer to the command buffer to submit.
+ *
+ * This is a flat command buffer. By design, it contains no CPU
+ * pointers, which makes it suitable for a virtgpu wire protocol without
+ * requiring any serializing/deserializing step.
+ *
+ * It consists of a series of commands. Each command begins with a
+ * fixed-size @drm_asahi_cmd_header header and is followed by a
+ * variable-length payload according to the type and size in the header.
+ *
+ * The combined count of "real" hardware commands must be nonzero and at
+ * most drm_asahi_params_global::max_commands_per_submission.
+ */
+ __u64 cmdbuf;
+
+ /** @flags: Flags for command submission (MBZ) */
+ __u32 flags;
+
+ /** @queue_id: The queue ID to be submitted to */
+ __u32 queue_id;
+
+ /**
+ * @in_sync_count: Number of sync objects to wait on before starting
+ * this job.
+ */
+ __u32 in_sync_count;
+
+ /**
+ * @out_sync_count: Number of sync objects to signal upon completion of
+ * this job.
+ */
+ __u32 out_sync_count;
+
+ /** @cmdbuf_size: Command buffer size in bytes */
+ __u32 cmdbuf_size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
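+
+/*
+ * Illustrative submission sketch (not part of the UAPI). fd, queue_id,
+ * syncobj_handle, and the cmdbuf/cmdbuf_size construction are assumptions;
+ * a single out-sync signals completion:
+ *
+ *    struct drm_asahi_sync out_sync = {
+ *            .sync_type = DRM_ASAHI_SYNC_SYNCOBJ,
+ *            .handle = syncobj_handle,
+ *    };
+ *    struct drm_asahi_submit submit = {
+ *            .syncs = (__u64)(uintptr_t)&out_sync,
+ *            .cmdbuf = (__u64)(uintptr_t)cmdbuf,
+ *            .cmdbuf_size = cmdbuf_size,
+ *            .queue_id = queue_id,
+ *            .in_sync_count = 0,
+ *            .out_sync_count = 1,
+ *    };
+ *
+ *    ioctl(fd, DRM_IOCTL_ASAHI_SUBMIT, &submit);
+ */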
+
+/**
+ * struct drm_asahi_attachment - Describe an "attachment".
+ *
+ * Attachments are any memory written by shaders, notably including render
+ * target attachments written by the end-of-tile program. This is purely a hint
+ * about the accessed memory regions. Specifying attachments is optional, which
+ * is fortunate as they cannot be described precisely with bindless access
+ * anyway. Where possible, however, userspace should include these hints, which
+ * are forwarded to the firmware.
+ *
+ * This struct is implicitly sized and therefore is not extensible.
+ */
+struct drm_asahi_attachment {
+ /** @pointer: Base address of the attachment */
+ __u64 pointer;
+
+ /** @size: Size of the attachment in bytes */
+ __u64 size;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @flags: MBZ */
+ __u32 flags;
+};
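+
+/*
+ * Illustrative encoding of a synthetic attachment-set command (not part of
+ * the UAPI; the attachment count of two is an assumption). The header's size
+ * field implicitly encodes the number of attachments:
+ *
+ *    struct drm_asahi_cmd_header hdr = {
+ *            .cmd_type = DRM_ASAHI_SET_VERTEX_ATTACHMENTS,
+ *            .size = 2 * sizeof(struct drm_asahi_attachment),
+ *            .vdm_barrier = DRM_ASAHI_BARRIER_NONE,
+ *            .cdm_barrier = DRM_ASAHI_BARRIER_NONE,
+ *    };
+ *
+ * followed immediately in the command buffer by the two
+ * struct drm_asahi_attachment payloads.
+ */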
+
+/**
+ * enum drm_asahi_render_flags - Flags for render commands
+ */
+enum drm_asahi_render_flags {
+ /**
+ * @DRM_ASAHI_RENDER_VERTEX_SCRATCH: A vertex stage shader uses scratch
+ * memory.
+ */
+ DRM_ASAHI_RENDER_VERTEX_SCRATCH = (1U << 0),
+
+ /**
+ * @DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES: Process even empty tiles.
+ * This must be set when clearing render targets.
+ */
+ DRM_ASAHI_RENDER_PROCESS_EMPTY_TILES = (1U << 1),
+
+ /**
+ * @DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING: Run vertex stage on a single
+ * cluster (on multi-cluster GPUs)
+ *
+ * This harms performance, but it can work around certain sync/coherency
+ * bugs and is therefore useful for debugging.
+ */
+ DRM_ASAHI_RENDER_NO_VERTEX_CLUSTERING = (1U << 2),
+
+ /**
+ * @DRM_ASAHI_RENDER_DBIAS_IS_INT: Use integer depth bias formula.
+ *
+ * Graphics specifications contain two alternate formulas for depth
+ * bias, a float formula used with floating-point depth buffers and an
+ * integer formula used with unorm depth buffers. This flag specifies
+ * that the integer formula should be used. If omitted, the float
+ * formula is used instead.
+ *
+ * This corresponds to bit 18 of the relevant hardware control register,
+ * so we match that here for efficiency.
+ */
+ DRM_ASAHI_RENDER_DBIAS_IS_INT = (1U << 18),
+};
+
+/**
+ * struct drm_asahi_zls_buffer - Describe a depth or stencil buffer.
+ *
+ * These fields correspond to hardware registers in the ZLS (Z Load/Store) unit.
+ * There are three hardware registers for each field, used respectively for
+ * loads, stores, and partial renders. In practice, it makes sense to set all
+ * three to the same value, except in exceptional cases not yet implemented in
+ * userspace, so for simplicity and efficiency we do not duplicate the fields
+ * here.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_zls_buffer {
+ /** @base: Base address of the buffer */
+ __u64 base;
+
+ /**
+ * @comp_base: If the load buffer is compressed, address of the
+ * compression metadata section.
+ */
+ __u64 comp_base;
+
+ /**
+ * @stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the buffer.
+ */
+ __u32 stride;
+
+ /**
+ * @comp_stride: If layered rendering is enabled, the number of bytes
+ * between each layer of the compression metadata.
+ */
+ __u32 comp_stride;
+};
+
+/**
+ * struct drm_asahi_timestamp - Describe a timestamp write.
+ *
+ * The firmware can optionally write the GPU timestamp at render pass
+ * granularity, but the target buffer must be mapped specially via
+ * DRM_IOCTL_ASAHI_GEM_BIND_OBJECT. This structure therefore describes where to
+ * write as a handle-offset pair, rather than a GPU address like normal.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamp {
+ /**
+ * @handle: Handle of the timestamp buffer, or 0 to skip this
+ * timestamp. If nonzero, this must equal the value returned in
+ * drm_asahi_gem_bind_object::object_handle.
+ */
+ __u32 handle;
+
+ /** @offset: Offset to write into the timestamp buffer */
+ __u32 offset;
+};
+
+/**
+ * struct drm_asahi_timestamps - Describe timestamp writes.
+ *
+ * Each operation that can be timestamped can be timestamped at both its start
+ * and its end. Therefore, drm_asahi_timestamp structs always come in pairs,
+ * bundled together into drm_asahi_timestamps.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_timestamps {
+ /** @start: Timestamp recorded at the start of the operation */
+ struct drm_asahi_timestamp start;
+
+ /** @end: Timestamp recorded at the end of the operation */
+ struct drm_asahi_timestamp end;
+};
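+
+/*
+ * Illustrative sketch (not part of the UAPI). ts_handle is assumed to be the
+ * object handle returned by DRM_IOCTL_ASAHI_GEM_BIND_OBJECT, and the offsets
+ * (two consecutive 64-bit slots) are assumptions:
+ *
+ *    struct drm_asahi_timestamps ts = {
+ *            .start = { .handle = ts_handle, .offset = 0 },
+ *            .end   = { .handle = ts_handle, .offset = 8 },
+ *    };
+ */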
+
+/**
+ * struct drm_asahi_helper_program - Describe helper program configuration.
+ *
+ * The helper program is a compute-like kernel required for various hardware
+ * functionality. Its most important role is dynamically allocating
+ * scratch/stack memory for individual subgroups, by partitioning a static
+ * allocation shared for the whole device. It is supplied by userspace via
+ * drm_asahi_helper_program and internally dispatched by the hardware as needed.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_helper_program {
+ /**
+ * @binary: USC address to the helper program binary. This is a tagged
+ * pointer with configuration in the bottom bits.
+ */
+ __u32 binary;
+
+ /** @cfg: Additional configuration bits for the helper program. */
+ __u32 cfg;
+
+ /**
+ * @data: Data passed to the helper program. This value is not
+ * interpreted by the kernel, firmware, or hardware in any way. It is
+ * simply a sideband for userspace, set with the submit ioctl and read
+ * via special registers inside the helper program.
+ *
+ * In practice, userspace will pass a 64-bit GPU VA here pointing to the
+ * actual arguments, which presumably do not fit in 64 bits.
+ */
+ __u64 data;
+};
+
+/**
+ * struct drm_asahi_bg_eot - Describe a background or end-of-tile program.
+ *
+ * The background and end-of-tile programs are dispatched by the hardware at the
+ * beginning and end of rendering. As the hardware "tilebuffer" is simply local
+ * memory, these programs are necessary to implement API-level render targets.
+ * The fragment-like background program is responsible for loading either the
+ * clear colour or the existing render target contents, while the compute-like
+ * end-of-tile program stores the tilebuffer contents to memory.
+ *
+ * This struct is embedded in other structs and therefore is not extensible.
+ */
+struct drm_asahi_bg_eot {
+ /**
+ * @usc: USC address of the hardware USC words that bind resources
+ * (including images and uniforms) and the program itself. Note this is
+ * an additional layer of indirection compared to the helper program,
+ * avoiding the need for a sideband for data. This is a tagged pointer
+ * with additional configuration in the bottom bits.
+ */
+ __u32 usc;
+
+ /**
+ * @rsrc_spec: Resource specifier for the program. This is a packed
+ * hardware data structure describing the required number of registers,
+ * uniforms, bound textures, and bound samplers.
+ */
+ __u32 rsrc_spec;
+};
+
+/**
+ * struct drm_asahi_cmd_render - Command to submit 3D
+ *
+ * This command submits a single render pass. The hardware control stream may
+ * include many draws and subpasses, but within the command, the framebuffer
+ * dimensions and attachments are fixed.
+ *
+ * The hardware requires the firmware to set a large number of control
+ * registers establishing state at render pass granularity before each 3D
+ * rendering command. The firmware bundles this state into data structures.
+ * Unfortunately, we cannot expose any of that directly to userspace, because
+ * the kernel-firmware ABI is not stable. Although we can guarantee that the
+ * firmware updates in tandem with the kernel, we cannot break old userspace
+ * when upgrading the firmware and kernel. Therefore, we need to abstract the
+ * data structures well, to avoid tying our hands with future firmware.
+ *
+ * The bulk of drm_asahi_cmd_render therefore consists of values of hardware
+ * control registers, marshalled via the firmware interface.
+ *
+ * The framebuffer/tilebuffer dimensions are also specified here. In addition to
+ * being passed to the firmware/hardware, the kernel requires these dimensions
+ * to calculate various essential tiling-related data structures. It is
+ * unfortunate that our submits are heavier than on vendors with saner
+ * hardware-software interfaces. The upside is that all of this information is
+ * readily available to userspace with all current APIs.
+ *
+ * It looks odd, but it is not overly burdensome, and it ensures we can remain
+ * compatible with old userspace.
+ */
+struct drm_asahi_cmd_render {
+ /** @flags: Combination of drm_asahi_render_flags flags. */
+ __u32 flags;
+
+ /**
+ * @isp_zls_pixels: ISP_ZLS_PIXELS register value. This contains the
+ * depth/stencil width/height, which may differ from the framebuffer
+ * width/height.
+ */
+ __u32 isp_zls_pixels;
+
+ /**
+ * @vdm_ctrl_stream_base: VDM_CTRL_STREAM_BASE register value. GPU
+ * address to the beginning of the VDM control stream.
+ */
+ __u64 vdm_ctrl_stream_base;
+
+ /** @vertex_helper: Helper program used for the vertex shader */
+ struct drm_asahi_helper_program vertex_helper;
+
+ /** @fragment_helper: Helper program used for the fragment shader */
+ struct drm_asahi_helper_program fragment_helper;
+
+ /**
+ * @isp_scissor_base: ISP_SCISSOR_BASE register value. GPU address of an
+ * array of scissor descriptors indexed in the render pass.
+ */
+ __u64 isp_scissor_base;
+
+ /**
+ * @isp_dbias_base: ISP_DBIAS_BASE register value. GPU address of an
+ * array of depth bias values indexed in the render pass.
+ */
+ __u64 isp_dbias_base;
+
+ /**
+ * @isp_oclqry_base: ISP_OCLQRY_BASE register value. GPU address of an
+ * array of occlusion query results written by the render pass.
+ */
+ __u64 isp_oclqry_base;
+
+ /** @depth: Depth buffer */
+ struct drm_asahi_zls_buffer depth;
+
+ /** @stencil: Stencil buffer */
+ struct drm_asahi_zls_buffer stencil;
+
+ /** @zls_ctrl: ZLS_CTRL register value */
+ __u64 zls_ctrl;
+
+ /** @ppp_multisamplectl: PPP_MULTISAMPLECTL register value */
+ __u64 ppp_multisamplectl;
+
+ /**
+ * @sampler_heap: Base address of the sampler heap. This heap is used
+ * for both vertex shaders and fragment shaders. The registers are
+ * per-stage, but there is no known use case for separate heaps.
+ */
+ __u64 sampler_heap;
+
+ /** @ppp_ctrl: PPP_CTRL register value */
+ __u32 ppp_ctrl;
+
+ /** @width_px: Framebuffer width in pixels */
+ __u16 width_px;
+
+ /** @height_px: Framebuffer height in pixels */
+ __u16 height_px;
+
+ /** @layers: Number of layers in the framebuffer */
+ __u16 layers;
+
+ /** @sampler_count: Number of samplers in the sampler heap. */
+ __u16 sampler_count;
+
+ /** @utile_width_px: Width of a logical tilebuffer tile in pixels */
+ __u8 utile_width_px;
+
+ /** @utile_height_px: Height of a logical tilebuffer tile in pixels */
+ __u8 utile_height_px;
+
+ /** @samples: # of samples in the framebuffer. Must be 1, 2, or 4. */
+ __u8 samples;
+
+ /** @sample_size_B: # of bytes in the tilebuffer required per sample. */
+ __u8 sample_size_B;
+
+ /**
+ * @isp_merge_upper_x: 32-bit float, passed as its raw bits, used in the
+ * hardware triangle merging. Calculate as: tan(60 deg) * width. See the
+ * sketch following this struct.
+ *
+ * Making these values UAPI avoids requiring floating-point calculations
+ * in the kernel in the hot path.
+ */
+ __u32 isp_merge_upper_x;
+
+ /**
+ * @isp_merge_upper_y: 32-bit float. Calculate as: tan(60 deg) * height.
+ * See @isp_merge_upper_x.
+ */
+ __u32 isp_merge_upper_y;
+
+ /** @bg: Background program run for each tile at the start */
+ struct drm_asahi_bg_eot bg;
+
+ /** @eot: End-of-tile program run for each tile at the end */
+ struct drm_asahi_bg_eot eot;
+
+ /**
+ * @partial_bg: Background program run at the start of each tile when
+ * resuming the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_bg;
+
+ /**
+ * @partial_eot: End-of-tile program run at the end of each tile when
+ * pausing the render pass during a partial render.
+ */
+ struct drm_asahi_bg_eot partial_eot;
+
+ /**
+ * @isp_bgobjdepth: ISP_BGOBJDEPTH register value. This is the depth
+ * buffer clear value, encoded in the depth buffer's format: either a
+ * 32-bit float or a 16-bit unorm (with upper bits zeroed).
+ */
+ __u32 isp_bgobjdepth;
+
+ /**
+ * @isp_bgobjvals: ISP_BGOBJVALS register value. The bottom 8 bits
+ * contain the stencil buffer clear value.
+ */
+ __u32 isp_bgobjvals;
+
+ /** @ts_vtx: Timestamps for the vertex portion of the render */
+ struct drm_asahi_timestamps ts_vtx;
+
+ /** @ts_frag: Timestamps for the fragment portion of the render */
+ struct drm_asahi_timestamps ts_frag;
+};
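+
+/*
+ * Illustrative userspace computation of the triangle-merge values (not part
+ * of the UAPI). f32_bits is an assumed helper returning the raw bits of a
+ * float, and width_px/height_px are the framebuffer dimensions; tan(60 deg)
+ * is tan(pi/3):
+ *
+ *    #include <math.h>
+ *    #include <string.h>
+ *
+ *    static __u32 f32_bits(float f)
+ *    {
+ *            __u32 bits;
+ *
+ *            memcpy(&bits, &f, sizeof(bits));
+ *            return bits;
+ *    }
+ *
+ *    cmd.isp_merge_upper_x = f32_bits(tanf(M_PI / 3) * width_px);
+ *    cmd.isp_merge_upper_y = f32_bits(tanf(M_PI / 3) * height_px);
+ */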
+
+/**
+ * struct drm_asahi_cmd_compute - Command to submit compute
+ *
+ * This command submits a control stream consisting of compute dispatches. There
+ * is essentially no limit on how many compute dispatches may be included in a
+ * single compute command, although timestamps are at command granularity.